From 287bf610b41fa8b86e0db1904a103db7ec3ec37f Mon Sep 17 00:00:00 2001
From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com>
Date: Wed, 21 Jul 2021 11:18:14 -0700
Subject: [PATCH] Release v1.40.5 (2021-07-21) (#4017)

Release v1.40.5 (2021-07-21)
===

### Service Client Updates
* `service/codebuild`: Updates service API and documentation
  * AWS CodeBuild now allows you to set the access permissions for build artifacts, project artifacts, and log files that are uploaded to an Amazon S3 bucket that is owned by another account.
* `service/elasticloadbalancingv2`: Updates service documentation
* `service/elasticmapreduce`: Updates service API, documentation, and paginators
  * EMR now supports the new DescribeReleaseLabel and ListReleaseLabels APIs, which provide Amazon EMR release label details. You can programmatically list the available releases, and the applications available for a specific Amazon EMR release label.
* `service/iam`: Updates service documentation
  * Documentation updates for AWS Identity and Access Management (IAM).
* `service/kendra`: Updates service API and documentation
  * Amazon Kendra now provides a data source connector for Amazon WorkDocs. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-workdocs.html
* `service/lambda`: Updates service API and documentation
  * Adds a new ResourceConflictException error code for the PutFunctionEventInvokeConfig, UpdateFunctionEventInvokeConfig, and DeleteFunctionEventInvokeConfig operations.
* `service/personalize`: Updates service API and documentation
* `service/proton`: Updates service documentation
* `service/rds`: Updates service API, documentation, waiters, paginators, and examples
  * Adds the OriginalSnapshotCreateTime field to the DBSnapshot response object. This field timestamps the underlying data of a snapshot and doesn't change when the snapshot is copied.
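Two of these client updates are easiest to see in code. Both snippets below are illustrative sketches against v1.40.5, not part of this patch; the region, release label, and function name in them are assumptions chosen for the example.

First, the new EMR release-label API, assuming default credential resolution:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	// Credentials resolve through the default chain (environment,
	// shared config, or instance role).
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	client := emr.New(sess)

	// DescribeReleaseLabel is new in v1.40.5; "emr-6.3.0" is only an
	// illustrative release label.
	out, err := client.DescribeReleaseLabel(&emr.DescribeReleaseLabelInput{
		ReleaseLabel: aws.String("emr-6.3.0"),
	})
	if err != nil {
		log.Fatalf("DescribeReleaseLabel: %v", err)
	}
	for _, app := range out.Applications {
		fmt.Printf("%s %s\n", aws.StringValue(app.Name), aws.StringValue(app.Version))
	}
}
```

Second, the Lambda change only registers a new error code, so existing callers keep compiling; code that wants to react to the new ResourceConflictException can branch on the generated constant. A sketch, with a hypothetical function name:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	sess := session.Must(session.NewSession())
	client := lambda.New(sess)

	_, err := client.PutFunctionEventInvokeConfig(&lambda.PutFunctionEventInvokeConfigInput{
		FunctionName:         aws.String("my-function"), // hypothetical name
		MaximumRetryAttempts: aws.Int64(1),
	})
	// As of v1.40.5 this operation can also return ResourceConflictException.
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == lambda.ErrCodeResourceConflictException {
		log.Println("conflicting update in progress; back off and retry")
	}
}
```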
---
 CHANGELOG.md                                  |  20 +
 aws/version.go                                |   2 +-
 models/apis/codebuild/2016-10-06/api-2.json   |  17 +-
 models/apis/codebuild/2016-10-06/docs-2.json  | 278 +++---
 .../2015-12-01/docs-2.json                    |   4 +-
 .../elasticmapreduce/2009-03-31/api-2.json    |  80 ++
 .../elasticmapreduce/2009-03-31/docs-2.json   | 166 ++--
 .../2009-03-31/paginators-1.json              |   5 +
 models/apis/iam/2010-05-08/docs-2.json        |   4 +-
 models/apis/kendra/2019-02-03/api-2.json      |  24 +-
 models/apis/kendra/2019-02-03/docs-2.json     |  37 +-
 models/apis/lambda/2015-03-31/api-2.json      |   9 +-
 models/apis/lambda/2015-03-31/docs-2.json     |  34 +-
 models/apis/personalize/2018-05-22/api-2.json |   3 +-
 .../apis/personalize/2018-05-22/docs-2.json   |  24 +-
 models/apis/proton/2020-07-20/docs-2.json     |  78 +-
 models/apis/rds/2014-10-31/api-2.json         |   3 +-
 models/apis/rds/2014-10-31/docs-2.json        |   9 +-
 service/codebuild/api.go                      | 827 ++++++++++--------
 service/codebuild/doc.go                      |  19 +-
 service/codebuild/errors.go                   |   9 +-
 service/elbv2/api.go                          |  18 +-
 service/emr/api.go                            | 804 ++++++++++++++---
 service/emr/doc.go                            |   5 +-
 service/emr/emriface/interface.go             |  11 +
 service/iam/api.go                            |  18 +-
 service/kendra/api.go                         | 195 ++++-
 service/lambda/api.go                         |  80 +-
 service/personalize/api.go                    |  59 +-
 service/proton/api.go                         | 118 +--
 service/proton/doc.go                         |  57 +-
 service/proton/errors.go                      |   2 +-
 service/rds/api.go                            |  32 +-
 33 files changed, 2111 insertions(+), 940 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c1468139a2..ecf3441ffb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,23 @@
+Release v1.40.5 (2021-07-21)
+===
+
+### Service Client Updates
+* `service/codebuild`: Updates service API and documentation
+  * AWS CodeBuild now allows you to set the access permissions for build artifacts, project artifacts, and log files that are uploaded to an Amazon S3 bucket that is owned by another account.
+* `service/elasticloadbalancingv2`: Updates service documentation
+* `service/elasticmapreduce`: Updates service API, documentation, and paginators
+  * EMR now supports the new DescribeReleaseLabel and ListReleaseLabels APIs, which provide Amazon EMR release label details. You can programmatically list the available releases, and the applications available for a specific Amazon EMR release label.
+* `service/iam`: Updates service documentation
+  * Documentation updates for AWS Identity and Access Management (IAM).
+* `service/kendra`: Updates service API and documentation
+  * Amazon Kendra now provides a data source connector for Amazon WorkDocs. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-workdocs.html
+* `service/lambda`: Updates service API and documentation
+  * Adds a new ResourceConflictException error code for the PutFunctionEventInvokeConfig, UpdateFunctionEventInvokeConfig, and DeleteFunctionEventInvokeConfig operations.
+* `service/personalize`: Updates service API and documentation
+* `service/proton`: Updates service documentation
+* `service/rds`: Updates service API, documentation, waiters, paginators, and examples
+  * Adds the OriginalSnapshotCreateTime field to the DBSnapshot response object. This field timestamps the underlying data of a snapshot and doesn't change when the snapshot is copied.
+
 Release v1.40.4 (2021-07-20)
 ===

diff --git a/aws/version.go b/aws/version.go
index 36faa11913..9a521b2a30 100644
--- a/aws/version.go
+++ b/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"

 // SDKVersion is the version of this SDK
-const SDKVersion = "1.40.4"
+const SDKVersion = "1.40.5"
diff --git a/models/apis/codebuild/2016-10-06/api-2.json b/models/apis/codebuild/2016-10-06/api-2.json
index 2d6916d457..343ea5861e 100644
--- a/models/apis/codebuild/2016-10-06/api-2.json
+++ b/models/apis/codebuild/2016-10-06/api-2.json
@@ -699,6 +699,14 @@
       }
     },
     "Boolean":{"type":"boolean"},
+    "BucketOwnerAccess":{
+      "type":"string",
+      "enum":[
+        "NONE",
+        "READ_ONLY",
+        "FULL"
+      ]
+    },
     "Build":{
       "type":"structure",
       "members":{
@@ -744,7 +752,8 @@
         "md5sum":{"shape":"String"},
         "overrideArtifactName":{"shape":"WrapperBoolean"},
         "encryptionDisabled":{"shape":"WrapperBoolean"},
-        "artifactIdentifier":{"shape":"String"}
+        "artifactIdentifier":{"shape":"String"},
+        "bucketOwnerAccess":{"shape":"BucketOwnerAccess"}
       }
     },
     "BuildArtifactsList":{
@@ -1721,7 +1730,8 @@
         "packaging":{"shape":"ArtifactPackaging"},
         "overrideArtifactName":{"shape":"WrapperBoolean"},
         "encryptionDisabled":{"shape":"WrapperBoolean"},
-        "artifactIdentifier":{"shape":"String"}
+        "artifactIdentifier":{"shape":"String"},
+        "bucketOwnerAccess":{"shape":"BucketOwnerAccess"}
       }
     },
     "ProjectArtifactsList":{
@@ -2114,7 +2124,8 @@
       "members":{
         "status":{"shape":"LogsConfigStatusType"},
         "location":{"shape":"String"},
-        "encryptionDisabled":{"shape":"WrapperBoolean"}
+        "encryptionDisabled":{"shape":"WrapperBoolean"},
+        "bucketOwnerAccess":{"shape":"BucketOwnerAccess"}
       }
     },
     "S3ReportExportConfig":{
diff --git a/models/apis/codebuild/2016-10-06/docs-2.json b/models/apis/codebuild/2016-10-06/docs-2.json
index ca98b58111..7d467d4dce 100644
--- a/models/apis/codebuild/2016-10-06/docs-2.json
+++ b/models/apis/codebuild/2016-10-06/docs-2.json
@@ -1,6 +1,6 @@
 {
   "version": "2.0",
-  "service": "CodeBuild is a fully managed build service in the cloud. CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in CodeBuild to use your own build tools. CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about CodeBuild, see the CodeBuild User Guide.",
+  "service": "AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide.
", "operations": { "BatchDeleteBuilds": "Deletes one or more builds.
", "BatchGetBuildBatches": "Retrieves information about one or more batch builds.
", @@ -10,31 +10,31 @@ "BatchGetReports": "Returns an array of reports.
", "CreateProject": "Creates a build project.
", "CreateReportGroup": "Creates a report group. A report group contains a collection of reports.
", - "CreateWebhook": "For an existing CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.
If you enable webhooks for an CodeBuild project, and the project is used as a build step in CodePipeline, then two identical builds are created for each commit. One build is triggered through webhooks, and one through CodePipeline. Because billing is on a per-build basis, you are billed for both builds. Therefore, if you are using CodePipeline, we recommend that you disable webhooks in CodeBuild. In the CodeBuild console, clear the Webhook box. For more information, see step 5 in Change a Build Project's Settings.
", + "CreateWebhook": "For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.
If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds are created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you are billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in AWS CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 5 in Change a Build Project's Settings.
", "DeleteBuildBatch": "Deletes a batch build.
", "DeleteProject": "Deletes a build project. When you delete a project, its builds are not deleted.
", "DeleteReport": "Deletes a report.
", "DeleteReportGroup": "Deletes a report group. Before you delete a report group, you must delete its reports.
", "DeleteResourcePolicy": "Deletes a resource policy that is identified by its resource ARN.
", "DeleteSourceCredentials": "Deletes a set of GitHub, GitHub Enterprise, or Bitbucket source credentials.
", - "DeleteWebhook": "For an existing CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops CodeBuild from rebuilding the source code every time a code change is pushed to the repository.
", + "DeleteWebhook": "For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding the source code every time a code change is pushed to the repository.
", "DescribeCodeCoverages": "Retrieves one or more code coverage reports.
", "DescribeTestCases": "Returns a list of details about test cases for a report.
", "GetReportGroupTrend": "Analyzes and accumulates test report values for the specified test reports.
", "GetResourcePolicy": "Gets a resource policy that is identified by its resource ARN.
", - "ImportSourceCredentials": "Imports the source repository credentials for an CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.
", + "ImportSourceCredentials": "Imports the source repository credentials for an AWS CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.
", "InvalidateProjectCache": "Resets the cache for a project.
", "ListBuildBatches": "Retrieves the identifiers of your build batches in the current region.
", "ListBuildBatchesForProject": "Retrieves the identifiers of the build batches for a specific project.
", "ListBuilds": "Gets a list of build IDs, with each build ID representing a single build.
", "ListBuildsForProject": "Gets a list of build identifiers for the specified build project, with each build identifier representing a single build.
", - "ListCuratedEnvironmentImages": "Gets information about Docker images that are managed by CodeBuild.
", + "ListCuratedEnvironmentImages": "Gets information about Docker images that are managed by AWS CodeBuild.
", "ListProjects": "Gets a list of build project names, with each build project name representing a single build project.
", - "ListReportGroups": "Gets a list ARNs for the report groups in the current Amazon Web Services account.
", - "ListReports": "Returns a list of ARNs for the reports in the current Amazon Web Services account.
", + "ListReportGroups": "Gets a list ARNs for the report groups in the current AWS account.
", + "ListReports": "Returns a list of ARNs for the reports in the current AWS account.
", "ListReportsForReportGroup": " Returns a list of ARNs for the reports that belong to a ReportGroup
.
", - "ListSharedProjects": "Gets a list of projects that are shared with other Amazon Web Services accounts or users.
", - "ListSharedReportGroups": "Gets a list of report groups that are shared with other Amazon Web Services accounts or users.
", + "ListSharedProjects": "Gets a list of projects that are shared with other AWS accounts or users.
", + "ListSharedReportGroups": "Gets a list of report groups that are shared with other AWS accounts or users.
", "ListSourceCredentials": " Returns a list of SourceCredentialsInfo
objects.
Stores a resource policy for the ARN of a Project
or ReportGroup
object.
Restarts a build.
", @@ -45,37 +45,37 @@ "StopBuildBatch": "Stops a running batch build.
", "UpdateProject": "Changes the settings of a build project.
", "UpdateReportGroup": "Updates a report group.
", - "UpdateWebhook": "Updates the webhook associated with an CodeBuild build project.
If you use Bitbucket for your repository, rotateSecret
is ignored.
", + "UpdateWebhook": "Updates the webhook associated with an AWS CodeBuild build project.
If you use Bitbucket for your repository, rotateSecret
is ignored.
", "AccountLimitExceededException": { - "base": " An Amazon Web Services service limit was exceeded for the calling Amazon Web Services account.
", + "base": "An AWS service limit was exceeded for the calling AWS account.
", "refs": { } }, "ArtifactNamespace": { "base": null, "refs": { - "ProjectArtifacts$namespaceType": "Along with path
and name
, the pattern that CodeBuild uses to determine the name and location to store the output artifact:
If type
is set to CODEPIPELINE
, CodePipeline ignores this value if specified. This is because CodePipeline manages its build output names instead of CodeBuild.
If type
is set to NO_ARTIFACTS
, this value is ignored if specified, because no build output is produced.
If type
is set to S3
, valid values include:
BUILD_ID
: Include the build ID in the location of the build output artifact.
NONE
: Do not include the build ID. This is the default if namespaceType
is not specified.
For example, if path
is set to MyArtifacts
, namespaceType
is set to BUILD_ID
, and name
is set to MyArtifact.zip
, the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip
.
", + "ProjectArtifacts$namespaceType": "Along with path
and name
, the pattern that AWS CodeBuild uses to determine the name and location to store the output artifact:
If type
is set to CODEPIPELINE
, AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.
If type
is set to NO_ARTIFACTS
, this value is ignored if specified, because no build output is produced.
If type
is set to S3
, valid values include:
BUILD_ID
: Include the build ID in the location of the build output artifact.
NONE
: Do not include the build ID. This is the default if namespaceType
is not specified.
For example, if path
is set to MyArtifacts
, namespaceType
is set to BUILD_ID
, and name
is set to MyArtifact.zip
, the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip
.
" } }, "ArtifactPackaging": { "base": null, "refs": { - "ProjectArtifacts$packaging": "The type of build output artifact to create:
If type
is set to CODEPIPELINE
, CodePipeline ignores this value if specified. This is because CodePipeline manages its build output artifacts instead of CodeBuild.
If type
is set to NO_ARTIFACTS
, this value is ignored if specified, because no build output is produced.
If type
is set to S3
, valid values include:
NONE
: CodeBuild creates in the output bucket a folder that contains the build output. This is the default if packaging
is not specified.
ZIP
: CodeBuild creates in the output bucket a ZIP file that contains the build output.
", + "ProjectArtifacts$packaging": "The type of build output artifact to create:
If type
is set to CODEPIPELINE
, AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output artifacts instead of AWS CodeBuild.
If type
is set to NO_ARTIFACTS
, this value is ignored if specified, because no build output is produced.
If type
is set to S3
, valid values include:
NONE
: AWS CodeBuild creates in the output bucket a folder that contains the build output. This is the default if packaging
is not specified.
ZIP
: AWS CodeBuild creates in the output bucket a ZIP file that contains the build output.
" } }, "ArtifactsType": { "base": null, "refs": { - "ProjectArtifacts$type": "The type of build output artifact. Valid values include:
CODEPIPELINE
: The build project has build output generated through CodePipeline.
The CODEPIPELINE
type is not supported for secondaryArtifacts
.
NO_ARTIFACTS
: The build project does not produce any build output.
S3
: The build project stores build output in Amazon S3.
", + "ProjectArtifacts$type": "The type of build output artifact. Valid values include:
CODEPIPELINE
: The build project has build output generated through AWS CodePipeline.
The CODEPIPELINE
type is not supported for secondaryArtifacts
.
NO_ARTIFACTS
: The build project does not produce any build output.
S3
: The build project stores build output in Amazon S3.
", "ResolvedArtifact$type": "Specifies the type of artifact.
" } }, "AuthType": { "base": null, "refs": { - "ImportSourceCredentialsInput$authType": "The type of authentication used to connect to a GitHub, GitHub Enterprise, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the CodeBuild console.
", + "ImportSourceCredentialsInput$authType": "The type of authentication used to connect to a GitHub, GitHub Enterprise, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the AWS CodeBuild console.
", "SourceCredentialsInfo$authType": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, or PERSONAL_ACCESS_TOKEN.
" } }, @@ -156,6 +156,14 @@ "UpdateWebhookInput$rotateSecret": " A boolean value that specifies whether the associated GitHub repository's secret token should be updated. If you use Bitbucket for your repository, rotateSecret
is ignored.
" } }, + "BucketOwnerAccess": { "base": "Specifies the access for objects that are uploaded to an Amazon S3 bucket that is owned by another account.
By default, only the account that uploads the objects to the bucket has access to these objects. This property allows you to give the bucket owner access to these objects.
NONE
: The bucket owner does not have access to the objects. This is the default.
READ_ONLY
: The bucket owner has read only access to the objects. The uploading account retains ownership of the objects.
FULL
: The bucket owner has full access to the objects. Object ownership is determined by the following criteria:
If the bucket is configured with the Bucket owner preferred setting, the bucket owner owns the objects. The uploading account will have object access as specified by the bucket's policy.
Otherwise, the uploading account retains ownership of the objects.
For more information about Amazon S3 object ownership, see Controlling ownership of uploaded objects using S3 Object Ownership in the Amazon Simple Storage Service User Guide.
", "Build": { "base": "Information about a build.
", "refs": { @@ -284,7 +292,7 @@ } }, "BuildStatusConfig": { - "base": "Contains information that defines how the CodeBuild build project reports the build status to the source provider.
", + "base": "Contains information that defines how the AWS CodeBuild build project reports the build status to the source provider.
", "refs": { "ProjectSource$buildStatusConfig": "Contains information that defines how the build project reports the build status to the source provider. This option is only used when the source provider is GITHUB
, GITHUB_ENTERPRISE
, or BITBUCKET
.
Contains information that defines how the build project reports the build status to the source provider. This option is only used when the source provider is GITHUB
, GITHUB_ENTERPRISE
, or BITBUCKET
.
Information about CloudWatch Logs for a build project.
", + "base": "Information about Amazon CloudWatch Logs for a build project.
", "refs": { - "LogsConfig$cloudWatchLogs": "Information about CloudWatch Logs for a build project. CloudWatch Logs are enabled by default.
", - "LogsLocation$cloudWatchLogs": "Information about CloudWatch Logs for a build project.
" + "LogsConfig$cloudWatchLogs": "Information about Amazon CloudWatch Logs for a build project. Amazon CloudWatch Logs are enabled by default.
", + "LogsLocation$cloudWatchLogs": "Information about Amazon CloudWatch Logs for a build project.
" } }, "CodeCoverage": { @@ -356,7 +364,7 @@ "ComputeType": { "base": null, "refs": { - "ProjectEnvironment$computeType": "Information about the compute resources the build project uses. Available values include:
BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.
For more information, see Build Environment Compute Types in the CodeBuild User Guide.
", + "ProjectEnvironment$computeType": "Information about the compute resources the build project uses. Available values include:
BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.
For more information, see Build Environment Compute Types in the AWS CodeBuild User Guide.
", "StartBuildBatchInput$computeTypeOverride": "The name of a compute type for this batch build that overrides the one specified in the batch build project.
", "StartBuildInput$computeTypeOverride": "The name of a compute type for this build that overrides the one specified in the build project.
" } @@ -364,7 +372,7 @@ "ComputeTypesAllowed": { "base": null, "refs": { - "BatchRestrictions$computeTypesAllowed": "An array of strings that specify the compute types that are allowed for the batch build. See Build environment compute types in the CodeBuild User Guide for these values.
" + "BatchRestrictions$computeTypesAllowed": "An array of strings that specify the compute types that are allowed for the batch build. See Build environment compute types in the AWS CodeBuild User Guide for these values.
" } }, "CreateProjectInput": { @@ -400,7 +408,7 @@ "CredentialProviderType": { "base": null, "refs": { - "RegistryCredential$credentialProvider": "The service that created the credentials to access a private Docker registry. The valid value, SECRETS_MANAGER, is for Secrets Manager.
" + "RegistryCredential$credentialProvider": "The service that created the credentials to access a private Docker registry. The valid value, SECRETS_MANAGER, is for AWS Secrets Manager.
" } }, "DebugSession": { @@ -500,7 +508,7 @@ } }, "EnvironmentImage": { - "base": "Information about a Docker image that is managed by CodeBuild.
", + "base": "Information about a Docker image that is managed by AWS CodeBuild.
", "refs": { "EnvironmentImages$member": null } @@ -512,7 +520,7 @@ } }, "EnvironmentLanguage": { - "base": "A set of Docker images that are related by programming language and are managed by CodeBuild.
", + "base": "A set of Docker images that are related by programming language and are managed by AWS CodeBuild.
", "refs": { "EnvironmentLanguages$member": null } @@ -524,7 +532,7 @@ } }, "EnvironmentPlatform": { - "base": "A set of Docker images that are related by platform and are managed by CodeBuild.
", + "base": "A set of Docker images that are related by platform and are managed by AWS CodeBuild.
", "refs": { "EnvironmentPlatforms$member": null } @@ -532,13 +540,13 @@ "EnvironmentPlatforms": { "base": null, "refs": { - "ListCuratedEnvironmentImagesOutput$platforms": "Information about supported platforms for Docker images that are managed by CodeBuild.
" + "ListCuratedEnvironmentImagesOutput$platforms": "Information about supported platforms for Docker images that are managed by AWS CodeBuild.
" } }, "EnvironmentType": { "base": null, "refs": { - "ProjectEnvironment$type": "The type of build environment to use for related builds.
The environment type ARM_CONTAINER
is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt).
The environment type LINUX_CONTAINER
with compute type build.general1.2xlarge
is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).
The environment type LINUX_GPU_CONTAINER
is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) , China (Beijing), and China (Ningxia).
The environment types WINDOWS_CONTAINER
and WINDOWS_SERVER_2019_CONTAINER
are available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), and EU (Ireland).
For more information, see Build environment compute types in the CodeBuild user guide.
", + "ProjectEnvironment$type": "The type of build environment to use for related builds.
The environment type ARM_CONTAINER
is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt).
The environment type LINUX_CONTAINER
with compute type build.general1.2xlarge
is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).
The environment type LINUX_GPU_CONTAINER
is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) , China (Beijing), and China (Ningxia).
The environment types WINDOWS_CONTAINER
and WINDOWS_SERVER_2019_CONTAINER
are available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), and EU (Ireland).
For more information, see Build environment compute types in the AWS CodeBuild user guide.
", "StartBuildBatchInput$environmentTypeOverride": "A container type for this batch build that overrides the one specified in the batch build project.
", "StartBuildInput$environmentTypeOverride": "A container type for this build that overrides the one specified in the build project.
" } @@ -552,7 +560,7 @@ "EnvironmentVariableType": { "base": null, "refs": { - "EnvironmentVariable$type": "The type of environment variable. Valid values include:
PARAMETER_STORE
: An environment variable stored in Systems Manager Parameter Store. To learn how to specify a parameter store environment variable, see env/parameter-store in the CodeBuild User Guide.
PLAINTEXT
: An environment variable in plain text format. This is the default value.
SECRETS_MANAGER
: An environment variable stored in Secrets Manager. To learn how to specify a secrets manager environment variable, see env/secrets-manager in the CodeBuild User Guide.
", + "EnvironmentVariable$type": "The type of environment variable. Valid values include:
PARAMETER_STORE
: An environment variable stored in Amazon EC2 Systems Manager Parameter Store. To learn how to specify a parameter store environment variable, see env/parameter-store in the AWS CodeBuild User Guide.
PLAINTEXT
: An environment variable in plain text format. This is the default value.
SECRETS_MANAGER
: An environment variable stored in AWS Secrets Manager. To learn how to specify a secrets manager environment variable, see env/secrets-manager in the AWS CodeBuild User Guide.
Contains information about an exported environment variable.
Exported environment variables are used in conjunction with CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the CodePipeline User Guide.
During a build, the value of a variable is available starting with the install
phase. It can be updated between the start of the install
phase and the end of the post_build
phase. After the post_build
phase ends, the value of exported variables cannot change.
Contains information about an exported environment variable.
Exported environment variables are used in conjunction with AWS CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the AWS CodePipeline User Guide.
During a build, the value of a variable is available starting with the install
phase. It can be updated between the start of the install
phase and the end of the post_build
phase. After the post_build
phase ends, the value of exported variables cannot change.
A list of exported environment variables for this build.
Exported environment variables are used in conjunction with CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the CodePipeline User Guide.
" + "Build$exportedEnvironmentVariables": "A list of exported environment variables for this build.
Exported environment variables are used in conjunction with AWS CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the AWS CodePipeline User Guide.
" } }, "FileSystemType": { @@ -624,11 +632,11 @@ } }, "GitSubmodulesConfig": { - "base": "Information about the Git submodules configuration for an CodeBuild build project.
", + "base": "Information about the Git submodules configuration for an AWS CodeBuild build project.
", "refs": { "ProjectSource$gitSubmodulesConfig": "Information about the Git submodules configuration for the build project.
", "StartBuildBatchInput$gitSubmodulesConfigOverride": "A GitSubmodulesConfig
object that overrides the Git submodules configuration for this batch build.
Information about the Git submodules configuration for this build of an CodeBuild build project.
" + "StartBuildInput$gitSubmodulesConfigOverride": "Information about the Git submodules configuration for this build of an AWS CodeBuild build project.
" } }, "Identifiers": { @@ -640,9 +648,9 @@ "ImagePullCredentialsType": { "base": null, "refs": { - "ProjectEnvironment$imagePullCredentialsType": "The type of credentials CodeBuild uses to pull images in your build. There are two valid values:
CODEBUILD
specifies that CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust CodeBuild service principal.
SERVICE_ROLE
specifies that CodeBuild uses your build project's service role.
When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an CodeBuild curated image, you must use CODEBUILD credentials.
", - "StartBuildBatchInput$imagePullCredentialsTypeOverride": "The type of credentials CodeBuild uses to pull images in your batch build. There are two valid values:
Specifies that CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust CodeBuild's service principal.
Specifies that CodeBuild uses your build project's service role.
When using a cross-account or private registry image, you must use SERVICE_ROLE
credentials. When using an CodeBuild curated image, you must use CODEBUILD
credentials.
The type of credentials CodeBuild uses to pull images in your build. There are two valid values:
Specifies that CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust CodeBuild's service principal.
Specifies that CodeBuild uses your build project's service role.
When using a cross-account or private registry image, you must use SERVICE_ROLE
credentials. When using an CodeBuild curated image, you must use CODEBUILD
credentials.
The type of credentials AWS CodeBuild uses to pull images in your build. There are two valid values:
CODEBUILD
specifies that AWS CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust AWS CodeBuild's service principal.
SERVICE_ROLE
specifies that AWS CodeBuild uses your build project's service role.
When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD credentials.
", + "StartBuildBatchInput$imagePullCredentialsTypeOverride": "The type of credentials AWS CodeBuild uses to pull images in your batch build. There are two valid values:
Specifies that AWS CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust AWS CodeBuild's service principal.
Specifies that AWS CodeBuild uses your build project's service role.
When using a cross-account or private registry image, you must use SERVICE_ROLE
credentials. When using an AWS CodeBuild curated image, you must use CODEBUILD
credentials.
The type of credentials AWS CodeBuild uses to pull images in your build. There are two valid values:
Specifies that AWS CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust AWS CodeBuild's service principal.
Specifies that AWS CodeBuild uses your build project's service role.
When using a cross-account or private registry image, you must use SERVICE_ROLE
credentials. When using an AWS CodeBuild curated image, you must use CODEBUILD
credentials.
Information about logs for a build project. These can be logs in CloudWatch Logs, built in a specified S3 bucket, or both.
", + "base": "Information about logs for a build project. These can be logs in Amazon CloudWatch Logs, built in a specified S3 bucket, or both.
", "refs": { "BuildBatch$logConfig": null, - "CreateProjectInput$logsConfig": "Information about logs for the build project. These can be logs in CloudWatch Logs, logs uploaded to a specified S3 bucket, or both.
", - "Project$logsConfig": "Information about logs for the build project. A project can create logs in CloudWatch Logs, an S3 bucket, or both.
", + "CreateProjectInput$logsConfig": "Information about logs for the build project. These can be logs in Amazon CloudWatch Logs, logs uploaded to a specified S3 bucket, or both.
", + "Project$logsConfig": "Information about logs for the build project. A project can create logs in Amazon CloudWatch Logs, an S3 bucket, or both.
", "StartBuildBatchInput$logsConfigOverride": "A LogsConfig
object that override the log settings defined in the batch build project.
Log settings for this build that override the log settings defined in the build project.
", - "UpdateProjectInput$logsConfig": "Information about logs for the build project. A project can create logs in CloudWatch Logs, logs in an S3 bucket, or both.
" + "UpdateProjectInput$logsConfig": "Information about logs for the build project. A project can create logs in Amazon CloudWatch Logs, logs in an S3 bucket, or both.
" } }, "LogsConfigStatusType": { "base": null, "refs": { - "CloudWatchLogsConfig$status": "The current status of the logs in CloudWatch Logs for a build project. Valid values are:
ENABLED
: CloudWatch Logs are enabled for this build project.
DISABLED
: CloudWatch Logs are not enabled for this build project.
The current status of the logs in Amazon CloudWatch Logs for a build project. Valid values are:
ENABLED
: Amazon CloudWatch Logs are enabled for this build project.
DISABLED
: Amazon CloudWatch Logs are not enabled for this build project.
The current status of the S3 build logs. Valid values are:
ENABLED
: S3 build logs are enabled for this build project.
DISABLED
: S3 build logs are not enabled for this build project.
Information about build logs in CloudWatch Logs.
", + "base": "Information about build logs in Amazon CloudWatch Logs.
", "refs": { - "Build$logs": "Information about the build's logs in CloudWatch Logs.
" + "Build$logs": "Information about the build's logs in Amazon CloudWatch Logs.
" } }, "NetworkInterface": { @@ -843,18 +851,18 @@ "refs": { "Build$id": "The unique ID for the build.
", "Build$arn": "The Amazon Resource Name (ARN) of the build.
", - "Build$sourceVersion": "Any version identifier for the version of the source code to be built. If sourceVersion
is specified at the project level, then this sourceVersion
(at the build level) takes precedence.
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.
", - "Build$resolvedSourceVersion": "An identifier for the version of this build's source code.
For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID.
For CodePipeline, the source revision provided by CodePipeline.
For Amazon S3, this does not apply.
The name of the CodeBuild project.
", + "Build$sourceVersion": "Any version identifier for the version of the source code to be built. If sourceVersion
is specified at the project level, then this sourceVersion
(at the build level) takes precedence.
For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.
", + "Build$resolvedSourceVersion": "An identifier for the version of this build's source code.
For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID.
For AWS CodePipeline, the source revision provided by AWS CodePipeline.
For Amazon S3, this does not apply.
The name of the AWS CodeBuild project.
", "Build$serviceRole": "The name of a service role used for this build.
", - "Build$encryptionKey": "The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
The identifier of the batch build.
", "BuildBatch$arn": "The ARN of the batch build.
", "BuildBatch$sourceVersion": "The identifier of the version of the source code to be built.
", - "BuildBatch$resolvedSourceVersion": "The identifier of the resolved version of this batch build's source code.
For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID.
For CodePipeline, the source revision provided by CodePipeline.
For Amazon S3, this does not apply.
The identifier of the resolved version of this batch build's source code.
For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID.
For AWS CodePipeline, the source revision provided by AWS CodePipeline.
For Amazon S3, this does not apply.
The name of the batch build project.
", "BuildBatch$serviceRole": "The name of a service role used for builds in the batch.
", - "BuildBatch$encryptionKey": "The Key Management Service customer master key (CMK) to be used for encrypting the batch build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the batch build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
The ID of the build that could not be successfully deleted.
", @@ -862,8 +870,8 @@ "CodeCoverage$reportARN": "The ARN of the report.
", "CodeCoverage$filePath": "The path of the test report file.
", "ComputeTypesAllowed$member": null, - "CreateProjectInput$serviceRole": "The ARN of the Identity and Access Management role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.
", - "CreateProjectInput$encryptionKey": "The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
The ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
", + "CreateProjectInput$encryptionKey": "The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
Contains the identifier of the Session Manager session used for the build. To work with the paused build, you open this session to examine, control, and resume the build.
", "DeleteBuildBatchInput$id": "The identifier of the batch build to delete.
", "DeleteProjectInput$name": "The name of the build project.
", @@ -881,23 +889,23 @@ "Identifiers$member": null, "ImportSourceCredentialsInput$username": " The Bitbucket username when the authType
is BASIC_AUTH. This parameter is not valid for other types of source providers or connections.
The Amazon Resource Name (ARN) of the token.
", - "InvalidateProjectCacheInput$projectName": "The name of the CodeBuild build project that the cache is reset for.
", + "InvalidateProjectCacheInput$projectName": "The name of the AWS CodeBuild build project that the cache is reset for.
", "ListBuildBatchesForProjectInput$projectName": "The name of the project.
", - "ListBuildsForProjectInput$projectName": "The name of the CodeBuild project.
", + "ListBuildsForProjectInput$projectName": "The name of the AWS CodeBuild project.
", "ListProjectsInput$nextToken": "During a previous call, if there are more than 100 items in the list, only the first 100 items are returned, along with a unique string called a nextToken. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.
", "ListSharedProjectsInput$nextToken": " During a previous call, the maximum number of items that can be returned is the value specified in maxResults
. If there more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.
The ID of the subnet.
", "NetworkInterface$networkInterfaceId": "The ID of the network interface.
", - "Project$serviceRole": "The ARN of the Identity and Access Management role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.
", - "Project$encryptionKey": "The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
). If you don't specify a value, CodeBuild uses the managed CMK for Amazon Simple Storage Service (Amazon S3).
The ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
", + "Project$encryptionKey": "The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
Specifies the service role ARN for the batch build project.
", - "ProjectEnvironment$image": "The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:
For an image tag: <registry>/<repository>:<tag>
. For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be aws/codebuild/standard:4.0
.
For an image digest: <registry>/<repository>@<digest>
. For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use <registry>/<repository>@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
.
For more information, see Docker images provided by CodeBuild in the CodeBuild user guide.
", + "ProjectEnvironment$image": "The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:
For an image tag: <registry>/<repository>:<tag>
. For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be aws/codebuild/standard:4.0
.
For an image digest: <registry>/<repository>@<digest>
. For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use <registry>/<repository>@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
.
A JSON-formatted resource policy. For more information, see Sharing a Project and Sharing a Report Group in the CodeBuild User Guide.
", + "PutResourcePolicyInput$policy": "A JSON-formatted resource policy. For more information, see Sharing a Project and Sharing a Report Group in the AWS CodeBuild User Guide.
", "PutResourcePolicyInput$resourceArn": " The ARN of the Project
or ReportGroup
resource you want to associate with a resource policy.
The ARN of the Project
or ReportGroup
resource that is associated with a resource policy.
The Amazon Resource Name (ARN) or name of credentials created using Secrets Manager.
The credential
can use the name of the credentials only if they exist in your current Region.
The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets Manager.
The credential
can use the name of the credentials only if they exist in your current AWS Region.
The ARN of the report run.
", "Report$reportGroupArn": "The ARN of the report group associated with this report.
", "ReportArns$member": null, @@ -913,22 +921,22 @@ "StartBuildBatchInput$projectName": "The name of the project.
", "StartBuildBatchInput$imageOverride": "The name of an image for this batch build that overrides the one specified in the batch build project.
", "StartBuildBatchInput$serviceRoleOverride": "The name of a service role for this batch build that overrides the one specified in the batch build project.
", - "StartBuildBatchInput$encryptionKeyOverride": "The Key Management Service customer master key (CMK) that overrides the one specified in the batch build project. The CMK key encrypts the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
The name of the CodeBuild build project to start running a build.
", + "StartBuildBatchInput$encryptionKeyOverride": "The AWS Key Management Service (AWS KMS) customer master key (CMK) that overrides the one specified in the batch build project. The CMK key encrypts the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
The name of the AWS CodeBuild build project to start running a build.
", "StartBuildInput$imageOverride": "The name of an image for this build that overrides the one specified in the build project.
", "StartBuildInput$serviceRoleOverride": "The name of a service role for this build that overrides the one specified in the build project.
", - "StartBuildInput$encryptionKeyOverride": "The Key Management Service customer master key (CMK) that overrides the one specified in the build project. The CMK key encrypts the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
The AWS Key Management Service (AWS KMS) customer master key (CMK) that overrides the one specified in the build project. The CMK key encrypts the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
The identifier of the batch build to stop.
", "StopBuildInput$id": "The ID of the build.
", "Subnets$member": null, "TestCase$reportArn": "The ARN of the report to which the test case belongs.
", "UpdateProjectInput$name": "The name of the build project.
You cannot change a build project's name.
The replacement ARN of the Identity and Access Management role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.
", - "UpdateProjectInput$encryptionKey": "The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
The replacement ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
", + "UpdateProjectInput$encryptionKey": "The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>
).
The ARN of the report group to update.
", "VpcConfig$vpcId": "The ID of the Amazon VPC.
", "Webhook$url": "The URL to the webhook.
", - "Webhook$payloadUrl": "The CodeBuild endpoint where webhook events are sent.
", + "Webhook$payloadUrl": "The AWS CodeBuild endpoint where webhook events are sent.
", "Webhook$secret": "The secret token of the associated repository.
A Bitbucket webhook does not support secret
.
The list of ARNs for the build projects shared with the current Amazon Web Services account or user.
" + "ListSharedProjectsOutput$projects": "The list of ARNs for the build projects shared with the current AWS account or user.
" } }, "ProjectArtifacts": { @@ -1102,16 +1110,16 @@ "base": null, "refs": { "CreateProjectInput$name": "The name of the build project.
", - "CreateWebhookInput$projectName": "The name of the CodeBuild project.
", - "DeleteWebhookInput$projectName": "The name of the CodeBuild project.
", + "CreateWebhookInput$projectName": "The name of the AWS CodeBuild project.
", + "DeleteWebhookInput$projectName": "The name of the AWS CodeBuild project.
", "Project$name": "The name of the build project.
", - "UpdateWebhookInput$projectName": "The name of the CodeBuild project.
" + "UpdateWebhookInput$projectName": "The name of the AWS CodeBuild project.
" } }, "ProjectNames": { "base": null, "refs": { - "BatchGetProjectsInput$names": "The names or ARNs of the build projects. To get information about a project shared with your Amazon Web Services account, its ARN must be specified. You cannot specify a shared project using its name.
", + "BatchGetProjectsInput$names": "The names or ARNs of the build projects. To get information about a project shared with your AWS account, its ARN must be specified. You cannot specify a shared project using its name.
", "BatchGetProjectsOutput$projectsNotFound": "The names of build projects for which information could not be found.
", "ListProjectsOutput$projects": "The list of build project names, with each build project name representing a single build project.
" } @@ -1119,8 +1127,8 @@ "ProjectSecondarySourceVersions": { "base": null, "refs": { - "Build$secondarySourceVersions": " An array of ProjectSourceVersion
objects. Each ProjectSourceVersion
must be one of:
For CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example, pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
An array of ProjectSourceVersion
objects. Each ProjectSourceVersion
must be one of:
For CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example, pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
An array of ProjectSourceVersion
objects. Each ProjectSourceVersion
must be one of:
For AWS CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example, pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
An array of ProjectSourceVersion
objects. Each ProjectSourceVersion
must be one of:
For AWS CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example, pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
An array of ProjectSourceVersion
objects. If secondarySourceVersions
is specified at the build level, then they take precedence over these secondarySourceVersions
(at the project level).
An array of ProjectSourceVersion
objects. If secondarySourceVersions
is specified at the build level, then they take precedence over these secondarySourceVersions
(at the project level).
An array of ProjectSourceVersion
objects that override the secondary source versions in the batch build project.
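A hedged aws-sdk-go sketch of the source-version formats described above, starting a build against a hypothetical GitHub pull request and pinning a hypothetical secondary source to a tag:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// startBuildForPullRequest builds GitHub pull request 25 and overrides one
// secondary source version. Project and source identifier are hypothetical.
func startBuildForPullRequest(svc *codebuild.CodeBuild) error {
	_, err := svc.StartBuild(&codebuild.StartBuildInput{
		ProjectName:   aws.String("my-project"),
		SourceVersion: aws.String("pr/25"), // pr/<pull-request-ID> format for GitHub
		SecondarySourcesVersionOverride: []*codebuild.ProjectSourceVersion{
			{
				SourceIdentifier: aws.String("helper_lib"),
				SourceVersion:    aws.String("v1.2.3"), // a Git tag; a branch or commit ID also works
			},
		},
	})
	return err
}
```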
Information about credentials that provide access to a private Docker registry. When this is set:
imagePullCredentialsType
must be set to SERVICE_ROLE
.
images cannot be curated images or Amazon ECR images.
For more information, see Private Registry with Secrets Manager Sample for CodeBuild.
", + "base": "Information about credentials that provide access to a private Docker registry. When this is set:
imagePullCredentialsType
must be set to SERVICE_ROLE
.
images cannot be curated images or Amazon ECR images.
For more information, see Private Registry with AWS Secrets Manager Sample for AWS CodeBuild.
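A sketch of a build environment that satisfies these constraints; the registry image and Secrets Manager ARN are hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// privateRegistryEnvironment pulls the build image from a private Docker
// registry. When RegistryCredential is set, ImagePullCredentialsType must be
// SERVICE_ROLE, and the image cannot be a curated or Amazon ECR image.
func privateRegistryEnvironment() *codebuild.ProjectEnvironment {
	return &codebuild.ProjectEnvironment{
		Type:                     aws.String("LINUX_CONTAINER"),
		ComputeType:              aws.String("BUILD_GENERAL1_SMALL"),
		Image:                    aws.String("registry.example.com/builders/go:latest"),
		ImagePullCredentialsType: aws.String("SERVICE_ROLE"),
		RegistryCredential: &codebuild.RegistryCredential{
			Credential:         aws.String("arn:aws:secretsmanager:us-west-2:111122223333:secret:dockerhub-abc123"),
			CredentialProvider: aws.String("SECRETS_MANAGER"),
		},
	}
}
```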
", "refs": { "ProjectEnvironment$registryCredential": "The credentials for access to a private registry.
", "StartBuildBatchInput$registryCredentialOverride": "A RegistryCredential
object that overrides credentials for access to a private registry.
An array of ARNs that identify the Report
objects to return.
An array of ARNs passed to BatchGetReportGroups
that are not associated with a Report
.
The list of report ARNs.
", - "ListReportsOutput$reports": "The list of returned ARNs for the reports in the current Amazon Web Services account.
" + "ListReportsOutput$reports": "The list of returned ARNs for the reports in the current AWS account.
" } }, "ReportCodeCoverageSortByType": { @@ -1243,8 +1251,8 @@ "refs": { "BatchGetReportGroupsInput$reportGroupArns": "An array of report group ARNs that identify the report groups to return.
", "BatchGetReportGroupsOutput$reportGroupsNotFound": " An array of ARNs passed to BatchGetReportGroups
that are not associated with a ReportGroup
.
The list of ARNs for the report groups in the current Amazon Web Services account.
", - "ListSharedReportGroupsOutput$reportGroups": "The list of ARNs for the report groups shared with the current Amazon Web Services account or user.
" + "ListReportGroupsOutput$reportGroups": "The list of ARNs for the report groups in the current AWS account.
", + "ListSharedReportGroupsOutput$reportGroups": "The list of ARNs for the report groups shared with the current AWS account or user.
" } }, "ReportGroupName": { @@ -1293,7 +1301,7 @@ "ReportPackagingType": { "base": null, "refs": { - "S3ReportExportConfig$packaging": "The type of build output artifact to create. Valid values include:
NONE
: CodeBuild creates the raw data in the output bucket. This is the default if packaging is not specified.
ZIP
: CodeBuild creates a ZIP file with the raw data in the output bucket.
The type of build output artifact to create. Valid values include:
NONE
: AWS CodeBuild creates the raw data in the output bucket. This is the default if packaging is not specified.
ZIP
: AWS CodeBuild creates a ZIP file with the raw data in the output bucket.
The specified Amazon Web Services resource cannot be created, because an Amazon Web Services resource with the same settings already exists.
", + "base": "The specified AWS resource cannot be created, because an AWS resource with the same settings already exists.
", "refs": { } }, "ResourceNotFoundException": { - "base": "The specified Amazon Web Services resource cannot be found.
", + "base": "The specified AWS resource cannot be found.
", "refs": { } }, @@ -1413,8 +1421,8 @@ "SharedResourceSortByType": { "base": null, "refs": { - "ListSharedProjectsInput$sortBy": "The criterion to be used to list build projects shared with the current Amazon Web Services account or user. Valid values include:
ARN
: List based on the ARN.
MODIFIED_TIME
: List based on when information about the shared project was last changed.
The criterion to be used to list report groups shared with the current Amazon Web Services account or user. Valid values include:
ARN
: List based on the ARN.
MODIFIED_TIME
: List based on when information about the shared report group was last changed.
The criterion to be used to list build projects shared with the current AWS account or user. Valid values include:
ARN
: List based on the ARN.
MODIFIED_TIME
: List based on when information about the shared project was last changed.
The criterion to be used to list report groups shared with the current AWS account or user. Valid values include:
ARN
: List based on the ARN.
MODIFIED_TIME
: List based on when information about the shared report group was last changed.
Information about the authorization settings for CodeBuild to access the source code to be built.
This information is for the CodeBuild console's use only. Your code should not get or set this information directly.
", + "base": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.
This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly.
", "refs": { - "ProjectSource$auth": "Information about the authorization settings for CodeBuild to access the source code to be built.
This information is for the CodeBuild console's use only. Your code should not get or set this information directly.
", + "ProjectSource$auth": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.
This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly.
", "StartBuildBatchInput$sourceAuthOverride": "A SourceAuth
object that overrides the one defined in the batch build project. This override applies only if the build project's source is BitBucket or GitHub.
An authorization type for this build that overrides the one defined in the build project. This override applies only if the build project's source is BitBucket or GitHub.
" } @@ -1462,7 +1470,7 @@ "SourceType": { "base": null, "refs": { - "ProjectSource$type": "The type of repository that contains the source code to be built. Valid values include:
BITBUCKET
: The source code is in a Bitbucket repository.
CODECOMMIT
: The source code is in a CodeCommit repository.
CODEPIPELINE
: The source code settings are specified in the source action of a pipeline in CodePipeline.
GITHUB
: The source code is in a GitHub or GitHub Enterprise Cloud repository.
GITHUB_ENTERPRISE
: The source code is in a GitHub Enterprise Server repository.
NO_SOURCE
: The project does not have input source code.
S3
: The source code is in an Amazon S3 bucket.
The type of repository that contains the source code to be built. Valid values include:
BITBUCKET
: The source code is in a Bitbucket repository.
CODECOMMIT
: The source code is in an AWS CodeCommit repository.
CODEPIPELINE
: The source code settings are specified in the source action of a pipeline in AWS CodePipeline.
GITHUB
: The source code is in a GitHub or GitHub Enterprise Cloud repository.
GITHUB_ENTERPRISE
: The source code is in a GitHub Enterprise Server repository.
NO_SOURCE
: The project does not have input source code.
S3
: The source code is in an Amazon S3 bucket.
The source input type that overrides the source input defined in the batch build project.
", "StartBuildInput$sourceTypeOverride": "A source input type, for this build, that overrides the source input defined in the build project.
" } @@ -1522,23 +1530,23 @@ "base": null, "refs": { "Build$currentPhase": "The current build phase.
", - "Build$initiator": "The entity that started the build. Valid values include:
If CodePipeline started the build, the pipeline's name (for example, codepipeline/my-demo-pipeline
).
If an Identity and Access Management user started the build, the user's name (for example, MyUserName
).
If the Jenkins plugin for CodeBuild started the build, the string CodeBuild-Jenkins-Plugin
.
The entity that started the build. Valid values include:
If AWS CodePipeline started the build, the pipeline's name (for example, codepipeline/my-demo-pipeline
).
If an AWS Identity and Access Management (IAM) user started the build, the user's name (for example, MyUserName
).
If the Jenkins plugin for AWS CodeBuild started the build, the string CodeBuild-Jenkins-Plugin
.
The ARN of the batch build that this build is a member of, if applicable.
", "BuildArtifacts$location": "Information about the location of the build artifacts.
", "BuildArtifacts$sha256sum": "The SHA-256 hash of the build artifact.
You can use this hash along with a checksum tool to confirm file integrity and authenticity.
This value is available only if the build project's packaging
value is set to ZIP
.
The MD5 hash of the build artifact.
You can use this hash along with a checksum tool to confirm file integrity and authenticity.
This value is available only if the build project's packaging
value is set to ZIP
.
An identifier for this artifact definition.
", "BuildBatch$currentPhase": "The current phase of the batch build.
", - "BuildBatch$initiator": "The entity that started the batch build. Valid values include:
If CodePipeline started the build, the pipeline's name (for example, codepipeline/my-demo-pipeline
).
If an Identity and Access Management user started the build, the user's name.
If the Jenkins plugin for CodeBuild started the build, the string CodeBuild-Jenkins-Plugin
.
The entity that started the batch build. Valid values include:
If AWS CodePipeline started the build, the pipeline's name (for example, codepipeline/my-demo-pipeline
).
If an AWS Identity and Access Management (IAM) user started the build, the user's name.
If the Jenkins plugin for AWS CodeBuild started the build, the string CodeBuild-Jenkins-Plugin
.
Contains the identifier of the build group.
", "BuildNotDeleted$statusCode": "Additional information about the build that could not be successfully deleted.
", "BuildReportArns$member": null, "BuildStatusConfig$context": "Specifies the context of the build status CodeBuild sends to the source provider. The usage of this parameter depends on the source provider.
This parameter is used for the name
parameter in the Bitbucket commit status. For more information, see build in the Bitbucket API documentation.
This parameter is used for the context
parameter in the GitHub commit status. For more information, see Create a commit status in the GitHub developer guide.
Specifies the target url of the build status CodeBuild sends to the source provider. The usage of this parameter depends on the source provider.
This parameter is used for the url
parameter in the Bitbucket commit status. For more information, see build in the Bitbucket API documentation.
This parameter is used for the target_url
parameter in the GitHub commit status. For more information, see Create a commit status in the GitHub developer guide.
The batch build ARN.
", - "CloudWatchLogsConfig$groupName": "The group name of the logs in CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.
", - "CloudWatchLogsConfig$streamName": "The prefix of the stream name of the CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.
", - "CreateProjectInput$sourceVersion": "A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
For CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion
is specified at the build level, then that version takes precedence over this sourceVersion
(at the project level).
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.
", + "CloudWatchLogsConfig$groupName": "The group name of the logs in Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.
", + "CloudWatchLogsConfig$streamName": "The prefix of the stream name of the Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.
", + "CreateProjectInput$sourceVersion": "A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
For AWS CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion
is specified at the build level, then that version takes precedence over this sourceVersion
(at the project level).
For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.
", "CreateWebhookInput$branchFilter": "A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter
is empty, then all branches are built.
It is recommended that you use filterGroups
instead of branchFilter
.
The status code.
", "DescribeCodeCoveragesInput$nextToken": "The nextToken
value returned from a previous call to DescribeCodeCoverages
. This specifies the next item to return. To return the beginning of the list, exclude this parameter.
During a previous call, the maximum number of items that can be returned is the value specified in maxResults
. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.
The name of the Docker image.
", "EnvironmentImage$description": "The description of the Docker image.
", - "EnvironmentVariable$value": "The value of the environment variable.
We strongly discourage the use of PLAINTEXT
environment variables to store sensitive values, especially Amazon Web Services secret key IDs and secret access keys. PLAINTEXT
environment variables can be displayed in plain text using the CodeBuild console and the AWS Command Line Interface (AWS CLI). For sensitive values, we recommend you use an environment variable of type PARAMETER_STORE
or SECRETS_MANAGER
.
The value of the environment variable.
We strongly discourage the use of PLAINTEXT
environment variables to store sensitive values, especially AWS secret key IDs and secret access keys. PLAINTEXT
environment variables can be displayed in plain text using the AWS CodeBuild console and the AWS Command Line Interface (AWS CLI). For sensitive values, we recommend you use an environment variable of type PARAMETER_STORE
or SECRETS_MANAGER
.
The value assigned to the exported environment variable.
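A small sketch of the recommendation above, keeping sensitive values in Parameter Store and Secrets Manager rather than PLAINTEXT; the variable names and parameter paths are hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// safeEnvironmentVariables resolves secrets at build time instead of storing
// them as PLAINTEXT, which would be visible in the console and CLI.
func safeEnvironmentVariables() []*codebuild.EnvironmentVariable {
	return []*codebuild.EnvironmentVariable{
		{ // fine as PLAINTEXT: not sensitive
			Name:  aws.String("STAGE"),
			Value: aws.String("prod"),
			Type:  aws.String("PLAINTEXT"),
		},
		{ // value is the Parameter Store parameter name, not the secret itself
			Name:  aws.String("DB_PASSWORD"),
			Value: aws.String("/my-app/prod/db-password"),
			Type:  aws.String("PARAMETER_STORE"),
		},
		{ // value is the Secrets Manager secret ID
			Name:  aws.String("API_TOKEN"),
			Value: aws.String("prod/api-token"),
			Type:  aws.String("SECRETS_MANAGER"),
		},
	}
}
```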
", "ImageVersions$member": null, "ListBuildBatchesForProjectInput$nextToken": "The nextToken
value returned from a previous call to ListBuildBatchesForProject
. This specifies the next item to return. To return the beginning of the list, exclude this parameter.
During a previous call, the maximum number of items that can be returned is the value specified in maxResults
. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.
During a previous call, the maximum number of items that can be returned is the value specified in maxResults
. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.
During a previous call, the maximum number of items that can be returned is the value specified in maxResults
. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.
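A sketch of the nextToken loop this wording describes, using ListBuildBatchesForProject with a hypothetical project name:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// listAllBatchBuildIDs keeps calling the operation with each returned token
// until no more next tokens are returned.
func listAllBatchBuildIDs(svc *codebuild.CodeBuild) ([]*string, error) {
	var ids []*string
	input := &codebuild.ListBuildBatchesForProjectInput{
		ProjectName: aws.String("my-project"),
		MaxResults:  aws.Int64(100),
	}
	for {
		out, err := svc.ListBuildBatchesForProject(input)
		if err != nil {
			return nil, err
		}
		ids = append(ids, out.Ids...)
		if out.NextToken == nil { // no more pages
			break
		}
		input.NextToken = out.NextToken
	}
	fmt.Printf("found %d batch builds\n", len(ids))
	return ids, nil
}
```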
The name of the CloudWatch Logs group for the build logs.
", - "LogsLocation$streamName": "The name of the CloudWatch Logs stream for the build logs.
", - "LogsLocation$deepLink": "The URL to an individual build log in CloudWatch Logs.
", + "LogsLocation$groupName": "The name of the Amazon CloudWatch Logs group for the build logs.
", + "LogsLocation$streamName": "The name of the Amazon CloudWatch Logs stream for the build logs.
", + "LogsLocation$deepLink": "The URL to an individual build log in Amazon CloudWatch Logs.
", "LogsLocation$s3DeepLink": "The URL to a build log in an S3 bucket.
", - "LogsLocation$cloudWatchLogsArn": " The ARN of CloudWatch Logs for a build project. Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}
. For more information, see Resources Defined by CloudWatch Logs.
The ARN of Amazon CloudWatch Logs for a build project. Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}
. For more information, see Resources Defined by Amazon CloudWatch Logs.
The ARN of S3 logs for a build project. Its format is arn:${Partition}:s3:::${BucketName}/${ObjectName}
. For more information, see Resources Defined by Amazon S3.
The status code for the context of the build phase.
", "PhaseContext$message": "An explanation of the build phase's context. This might include a command ID and an exit code.
", "Project$arn": "The Amazon Resource Name (ARN) of the build project.
", - "Project$sourceVersion": "A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
For CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion
is specified at the build level, then that version takes precedence over this sourceVersion
(at the project level).
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.
", - "ProjectArtifacts$location": "Information about the build output artifact location:
If type
is set to CODEPIPELINE
, CodePipeline ignores this value if specified. This is because CodePipeline manages its build output locations instead of CodeBuild.
If type
is set to NO_ARTIFACTS
, this value is ignored if specified, because no build output is produced.
If type
is set to S3
, this is the name of the output bucket.
Along with namespaceType
and name
, the pattern that CodeBuild uses to name and store the output artifact:
If type
is set to CODEPIPELINE
, CodePipeline ignores this value if specified. This is because CodePipeline manages its build output names instead of CodeBuild.
If type
is set to NO_ARTIFACTS
, this value is ignored if specified, because no build output is produced.
If type
is set to S3
, this is the path to the output artifact. If path
is not specified, path
is not used.
For example, if path
is set to MyArtifacts
, namespaceType
is set to NONE
, and name
is set to MyArtifact.zip
, the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip
.
Along with path
and namespaceType
, the pattern that CodeBuild uses to name and store the output artifact:
If type
is set to CODEPIPELINE
, CodePipeline ignores this value if specified. This is because CodePipeline manages its build output names instead of CodeBuild.
If type
is set to NO_ARTIFACTS
, this value is ignored if specified, because no build output is produced.
If type
is set to S3
, this is the name of the output artifact object. If you set the name to be a forward slash (\"/\"), the artifact is stored in the root of the output bucket.
For example:
If path
is set to MyArtifacts
, namespaceType
is set to BUILD_ID
, and name
is set to MyArtifact.zip
, then the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip
.
If path
is empty, namespaceType
is set to NONE
, and name
is set to \"/
\", the output artifact is stored in the root of the output bucket.
If path
is set to MyArtifacts
, namespaceType
is set to BUILD_ID
, and name
is set to \"/
\", the output artifact is stored in MyArtifacts/<build-ID>
.
A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
For AWS CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion
is specified at the build level, then that version takes precedence over this sourceVersion
(at the project level).
For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.
", + "ProjectArtifacts$location": "Information about the build output artifact location:
If type
is set to CODEPIPELINE
, AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output locations instead of AWS CodeBuild.
If type
is set to NO_ARTIFACTS
, this value is ignored if specified, because no build output is produced.
If type
is set to S3
, this is the name of the output bucket.
Along with namespaceType
and name
, the pattern that AWS CodeBuild uses to name and store the output artifact:
If type
is set to CODEPIPELINE
, AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.
If type
is set to NO_ARTIFACTS
, this value is ignored if specified, because no build output is produced.
If type
is set to S3
, this is the path to the output artifact. If path
is not specified, path
is not used.
For example, if path
is set to MyArtifacts
, namespaceType
is set to NONE
, and name
is set to MyArtifact.zip
, the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip
.
Along with path
and namespaceType
, the pattern that AWS CodeBuild uses to name and store the output artifact:
If type
is set to CODEPIPELINE
, AWS CodePipeline ignores this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.
If type
is set to NO_ARTIFACTS
, this value is ignored if specified, because no build output is produced.
If type
is set to S3
, this is the name of the output artifact object. If you set the name to be a forward slash (\"/\"), the artifact is stored in the root of the output bucket.
For example:
If path
is set to MyArtifacts
, namespaceType
is set to BUILD_ID
, and name
is set to MyArtifact.zip
, then the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip
.
If path
is empty, namespaceType
is set to NONE
, and name
is set to \"/
\", the output artifact is stored in the root of the output bucket.
If path
is set to MyArtifacts
, namespaceType
is set to BUILD_ID
, and name
is set to \"/
\", the output artifact is stored in MyArtifacts/<build-ID>
.
An identifier for this artifact definition.
", "ProjectBadge$badgeRequestUrl": "The publicly-accessible URL through which you can access the build badge for your project.
", "ProjectCache$location": "Information about the cache location:
NO_CACHE
or LOCAL
: This value is ignored.
S3
: This is the S3 bucket name/prefix.
The ARN of the Amazon S3 bucket, path prefix, and object key that contains the PEM-encoded certificate for the build project. For more information, see certificate in the CodeBuild User Guide.
", - "ProjectFileSystemLocation$location": "A string that specifies the location of the file system created by Amazon EFS. Its format is efs-dns-name:/directory-path
. You can find the DNS name of the file system when you view it in the Amazon EFS console. The directory path is a path to a directory in the file system that CodeBuild mounts. For example, if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com
, and its mount directory is my-efs-mount-directory
, then the location
is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory
.
The directory path in the format efs-dns-name:/directory-path
is optional. If you do not specify a directory path, the location is only the DNS name and CodeBuild mounts the entire file system.
The ARN of the Amazon S3 bucket, path prefix, and object key that contains the PEM-encoded certificate for the build project. For more information, see certificate in the AWS CodeBuild User Guide.
", + "ProjectFileSystemLocation$location": "A string that specifies the location of the file system created by Amazon EFS. Its format is efs-dns-name:/directory-path
. You can find the DNS name of the file system when you view it in the Amazon EFS console. The directory path is a path to a directory in the file system that CodeBuild mounts. For example, if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com
, and its mount directory is my-efs-mount-directory
, then the location
is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory
.
The directory path in the format efs-dns-name:/directory-path
is optional. If you do not specify a directory path, the location is only the DNS name and CodeBuild mounts the entire file system.
The location in the container where you mount the file system.
", "ProjectFileSystemLocation$identifier": "The name used to access a file system created by Amazon EFS. CodeBuild creates an environment variable by appending the identifier
in all capital letters to CODEBUILD_
. For example, if you specify my_efs
for identifier
, a new environment variable is created named CODEBUILD_MY_EFS
.
The identifier
is used to mount your file system.
The mount options for a file system created by Amazon EFS. The default mount options used by CodeBuild are nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2
. For more information, see Recommended NFS Mount Options.
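A sketch tying the location, identifier, and mount options fields together; the file system DNS name and mount point are hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// efsMount mounts an Amazon EFS directory into the build container. With
// identifier "my_efs", CodeBuild exposes the mount as CODEBUILD_MY_EFS.
func efsMount() *codebuild.ProjectFileSystemLocation {
	return &codebuild.ProjectFileSystemLocation{
		Type:         aws.String("EFS"),
		Location:     aws.String("fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory"),
		MountPoint:   aws.String("/mnt/efs"),
		Identifier:   aws.String("my_efs"),
		MountOptions: aws.String("nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2"),
	}
}
```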
Information about the location of the source code to be built. Valid values include:
For source code settings that are specified in the source action of a pipeline in CodePipeline, location
should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.
For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.<region-ID>.amazonaws.com/v1/repos/<repo-name>
).
For source code in an Amazon S3 input bucket, one of the following.
The path to the ZIP file that contains the source code (for example, <bucket-name>/<path>/<object-name>.zip
).
The path to the folder that contains the source code (for example, <bucket-name>/<path-to-source-code>/<folder>/
).
For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your account to your GitHub account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source
object, set the auth
object's type
value to OAUTH
.
For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your Bitbucket account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source
object, set the auth
object's type
value to OAUTH
.
If you specify CODEPIPELINE
for the Type
property, don't specify this property. For all of the other types, you must specify Location
.
The buildspec file declaration to use for the builds in this build project.
If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR
environment variable, or the path to an S3 bucket. The bucket must be in the same Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml
). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.
The mount options for a file system created by AWS EFS. The default mount options used by CodeBuild are nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2
. For more information, see Recommended NFS Mount Options.
Information about the location of the source code to be built. Valid values include:
For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location
should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.
For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.<region-ID>.amazonaws.com/v1/repos/<repo-name>
).
For source code in an Amazon S3 input bucket, one of the following.
The path to the ZIP file that contains the source code (for example, <bucket-name>/<path>/<object-name>.zip
).
The path to the folder that contains the source code (for example, <bucket-name>/<path-to-source-code>/<folder>/
).
For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow AWS CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source
object, set the auth
object's type
value to OAUTH
.
For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source
object, set the auth
object's type
value to OAUTH
.
The buildspec file declaration to use for the builds in this build project.
If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR
environment variable, or the path to an S3 bucket. The bucket must be in the same AWS Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml
). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.
An identifier for this project source. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length.
", "ProjectSourceVersion$sourceIdentifier": "An identifier for a source in the build project. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length.
", - "ProjectSourceVersion$sourceVersion": "The source version for the corresponding source identifier. If specified, must be one of:
For CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example, pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.
", + "ProjectSourceVersion$sourceVersion": "The source version for the corresponding source identifier. If specified, must be one of:
For AWS CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example, pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.
", "Report$name": "The name of the report that was run.
", "Report$executionId": "The ARN of the build run that generated this report.
", "ReportGroupTrendStats$average": "Contains the average of all values analyzed.
", @@ -1605,22 +1613,22 @@ "ReportWithRawData$data": "The value of the requested data field from the report.
", "ResolvedArtifact$location": "The location of the artifact.
", "ResolvedArtifact$identifier": "The identifier of the artifact.
", - "RetryBuildBatchInput$idempotencyToken": "A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuildBatch
request. The token is included in the RetryBuildBatch
request and is valid for five minutes. If you repeat the RetryBuildBatch
request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.
A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuild
request. The token is included in the RetryBuild
request and is valid for five minutes. If you repeat the RetryBuild
request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.
A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuildBatch
request. The token is included in the RetryBuildBatch
request and is valid for five minutes. If you repeat the RetryBuildBatch
request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.
A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuild
request. The token is included in the RetryBuild
request and is valid for five minutes. If you repeat the RetryBuild
request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.
The ARN of an S3 bucket and the path prefix for S3 logs. If your Amazon S3 bucket name is my-bucket
, and your path prefix is build-log
, then acceptable formats are my-bucket/build-log
or arn:aws:s3:::my-bucket/build-log
.
The Amazon Web Services account identifier of the owner of the Amazon S3 bucket. This allows report data to be exported to an Amazon S3 bucket that is owned by an account other than the account running the build.
", + "S3ReportExportConfig$bucketOwner": "The AWS account identifier of the owner of the Amazon S3 bucket. This allows report data to be exported to an Amazon S3 bucket that is owned by an account other than the account running the build.
", "S3ReportExportConfig$path": "The path to the exported report's raw data results.
", "SourceAuth$resource": "The resource value that applies to the specified authorization type.
", - "StartBuildBatchInput$sourceVersion": "The version of the batch build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:
The commit ID, branch, or Git tag to use.
The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
The version ID of the object that represents the build input ZIP file to use.
If sourceVersion
is specified at the project level, then this sourceVersion
(at the build level) takes precedence.
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.
", + "StartBuildBatchInput$sourceVersion": "The version of the batch build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:
The commit ID, branch, or Git tag to use.
The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
The version ID of the object that represents the build input ZIP file to use.
If sourceVersion
is specified at the project level, then this sourceVersion
(at the build level) takes precedence.
For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.
", "StartBuildBatchInput$sourceLocationOverride": "A location that overrides, for this batch build, the source location defined in the batch build project.
", - "StartBuildBatchInput$buildspecOverride": "A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.
If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR
environment variable, or the path to an S3 bucket. The bucket must be in the same Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml
). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.
A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.
If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR
environment variable, or the path to an S3 bucket. The bucket must be in the same AWS Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml
). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.
The name of a certificate for this batch build that overrides the one specified in the batch build project.
", - "StartBuildBatchInput$idempotencyToken": "A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuildBatch
request. The token is included in the StartBuildBatch
request and is valid for five minutes. If you repeat the StartBuildBatch
request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.
The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:
The commit ID, branch, or Git tag to use.
The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
The version ID of the object that represents the build input ZIP file to use.
If sourceVersion
is specified at the project level, then this sourceVersion
(at the build level) takes precedence.
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.
", + "StartBuildBatchInput$idempotencyToken": "A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuildBatch
request. The token is included in the StartBuildBatch
request and is valid for five minutes. If you repeat the StartBuildBatch
request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.
The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:
The commit ID, branch, or Git tag to use.
The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
The version ID of the object that represents the build input ZIP file to use.
If sourceVersion
is specified at the project level, then this sourceVersion
(at the build level) takes precedence.
For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.
", "StartBuildInput$sourceLocationOverride": "A location that overrides, for this build, the source location for the one defined in the build project.
", - "StartBuildInput$buildspecOverride": "A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.
If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR
environment variable, or the path to an S3 bucket. The bucket must be in the same Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml
). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.
A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.
If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR
environment variable, or the path to an S3 bucket. The bucket must be in the same AWS Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml
). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.
The name of a certificate for this build that overrides the one specified in the build project.
", - "StartBuildInput$idempotencyToken": "A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 5 minutes. If you repeat the StartBuild request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.
", + "StartBuildInput$idempotencyToken": "A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 5 minutes. If you repeat the StartBuild request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.
", "TestCase$testRawDataPath": "The path to the raw data file that contains the test result.
", "TestCase$prefix": "A string that is applied to a series of related test cases. CodeBuild generates the prefix. The prefix depends on the framework used to generate the tests.
", "TestCase$name": "The name of the test case.
", @@ -1628,7 +1636,7 @@ "TestCase$message": "A message associated with a test case. For example, an error message or stack trace.
", "TestCaseFilter$status": "The status used to filter test cases. A TestCaseFilter
can have one status. Valid values are:
SUCCEEDED
FAILED
ERROR
SKIPPED
UNKNOWN
A keyword that is used to filter on the name
or the prefix
of the test cases. Only test cases where the keyword is a substring of the name
or the prefix
will be returned.
A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
For CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion
is specified at the build level, then that version takes precedence over this sourceVersion
(at the project level).
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.
", + "UpdateProjectInput$sourceVersion": "A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
For AWS CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion
is specified at the build level, then that version takes precedence over this sourceVersion
(at the project level).
For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.
", "UpdateWebhookInput$branchFilter": "A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter
is empty, then all branches are built.
It is recommended that you use filterGroups
instead of branchFilter
.
A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter
is empty, then all branches are built.
It is recommended that you use filterGroups
instead of branchFilter
.
For a WebHookFilter
that uses EVENT
type, a comma-separated string that specifies one or more events. For example, the webhook filter PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED
allows all push, pull request created, and pull request updated events to trigger a build.
For a WebHookFilter
that uses any of the other filter types, a regular expression pattern. For example, a WebHookFilter
that uses HEAD_REF
for its type
and the pattern ^refs/heads/
triggers a build when the head reference is a branch with a reference name refs/heads/branch-name
.
A tag, consisting of a key and a value.
This tag is available for use by Amazon Web Services services that support tags in CodeBuild.
", + "base": "A tag, consisting of a key and a value.
This tag is available for use by AWS services that support tags in AWS CodeBuild.
", "refs": { "TagList$member": null } @@ -1649,12 +1657,12 @@ "TagList": { "base": null, "refs": { - "CreateProjectInput$tags": "A list of tag key and value pairs associated with this build project.
These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.
", - "CreateReportGroupInput$tags": "A list of tag key and value pairs associated with this report group.
These tags are available for use by Amazon Web Services services that support CodeBuild report group tags.
", - "Project$tags": "A list of tag key and value pairs associated with this build project.
These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.
", - "ReportGroup$tags": "A list of tag key and value pairs associated with this report group.
These tags are available for use by Amazon Web Services services that support CodeBuild report group tags.
", - "UpdateProjectInput$tags": "An updated list of tag key and value pairs associated with this build project.
These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.
", - "UpdateReportGroupInput$tags": "An updated list of tag key and value pairs associated with this report group.
These tags are available for use by Amazon Web Services services that support CodeBuild report group tags.
" + "CreateProjectInput$tags": "A list of tag key and value pairs associated with this build project.
These tags are available for use by AWS services that support AWS CodeBuild build project tags.
", + "CreateReportGroupInput$tags": "A list of tag key and value pairs associated with this report group.
These tags are available for use by AWS services that support AWS CodeBuild report group tags.
", + "Project$tags": "A list of tag key and value pairs associated with this build project.
These tags are available for use by AWS services that support AWS CodeBuild build project tags.
", + "ReportGroup$tags": "A list of tag key and value pairs associated with this report group.
These tags are available for use by AWS services that support AWS CodeBuild report group tags.
", + "UpdateProjectInput$tags": "An updated list of tag key and value pairs associated with this build project.
These tags are available for use by AWS services that support AWS CodeBuild build project tags.
", + "UpdateReportGroupInput$tags": "An updated list of tag key and value pairs associated with this report group.
These tags are available for use by AWS services that support AWS CodeBuild report group tags.
" } }, "TestCase": { @@ -1684,15 +1692,15 @@ "TimeOut": { "base": null, "refs": { - "CreateProjectInput$timeoutInMinutes": "How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes.
", + "CreateProjectInput$timeoutInMinutes": "How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes.
", "CreateProjectInput$queuedTimeoutInMinutes": "The number of minutes a build is allowed to be queued before it times out.
", - "Project$timeoutInMinutes": "How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed. The default is 60 minutes.
", + "Project$timeoutInMinutes": "How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before timing out any related build that did not get marked as completed. The default is 60 minutes.
", "Project$queuedTimeoutInMinutes": "The number of minutes a build is allowed to be queued before it times out.
", "StartBuildBatchInput$buildTimeoutInMinutesOverride": "Overrides the build timeout specified in the batch build project.
", "StartBuildBatchInput$queuedTimeoutInMinutesOverride": "The number of minutes a batch build is allowed to be queued before it times out.
", "StartBuildInput$timeoutInMinutesOverride": "The number of build timeout minutes, from 5 to 480 (8 hours), that overrides, for this build only, the latest setting already defined in the build project.
", "StartBuildInput$queuedTimeoutInMinutesOverride": "The number of minutes a build is allowed to be queued before it times out.
", - "UpdateProjectInput$timeoutInMinutes": "The replacement value in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed.
", + "UpdateProjectInput$timeoutInMinutes": "The replacement value in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before timing out any related build that did not get marked as completed.
", "UpdateProjectInput$queuedTimeoutInMinutes": "The number of minutes a build is allowed to be queued before it times out.
" } }, @@ -1756,21 +1764,21 @@ } }, "VpcConfig": { - "base": "Information about the VPC configuration that CodeBuild accesses.
", + "base": "Information about the VPC configuration that AWS CodeBuild accesses.
", "refs": { - "Build$vpcConfig": "If your CodeBuild project accesses resources in an Amazon VPC, you provide this parameter that identifies the VPC ID and the list of security group IDs and subnet IDs. The security groups and subnets must belong to the same VPC. You must provide at least one security group and one subnet ID.
", + "Build$vpcConfig": "If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide this parameter that identifies the VPC ID and the list of security group IDs and subnet IDs. The security groups and subnets must belong to the same VPC. You must provide at least one security group and one subnet ID.
", "BuildBatch$vpcConfig": null, - "CreateProjectInput$vpcConfig": "VpcConfig enables CodeBuild to access resources in an Amazon VPC.
", - "Project$vpcConfig": "Information about the VPC configuration that CodeBuild accesses.
", - "UpdateProjectInput$vpcConfig": "VpcConfig enables CodeBuild to access resources in an Amazon VPC.
" + "CreateProjectInput$vpcConfig": "VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC.
", + "Project$vpcConfig": "Information about the VPC configuration that AWS CodeBuild accesses.
", + "UpdateProjectInput$vpcConfig": "VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC.
" } }, "Webhook": { - "base": "Information about a webhook that connects repository events to a build project in CodeBuild.
", + "base": "Information about a webhook that connects repository events to a build project in AWS CodeBuild.
", "refs": { - "CreateWebhookOutput$webhook": "Information about a webhook that connects repository events to a build project in CodeBuild.
", - "Project$webhook": "Information about a webhook that connects repository events to a build project in CodeBuild.
", - "UpdateWebhookOutput$webhook": "Information about a repository's webhook that is associated with a project in CodeBuild.
" + "CreateWebhookOutput$webhook": "Information about a webhook that connects repository events to a build project in AWS CodeBuild.
", + "Project$webhook": "Information about a webhook that connects repository events to a build project in AWS CodeBuild.
", + "UpdateWebhookOutput$webhook": "Information about a repository's webhook that is associated with a project in AWS CodeBuild.
" } }, "WebhookBuildType": { @@ -1801,13 +1809,13 @@ "BuildBatch$debugSessionEnabled": "Specifies if session debugging is enabled for this batch build. For more information, see Viewing a running build in Session Manager. Batch session debugging is not supported for matrix batch builds.
", "CreateProjectInput$badgeEnabled": "Set this to true to generate a publicly accessible URL for your project's build badge.
", "DebugSession$sessionEnabled": "Specifies if session debugging is enabled for this build.
", - "GitSubmodulesConfig$fetchSubmodules": "Set to true to fetch Git submodules for your CodeBuild build project.
", + "GitSubmodulesConfig$fetchSubmodules": "Set to true to fetch Git submodules for your AWS CodeBuild build project.
", "ImportSourceCredentialsInput$shouldOverwrite": " Set to false
to prevent overwriting the repository source credentials. Set to true
to overwrite the repository source credentials. The default value is true
.
If this flag is set, a name specified in the buildspec file overrides the artifact name. The name specified in a buildspec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.
", "ProjectArtifacts$encryptionDisabled": "Set to true if you do not want your output artifacts encrypted. This option is valid only if your artifacts type is Amazon S3. If this is set with another artifacts type, an invalidInputException is thrown.
", "ProjectBuildBatchConfig$combineArtifacts": "Specifies if the build artifacts for the batch build should be combined into a single artifact location.
", "ProjectEnvironment$privilegedMode": "Enables running the Docker daemon inside a Docker container. Set to true only if the build project is used to build Docker images. Otherwise, a build that attempts to interact with the Docker daemon fails. The default setting is false
.
You can initialize the Docker daemon during the install phase of your build by adding one of the following sets of commands to the install phase of your buildspec file:
If the operating system's base image is Ubuntu Linux:
- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay&
- timeout 15 sh -c \"until docker info; do echo .; sleep 1; done\"
If the operating system's base image is Alpine Linux and the previous command does not work, add the -t
argument to timeout
:
- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay&
- timeout -t 15 sh -c \"until docker info; do echo .; sleep 1; done\"
Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.
The status of a build triggered by a webhook is always reported to your source provider.
Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the AWS CodeBuild User Guide.
The status of a build triggered by a webhook is always reported to your source provider.
Enable this flag to ignore SSL warnings while connecting to the project source code.
", "Report$truncated": "A boolean that specifies if this report run is truncated. The list of test cases is truncated after the maximum number of test cases is reached.
", "S3LogsConfig$encryptionDisabled": "Set to true if you do not want your S3 build log output encrypted. By default S3 build logs are encrypted.
", @@ -1817,7 +1825,7 @@ "StartBuildBatchInput$privilegedModeOverride": "Enable this flag to override privileged mode in the batch build project.
", "StartBuildBatchInput$debugSessionEnabled": "Specifies if session debugging is enabled for this batch build. For more information, see Viewing a running build in Session Manager. Batch session debugging is not supported for matrix batch builds.
", "StartBuildInput$insecureSslOverride": "Enable this flag to override the insecure SSL setting that is specified in the build project. The insecure SSL setting determines whether to ignore SSL warnings while connecting to the project source code. This override applies only if the build's source is GitHub Enterprise.
", - "StartBuildInput$reportBuildStatusOverride": " Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.
The status of a build triggered by a webhook is always reported to your source provider.
Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the AWS CodeBuild User Guide.
The status of a build triggered by a webhook is always reported to your source provider.
Enable this flag to override privileged mode in the build project.
", "StartBuildInput$debugSessionEnabled": "Specifies if session debugging is enabled for this build. For more information, see Viewing a running build in Session Manager.
", "UpdateProjectInput$badgeEnabled": "Set this to true to generate a publicly accessible URL for your project's build badge.
", @@ -1828,7 +1836,7 @@ "base": null, "refs": { "BatchRestrictions$maximumBuildsAllowed": "Specifies the maximum number of builds allowed.
", - "Build$timeoutInMinutes": "How long, in minutes, for CodeBuild to wait before timing out this build if it does not get marked as completed.
", + "Build$timeoutInMinutes": "How long, in minutes, for AWS CodeBuild to wait before timing out this build if it does not get marked as completed.
", "Build$queuedTimeoutInMinutes": "The number of minutes a build is allowed to be queued before it times out.
", "BuildBatch$buildTimeoutInMinutes": "Specifies the maximum amount of time, in minutes, that the build in a batch must be completed in.
", "BuildBatch$queuedTimeoutInMinutes": "Specifies the amount of time, in minutes, that the batch build is allowed to be queued before it times out.
", diff --git a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json index 975c444230..ce270e837b 100644 --- a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json +++ b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json @@ -901,7 +901,7 @@ "LoadBalancerAttributeKey": { "base": null, "refs": { - "LoadBalancerAttribute$Key": "The name of the attribute.
The following attribute is supported by all load balancers:
deletion_protection.enabled
- Indicates whether deletion protection is enabled. The value is true
or false
. The default is false
.
The following attributes are supported by both Application Load Balancers and Network Load Balancers:
access_logs.s3.enabled
- Indicates whether access logs are enabled. The value is true
or false
. The default is false
.
access_logs.s3.bucket
- The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
access_logs.s3.prefix
- The prefix for the location in the S3 bucket for the access logs.
The following attributes are supported by only Application Load Balancers:
idle_timeout.timeout_seconds
- The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.
routing.http.desync_mitigation_mode
- Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor
, defensive
, and strictest
. The default is defensive
.
routing.http.drop_invalid_header_fields.enabled
- Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true
) or routed to targets (false
). The default is false
.
routing.http2.enabled
- Indicates whether HTTP/2 is enabled. The value is true
or false
. The default is true
. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.
waf.fail_open.enabled
- Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The value is true
or false
. The default is false
.
The following attribute is supported by Network Load Balancers and Gateway Load Balancers:
load_balancing.cross_zone.enabled
- Indicates whether cross-zone load balancing is enabled. The value is true
or false
. The default is false
.
The name of the attribute.
The following attribute is supported by all load balancers:
deletion_protection.enabled
- Indicates whether deletion protection is enabled. The value is true
or false
. The default is false
.
The following attributes are supported by both Application Load Balancers and Network Load Balancers:
access_logs.s3.enabled
- Indicates whether access logs are enabled. The value is true
or false
. The default is false
.
access_logs.s3.bucket
- The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
access_logs.s3.prefix
- The prefix for the location in the S3 bucket for the access logs.
The following attributes are supported by only Application Load Balancers:
idle_timeout.timeout_seconds
- The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.
routing.http.desync_mitigation_mode
- Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor
, defensive
, and strictest
. The default is defensive
.
routing.http.drop_invalid_header_fields.enabled
- Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true
) or routed to targets (false
). The default is false
.
Indicates whether the two headers (x-amzn-tls-version
and x-amzn-tls-cipher-suite
), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version
header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite
header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true
and false
. The default is false
.
routing.http2.enabled
- Indicates whether HTTP/2 is enabled. The value is true
or false
. The default is true
. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.
waf.fail_open.enabled
- Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The value is true
or false
. The default is false
.
The following attribute is supported by Network Load Balancers and Gateway Load Balancers:
load_balancing.cross_zone.enabled
- Indicates whether cross-zone load balancing is enabled. The value is true
or false
. The default is false
.
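Since every attribute is addressed by its dotted string key, setting one is uniform across load balancer types. A sketch with this SDK's elbv2 client; the ARN is a placeholder:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	// Attributes are untyped string key/value pairs; the keys are the
	// dotted names listed above.
	_, err := svc.ModifyLoadBalancerAttributes(&elbv2.ModifyLoadBalancerAttributesInput{
		LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-alb/50dc6c495c0c9188"), // placeholder ARN
		Attributes: []*elbv2.LoadBalancerAttribute{
			{Key: aws.String("idle_timeout.timeout_seconds"), Value: aws.String("120")},
			{Key: aws.String("routing.http.drop_invalid_header_fields.enabled"), Value: aws.String("true")},
		},
	})
	if err != nil {
		panic(err)
	}
}
```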
The protocol the load balancer uses when performing health checks on targets. For Application Load Balancers, the default is HTTP. For Network Load Balancers and Gateway Load Balancers, the default is TCP. The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.
", "Listener$Protocol": "The protocol for connections from clients to the load balancer.
", "ModifyListenerInput$Protocol": "The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols. You can’t change the protocol to UDP or TCP_UDP if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.
", - "ModifyTargetGroupInput$HealthCheckProtocol": "The protocol the load balancer uses when performing health checks on targets. For Application Load Balancers, the default is HTTP. For Network Load Balancers and Gateway Load Balancers, the default is TCP. The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS. It is supported for health checks only if the protocol of the target group is TCP, TLS, UDP, or TCP_UDP. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.
With Network Load Balancers, you can't modify this setting.
", + "ModifyTargetGroupInput$HealthCheckProtocol": "The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported for health checks only if the protocol of the target group is TCP, TLS, UDP, or TCP_UDP. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.
With Network Load Balancers, you can't modify this setting.
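A sketch of changing the health check protocol on an Application Load Balancer target group (not permitted for Network Load Balancers, per the note above); the ARN and path are placeholders:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	// Switch an ALB target group's health check to HTTPS.
	_, err := svc.ModifyTargetGroup(&elbv2.ModifyTargetGroupInput{
		TargetGroupArn:      aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg/73e2d6bc24d8a067"), // placeholder ARN
		HealthCheckProtocol: aws.String("HTTPS"),
		HealthCheckPath:     aws.String("/healthz"), // placeholder path
	})
	if err != nil {
		panic(err)
	}
}
```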
", "TargetGroup$Protocol": "The protocol to use for routing traffic to the targets.
", "TargetGroup$HealthCheckProtocol": "The protocol to use to connect with the target. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.
" } diff --git a/models/apis/elasticmapreduce/2009-03-31/api-2.json b/models/apis/elasticmapreduce/2009-03-31/api-2.json index c1e0aeb609..5ef0dd8698 100644 --- a/models/apis/elasticmapreduce/2009-03-31/api-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/api-2.json @@ -190,6 +190,19 @@ {"shape":"InvalidRequestException"} ] }, + "DescribeReleaseLabel":{ + "name":"DescribeReleaseLabel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReleaseLabelInput"}, + "output":{"shape":"DescribeReleaseLabelOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, "DescribeSecurityConfiguration":{ "name":"DescribeSecurityConfiguration", "http":{ @@ -342,6 +355,19 @@ {"shape":"InvalidRequestException"} ] }, + "ListReleaseLabels":{ + "name":"ListReleaseLabels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListReleaseLabelsInput"}, + "output":{"shape":"ListReleaseLabelsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, "ListSecurityConfigurations":{ "name":"ListSecurityConfigurations", "http":{ @@ -1160,6 +1186,22 @@ "NotebookExecution":{"shape":"NotebookExecution"} } }, + "DescribeReleaseLabelInput":{ + "type":"structure", + "members":{ + "ReleaseLabel":{"shape":"String"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"MaxResultsNumber"} + } + }, + "DescribeReleaseLabelOutput":{ + "type":"structure", + "members":{ + "ReleaseLabel":{"shape":"String"}, + "Applications":{"shape":"SimplifiedApplicationList"}, + "NextToken":{"shape":"String"} + } + }, "DescribeSecurityConfigurationInput":{ "type":"structure", "required":["Name"], @@ -2019,6 +2061,21 @@ "Marker":{"shape":"Marker"} } }, + "ListReleaseLabelsInput":{ + "type":"structure", + "members":{ + "Filters":{"shape":"ReleaseLabelFilter"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"MaxResultsNumber"} + } + }, + "ListReleaseLabelsOutput":{ + "type":"structure", + "members":{ + "ReleaseLabels":{"shape":"StringList"}, + "NextToken":{"shape":"String"} + } + }, "ListSecurityConfigurationsInput":{ "type":"structure", "members":{ @@ -2092,6 +2149,11 @@ "SPOT" ] }, + "MaxResultsNumber":{ + "type":"integer", + "max":100, + "min":1 + }, "MetricDimension":{ "type":"structure", "members":{ @@ -2322,6 +2384,13 @@ "members":{ } }, + "ReleaseLabelFilter":{ + "type":"structure", + "members":{ + "Prefix":{"shape":"String"}, + "Application":{"shape":"String"} + } + }, "RemoveAutoScalingPolicyInput":{ "type":"structure", "required":[ @@ -2556,6 +2625,17 @@ "CoolDown":{"shape":"Integer"} } }, + "SimplifiedApplication":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Version":{"shape":"String"} + } + }, + "SimplifiedApplicationList":{ + "type":"list", + "member":{"shape":"SimplifiedApplication"} + }, "SpotProvisioningAllocationStrategy":{ "type":"string", "enum":["capacity-optimized"] diff --git a/models/apis/elasticmapreduce/2009-03-31/docs-2.json b/models/apis/elasticmapreduce/2009-03-31/docs-2.json index 50cd4bbd6f..94e59f3418 100644 --- a/models/apis/elasticmapreduce/2009-03-31/docs-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/docs-2.json @@ -1,12 +1,12 @@ { "version": "2.0", - "service": "Amazon EMR is a web service that makes it easier to process large amounts of data efficiently. 
Amazon EMR uses Hadoop processing combined with several AWS services to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehouse management.
", + "service": "Amazon EMR is a web service that makes it easier to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several Amazon Web Services services to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehouse management.
", "operations": { "AddInstanceFleet": "Adds an instance fleet to a running cluster.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x.
Adds one or more instance groups to a running cluster.
", "AddJobFlowSteps": "AddJobFlowSteps adds new steps to a running cluster. A maximum of 256 steps are allowed in each job flow.
If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using SSH to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in the Amazon EMR Management Guide.
A step specifies the location of a JAR file stored either on the master node of the cluster or in Amazon S3. Each step is performed by the main function of the main class of the JAR file. The main class can be specified either in the manifest of the JAR or by using the MainFunction parameter of the step.
Amazon EMR executes each step in the order listed. For a step to be considered complete, the main function must exit with a zero exit code and all Hadoop jobs started while the step was running must have completed and run successfully.
You can only add steps to a cluster that is in one of the following states: STARTING, BOOTSTRAPPING, RUNNING, or WAITING.
", "AddTags": "Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.
", - "CancelSteps": "Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee that a step will be canceled, even if the request is successfully submitted. You can only cancel steps that are in a PENDING
state.
Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee that a step will be canceled, even if the request is successfully submitted. When you use Amazon EMR versions 5.28.0 and later, you can cancel steps that are in a PENDING
or RUNNING
state. In earlier versions of Amazon EMR, you can only cancel steps that are in a PENDING
state.
Creates a security configuration, which is stored in the service and can be specified when a cluster is created.
", "CreateStudio": "Creates a new Amazon EMR Studio.
", "CreateStudioSessionMapping": "Maps a user or group to the Amazon EMR Studio specified by StudioId
, and applies a session policy to refine Studio permissions for that user or group.
Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on.
", "DescribeJobFlows": "This API is no longer supported and will eventually be removed. We recommend you use ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions instead.
DescribeJobFlows returns a list of job flows that match all of the supplied parameters. The parameters can include a list of job flow IDs, job flow states, and restrictions on job flow creation date and time.
Regardless of supplied parameters, only job flows created within the last two months are returned.
If no parameters are supplied, then job flows matching either of the following criteria are returned:
Job flows created and completed in the last two weeks
Job flows created within the last two months that are in one of the following states: RUNNING
, WAITING
, SHUTTING_DOWN
, STARTING
Amazon EMR can return a maximum of 512 job flow descriptions.
", "DescribeNotebookExecution": "Provides details of a notebook execution.
", + "DescribeReleaseLabel": "Provides EMR release label details, such as releases available the region where the API request is run, and the available applications for a specific EMR release label. Can also list EMR release versions that support a specified version of Spark.
", "DescribeSecurityConfiguration": "Provides the details of a security configuration by returning the configuration JSON.
", "DescribeStep": "Provides more detail about the cluster step.
", "DescribeStudio": "Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio access URL, and so on.
", - "GetBlockPublicAccessConfiguration": "Returns the Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.
", + "GetBlockPublicAccessConfiguration": "Returns the Amazon EMR block public access configuration for your account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.
", "GetManagedScalingPolicy": "Fetches the attached managed scaling policy for an Amazon EMR cluster.
", "GetStudioSessionMapping": "Fetches mapping details for the specified Amazon EMR Studio and identity (user or group).
", "ListBootstrapActions": "Provides information about the bootstrap actions associated with a cluster.
", - "ListClusters": "Provides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.
", + "ListClusters": "Provides the status of all clusters visible to this account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters in unsorted order per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.
", "ListInstanceFleets": "Lists all available details about the instance fleets in a cluster.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
Provides all available details about the instance groups in a cluster.
", "ListInstances": "Provides information for all active EC2 instances and EC2 instances terminated in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.
", "ListNotebookExecutions": "Provides summaries of all notebook executions. You can filter the list based on multiple criteria such as status, time range, and editor id. Returns a maximum of 50 notebook executions and a marker to track the paging of a longer notebook execution list across multiple ListNotebookExecution
calls.
Retrieves the Amazon EMR release labels available in the region where the API is called.
", "ListSecurityConfigurations": "Lists all the security configurations visible to this account, providing their creation dates and times, and their names. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListSecurityConfigurations calls.
", - "ListSteps": "Provides a list of steps for the cluster in reverse order unless you specify stepIds
with the request or filter by StepStates
. You can specify a maximum of 10 stepIDs
.
Provides a list of steps for the cluster in reverse order unless you specify stepIds
with the request or filter by StepStates
. You can specify a maximum of 10 stepIDs
. The CLI automatically paginates results to return a list greater than 50 steps. To return more than 50 steps using the CLI, specify a Marker
, which is a pagination token that indicates the next set of steps to retrieve.
Returns a list of all user or group session mappings for the Amazon EMR Studio specified by StudioId
.
Returns a list of all Amazon EMR Studios associated with the AWS account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.
", + "ListStudios": "Returns a list of all Amazon EMR Studios associated with the account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.
", "ModifyCluster": "Modifies the number of steps that can be executed concurrently for the cluster specified using ClusterID.
", "ModifyInstanceFleet": "Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID. The call either succeeds or fails atomically.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group. The input parameters include the new target instance count for the group and the instance group ID. The call will either succeed or fail atomically.
", "PutAutoScalingPolicy": "Creates or updates an automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric.
", - "PutBlockPublicAccessConfiguration": "Creates or updates an Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.
", + "PutBlockPublicAccessConfiguration": "Creates or updates an Amazon EMR block public access configuration for your account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.
", "PutManagedScalingPolicy": "Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed scaling policy defines the limits for resources, such as EC2 instances that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration.
", "RemoveAutoScalingPolicy": "Removes an automatic scaling policy from a specified instance group within an EMR cluster.
", "RemoveManagedScalingPolicy": "Removes a managed scaling policy from a specified EMR cluster.
", "RemoveTags": "Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.
The following example removes the stack tag with value Prod from a cluster:
", "RunJobFlow": "RunJobFlow creates and starts running a new cluster (job flow). The cluster runs the steps specified. After the steps complete, the cluster stops and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig KeepJobFlowAliveWhenNoSteps
parameter is set to TRUE
, the cluster transitions to the WAITING state rather than shutting down after the steps have completed.
For additional protection, you can set the JobFlowInstancesConfig TerminationProtected
parameter to TRUE
to lock the cluster and prevent it from being terminated by API call, user intervention, or in the event of a job flow error.
A maximum of 256 steps are allowed in each job flow.
If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in the Amazon EMR Management Guide.
For long running clusters, we recommend that you periodically store your results.
The instance fleets configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request can contain InstanceFleets parameters or InstanceGroups parameters, but not both.
SetTerminationProtection locks a cluster (job flow) so the EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection
on a cluster is similar to calling the Amazon EC2 DisableAPITermination
API on all EC2 instances in a cluster.
SetTerminationProtection
is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.
To terminate a cluster that has been locked by setting SetTerminationProtection
to true
, you must first unlock the job flow by a subsequent call to SetTerminationProtection
in which you set the value to false
.
For more information, see Managing Cluster Termination in the Amazon EMR Management Guide.
", - "SetVisibleToAllUsers": "Sets the Cluster$VisibleToAllUsers value, which determines whether the cluster is visible to all IAM users of the AWS account associated with the cluster. Only the IAM user who created the cluster or the AWS account root user can call this action. The default value, true
, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If set to false
, only the IAM user that created the cluster can perform actions. This action works on running clusters. You can override the default true
setting when you create a cluster by using the VisibleToAllUsers
parameter with RunJobFlow
.
Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When true
, IAM principals in the account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the account root user can perform EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.
This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.
For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.
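A sketch of flipping the setting on a running cluster with this SDK; the cluster ID is a placeholder:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	svc := emr.New(session.Must(session.NewSession()))

	// Allow IAM principals in the account (with matching IAM policies)
	// to act on this running cluster.
	_, err := svc.SetVisibleToAllUsers(&emr.SetVisibleToAllUsersInput{
		JobFlowIds:        []*string{aws.String("j-1ABCDEFGHIJKL")}, // placeholder cluster ID
		VisibleToAllUsers: aws.Bool(true),
	})
	if err != nil {
		panic(err)
	}
}
```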
", "StartNotebookExecution": "Starts a notebook execution.
", "StopNotebookExecution": "Stops a notebook execution.
", "TerminateJobFlows": "TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the cluster is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was created.
The maximum number of clusters allowed is 10. The call to TerminateJobFlows
is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5 minutes for the cluster to completely terminate and release allocated resources, such as Amazon EC2 instances.
The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.
", - "StepConfig$ActionOnFailure": "The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.
", - "StepSummary$ActionOnFailure": "The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility. We recommend using TERMINATE_CLUSTER instead.
" + "Step$ActionOnFailure": "The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER
, CANCEL_AND_WAIT
, and CONTINUE
. TERMINATE_JOB_FLOW
is provided for backward compatibility. We recommend using TERMINATE_CLUSTER
instead.
If a cluster's StepConcurrencyLevel
is greater than 1
, do not use AddJobFlowSteps
to submit a step with this parameter set to CANCEL_AND_WAIT
or TERMINATE_CLUSTER
. The step is not submitted and the action fails with a message that the ActionOnFailure
setting is not valid.
If you change a cluster's StepConcurrencyLevel
to be greater than 1 while a step is running, the ActionOnFailure
parameter may not behave as you expect. In this case, for a step that fails with this parameter set to CANCEL_AND_WAIT
, pending steps and the running step are not canceled; for a step that fails with this parameter set to TERMINATE_CLUSTER
, the cluster does not terminate.
The action to take when the step fails. Use one of the following values:
TERMINATE_CLUSTER
- Shuts down the cluster.
CANCEL_AND_WAIT
- Cancels any pending steps and returns the cluster to the WAITING
state.
CONTINUE
- Continues to the next step in the queue.
TERMINATE_JOB_FLOW
- Shuts down the cluster. TERMINATE_JOB_FLOW
is provided for backward compatibility. We recommend using TERMINATE_CLUSTER
instead.
If a cluster's StepConcurrencyLevel
is greater than 1
, do not use AddJobFlowSteps
to submit a step with this parameter set to CANCEL_AND_WAIT
or TERMINATE_CLUSTER
. The step is not submitted and the action fails with a message that the ActionOnFailure
setting is not valid.
If you change a cluster's StepConcurrencyLevel
to be greater than 1 while a step is running, the ActionOnFailure
parameter may not behave as you expect. In this case, for a step that fails with this parameter set to CANCEL_AND_WAIT
, pending steps and the running step are not canceled; for a step that fails with this parameter set to TERMINATE_CLUSTER
, the cluster does not terminate.
The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility.
" } }, "AddInstanceFleetInput": { @@ -183,9 +185,9 @@ } }, "BlockPublicAccessConfigurationMetadata": { - "base": "Properties that describe the AWS principal that created the BlockPublicAccessConfiguration
using the PutBlockPublicAccessConfiguration
action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.
Properties that describe the Amazon Web Services principal that created the BlockPublicAccessConfiguration
using the PutBlockPublicAccessConfiguration
action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.
Properties that describe the AWS principal that created the BlockPublicAccessConfiguration
using the PutBlockPublicAccessConfiguration
action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.
Properties that describe the Amazon Web Services principal that created the BlockPublicAccessConfiguration
using the PutBlockPublicAccessConfiguration
action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.
Indicates whether Amazon EMR block public access is enabled (true
) or disabled (false
). By default, the value is false
for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is true
.
Specifies whether the cluster should terminate after completing all steps.
", "Cluster$TerminationProtected": "Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.
", - "Cluster$VisibleToAllUsers": "Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true
, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false
, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true
when you create a cluster by using the VisibleToAllUsers
parameter of the RunJobFlow
action.
Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true
, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false
, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true
when you create a cluster by using the VisibleToAllUsers
parameter of the RunJobFlow
action.
Specifies whether the cluster should remain available after completing all steps.
", + "Cluster$VisibleToAllUsers": "Indicates whether the cluster is visible to IAM principals in the account associated with the cluster. When true
, IAM principals in the account can perform EMR cluster actions on the cluster that their IAM policies allow. When false
, only the IAM principal that created the cluster and the account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
The default value is false
if a value is not provided when creating a cluster using the EMR API RunJobFlow command or the CLI create-cluster command. The default value is true
when a cluster is created using the Management Console. IAM principals that are allowed to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.
Indicates whether the cluster is visible to IAM principals in the account associated with the cluster. When true
, IAM principals in the account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
The default value is false
if a value is not provided when creating a cluster using the EMR API RunJobFlow command or the CLI create-cluster command. The default value is true
when a cluster is created using the Management Console. IAM principals that are authorized to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.
Specifies whether the cluster should remain available after completing all steps. Defaults to true
. For more information about configuring cluster termination, see Control Cluster Termination in the EMR Management Guide.
Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.
", "JobFlowInstancesDetail$KeepJobFlowAliveWhenNoSteps": "Specifies whether the cluster should remain available after completing all steps.
", "JobFlowInstancesDetail$TerminationProtected": "Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.
", - "RunJobFlowInput$VisibleToAllUsers": "A value of true
indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false
indicates that only the IAM user who created the cluster can perform actions.
Set this value to true
so that IAM principals in the account associated with the cluster can perform EMR actions on the cluster that their IAM policies allow. This value defaults to false
for clusters created using the EMR API or the CLI create-cluster command.
When set to false
, only the IAM principal that created the cluster and the account root user can perform EMR actions for the cluster, regardless of the IAM permissions policies attached to other IAM principals. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.
A Boolean that indicates whether to protect the cluster and prevent the Amazon EC2 instances in the cluster from shutting down due to API calls, user intervention, or job-flow error.
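Pulling these Booleans together, a sketch of launching a long-running, termination-protected, account-visible cluster; the names, roles, and instance types are placeholders:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	svc := emr.New(session.Must(session.NewSession()))

	_, err := svc.RunJobFlow(&emr.RunJobFlowInput{
		Name:              aws.String("example-cluster"),
		ReleaseLabel:      aws.String("emr-5.33.0"),          // example release label
		ServiceRole:       aws.String("EMR_DefaultRole"),     // placeholder roles
		JobFlowRole:       aws.String("EMR_EC2_DefaultRole"),
		VisibleToAllUsers: aws.Bool(true),
		Instances: &emr.JobFlowInstancesConfig{
			InstanceCount:               aws.Int64(3),
			MasterInstanceType:          aws.String("m5.xlarge"),
			SlaveInstanceType:           aws.String("m5.xlarge"),
			KeepJobFlowAliveWhenNoSteps: aws.Bool(true), // stay in WAITING after steps finish
			TerminationProtected:        aws.Bool(true), // must be unset before TerminateJobFlows succeeds
		},
	})
	if err != nil {
		panic(err)
	}
}
```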
", - "SetVisibleToAllUsersInput$VisibleToAllUsers": "A value of true
indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false
indicates that only the IAM user who created the cluster can perform actions.
A value of true
indicates that an IAM principal in the account can perform EMR actions on the cluster that the IAM policies attached to the principal allow. A value of false
indicates that only the IAM principal that created the cluster and the Amazon Web Services root user can perform EMR actions on the cluster.
The cluster state filters to apply when listing clusters.
" + "ListClustersInput$ClusterStates": "The cluster state filters to apply when listing clusters. Clusters that change state while this action runs may be not be returned as expected in the list of clusters.
" } }, "ClusterStatus": { @@ -391,7 +393,7 @@ "refs": { "Cluster$Configurations": "Applies only to Amazon EMR releases 4.x and later. The list of Configurations supplied to the EMR cluster.
", "Configuration$Configurations": "A list of additional configurations to apply within a configuration object.
", - "InstanceGroup$Configurations": "Amazon EMR releases 4.x or later.
The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).
", + "InstanceGroup$Configurations": "Amazon EMR releases 4.x or later.
The list of configurations supplied for an Amazon EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).
", "InstanceGroup$LastSuccessfullyAppliedConfigurations": "A list of configurations that were successfully applied for an instance group last time.
", "InstanceGroupConfig$Configurations": "Amazon EMR releases 4.x or later.
The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).
", "InstanceGroupModifyConfig$Configurations": "A list of new or modified configurations to apply for an instance group.
", @@ -525,6 +527,16 @@ "refs": { } }, + "DescribeReleaseLabelInput": { + "base": null, + "refs": { + } + }, + "DescribeReleaseLabelOutput": { + "base": null, + "refs": { + } + }, "DescribeSecurityConfigurationInput": { "base": null, "refs": { @@ -590,14 +602,14 @@ "base": null, "refs": { "InstanceGroup$EbsBlockDevices": "The EBS block devices that are mapped to this instance group.
", - "InstanceTypeSpecification$EbsBlockDevices": "The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to each instance as defined by InstanceType
.
The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by InstanceType
.
The Amazon EBS configuration of a cluster instance.
", "refs": { "InstanceGroupConfig$EbsConfiguration": "EBS configurations that will be attached to each EC2 instance in the instance group.
", - "InstanceTypeConfig$EbsConfiguration": "The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to each instance as defined by InstanceType
.
The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by InstanceType
.
The list of EBS volumes that are attached to this instance.
" + "Instance$EbsVolumes": "The list of Amazon EBS volumes that are attached to this instance.
" } }, "Ec2InstanceAttributes": { @@ -755,7 +767,7 @@ "InstanceFleetModifyConfig": { "base": "Configuration parameters for an instance fleet modification request.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
The unique identifier of the instance fleet.
" + "ModifyInstanceFleetInput$InstanceFleet": "The configuration parameters of the instance fleet.
" } }, "InstanceFleetProvisioningSpecifications": { @@ -994,7 +1006,7 @@ } }, "InstanceTypeConfig": { - "base": "An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. There can be a maximum of five instance type configurations in a fleet.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. When you use an allocation strategy, you can include a maximum of 30 instance type configurations for a fleet. For more information about how to use an allocation strategy, see Configure Instance Fleets. Without an allocation strategy, you may specify a maximum of five instance type configurations for a fleet.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
The specification for the instance types that comprise an instance fleet. Up to five unique instance specifications may be defined for each instance fleet.
" + "InstanceFleet$InstanceTypeSpecifications": "An array of specifications for the instance types that comprise an instance fleet.
" } }, "Integer": { @@ -1041,7 +1053,7 @@ "JobFlowInstancesConfig$InstanceCount": "The number of EC2 instances in the cluster.
", "JobFlowInstancesDetail$InstanceCount": "The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and core and task node. If the value is greater than 1, one instance is the master node and all others are core and task nodes.
", "JobFlowInstancesDetail$NormalizedInstanceHours": "An approximation of the cost of the cluster, represented in m1.small/hours. This value is increased one time for every hour that an m1.small instance runs. Larger instances are weighted more heavily, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being increased incrementally four times. This result is only an approximation and does not reflect the actual billing rate.
", - "ModifyClusterInput$StepConcurrencyLevel": "The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps.
", + "ModifyClusterInput$StepConcurrencyLevel": "The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps. We recommend that you do not change this parameter while steps are running or the ActionOnFailure
setting may not behave as expected. For more information see Step$ActionOnFailure.
The number of steps that can be executed concurrently.
", "RunJobFlowInput$EbsRootVolumeSize": "The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.
", "RunJobFlowInput$StepConcurrencyLevel": "Specifies the number of steps that can be executed concurrently. The default value is 1
. The maximum value is 256
.
A pagination token that a subsequent ListNotebookExecutions
can use to determine the next set of results to retrieve.
The pagination token that indicates the set of results to retrieve.
", "ListSecurityConfigurationsOutput$Marker": "A pagination token that indicates the next set of results to retrieve. Include the marker in the next ListSecurityConfiguration call to retrieve the next page of results, if required.
", - "ListStepsInput$Marker": "The pagination token that indicates the next set of results to retrieve.
", - "ListStepsOutput$Marker": "The pagination token that indicates the next set of results to retrieve.
", + "ListStepsInput$Marker": "The maximum number of steps that a single ListSteps
action returns is 50. To return a longer list of steps, use multiple ListSteps
actions along with the Marker
parameter, which is a pagination token that indicates the next set of results to retrieve.
The maximum number of steps that a single ListSteps
action returns is 50. To return a longer list of steps, use multiple ListSteps
actions along with the Marker
parameter, which is a pagination token that indicates the next set of results to retrieve.
The pagination token that indicates the set of results to retrieve.
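Rather than chasing the 50-step Marker by hand as the new text describes, the SDK's generated paginator can do it. A sketch reusing the svc client from the previous snippet (the cluster ID is again a placeholder):

    // Print every step name across pages; the SDK forwards Marker
    // between the 50-item pages documented above.
    err := svc.ListStepsPages(&emr.ListStepsInput{
        ClusterId: aws.String("j-EXAMPLE1234567"), // placeholder
    }, func(page *emr.ListStepsOutput, lastPage bool) bool {
        for _, step := range page.Steps {
            fmt.Println(aws.StringValue(step.Name))
        }
        return true // keep paging until the last page
    })
    if err != nil {
        fmt.Println("ListSteps failed:", err)
    }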
", "ListStudioSessionMappingsOutput$Marker": "The pagination token that indicates the next set of results to retrieve.
", "ListStudiosInput$Marker": "The pagination token that indicates the set of results to retrieve.
", @@ -1281,6 +1303,13 @@ "ScalingAction$Market": "Not available for instance groups. Instance groups use the market type specified for the group.
" } }, + "MaxResultsNumber": { + "base": null, + "refs": { + "DescribeReleaseLabelInput$MaxResults": "Reserved for future use. Currently set to null.
", + "ListReleaseLabelsInput$MaxResults": "Defines the maximum number of release labels to return in a single response. The default is 100
.
A CloudWatch dimension, which is specified using a Key (known as a Name in CloudWatch), Value pair. By default, Amazon EMR uses one dimension whose Key is JobFlowID and Value is a variable representing the cluster ID, which is ${emr.clusterId}. This enables the rule to bootstrap when the cluster ID becomes available.
Details for a notebook execution. The details include information such as the unique ID and status of the notebook execution.
", "refs": { "NotebookExecutionSummaryList$member": null } @@ -1368,7 +1397,7 @@ "OnDemandCapacityReservationUsageStrategy": { "base": null, "refs": { - "OnDemandCapacityReservationOptions$UsageStrategy": "Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.
If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price). If you do not specify a value, the fleet fulfils the On-Demand capacity according to the chosen On-Demand allocation strategy.
" + "OnDemandCapacityReservationOptions$UsageStrategy": "Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity. If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price). If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.
" } }, "OnDemandProvisioningAllocationStrategy": { @@ -1465,6 +1494,12 @@ "refs": { } }, + "ReleaseLabelFilter": { + "base": "The release label filters by application or version prefix.
", + "refs": { + "ListReleaseLabelsInput$Filters": "Filters the results of the request. Prefix
specifies the prefix of release labels to return. Application
specifies the application (with/without version) of release labels to return.
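A minimal sketch of calling the new ListReleaseLabels API with this filter shape through the Go client built earlier (the prefix value is only an example):

    // List release labels whose names start with "emr-5".
    out, err := svc.ListReleaseLabels(&emr.ListReleaseLabelsInput{
        Filters: &emr.ReleaseLabelFilter{
            Prefix: aws.String("emr-5"), // example prefix
        },
    })
    if err != nil {
        fmt.Println("ListReleaseLabels failed:", err)
        return
    }
    for _, label := range out.ReleaseLabels {
        fmt.Println(aws.StringValue(label))
    }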
The type of adjustment the automatic scaling activity makes when triggered, and the periodicity of the adjustment.
" } }, + "SimplifiedApplication": { + "base": "The returned release label application names or versions.
", + "refs": { + "SimplifiedApplicationList$member": null + } + }, + "SimplifiedApplicationList": { + "base": null, + "refs": { + "DescribeReleaseLabelOutput$Applications": "The list of applications available for the target release label. Name
is the name of the application. Version
is the concise version of the application.
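To see those Name/Version pairs for one release, the companion DescribeReleaseLabel call can be used. A sketch, with an example release label:

    // Print each application bundled with a release label.
    desc, err := svc.DescribeReleaseLabel(&emr.DescribeReleaseLabelInput{
        ReleaseLabel: aws.String("emr-5.33.0"), // example label
    })
    if err != nil {
        fmt.Println("DescribeReleaseLabel failed:", err)
        return
    }
    for _, app := range desc.Applications {
        fmt.Printf("%s %s\n", aws.StringValue(app.Name), aws.StringValue(app.Version))
    }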
Specification of a cluster (job flow) step.
", + "base": "Specification for a cluster (job flow) step.
", "refs": { "StepConfigList$member": null, "StepDetail$StepConfig": "The step configuration.
" @@ -1791,11 +1838,11 @@ "CloudWatchAlarmDefinition$Namespace": "The namespace for the CloudWatch metric. The default is AWS/ElasticMapReduce
.
The name of the cluster.
", "Cluster$LogUri": "The path to the Amazon S3 location where logs for this cluster are stored.
", - "Cluster$LogEncryptionKmsKeyId": "The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.
", + "Cluster$LogEncryptionKmsKeyId": "The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.
", "Cluster$RequestedAmiVersion": "The AMI version requested for this cluster.
", "Cluster$RunningAmiVersion": "The AMI version running on this cluster.
", "Cluster$ReleaseLabel": "The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x
, where x.x.x is an Amazon EMR release version such as emr-5.14.0
. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use AmiVersion
.
The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
", + "Cluster$ServiceRole": "The IAM role that will be assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.
", "Cluster$MasterPublicDnsName": "The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.
", "ClusterStateChangeReason$Message": "The descriptive message for the state change reason.
", "ClusterSummary$Name": "The name of the cluster.
", @@ -1803,6 +1850,10 @@ "Command$ScriptPath": "The Amazon S3 location of the command script.
", "Configuration$Classification": "The classification within a configuration.
", "CreateSecurityConfigurationInput$SecurityConfiguration": "The security configuration details in JSON format. For JSON parameters and examples, see Use Security Configurations to Set Up Cluster Security in the Amazon EMR Management Guide.
", + "DescribeReleaseLabelInput$ReleaseLabel": "The target release label to be described.
", + "DescribeReleaseLabelInput$NextToken": "The pagination token. Reserved for future use. Currently set to null.
", + "DescribeReleaseLabelOutput$ReleaseLabel": "The target release label described in the response.
", + "DescribeReleaseLabelOutput$NextToken": "The pagination token. Reserved for future use. Currently set to null.
", "DescribeSecurityConfigurationOutput$SecurityConfiguration": "The security configuration details in JSON format.
", "EbsBlockDevice$Device": "The device name that is exposed to the instance, such as /dev/sdh.
", "EbsVolume$Device": "The device name that is exposed to the instance, such as /dev/sdh.
", @@ -1829,11 +1880,17 @@ "InstanceGroup$BidPrice": "If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice
to set the amount equal to the On-Demand price, or specify an amount in USD.
The status change reason description.
", "InstanceStateChangeReason$Message": "The status change reason description.
", + "ListReleaseLabelsInput$NextToken": "Specifies the next page of results. If NextToken
is not specified, which is usually the case for the first request of ListReleaseLabels, the first page of results are determined by other filtering parameters or by the latest version. The ListReleaseLabels
request fails if the identity (AWS AccountID) and all filtering parameters are different from the original request, or if the NextToken
is expired or tampered with.
Used to paginate the next page of results if specified in the next ListReleaseLabels
request.
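Because this patch also registers ListReleaseLabels in paginators-1.json (see the hunk later in the diff), the generated ListReleaseLabelsPages helper handles this NextToken exchange automatically. A sketch:

    // Walk every page of release labels without touching NextToken.
    err := svc.ListReleaseLabelsPages(&emr.ListReleaseLabelsInput{},
        func(page *emr.ListReleaseLabelsOutput, lastPage bool) bool {
            for _, label := range page.ReleaseLabels {
                fmt.Println(aws.StringValue(label))
            }
            return true
        })
    if err != nil {
        fmt.Println("ListReleaseLabels failed:", err)
    }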
The dimension name.
", "MetricDimension$Value": "The dimension value.
", "ModifyClusterInput$ClusterId": "The unique identifier of the cluster.
", + "ReleaseLabelFilter$Prefix": "Optional release label version prefix filter. For example, emr-5
.
Optional release label application filter. For example, spark@2.1.0
.
The name used to identify an automatic scaling rule. Rule names must be unique within a scaling policy.
", "ScalingRule$Description": "A friendly, more verbose description of the automatic scaling rule.
", + "SimplifiedApplication$Name": "The returned release label application name. For example, hadoop
.
The returned release label application version. For example, 3.2.1
.
The name of the cluster step.
", "StepStateChangeReason$Message": "The descriptive message for the state change reason.
", "StepSummary$Name": "The name of the cluster step.
", @@ -1854,6 +1911,7 @@ "Ec2InstanceAttributes$AdditionalMasterSecurityGroups": "A list of additional Amazon EC2 security group IDs for the master node.
", "Ec2InstanceAttributes$AdditionalSlaveSecurityGroups": "A list of additional Amazon EC2 security group IDs for the core and task nodes.
", "HadoopStepConfig$Args": "The list of command line arguments to pass to the JAR file's main function for execution.
", + "ListReleaseLabelsOutput$ReleaseLabels": "The returned release labels.
", "RemoveTagsInput$TagKeys": "A list of tag keys to remove from a resource.
" } }, @@ -1974,7 +2032,7 @@ "Cluster$AutoScalingRole": "An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole
. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.
The name of the security configuration.
", "CreateSecurityConfigurationOutput$Name": "The name of the security configuration.
", - "CreateStudioInput$ServiceRole": "The IAM role that will be assumed by the Amazon EMR Studio. The service role provides a way for Amazon EMR Studio to interoperate with other AWS services.
", + "CreateStudioInput$ServiceRole": "The IAM role that will be assumed by the Amazon EMR Studio. The service role provides a way for Amazon EMR Studio to interoperate with other Amazon Web Services services.
", "CreateStudioInput$UserRole": "The IAM user role that will be assumed by users and groups logged in to an Amazon EMR Studio. The permissions attached to this IAM role can be scoped down for each user or group using session policies.
", "CreateStudioInput$DefaultS3Location": "The Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files.
", "CreateStudioOutput$Url": "The unique Studio access URL.
", @@ -1985,9 +2043,9 @@ "HadoopJarStepConfig$MainClass": "The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.
", "InstanceGroupDetail$LastStateChangeReason": "Details regarding the state of the instance group.
", "JobFlowDetail$LogUri": "The location in Amazon S3 where log files for the job are stored.
", - "JobFlowDetail$LogEncryptionKmsKeyId": "The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.
", + "JobFlowDetail$LogEncryptionKmsKeyId": "The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.
", "JobFlowDetail$JobFlowRole": "The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
", - "JobFlowDetail$ServiceRole": "The IAM role that is assumed by the Amazon EMR service to access AWS resources on your behalf.
", + "JobFlowDetail$ServiceRole": "The IAM role that is assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.
", "JobFlowDetail$AutoScalingRole": "An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole
. The IAM role provides a way for the automatic scaling feature to get the required permissions it needs to launch and terminate EC2 instances in an instance group.
Description of the job flow last changed state.
", "JobFlowInstancesDetail$MasterPublicDnsName": "The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.
", @@ -1999,13 +2057,13 @@ "NotebookExecution$LastStateChangeReason": "The reason for the latest status change of the notebook execution.
", "PlacementType$AvailabilityZone": "The Amazon EC2 Availability Zone for the cluster. AvailabilityZone
is used for uniform instance groups, while AvailabilityZones
(plural) is used for instance fleets.
The location in Amazon S3 to write the log files of the job flow. If a value is not provided, logs are not created.
", - "RunJobFlowInput$LogEncryptionKmsKeyId": "The AWS KMS customer master key (CMK) used for encrypting log files. If a value is not provided, the logs remain encrypted by AES-256. This attribute is only available with Amazon EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0.
", + "RunJobFlowInput$LogEncryptionKmsKeyId": "The KMS key used for encrypting log files. If a value is not provided, the logs remain encrypted by AES-256. This attribute is only available with Amazon EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0.
", "RunJobFlowInput$AdditionalInfo": "A JSON string for selecting additional features.
", "RunJobFlowInput$JobFlowRole": "Also called instance profile and EC2 role. An IAM role for an EMR cluster. The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole
. In order to use the default role, you must have already created it using the CLI or console.
The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
", + "RunJobFlowInput$ServiceRole": "The IAM role that will be assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.
", "RunJobFlowInput$SecurityConfiguration": "The name of a security configuration to apply to the cluster.
", "RunJobFlowInput$AutoScalingRole": "An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole
. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.
Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system.
", + "ScriptBootstrapActionConfig$Path": "Location in Amazon S3 of the script to run during a bootstrap action.
", "SecurityConfigurationSummary$Name": "The name of the security configuration.
", "StartNotebookExecutionInput$RelativePath": "The path and file name of the notebook file for this execution, relative to the path specified for the EMR Notebook. For example, if you specify a path of s3://MyBucket/MyNotebooks
when you create an EMR Notebook for a notebook with an ID of e-ABCDEFGHIJK1234567890ABCD
(the EditorID
of this request), and you specify a RelativePath
of my_notebook_executions/notebook_execution.ipynb
, the location of the file for the notebook execution is s3://MyBucket/MyNotebooks/e-ABCDEFGHIJK1234567890ABCD/my_notebook_executions/notebook_execution.ipynb
.
Input parameters in JSON format passed to the EMR Notebook at runtime for execution.
", @@ -2050,20 +2108,20 @@ "CreateStudioInput$EngineSecurityGroupId": "The ID of the Amazon EMR Studio Engine security group. The Engine security group allows inbound network traffic from the Workspace security group, and it must be in the same VPC specified by VpcId
.
The ID of the Amazon EMR Studio.
", "CreateStudioSessionMappingInput$StudioId": "The ID of the Amazon EMR Studio to which the user or group will be mapped.
", - "CreateStudioSessionMappingInput$IdentityId": "The globally unique identifier (GUID) of the user or group from the AWS SSO Identity Store. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The Amazon Resource Name (ARN) for the session policy that will be applied to the user or group. Session policies refine Studio user permissions without the need to use multiple IAM user roles.
", + "CreateStudioSessionMappingInput$IdentityId": "The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The name of the user or group. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The Amazon Resource Name (ARN) for the session policy that will be applied to the user or group. You should specify the ARN for the session policy that you want to apply, not the ARN of your user role. For more information, see Create an EMR Studio User Role with Session Policies.
", "DeleteStudioInput$StudioId": "The ID of the Amazon EMR Studio.
", "DeleteStudioSessionMappingInput$StudioId": "The ID of the Amazon EMR Studio.
", - "DeleteStudioSessionMappingInput$IdentityId": "The globally unique identifier (GUID) of the user or group to remove from the Amazon EMR Studio. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The name of the user name or group to remove from the Amazon EMR Studio. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The globally unique identifier (GUID) of the user or group to remove from the Amazon EMR Studio. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The name of the user name or group to remove from the Amazon EMR Studio. For more information, see UserName and DisplayName in the Amazon Web Services SSO Store API Reference. Either IdentityName
or IdentityId
must be specified.
The unique identifier of the notebook execution.
", "DescribeStudioInput$StudioId": "The Amazon EMR Studio ID.
", "ExecutionEngineConfig$Id": "The unique identifier of the execution engine. For an EMR cluster, this is the cluster ID.
", "ExecutionEngineConfig$MasterInstanceSecurityGroupId": "An optional unique ID of an EC2 security group to associate with the master instance of the EMR cluster for this notebook execution. For more information see Specifying EC2 Security Groups for EMR Notebooks in the EMR Management Guide.
", "GetStudioSessionMappingInput$StudioId": "The ID of the Amazon EMR Studio.
", - "GetStudioSessionMappingInput$IdentityId": "The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The name of the user or group to fetch. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The name of the user or group to fetch. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
A friendly name for the instance fleet.
", "InstanceFleetConfig$Name": "The friendly name of the instance fleet.
", "InstanceGroupConfig$Name": "Friendly name given to the instance group.
", @@ -2081,8 +2139,8 @@ "JobFlowInstancesConfig$Ec2KeyName": "The name of the EC2 key pair that can be used to connect to the master node using SSH as the user called \"hadoop.\"
", "JobFlowInstancesConfig$HadoopVersion": "Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are \"0.18\" (no longer maintained), \"0.20\" (no longer maintained), \"0.20.205\" (no longer maintained), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion
parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.
Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value and your account supports EC2-Classic, the cluster launches in EC2-Classic.
", - "JobFlowInstancesConfig$EmrManagedMasterSecurityGroup": "The identifier of the Amazon EC2 security group for the master node.
", - "JobFlowInstancesConfig$EmrManagedSlaveSecurityGroup": "The identifier of the Amazon EC2 security group for the core and task nodes.
", + "JobFlowInstancesConfig$EmrManagedMasterSecurityGroup": "The identifier of the Amazon EC2 security group for the master node. If you specify EmrManagedMasterSecurityGroup
, you must also specify EmrManagedSlaveSecurityGroup
.
The identifier of the Amazon EC2 security group for the core and task nodes. If you specify EmrManagedSlaveSecurityGroup
, you must also specify EmrManagedMasterSecurityGroup
.
The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.
", "JobFlowInstancesDetail$Ec2KeyName": "The name of an Amazon EC2 key pair that can be used to connect to the master node using SSH.
", "JobFlowInstancesDetail$Ec2SubnetId": "For clusters launched within Amazon Virtual Private Cloud, this is the identifier of the subnet where the cluster was launched.
", @@ -2111,11 +2169,11 @@ "SecurityGroupsList$member": null, "SessionMappingDetail$StudioId": "The ID of the Amazon EMR Studio.
", "SessionMappingDetail$IdentityId": "The globally unique identifier (GUID) of the user or group.
", - "SessionMappingDetail$IdentityName": "The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.
", + "SessionMappingDetail$IdentityName": "The name of the user or group. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference.
", "SessionMappingDetail$SessionPolicyArn": "The Amazon Resource Name (ARN) of the session policy associated with the user or group.
", "SessionMappingSummary$StudioId": "The ID of the Amazon EMR Studio.
", - "SessionMappingSummary$IdentityId": "The globally unique identifier (GUID) of the user or group from the AWS SSO Identity Store.
", - "SessionMappingSummary$IdentityName": "The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.
", + "SessionMappingSummary$IdentityId": "The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store.
", + "SessionMappingSummary$IdentityName": "The name of the user or group. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference.
", "SessionMappingSummary$SessionPolicyArn": "The Amazon Resource Name (ARN) of the session policy associated with the user or group.
", "StartNotebookExecutionInput$EditorId": "The unique identifier of the EMR Notebook to use for notebook execution.
", "StartNotebookExecutionInput$NotebookExecutionName": "An optional name for the notebook execution.
", @@ -2142,8 +2200,8 @@ "UpdateStudioInput$Name": "A descriptive name for the Amazon EMR Studio.
", "UpdateStudioInput$Description": "A detailed description to assign to the Amazon EMR Studio.
", "UpdateStudioSessionMappingInput$StudioId": "The ID of the Amazon EMR Studio.
", - "UpdateStudioSessionMappingInput$IdentityId": "The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The name of the user or group to update. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The name of the user or group to update. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName
or IdentityId
must be specified.
The Amazon Resource Name (ARN) of the session policy to associate with the specified user or group.
", "XmlStringMaxLen256List$member": null } diff --git a/models/apis/elasticmapreduce/2009-03-31/paginators-1.json b/models/apis/elasticmapreduce/2009-03-31/paginators-1.json index d5b5407b34..5ea61f9252 100644 --- a/models/apis/elasticmapreduce/2009-03-31/paginators-1.json +++ b/models/apis/elasticmapreduce/2009-03-31/paginators-1.json @@ -33,6 +33,11 @@ "output_token": "Marker", "result_key": "NotebookExecutions" }, + "ListReleaseLabels": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, "ListSecurityConfigurations": { "input_token": "Marker", "output_token": "Marker", diff --git a/models/apis/iam/2010-05-08/docs-2.json b/models/apis/iam/2010-05-08/docs-2.json index 8aa5232c8f..9319cfd24c 100644 --- a/models/apis/iam/2010-05-08/docs-2.json +++ b/models/apis/iam/2010-05-08/docs-2.json @@ -14,7 +14,7 @@ "CreateGroup": "Creates a new group.
For information about the number of groups you can create, see IAM and STS quotas in the IAM User Guide.
", "CreateInstanceProfile": "Creates a new instance profile. For information about instance profiles, see Using roles for applications on Amazon EC2 in the IAM User Guide, and Instance profiles in the Amazon EC2 User Guide.
For information about the number of instance profiles you can create, see IAM object quotas in the IAM User Guide.
", "CreateLoginProfile": "Creates a password for the specified IAM user. A password allows an IAM user to access Amazon Web Services services through the Management Console.
You can use the CLI, the Amazon Web Services API, or the Users page in the IAM console to create a password for any IAM user. Use ChangePassword to update your own existing password in the My Security Credentials page in the Management Console.
For more information about managing passwords, see Managing passwords in the IAM User Guide.
", - "CreateOpenIDConnectProvider": "Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).
The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.
If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.
When you create the IAM OIDC provider, you specify the following:
The URL of the OIDC identity provider (IdP) to trust
A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider
A list of thumbprints of one or more server certificates that the IdP uses
You get all of this information from the OIDC IdP that you want to use to access Amazon Web Services.
The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.
Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).
The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.
If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.
When you create the IAM OIDC provider, you specify the following:
The URL of the OIDC identity provider (IdP) to trust
A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider
A list of thumbprints of one or more server certificates that the IdP uses
You get all of this information from the OIDC IdP that you want to use to access Amazon Web Services.
Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. These OIDC IdPs include Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation.
The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.
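For Go callers, the operation described here maps onto iam.CreateOpenIDConnectProvider. A minimal sketch (imports as in the EMR example earlier, swapping in service/iam); the URL, client ID, and thumbprint values are all placeholders:

    svc := iam.New(session.Must(session.NewSession()))

    out, err := svc.CreateOpenIDConnectProvider(&iam.CreateOpenIDConnectProviderInput{
        Url:            aws.String("https://idp.example.com"), // placeholder IdP URL
        ClientIDList:   []*string{aws.String("my-client-id")}, // placeholder audience
        ThumbprintList: []*string{aws.String("0123456789abcdef0123456789abcdef01234567")}, // placeholder thumbprint
    })
    if err != nil {
        fmt.Println("CreateOpenIDConnectProvider failed:", err)
        return
    }
    fmt.Println("provider ARN:", aws.StringValue(out.OpenIDConnectProviderArn))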
Creates a new managed policy for your account.
This operation creates a policy version with a version identifier of v1
and sets v1 as the policy's default version. For more information about policy versions, see Versioning for managed policies in the IAM User Guide.
As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide.
For more information about managed policies in general, see Managed policies and inline policies in the IAM User Guide.
", "CreatePolicyVersion": "Creates a new version of the specified managed policy. To update a managed policy, you create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must delete an existing version using DeletePolicyVersion before you create a new version.
Optionally, you can set the new version as the policy's default version. The default version is the version that is in effect for the IAM users, groups, and roles to which the policy is attached.
For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.
", "CreateRole": "Creates a new role for your account. For more information about roles, see IAM roles. For information about quotas for role names and the number of roles you can create, see IAM and STS quotas in the IAM User Guide.
", @@ -148,7 +148,7 @@ "UpdateAssumeRolePolicy": "Updates the policy that grants an IAM entity permission to assume a role. This is typically referred to as the \"role trust policy\". For more information about roles, see Using roles to delegate permissions and federate identities.
", "UpdateGroup": "Updates the name and/or the path of the specified IAM group.
You should understand the implications of changing a group's path or name. For more information, see Renaming users and groups in the IAM User Guide.
The person making the request (the principal), must have permission to change the role group with the old name and the new name. For example, to change the group named Managers
to MGRs
, the principal must have a policy that allows them to update both groups. If the principal has permission to update the Managers
group, but not the MGRs
group, then the update fails. For more information about permissions, see Access management.
Changes the password for the specified IAM user. You can use the CLI, the Amazon Web Services API, or the Users page in the IAM console to change the password for any IAM user. Use ChangePassword to change your own password in the My Security Credentials page in the Management Console.
For more information about modifying passwords, see Managing passwords in the IAM User Guide.
", - "UpdateOpenIDConnectProviderThumbprint": "Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.
The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)
Typically, you need to update a thumbprint only when the identity provider's certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.
Trust for the OIDC provider is derived from the provider's certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint
operation to highly privileged users.
Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.
The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)
Typically, you need to update a thumbprint only when the identity provider certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.
Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. These OIDC IdPs include Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation.
Trust for the OIDC provider is derived from the provider certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint
operation to highly privileged users.
Updates the description or maximum session duration setting of a role.
", "UpdateRoleDescription": "Use UpdateRole instead.
Modifies only the description of a role. This operation performs the same function as the Description
parameter in the UpdateRole
operation.
Updates the metadata document for an existing SAML provider resource object.
This operation requires Signature Version 4.
Searches an active index. Use this API to search your documents using query. The Query
operation enables you to do faceted search and to filter results based on document attributes.
It also enables you to provide user context that Amazon Kendra uses to enforce document access control in the search results.
Amazon Kendra searches your index for text content and question and answer (FAQ) content. By default the response contains three types of results.
Relevant passages
Matching FAQs
Relevant documents
You can specify that the query return only one type of result using the QueryResultTypeConfig
parameter.
Each query returns the 100 most relevant results.
", "StartDataSourceSyncJob": "Starts a synchronization job for a data source. If a synchronization job is already in progress, Amazon Kendra returns a ResourceInUseException
exception.
Stops a running synchronization job. You can't stop a scheduled synchronization job.
", - "SubmitFeedback": "Enables you to provide feedback to Amazon Kendra to improve the performance of the service.
", + "SubmitFeedback": "Enables you to provide feedback to Amazon Kendra to improve the performance of your index.
", "TagResource": "Adds the specified tag to the specified index, FAQ, or data source resource. If the tag already exists, the existing value is replaced with the new value.
", "UntagResource": "Removes a tag from an index, FAQ, or a data source.
", "UpdateDataSource": "Updates an existing Amazon Kendra data source.
", @@ -97,7 +97,7 @@ } }, "AttributeFilter": { - "base": "Provides filtering the query results based on document attributes.
When you use the AndAllFilters or OrAllFilters, filters you can use 2 layers under the first attribute filter. For example, you can use: <AndAllFilters> <OrAllFilters> <EqualTo>. If you use more than 2 layers, you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\"
", + "base": "Provides filtering the query results based on document attributes. When you use the AndAllFilters or OrAllFilters, filters you can use 2 layers under the first attribute filter. For example, you can use: <AndAllFilters> <OrAllFilters> <EqualTo>. If you use more than 2 layers, you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\" If you use more than 10 attribute filters, you receive a ValidationException exception with the message \"AttributeFilter cannot have a length of more than 10\".
Performs a logical NOT
operation on all supplied filters.
Indicates whether Amazon Kendra should crawl attachments to the service catalog items.
", "SharePointConfiguration$CrawlAttachments": " TRUE
to include attachments to documents stored in your Microsoft SharePoint site in the index; otherwise, FALSE
.
Set to TRUE
to use the Microsoft SharePoint change log to determine the documents that need to be updated in the index. Depending on the size of the SharePoint change log, it may take longer for Amazon Kendra to use the change log than it takes it to determine the changed documents using the Amazon Kendra document crawler.
A Boolean value that specifies whether local groups are disabled (True
) or enabled (False
).
A Boolean value that specifies whether local groups are disabled (True
) or enabled (False
).
", + "WorkDocsConfiguration$CrawlComments": "TRUE to include comments on documents in your index. Including comments in your index means each comment is a document that can be searched on. The default is set to FALSE.
", + "WorkDocsConfiguration$UseChangeLog": "TRUE to use the change logs to update documents in your index instead of scanning all documents. If you are syncing your Amazon WorkDocs data source with your index for the first time, all documents are scanned. After your first sync, you can use the change logs to update your documents in your index for future syncs. The default is set to FALSE.
A list of regular expression patterns. Documents that match the patterns are included in the index. Documents that don't match the patterns are excluded from the index. If a document matches both an inclusion pattern and an exclusion pattern, the document is not included in the index.
The regex is applied to the display URL of the SharePoint document.
", "SharePointConfiguration$ExclusionPatterns": "A list of regular expression patterns. Documents that match the patterns are excluded from the index. Documents that don't match the patterns are included in the index. If a document matches both an exclusion pattern and an inclusion pattern, the document is not included in the index.
The regex is applied to the display URL of the SharePoint document.
", "WebCrawlerConfiguration$UrlInclusionPatterns": "The regular expression pattern to include certain URLs to crawl.
If there is a regular expression pattern to exclude certain URLs that conflicts with the include pattern, the exclude pattern takes precedence.
", - "WebCrawlerConfiguration$UrlExclusionPatterns": "The regular expression pattern to exclude certain URLs to crawl.
If there is a regular expression pattern to include certain URLs that conflicts with the exclude pattern, the exclude pattern takes precedence.
" + "WebCrawlerConfiguration$UrlExclusionPatterns": "The regular expression pattern to exclude certain URLs to crawl.
If there is a regular expression pattern to include certain URLs that conflicts with the exclude pattern, the exclude pattern takes precedence.
", + "WorkDocsConfiguration$InclusionPatterns": "A list of regular expression patterns to include certain files in your Amazon WorkDocs site repository. Files that match the patterns are included in the index. Files that don't match the patterns are excluded from the index. If a file matches both an inclusion pattern and an exclusion pattern, the exclusion pattern takes precedence and the file isn’t included in the index.
", + "WorkDocsConfiguration$ExclusionPatterns": "A list of regular expression patterns to exclude certain files in your Amazon WorkDocs site repository. Files that match the patterns are excluded from the index. Files that don’t match the patterns are included in the index. If a file matches both an inclusion pattern and an exclusion pattern, the exclusion pattern takes precedence and the file isn’t included in the index.
" } }, "DataSourceInclusionsExclusionsStringsMember": { @@ -664,7 +668,8 @@ "SalesforceStandardObjectConfiguration$FieldMappings": "One or more objects that map fields in the standard object to Amazon Kendra index fields. The index field must exist before you can map a Salesforce field to it.
", "ServiceNowKnowledgeArticleConfiguration$FieldMappings": "Mapping between ServiceNow fields and Amazon Kendra index fields. You must create the index field before you map the field.
", "ServiceNowServiceCatalogConfiguration$FieldMappings": "Mapping between ServiceNow fields and Amazon Kendra index fields. You must create the index field before you map the field.
", - "SharePointConfiguration$FieldMappings": "A list of DataSourceToIndexFieldMapping
objects that map Microsoft SharePoint attributes to custom fields in the Amazon Kendra index. You must first create the index fields using the UpdateIndex operation before you map SharePoint attributes. For more information, see Mapping Data Source Fields.
", + "SharePointConfiguration$FieldMappings": "A list of DataSourceToIndexFieldMapping objects that map Microsoft SharePoint attributes to custom fields in the Amazon Kendra index. You must first create the index fields using the UpdateIndex operation before you map SharePoint attributes. For more information, see Mapping Data Source Fields.
", + "WorkDocsConfiguration$FieldMappings": "A list of DataSourceToIndexFieldMapping objects that map Amazon WorkDocs field names to custom index field names in Amazon Kendra. You must first create the custom index fields using the UpdateIndex operation before you map to Amazon WorkDocs fields. For more information, see Mapping Data Source Fields. The Amazon WorkDocs data source field names need to exist in your Amazon WorkDocs custom metadata.
The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION
for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION
for your production databases. Once you set the edition for an index, it can't be changed.
The Edition
parameter is optional. If you don't supply a value, the default is ENTERPRISE_EDITION
.
The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION
for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION
for your production databases. Once you set the edition for an index, it can't be changed.
The Edition
parameter is optional. If you don't supply a value, the default is ENTERPRISE_EDITION
.
For more information on quota limits for enterprise and developer editions, see Quotas.
", "DescribeIndexResponse$Edition": "The Amazon Kendra edition used for the index. You decide the edition when you create the index.
", "IndexConfigurationSummary$Edition": "Indicates whether the index is a enterprise edition index or a developer edition index.
" } @@ -1705,6 +1710,12 @@ "Relevance$RankOrder": "Determines how values should be interpreted.
When the RankOrder
field is ASCENDING
, higher numbers are better. For example, a document with a rating score of 10 is higher ranking than a document with a rating score of 1.
When the RankOrder
field is DESCENDING
, lower numbers are better. For example, in a task tracking application, a priority 1 task is more important than a priority 5 task.
Only applies to LONG
and DOUBLE
fields.
The identifier of the directory corresponding to your Amazon WorkDocs site repository.
You can find the organization ID in the AWS Directory Service by going to Active Directory, then Directories. Your Amazon WorkDocs site directory has an ID, which is the organization ID. You can also set up a new Amazon WorkDocs directory in the AWS Directory Service console and enable an Amazon WorkDocs site for the directory in the Amazon WorkDocs console.
" + } + }, "Port": { "base": null, "refs": { @@ -1770,7 +1781,7 @@ "QueryCapacityUnit": { "base": null, "refs": { - "CapacityUnitsConfiguration$QueryCapacityUnits": "The amount of extra query capacity for an index and GetQuerySuggestions capacity.
A single extra capacity unit for an index provides 0.5 queries per second or approximately 40,000 queries per day. GetQuerySuggestions capacity is 5 times the provisioned query capacity for an index. For example, the base capacity for an index is 0.5 queries per second, so GetQuerySuggestions capacity is 2.5 calls per second. If adding another 0.5 queries per second to total 1 queries per second for an index, the GetQuerySuggestions capacity is 5 calls per second.
", + "CapacityUnitsConfiguration$QueryCapacityUnits": "The amount of extra query capacity for an index and GetQuerySuggestions capacity. A single extra capacity unit for an index provides 0.1 queries per second or approximately 8,000 queries per day. GetQuerySuggestions capacity is five times the provisioned query capacity for an index, or the base capacity of 2.5 calls per second, whichever is higher. For example, the base capacity for an index is 0.1 queries per second, and GetQuerySuggestions capacity has a base of 2.5 calls per second. If you add another 0.1 queries per second to total 0.2 queries per second for an index, the GetQuerySuggestions capacity is 2.5 calls per second (higher than five times 0.2 queries per second).
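In SDK terms, these units are set through CapacityUnitsConfiguration. A sketch of adding one query unit (+0.1 queries per second) and one storage unit under the new sizing — the index ID is a placeholder, and imports follow the earlier examples with service/kendra:

    svc := kendra.New(session.Must(session.NewSession()))

    // Provision one extra query capacity unit and one storage unit.
    _, err := svc.UpdateIndex(&kendra.UpdateIndexInput{
        Id: aws.String("index-id"), // placeholder index ID
        CapacityUnits: &kendra.CapacityUnitsConfiguration{
            QueryCapacityUnits:   aws.Int64(1),
            StorageCapacityUnits: aws.Int64(1),
        },
    })
    if err != nil {
        fmt.Println("UpdateIndex failed:", err)
    }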
Your secret ARN, which you can create in AWS Secrets Manager. You use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password.
", "ConfluenceConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Managersecret that contains the key/value pairs required to connect to your Confluence server. The secret must contain a JSON structure with the following keys:
username - The user name or email address of a user with administrative privileges for the Confluence server.
password - The password associated with the user logging in to the Confluence server.
The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager. The credentials should be a user/password pair. For more information, see Using a Database Data Source. For more information about AWS Secrets Manager, see What Is AWS Secrets Manager in the Secrets Manager user guide.
", + "ConnectionConfiguration$SecretArn": "The Amazon Resource Name (ARN) of credentials stored in Secrets Manager. The credentials should be a user/password pair. For more information, see Using a Database Data Source. For more information about Secrets Manager, see What Is Secrets Manager in the Secrets Manager user guide.
", "GoogleDriveConfiguration$SecretArn": "The Amazon Resource Name (ARN) of a Secrets Managersecret that contains the credentials required to connect to Google Drive. For more information, see Using a Google Workspace Drive data source.
", "OneDriveConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Managersecret that contains the user name and password to connect to OneDrive. The user namd should be the application ID for the OneDrive application, and the password is the application key for the OneDrive application.
", "ProxyConfiguration$Credentials": "Your secret ARN, which you can create in AWS Secrets Manager
The credentials are optional. You use a secret if web proxy credentials are required to connect to a website host. Amazon Kendra currently support basic authentication to connect to a web proxy server. The secret stores your credentials.
", "SalesforceConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Managersecret that contains the key/value pairs required to connect to your Salesforce instance. The secret must contain a JSON structure with the following keys:
authenticationUrl - The OAUTH endpoint that Amazon Kendra connects to get an OAUTH token.
consumerKey - The application public key generated when you created your Salesforce application.
consumerSecret - The application private key generated when you created your Salesforce application.
password - The password associated with the user logging in to the Salesforce instance.
securityToken - The token associated with the user account logging in to the Salesforce instance.
username - The user name of the user logging in to the Salesforce instance.
The Amazon Resource Name (ARN) of the Secrets Manager secret that contains the user name and password required to connect to the ServiceNow instance.
", - "SharePointConfiguration$SecretArn": "The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager. The credentials should be a user/password pair. If you use SharePoint Sever, you also need to provide the sever domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source. For more information about AWS Secrets Manager, see What Is AWS Secrets Manager in the Secrets Manager user guide.
" + "SharePointConfiguration$SecretArn": "The Amazon Resource Name (ARN) of credentials stored in Secrets Manager. The credentials should be a user/password pair. If you use SharePoint Server, you also need to provide the sever domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source. For more information about Secrets Manager, see What Is Secrets Manager in the Secrets Manager user guide.
" } }, "SecurityGroupIdList": { @@ -2284,7 +2295,7 @@ "StorageCapacityUnit": { "base": null, "refs": { - "CapacityUnitsConfiguration$StorageCapacityUnits": "The amount of extra storage capacity for an index. A single capacity unit for an index provides 150 GB of storage space or 500,000 documents, whichever is reached first.
" + "CapacityUnitsConfiguration$StorageCapacityUnits": "The amount of extra storage capacity for an index. A single capacity unit provides 30 GB of storage space or 100,000 documents, whichever is reached first.
" } }, "String": { @@ -2669,6 +2680,12 @@ "refs": { "SeedUrlConfiguration$WebCrawlerMode": "You can choose one of the following modes:
HOST_ONLY
– crawl only the website host names. For example, if the seed URL is \"abc.example.com\", then only URLs with host name \"abc.example.com\" are crawled.
SUBDOMAINS
– crawl the website host names with subdomains. For example, if the seed URL is \"abc.example.com\", then \"a.abc.example.com\" and \"b.abc.example.com\" are also crawled.
EVERYTHING
– crawl the website host names with subdomains and other domains that the webpages link to.
The default mode is set to HOST_ONLY
.
Provides the configuration information to connect to Amazon WorkDocs as your data source.
The Amazon WorkDocs connector is available in the Oregon, North Virginia, Sydney, Singapore, and Ireland Regions.
", + "refs": { + "DataSourceConfiguration$WorkDocsConfiguration": "Provides the configuration information to connect to WorkDocs as your data source.
" + } } } } diff --git a/models/apis/lambda/2015-03-31/api-2.json b/models/apis/lambda/2015-03-31/api-2.json index 98a470ccb3..fdb414a096 100644 --- a/models/apis/lambda/2015-03-31/api-2.json +++ b/models/apis/lambda/2015-03-31/api-2.json @@ -226,7 +226,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} ] }, "DeleteLayerVersion":{ @@ -789,7 +790,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} ] }, "PutProvisionedConcurrencyConfig":{ @@ -980,7 +982,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} ] } }, diff --git a/models/apis/lambda/2015-03-31/docs-2.json b/models/apis/lambda/2015-03-31/docs-2.json index 5236260190..4d101d3ab8 100644 --- a/models/apis/lambda/2015-03-31/docs-2.json +++ b/models/apis/lambda/2015-03-31/docs-2.json @@ -313,7 +313,7 @@ "refs": { "CreateEventSourceMappingRequest$StartingPositionTimestamp": "With StartingPosition
set to AT_TIMESTAMP
, the time from which to start reading.
With StartingPosition
set to AT_TIMESTAMP
, the time from which to start reading.
The date that the event source mapping was last updated, or its state changed.
", + "EventSourceMappingConfiguration$LastModified": "The date that the event source mapping was last updated or that its state changed.
", "FunctionEventInvokeConfig$LastModified": "The date and time that the configuration was last updated.
" } }, @@ -522,7 +522,7 @@ } }, "EventSourceMappingConfiguration": { - "base": "A mapping between an Amazon Web Services resource and an Lambda function. See CreateEventSourceMapping for details.
", + "base": "A mapping between an Amazon Web Services resource and a Lambda function. For details, see CreateEventSourceMapping.
", "refs": { "EventSourceMappingsList$member": null } @@ -537,7 +537,7 @@ "base": null, "refs": { "CreateEventSourceMappingRequest$StartingPosition": "The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP
is only supported for Amazon Kinesis streams.
The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP
is only supported for Amazon Kinesis streams.
The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK stream sources. AT_TIMESTAMP
is supported only for Amazon Kinesis streams.
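A minimal Go sketch of the AT_TIMESTAMP behavior described above, using the SDK's EventSourcePositionAtTimestamp constant; the stream ARN, function name, and timestamp are placeholders:

```go
// Hedged sketch: create a Kinesis event source mapping that replays records
// from a point in time (AT_TIMESTAMP is supported only for Kinesis).
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	out, err := svc.CreateEventSourceMapping(&lambda.CreateEventSourceMappingInput{
		EventSourceArn:            aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example"), // placeholder
		FunctionName:              aws.String("my-function"),                                           // placeholder
		StartingPosition:          aws.String(lambda.EventSourcePositionAtTimestamp),
		StartingPositionTimestamp: aws.Time(time.Date(2021, 7, 1, 0, 0, 0, 0, time.UTC)),
	})
	if err != nil {
		fmt.Println("CreateEventSourceMapping failed:", err)
		return
	}
	fmt.Println("mapping UUID:", aws.StringValue(out.UUID))
}
```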
Limit the number of aliases returned.
", "ListCodeSigningConfigsRequest$MaxItems": "Maximum number of items to return.
", - "ListEventSourceMappingsRequest$MaxItems": "The maximum number of event source mappings to return.
", + "ListEventSourceMappingsRequest$MaxItems": "The maximum number of event source mappings to return. Note that ListEventSourceMappings returns a maximum of 100 items in each response, even if you set the number higher.
", "ListFunctionsByCodeSigningConfigRequest$MaxItems": "Maximum number of items to return.
", "ListFunctionsRequest$MaxItems": "The maximum number of functions to return in the response. Note that ListFunctions
returns a maximum of 50 items in each response, even if you set the number higher.
The maximum number of versions to return. Note that ListVersionsByFunction
returns a maximum of 50 items in each response, even if you set the number higher.
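Because of these response caps, callers should page rather than raise MaxItems. A minimal Go sketch using the generated ListEventSourceMappingsPages helper:

```go
// Hedged sketch: page through all event source mappings; each page holds at
// most 100 items regardless of MaxItems.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	err := svc.ListEventSourceMappingsPages(
		&lambda.ListEventSourceMappingsInput{MaxItems: aws.Int64(100)},
		func(page *lambda.ListEventSourceMappingsOutput, lastPage bool) bool {
			for _, m := range page.EventSourceMappings {
				fmt.Println(aws.StringValue(m.UUID), aws.StringValue(m.State))
			}
			return true // keep paging
		})
	if err != nil {
		fmt.Println("ListEventSourceMappings failed:", err)
	}
}
```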
(Streams and SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds.
", - "EventSourceMappingConfiguration$MaximumBatchingWindowInSeconds": "(Streams and SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds. The default value is zero.
", + "EventSourceMappingConfiguration$MaximumBatchingWindowInSeconds": "(Streams and Amazon SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds. The default value is zero.
", "UpdateEventSourceMappingRequest$MaximumBatchingWindowInSeconds": "(Streams and SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds.
" } }, @@ -1350,7 +1350,7 @@ "base": null, "refs": { "CreateEventSourceMappingRequest$ParallelizationFactor": "(Streams only) The number of batches to process from each shard concurrently.
", - "EventSourceMappingConfiguration$ParallelizationFactor": "(Streams only) The number of batches to process from each shard concurrently. The default value is 1.
", + "EventSourceMappingConfiguration$ParallelizationFactor": "(Streams only) The number of batches to process concurrently from each shard. The default value is 1.
", "UpdateEventSourceMappingRequest$ParallelizationFactor": "(Streams only) The number of batches to process from each shard concurrently.
" } }, @@ -1478,7 +1478,7 @@ "base": null, "refs": { "CreateEventSourceMappingRequest$Queues": "(MQ) The name of the Amazon MQ broker destination queue to consume.
", - "EventSourceMappingConfiguration$Queues": "(MQ) The name of the Amazon MQ broker destination queue to consume.
" + "EventSourceMappingConfiguration$Queues": "(Amazon MQ) The name of the Amazon MQ broker destination queue to consume.
" } }, "RemoveLayerVersionPermissionRequest": { @@ -1587,10 +1587,10 @@ } }, "SelfManagedEventSource": { - "base": "The Self-Managed Apache Kafka cluster for your event source.
", + "base": "The self-managed Apache Kafka cluster for your event source.
", "refs": { "CreateEventSourceMappingRequest$SelfManagedEventSource": "The Self-Managed Apache Kafka cluster to send records.
", - "EventSourceMappingConfiguration$SelfManagedEventSource": "The Self-Managed Apache Kafka cluster for your event source.
" + "EventSourceMappingConfiguration$SelfManagedEventSource": "The self-managed Apache Kafka cluster for your event source.
" } }, "SensitiveString": { @@ -1612,7 +1612,7 @@ } }, "SourceAccessConfiguration": { - "base": "You can specify the authentication protocol, or the VPC components to secure access to your event source.
", + "base": "To secure and define access to your event source, you can specify the authentication protocol, VPC components, or virtual host.
", "refs": { "SourceAccessConfigurations$member": null } @@ -1620,15 +1620,15 @@ "SourceAccessConfigurations": { "base": null, "refs": { - "CreateEventSourceMappingRequest$SourceAccessConfigurations": "An array of the authentication protocol, or the VPC components to secure your event source.
", - "EventSourceMappingConfiguration$SourceAccessConfigurations": "An array of the authentication protocol, or the VPC components to secure your event source.
", - "UpdateEventSourceMappingRequest$SourceAccessConfigurations": "An array of the authentication protocol, or the VPC components to secure your event source.
" + "CreateEventSourceMappingRequest$SourceAccessConfigurations": "An array of authentication protocols or VPC components required to secure your event source.
", + "EventSourceMappingConfiguration$SourceAccessConfigurations": "An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.
", + "UpdateEventSourceMappingRequest$SourceAccessConfigurations": "An array of authentication protocols or VPC components required to secure your event source.
" } }, "SourceAccessType": { "base": null, "refs": { - "SourceAccessConfiguration$Type": "The type of authentication protocol or the VPC components for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\"
.
BASIC_AUTH
- (MQ) The Secrets Manager secret that stores your broker credentials.
VPC_SUBNET
- The subnets associated with your VPC. Lambda connects to these subnets to fetch data from your Self-Managed Apache Kafka cluster.
VPC_SECURITY_GROUP
- The VPC security group used to manage access to your Self-Managed Apache Kafka brokers.
SASL_SCRAM_256_AUTH
- The Secrets Manager ARN of your secret key used for SASL SCRAM-256 authentication of your Self-Managed Apache Kafka brokers.
SASL_SCRAM_512_AUTH
- The Secrets Manager ARN of your secret key used for SASL SCRAM-512 authentication of your Self-Managed Apache Kafka brokers.
VIRTUAL_HOST
- The name of the virtual host in your RabbitMQ broker. Lambda will use this host as the event source.
The type of authentication protocol, VPC components, or virtual host for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".
BASIC_AUTH - (Amazon MQ) The Secrets Manager secret that stores your broker credentials.
BASIC_AUTH - (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL/PLAIN authentication of your Apache Kafka brokers.
VPC_SUBNET - The subnets associated with your VPC. Lambda connects to these subnets to fetch data from your self-managed Apache Kafka cluster.
VPC_SECURITY_GROUP - The VPC security group used to manage access to your self-managed Apache Kafka brokers.
SASL_SCRAM_256_AUTH - The Secrets Manager ARN of your secret key used for SASL SCRAM-256 authentication of your self-managed Apache Kafka brokers.
SASL_SCRAM_512_AUTH - The Secrets Manager ARN of your secret key used for SASL SCRAM-512 authentication of your self-managed Apache Kafka brokers.
VIRTUAL_HOST - (Amazon MQ) The name of the virtual host in your RabbitMQ broker. Lambda uses this RabbitMQ host as the event source.
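A hedged Go sketch combining several of these SourceAccessType values for a self-managed Apache Kafka source. The URI formats (secret ARN, subnet:, security_group:) and all identifiers are assumptions or placeholders:

```go
// Hedged sketch: a self-managed Kafka mapping secured with SASL/SCRAM-512
// plus VPC components, per the SourceAccessType values above.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	out, err := svc.CreateEventSourceMapping(&lambda.CreateEventSourceMappingInput{
		FunctionName: aws.String("my-function"), // placeholder
		Topics:       []*string{aws.String("orders")},
		SelfManagedEventSource: &lambda.SelfManagedEventSource{
			Endpoints: map[string][]*string{
				"KAFKA_BOOTSTRAP_SERVERS": {aws.String("broker-1.example.com:9096")}, // placeholder
			},
		},
		SourceAccessConfigurations: []*lambda.SourceAccessConfiguration{
			{
				Type: aws.String(lambda.SourceAccessTypeSaslScram512Auth),
				URI:  aws.String("arn:aws:secretsmanager:us-east-1:123456789012:secret:kafka-creds"), // placeholder
			},
			{Type: aws.String(lambda.SourceAccessTypeVpcSubnet), URI: aws.String("subnet:subnet-0123456789abcdef0")},
			{Type: aws.String(lambda.SourceAccessTypeVpcSecurityGroup), URI: aws.String("security_group:sg-0123456789abcdef0")},
		},
	})
	if err != nil {
		fmt.Println("CreateEventSourceMapping failed:", err)
		return
	}
	fmt.Println("mapping UUID:", aws.StringValue(out.UUID))
}
```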
The error code.
", "EventSourceMappingConfiguration$UUID": "The identifier of the event source mapping.
", - "EventSourceMappingConfiguration$LastProcessingResult": "The result of the last Lambda invocation of your Lambda function.
", + "EventSourceMappingConfiguration$LastProcessingResult": "The result of the last Lambda invocation of your function.
", "EventSourceMappingConfiguration$State": "The state of the event source mapping. It can be one of the following: Creating
, Enabling
, Enabled
, Disabling
, Disabled
, Updating
, or Deleting
.
Indicates whether the last change to the event source mapping was made by a user, or by the Lambda service.
", + "EventSourceMappingConfiguration$StateTransitionReason": "Indicates whether a user or Lambda made the last change to the event source mapping.
", "FunctionCode$ImageUri": "URI of a container image in the Amazon ECR registry.
", "FunctionCodeLocation$RepositoryType": "The service that's hosting the file.
", "FunctionCodeLocation$Location": "A presigned URL that you can use to download the deployment package.
", @@ -1930,7 +1930,7 @@ "base": null, "refs": { "CreateEventSourceMappingRequest$TumblingWindowInSeconds": "(Streams only) The duration in seconds of a processing window. The range is between 1 second up to 900 seconds.
", - "EventSourceMappingConfiguration$TumblingWindowInSeconds": "(Streams only) The duration in seconds of a processing window. The range is between 1 second up to 900 seconds.
", + "EventSourceMappingConfiguration$TumblingWindowInSeconds": "(Streams only) The duration in seconds of a processing window. The range is 1–900 seconds.
", "UpdateEventSourceMappingRequest$TumblingWindowInSeconds": "(Streams only) The duration in seconds of a processing window. The range is between 1 second up to 900 seconds.
" } }, diff --git a/models/apis/personalize/2018-05-22/api-2.json b/models/apis/personalize/2018-05-22/api-2.json index 64cb86a861..a7a00ada83 100644 --- a/models/apis/personalize/2018-05-22/api-2.json +++ b/models/apis/personalize/2018-05-22/api-2.json @@ -923,8 +923,7 @@ "type":"structure", "required":[ "name", - "solutionVersionArn", - "minProvisionedTPS" + "solutionVersionArn" ], "members":{ "name":{"shape":"Name"}, diff --git a/models/apis/personalize/2018-05-22/docs-2.json b/models/apis/personalize/2018-05-22/docs-2.json index cf5bd55112..cfccb2d5f7 100644 --- a/models/apis/personalize/2018-05-22/docs-2.json +++ b/models/apis/personalize/2018-05-22/docs-2.json @@ -5,9 +5,9 @@ "CreateBatchInferenceJob": "Creates a batch inference job. The operation can handle up to 50 million records and the input file must be in JSON format. For more information, see recommendations-batch.
", "CreateCampaign": "Creates a campaign by deploying a solution version. When a client calls the GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in the request.
Minimum Provisioned TPS and Auto-Scaling
A transaction is a single GetRecommendations or GetPersonalizedRanking call. Transactions per second (TPS) is the throughput and unit of billing for Amazon Personalize. The minimum provisioned TPS (minProvisionedTPS) specifies the baseline throughput provisioned by Amazon Personalize, and thus the minimum billing charge.
If your TPS increases beyond minProvisionedTPS, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minProvisionedTPS. There's a short time delay while the capacity is increased that might cause loss of transactions.
The actual TPS used is calculated as the average requests/second within a 5-minute window. You pay for the maximum of either the minimum provisioned TPS or the actual TPS. We recommend starting with a low minProvisionedTPS, tracking your usage with Amazon CloudWatch metrics, and then increasing the minProvisionedTPS as necessary.
Status
A campaign can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
To get the campaign status, call DescribeCampaign.
Wait until the status of the campaign is ACTIVE before asking the campaign for recommendations.
Related APIs
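A minimal Go sketch of the call this documents. Note that the api-2.json hunk above drops minProvisionedTPS from the required list; it is still set explicitly here. The name and solution version ARN are placeholders:

```go
// Hedged sketch: deploy a solution version as a campaign with a low baseline
// TPS, to be raised later after watching CloudWatch.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/personalize"
)

func main() {
	svc := personalize.New(session.Must(session.NewSession()))

	out, err := svc.CreateCampaign(&personalize.CreateCampaignInput{
		Name:               aws.String("example-campaign"),                                              // placeholder
		SolutionVersionArn: aws.String("arn:aws:personalize:us-east-1:123456789012:solution/example/1"), // placeholder
		MinProvisionedTPS:  aws.Int64(1),
	})
	if err != nil {
		fmt.Println("CreateCampaign failed:", err)
		return
	}
	// Wait until DescribeCampaign reports ACTIVE before requesting recommendations.
	fmt.Println("campaign ARN:", aws.StringValue(out.CampaignArn))
}
```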
", "CreateDataset": "Creates an empty dataset and adds it to the specified dataset group. Use CreateDatasetImportJob to import your training data to a dataset.
There are three types of datasets:
Interactions
Items
Users
Each dataset type has an associated schema with required field types. Only the Interactions
dataset is required in order to train a model (also referred to as creating a solution).
A dataset can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
To get the status of the dataset, call DescribeDataset.
Related APIs
", - "CreateDatasetExportJob": " Creates a job that exports data from your dataset to an Amazon S3 bucket. To allow Amazon Personalize to export the training data, you must specify an service-linked AWS Identity and Access Management (IAM) role that gives Amazon Personalize PutObject
permissions for your Amazon S3 bucket. For information, see Exporting a dataset in the Amazon Personalize developer guide.
Status
A dataset export job can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
To get the status of the export job, call DescribeDatasetExportJob, and specify the Amazon Resource Name (ARN) of the dataset export job. The dataset export is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason
key, which describes why the job failed.
Creates an empty dataset group. A dataset group contains related datasets that supply data for training a model. A dataset group can contain at most three datasets, one for each type of dataset:
Interactions
Items
Users
To train a model (create a solution), a dataset group that contains an Interactions
dataset is required. Call CreateDataset to add a dataset to the group.
A dataset group can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING
To get the status of the dataset group, call DescribeDatasetGroup. If the status shows as CREATE FAILED, the response includes a failureReason
key, which describes why the creation failed.
You must wait until the status
of the dataset group is ACTIVE
before adding a dataset to the group.
You can specify an AWS Key Management Service (KMS) key to encrypt the datasets in the group. If you specify a KMS key, you must also include an AWS Identity and Access Management (IAM) role that has permission to access the key.
APIs that require a dataset group ARN in the request
Related APIs
", - "CreateDatasetImportJob": "Creates a job that imports training data from your data source (an Amazon S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to import the training data, you must specify an AWS Identity and Access Management (IAM) service role that has permission to read from the data source, as Amazon Personalize makes a copy of your data and processes it in an internal AWS system. For information on granting access to your Amazon S3 bucket, see Giving Amazon Personalize Access to Amazon S3 Resources.
The dataset import job replaces any existing data in the dataset that you imported in bulk.
Status
A dataset import job can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
To get the status of the import job, call DescribeDatasetImportJob, providing the Amazon Resource Name (ARN) of the dataset import job. The dataset import is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason
key, which describes why the job failed.
Importing takes time. You must wait until the status shows as ACTIVE before training a model using the dataset.
Related APIs
", + "CreateDatasetExportJob": " Creates a job that exports data from your dataset to an Amazon S3 bucket. To allow Amazon Personalize to export the training data, you must specify an service-linked IAM role that gives Amazon Personalize PutObject
permissions for your Amazon S3 bucket. For information, see Exporting a dataset in the Amazon Personalize developer guide.
Status
A dataset export job can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
To get the status of the export job, call DescribeDatasetExportJob, and specify the Amazon Resource Name (ARN) of the dataset export job. The dataset export is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason
key, which describes why the job failed.
Creates an empty dataset group. A dataset group contains related datasets that supply data for training a model. A dataset group can contain at most three datasets, one for each type of dataset:
Interactions
Items
Users
To train a model (create a solution), a dataset group that contains an Interactions
dataset is required. Call CreateDataset to add a dataset to the group.
A dataset group can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING
To get the status of the dataset group, call DescribeDatasetGroup. If the status shows as CREATE FAILED, the response includes a failureReason
key, which describes why the creation failed.
You must wait until the status of the dataset group is ACTIVE before adding a dataset to the group.
You can specify a Key Management Service (KMS) key to encrypt the datasets in the group. If you specify a KMS key, you must also include an Identity and Access Management (IAM) role that has permission to access the key.
APIs that require a dataset group ARN in the request
Related APIs
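A minimal Go sketch of the KMS-plus-role pairing the text requires; all ARNs are placeholders:

```go
// Hedged sketch: a dataset group encrypted with a KMS key; supplying
// kmsKeyArn requires an IAM role that can use that key.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/personalize"
)

func main() {
	svc := personalize.New(session.Must(session.NewSession()))

	out, err := svc.CreateDatasetGroup(&personalize.CreateDatasetGroupInput{
		Name:      aws.String("example-dataset-group"),                                // placeholder
		KmsKeyArn: aws.String("arn:aws:kms:us-east-1:123456789012:key/1234-example"),  // placeholder
		RoleArn:   aws.String("arn:aws:iam::123456789012:role/personalize-kms-access"), // placeholder
	})
	if err != nil {
		fmt.Println("CreateDatasetGroup failed:", err)
		return
	}
	fmt.Println("dataset group ARN:", aws.StringValue(out.DatasetGroupArn))
}
```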
", + "CreateDatasetImportJob": "Creates a job that imports training data from your data source (an Amazon S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to import the training data, you must specify an IAM service role that has permission to read from the data source, as Amazon Personalize makes a copy of your data and processes it internally. For information on granting access to your Amazon S3 bucket, see Giving Amazon Personalize Access to Amazon S3 Resources.
The dataset import job replaces any existing data in the dataset that you imported in bulk.
Status
A dataset import job can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
To get the status of the import job, call DescribeDatasetImportJob, providing the Amazon Resource Name (ARN) of the dataset import job. The dataset import is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason
key, which describes why the job failed.
Importing takes time. You must wait until the status shows as ACTIVE before training a model using the dataset.
Related APIs
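A minimal Go sketch of the wait-for-ACTIVE loop the text implies; the job ARN and poll interval are placeholders:

```go
// Hedged sketch: poll DescribeDatasetImportJob until the import leaves the
// CREATE states, since training can't start before ACTIVE.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/personalize"
)

func main() {
	svc := personalize.New(session.Must(session.NewSession()))
	jobArn := aws.String("arn:aws:personalize:us-east-1:123456789012:dataset-import-job/example") // placeholder

	for {
		out, err := svc.DescribeDatasetImportJob(&personalize.DescribeDatasetImportJobInput{
			DatasetImportJobArn: jobArn,
		})
		if err != nil {
			fmt.Println("DescribeDatasetImportJob failed:", err)
			return
		}
		status := aws.StringValue(out.DatasetImportJob.Status)
		fmt.Println("status:", status)
		if status == "ACTIVE" || status == "CREATE FAILED" {
			if out.DatasetImportJob.FailureReason != nil {
				fmt.Println("failure reason:", aws.StringValue(out.DatasetImportJob.FailureReason))
			}
			return
		}
		time.Sleep(30 * time.Second) // importing takes time
	}
}
```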
", "CreateEventTracker": "Creates an event tracker that you use when adding event data to a specified dataset group using the PutEvents API.
Only one event tracker can be associated with a dataset group. You will get an error if you call CreateEventTracker
using the same dataset group as an existing event tracker.
When you create an event tracker, the response includes a tracking ID, which you pass as a parameter when you use the PutEvents operation. Amazon Personalize then appends the event data to the Interactions dataset of the dataset group you specify in your event tracker.
The event tracker can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING > DELETE IN_PROGRESS
To get the status of the event tracker, call DescribeEventTracker.
The event tracker must be in the ACTIVE state before using the tracking ID.
Related APIs
", "CreateFilter": "Creates a recommendation filter. For more information, see filter.
", "CreateSchema": "Creates an Amazon Personalize schema from the specified schema string. The schema you create must be in Avro JSON format.
Amazon Personalize recognizes three schema variants. Each schema is associated with a dataset type and has a set of required field and keywords. You specify a schema when you call CreateDataset.
Related APIs
", @@ -54,7 +54,7 @@ "AccountId": { "base": null, "refs": { - "EventTracker$accountId": "The Amazon AWS account that owns the event tracker.
" + "EventTracker$accountId": "The Amazon Web Services account that owns the event tracker.
" } }, "Algorithm": { @@ -113,13 +113,13 @@ "Dataset$schemaArn": "The ARN of the associated schema.
", "DatasetExportJob$datasetExportJobArn": "The Amazon Resource Name (ARN) of the dataset export job.
", "DatasetExportJob$datasetArn": "The Amazon Resource Name (ARN) of the dataset to export.
", - "DatasetExportJob$roleArn": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management service role that has permissions to add data to your output Amazon S3 bucket.
", + "DatasetExportJob$roleArn": "The Amazon Resource Name (ARN) of the IAM service role that has permissions to add data to your output Amazon S3 bucket.
", "DatasetExportJobSummary$datasetExportJobArn": "The Amazon Resource Name (ARN) of the dataset export job.
", "DatasetGroup$datasetGroupArn": "The Amazon Resource Name (ARN) of the dataset group.
", "DatasetGroupSummary$datasetGroupArn": "The Amazon Resource Name (ARN) of the dataset group.
", "DatasetImportJob$datasetImportJobArn": "The ARN of the dataset import job.
", "DatasetImportJob$datasetArn": "The Amazon Resource Name (ARN) of the dataset that receives the imported data.
", - "DatasetImportJob$roleArn": "The ARN of the AWS Identity and Access Management (IAM) role that has permissions to read from the Amazon S3 data source.
", + "DatasetImportJob$roleArn": "The ARN of the IAM role that has permissions to read from the Amazon S3 data source.
", "DatasetImportJobSummary$datasetImportJobArn": "The Amazon Resource Name (ARN) of the dataset import job.
", "DatasetSchema$schemaArn": "The Amazon Resource Name (ARN) of the schema.
", "DatasetSchemaSummary$schemaArn": "The Amazon Resource Name (ARN) of the schema.
", @@ -486,7 +486,7 @@ } }, "DatasetGroup": { - "base": "A dataset group is a collection of related datasets (Interactions, User, and Item). You create a dataset group by calling CreateDatasetGroup. You then create a dataset and add it to a dataset group by calling CreateDataset. The dataset group is used to create and train a solution by calling CreateSolution. A dataset group can contain only one of each type of dataset.
You can specify an AWS Key Management Service (KMS) key to encrypt the datasets in the group.
", + "base": "A dataset group is a collection of related datasets (Interactions, User, and Item). You create a dataset group by calling CreateDatasetGroup. You then create a dataset and add it to a dataset group by calling CreateDataset. The dataset group is used to create and train a solution by calling CreateSolution. A dataset group can contain only one of each type of dataset.
You can specify a Key Management Service (KMS) key to encrypt the datasets in the group.
", "refs": { "DescribeDatasetGroupResponse$datasetGroup": "A listing of the dataset group's properties.
" } @@ -1056,9 +1056,9 @@ "KmsKeyArn": { "base": null, "refs": { - "CreateDatasetGroupRequest$kmsKeyArn": "The Amazon Resource Name (ARN) of a KMS key used to encrypt the datasets.
", - "DatasetGroup$kmsKeyArn": "The Amazon Resource Name (ARN) of the KMS key used to encrypt the datasets.
", - "S3DataConfig$kmsKeyArn": "The Amazon Resource Name (ARN) of the Amazon Key Management Service (KMS) key that Amazon Personalize uses to encrypt or decrypt the input and output files of a batch inference job.
" + "CreateDatasetGroupRequest$kmsKeyArn": "The Amazon Resource Name (ARN) of a Key Management Service (KMS) key used to encrypt the datasets.
", + "DatasetGroup$kmsKeyArn": "The Amazon Resource Name (ARN) of the Key Management Service (KMS) key used to encrypt the datasets.
", + "S3DataConfig$kmsKeyArn": "The Amazon Resource Name (ARN) of the Key Management Service (KMS) key that Amazon Personalize uses to encrypt or decrypt the input and output files of a batch inference job.
" } }, "LimitExceededException": { @@ -1412,8 +1412,8 @@ "refs": { "BatchInferenceJob$roleArn": "The ARN of the Amazon Identity and Access Management (IAM) role that requested the batch inference job.
", "CreateBatchInferenceJobRequest$roleArn": "The ARN of the Amazon Identity and Access Management role that has permissions to read and write to your input and output Amazon S3 buckets respectively.
", - "CreateDatasetExportJobRequest$roleArn": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management service role that has permissions to add data to your output Amazon S3 bucket.
", - "CreateDatasetGroupRequest$roleArn": "The ARN of the IAM role that has permissions to access the KMS key. Supplying an IAM role is only valid when also specifying a KMS key.
", + "CreateDatasetExportJobRequest$roleArn": "The Amazon Resource Name (ARN) of the IAM service role that has permissions to add data to your output Amazon S3 bucket.
", + "CreateDatasetGroupRequest$roleArn": "The ARN of the Identity and Access Management (IAM) role that has permissions to access the Key Management Service (KMS) key. Supplying an IAM role is only valid when also specifying a KMS key.
", "CreateDatasetImportJobRequest$roleArn": "The ARN of the IAM role that has permissions to read from the Amazon S3 data source.
", "DatasetGroup$roleArn": "The ARN of the IAM role that has permissions to create the dataset group.
" } diff --git a/models/apis/proton/2020-07-20/docs-2.json b/models/apis/proton/2020-07-20/docs-2.json index 6c184eb231..2268419c06 100644 --- a/models/apis/proton/2020-07-20/docs-2.json +++ b/models/apis/proton/2020-07-20/docs-2.json @@ -1,20 +1,20 @@ { "version": "2.0", - "service": "This is the AWS Proton Service API Reference. It provides descriptions, syntax and usage examples for each of the actions and data types for the AWS Proton service.
The documentation for each action shows the Query API request parameters and the XML response.
Alternatively, you can use the AWS CLI to access an API. For more information, see the AWS Command Line Interface User Guide.
The AWS Proton service is a two-pronged automation framework. Administrators create service templates to provide standardized infrastructure and deployment tooling for serverless and container based applications. Developers, in turn, select from the available service templates to automate their application or service deployments.
Because administrators define the infrastructure and tooling that AWS Proton deploys and manages, they need permissions to use all of the listed API operations.
When developers select a specific infrastructure and tooling set, AWS Proton deploys their applications. To monitor their applications that are running on AWS Proton, developers need permissions to the service create, list, update and delete API operations and the service instance list and update API operations.
To learn more about AWS Proton administration, see the AWS Proton Administration Guide.
To learn more about deploying serverless and containerized applications on AWS Proton, see the AWS Proton User Guide.
Ensuring Idempotency
When you make a mutating API request, the request typically returns a result before the asynchronous workflows of the operation are complete. Operations might also time out or encounter other server issues before they're complete, even if the request already returned a result. This might make it difficult to determine whether the request succeeded. Moreover, you might need to retry the request multiple times to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation occurs multiple times. This means that you might create more resources than you intended.
Idempotency ensures that an API request action completes no more than one time. With an idempotent request, if the original request action completes successfully, any subsequent retries complete successfully without performing any further actions. However, the result might contain updated information, such as the current creation status.
The following lists of APIs are grouped according to methods that ensure idempotency.
Idempotent create APIs with a client token
The API actions in this list support idempotency with the use of a client token. The corresponding AWS CLI commands also support idempotency using a client token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. To make an idempotent API request using one of these actions, specify a client token in the request. We recommend that you don't reuse the same client token for other API requests. If you don’t provide a client token for these APIs, a default client token is automatically provided by SDKs.
Given a request action that has succeeded:
If you retry the request using the same client token and the same parameters, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.
If you retry the request using the same client token, but one or more of the parameters are different, the retry throws a ValidationException
with an IdempotentParameterMismatch
error.
Client tokens expire eight hours after a request is made. If you retry the request with the expired token, a new resource is created.
If the original resource is deleted and you retry the request, a new resource is created.
Idempotent create APIs with a client token:
CreateEnvironmentTemplateVersion
CreateServiceTemplateVersion
CreateEnvironmentAccountConnection
<p> <b>Idempotent delete APIs</b> </p> <p>Given a request action that has succeeded:</p> <p>When you retry the request with an API from this group and the resource was deleted, its metadata is returned in the response.</p> <p>If you retry and the resource doesn't exist, the response is empty.</p> <p>In both cases, the retry succeeds.</p> <p>Idempotent delete APIs:</p> <ul> <li> <p>DeleteEnvironmentTemplate</p> </li> <li> <p>DeleteEnvironmentTemplateVersion</p> </li> <li> <p>DeleteServiceTemplate</p> </li> <li> <p>DeleteServiceTemplateVersion</p> </li> <li> <p>DeleteEnvironmentAccountConnection</p> </li> </ul> <p> <b>Asynchronous idempotent delete APIs</b> </p> <p>Given a request action that has succeeded:</p> <p>If you retry the request with an API from this group, if the original request delete operation status is <code>DELETE_IN_PROGRESS</code>, the retry returns the resource detail data in the response without performing any further actions.</p> <p>If the original request delete operation is complete, a retry returns an empty response.</p> <p>Asynchronous idempotent delete APIs:</p> <ul> <li> <p>DeleteEnvironment</p> </li> <li> <p>DeleteService</p> </li> </ul>
",
+ "service": "This is the AWS Proton Service API Reference. It provides descriptions, syntax and usage examples for each of the actions and data types for the AWS Proton service.
The documentation for each action shows the Query API request parameters and the XML response.
Alternatively, you can use the AWS CLI to access an API. For more information, see the AWS Command Line Interface User Guide.
The AWS Proton service is a two-pronged automation framework. Administrators create service templates to provide standardized infrastructure and deployment tooling for serverless and container based applications. Developers, in turn, select from the available service templates to automate their application or service deployments.
Because administrators define the infrastructure and tooling that AWS Proton deploys and manages, they need permissions to use all of the listed API operations.
When developers select a specific infrastructure and tooling set, AWS Proton deploys their applications. To monitor their applications that are running on AWS Proton, developers need permissions to the service create, list, update and delete API operations and the service instance list and update API operations.
To learn more about AWS Proton administration, see the AWS Proton Administrator Guide.
To learn more about deploying serverless and containerized applications on AWS Proton, see the AWS Proton User Guide.
Ensuring Idempotency
When you make a mutating API request, the request typically returns a result before the asynchronous workflows of the operation are complete. Operations might also time out or encounter other server issues before they're complete, even if the request already returned a result. This might make it difficult to determine whether the request succeeded. Moreover, you might need to retry the request multiple times to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation occurs multiple times. This means that you might create more resources than you intended.
Idempotency ensures that an API request action completes no more than one time. With an idempotent request, if the original request action completes successfully, any subsequent retries complete successfully without performing any further actions. However, the result might contain updated information, such as the current creation status.
The following lists of APIs are grouped according to methods that ensure idempotency.
Idempotent create APIs with a client token
The API actions in this list support idempotency with the use of a client token. The corresponding AWS CLI commands also support idempotency using a client token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. To make an idempotent API request using one of these actions, specify a client token in the request. We recommend that you don't reuse the same client token for other API requests. If you don’t provide a client token for these APIs, a default client token is automatically provided by SDKs.
Given a request action that has succeeded:
If you retry the request using the same client token and the same parameters, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.
If you retry the request using the same client token, but one or more of the parameters are different, the retry throws a ValidationException
with an IdempotentParameterMismatch
error.
Client tokens expire eight hours after a request is made. If you retry the request with the expired token, a new resource is created.
If the original resource is deleted and you retry the request, a new resource is created.
Idempotent create APIs with a client token:
CreateEnvironmentTemplateVersion
CreateServiceTemplateVersion
CreateEnvironmentAccountConnection
Idempotent delete APIs
Given a request action that has succeeded:
When you retry the request with an API from this group and the resource was deleted, its metadata is returned in the response.
If you retry and the resource doesn't exist, the response is empty.
In both cases, the retry succeeds.
Idempotent delete APIs:
DeleteEnvironmentTemplate
DeleteEnvironmentTemplateVersion
DeleteServiceTemplate
DeleteServiceTemplateVersion
DeleteEnvironmentAccountConnection
Asynchronous idempotent delete APIs
Given a request action that has succeeded:
If you retry the request with an API from this group, if the original request delete operation status is DELETE_IN_PROGRESS
, the retry returns the resource detail data in the response without performing any further actions.
If the original request delete operation is complete, a retry returns an empty response.
Asynchronous idempotent delete APIs:
DeleteEnvironment
DeleteService
In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, AWS Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.
For more information, see Environment account connections in the AWS Proton Administration guide.
", - "CancelEnvironmentDeployment": "Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is IN_PROGRESS
. For more information, see Update an environment in the AWS Proton Administration guide.
The following list includes potential cancellation scenarios.
If the cancellation attempt succeeds, the resulting deployment state is CANCELLED
.
If the cancellation attempt fails, the resulting deployment state is FAILED
.
If the current UpdateEnvironment action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED
and the cancellation attempt has no effect.
Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is IN_PROGRESS
. For more information, see Update a service instance in the AWS Proton Administration guide or the AWS Proton User guide.
The following list includes potential cancellation scenarios.
If the cancellation attempt succeeds, the resulting deployment state is CANCELLED
.
If the cancellation attempt fails, the resulting deployment state is FAILED
.
If the current UpdateServiceInstance action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED
and the cancellation attempt has no effect.
Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is IN_PROGRESS
. For more information, see Update a service pipeline in the AWS Proton Administration guide or the AWS Proton User guide.
The following list includes potential cancellation scenarios.
If the cancellation attempt succeeds, the resulting deployment state is CANCELLED
.
If the cancellation attempt fails, the resulting deployment state is FAILED
.
If the current UpdateServicePipeline action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED
and the cancellation attempt has no effect.
Deploy a new environment. An AWS Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services. For more information, see the Environments in the AWS Proton Administration Guide.
", - "CreateEnvironmentAccountConnection": "Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from the management account.
An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the AWS Proton Administration guide.
", - "CreateEnvironmentTemplate": "Create an environment template for AWS Proton. For more information, see Environment Templates in the AWS Proton Administration Guide.
You can create an environment template in one of the two following ways:
Register and publish a standard environment template that instructs AWS Proton to deploy and manage environment infrastructure.
Register and publish a customer managed environment template that connects AWS Proton to your existing provisioned infrastructure that you manage. AWS Proton doesn't manage your existing provisioned infrastructure. To create an environment template for customer provisioned and managed infrastructure, include the provisioning
parameter and set the value to CUSTOMER_MANAGED
. For more information, see Register and publish an environment template in the AWS Proton Administration Guide.
In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, AWS Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.
For more information, see Environment account connections in the AWS Proton Administrator guide.
", + "CancelEnvironmentDeployment": "Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is IN_PROGRESS
. For more information, see Update an environment in the AWS Proton Administrator guide.
The following list includes potential cancellation scenarios.
If the cancellation attempt succeeds, the resulting deployment state is CANCELLED
.
If the cancellation attempt fails, the resulting deployment state is FAILED
.
If the current UpdateEnvironment action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED
and the cancellation attempt has no effect.
Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is IN_PROGRESS
. For more information, see Update a service instance in the AWS Proton Administrator guide or the AWS Proton User guide.
The following list includes potential cancellation scenarios.
If the cancellation attempt succeeds, the resulting deployment state is CANCELLED
.
If the cancellation attempt fails, the resulting deployment state is FAILED
.
If the current UpdateServiceInstance action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED
and the cancellation attempt has no effect.
Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is IN_PROGRESS
. For more information, see Update a service pipeline in the AWS Proton Administrator guide or the AWS Proton User guide.
The following list includes potential cancellation scenarios.
If the cancellation attempt succeeds, the resulting deployment state is CANCELLED
.
If the cancellation attempt fails, the resulting deployment state is FAILED
.
If the current UpdateServicePipeline action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED
and the cancellation attempt has no effect.
Deploy a new environment. An AWS Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services. For more information, see the Environments in the AWS Proton Administrator Guide.
", + "CreateEnvironmentAccountConnection": "Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from a management account.
An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the AWS Proton Administrator guide.
", + "CreateEnvironmentTemplate": "Create an environment template for AWS Proton. For more information, see Environment Templates in the AWS Proton Administrator Guide.
You can create an environment template in one of the two following ways:
Register and publish a standard environment template that instructs AWS Proton to deploy and manage environment infrastructure.
Register and publish a customer managed environment template that connects AWS Proton to your existing provisioned infrastructure that you manage. AWS Proton doesn't manage your existing provisioned infrastructure. To create an environment template for customer provisioned and managed infrastructure, include the provisioning
parameter and set the value to CUSTOMER_MANAGED
. For more information, see Register and publish an environment template in the AWS Proton Administrator Guide.
Create a new major or minor version of an environment template. A major version of an environment template is a version that isn't backwards compatible. A minor version of an environment template is a version that's backwards compatible within its major version.
", - "CreateService": "Create an AWS Proton service. An AWS Proton service is an instantiation of a service template and often includes several service instances and pipeline. For more information, see Services in the AWS Proton Administration Guide and Services in the AWS Proton User Guide.
", - "CreateServiceTemplate": "Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CICD service pipeline. Developers, in turn, select the service template from AWS Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. AWS Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Service Templates in the AWS Proton Administration Guide.
", + "CreateService": "Create an AWS Proton service. An AWS Proton service is an instantiation of a service template and often includes several service instances and pipeline. For more information, see Services in the AWS Proton Administrator Guide and Services in the AWS Proton User Guide.
", + "CreateServiceTemplate": "Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CICD service pipeline. Developers, in turn, select the service template from AWS Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. AWS Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Service Templates in the AWS Proton Administrator Guide.
", "CreateServiceTemplateVersion": "Create a new major or minor version of a service template. A major version of a service template is a version that isn't backwards compatible. A minor version of a service template is a version that's backwards compatible within its major version.
", "DeleteEnvironment": "Delete an environment.
", - "DeleteEnvironmentAccountConnection": "In an environment account, delete an environment account connection.
After you delete an environment account connection that’s in use by an AWS Proton environment, AWS Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection.
For more information, see Environment account connections in the AWS Proton Administration guide.
", + "DeleteEnvironmentAccountConnection": "In an environment account, delete an environment account connection.
After you delete an environment account connection that’s in use by an AWS Proton environment, AWS Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection.
For more information, see Environment account connections in the AWS Proton Administrator guide.
", "DeleteEnvironmentTemplate": "If no other major or minor versions of an environment template exist, delete the environment template.
", "DeleteEnvironmentTemplateVersion": "If no other minor versions of an environment template exist, delete a major version of the environment template if it's not the Recommended
version. Delete the Recommended
version of the environment template if no other major versions or minor versions of the environment template exist. A major version of an environment template is a version that's not backwards compatible.
Delete a minor version of an environment template if it isn't the Recommended
version. Delete a Recommended
minor version of the environment template if no other minor versions of the environment template exist. A minor version of an environment template is a version that's backwards compatible.
Delete a service.
", @@ -22,14 +22,14 @@ "DeleteServiceTemplateVersion": "If no other minor versions of a service template exist, delete a major version of the service template if it's not the Recommended
version. Delete the Recommended
version of the service template if no other major versions or minor versions of the service template exist. A major version of a service template is a version that isn't backwards compatible.
Delete a minor version of a service template if it's not the Recommended
version. Delete a Recommended
minor version of the service template if no other minor versions of the service template exist. A minor version of a service template is a version that's backwards compatible.
Get detail data for the AWS Proton pipeline service role.
", "GetEnvironment": "Get detail data for an environment.
", - "GetEnvironmentAccountConnection": "In an environment account, view the detail data for an environment account connection.
For more information, see Environment account connections in the AWS Proton Administration guide.
", + "GetEnvironmentAccountConnection": "In an environment account, view the detail data for an environment account connection.
For more information, see Environment account connections in the AWS Proton Administrator guide.
", "GetEnvironmentTemplate": "Get detail data for an environment template.
", "GetEnvironmentTemplateVersion": "View detail data for a major or minor version of an environment template.
", "GetService": "Get detail data for a service.
", "GetServiceInstance": "Get detail data for a service instance. A service instance is an instantiation of service template, which is running in a specific environment.
", "GetServiceTemplate": "Get detail data for a service template.
", "GetServiceTemplateVersion": "View detail data for a major or minor version of a service template.
", - "ListEnvironmentAccountConnections": "View a list of environment account connections.
For more information, see Environment account connections in the AWS Proton Administration guide.
", + "ListEnvironmentAccountConnections": "View a list of environment account connections.
For more information, see Environment account connections in the AWS Proton Administrator guide.
", "ListEnvironmentTemplateVersions": "List major or minor versions of an environment template with detail data.
", "ListEnvironmentTemplates": "List environment templates.
", "ListEnvironments": "List environments with detail data summaries.
", @@ -37,13 +37,13 @@ "ListServiceTemplateVersions": "List major or minor versions of a service template with detail data.
", "ListServiceTemplates": "List service templates with detail data.
", "ListServices": "List services with summaries of detail data.
", - "ListTagsForResource": "List tags for a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.
", - "RejectEnvironmentAccountConnection": "In a management account, reject an environment account connection from another environment account.
After you reject an environment account connection request, you won’t be able to accept or use the rejected environment account connection.
You can’t reject an environment account connection that is connected to an environment.
For more information, see Environment account connections in the AWS Proton Administration guide.
", - "TagResource": "Tag a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.
", - "UntagResource": "Remove a tag from a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.
", + "ListTagsForResource": "List tags for a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.
", + "RejectEnvironmentAccountConnection": "In a management account, reject an environment account connection from another environment account.
After you reject an environment account connection request, you won’t be able to accept or use the rejected environment account connection.
You can’t reject an environment account connection that is connected to an environment.
For more information, see Environment account connections in the AWS Proton Administrator guide.
", + "TagResource": "Tag a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.
", + "UntagResource": "Remove a tag from a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.
", "UpdateAccountSettings": "Update the AWS Proton pipeline service account settings.
", - "UpdateEnvironment": "Update an environment.
If the environment is associated with an environment account connection, don't update or include the protonServiceRoleArn
parameter to update or connect to an environment account connection.
You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and associated with the current environment.
If the environment isn't associated with an environment account connection, don't update or include the environmentAccountConnectionId
parameter to update or connect to an environment account connection.
You can update either the environmentAccountConnectionId
or protonServiceRoleArn
parameter and value. You can’t update both.
There are four modes for updating an environment as described in the following. The deploymentType
field defines the mode.
NONE
In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated.
CURRENT_VERSION
In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this deployment-type
.
MINOR_VERSION
In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use.
MAJOR_VERSION
In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version (optional).
In an environment account, update an environment account connection to use a new IAM role.
For more information, see Environment account connections in the AWS Proton Administration guide.
", + "UpdateEnvironment": "Update an environment.
If the environment is associated with an environment account connection, don't update or include the protonServiceRoleArn
parameter to update or connect to an environment account connection.
You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and is associated with the current environment.
If the environment isn't associated with an environment account connection, don't update or include the environmentAccountConnectionId
parameter to update or connect to an environment account connection.
You can update either the environmentAccountConnectionId
or protonServiceRoleArn
parameter and value. You can’t update both.
There are four modes for updating an environment as described in the following. The deploymentType
field defines the mode.
NONE
In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated.
CURRENT_VERSION
In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this deployment-type
.
MINOR_VERSION
In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use.
MAJOR_VERSION
In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version (optional).
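A hedged Go sketch of selecting one of these modes, assuming the generated DeploymentUpdateType constants; the environment name and spec are placeholders:

```go
// Hedged sketch: a MINOR_VERSION update, redeploying the environment on the
// latest recommended minor version of its current major version.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/proton"
)

func main() {
	svc := proton.New(session.Must(session.NewSession()))

	out, err := svc.UpdateEnvironment(&proton.UpdateEnvironmentInput{
		Name:           aws.String("example-env"),                              // placeholder
		DeploymentType: aws.String(proton.DeploymentUpdateTypeMinorVersion),    // NONE | CURRENT_VERSION | MINOR_VERSION | MAJOR_VERSION
		Spec:           aws.String("proton: EnvironmentSpec\nspec:\n  a: b\n"), // placeholder spec
	})
	if err != nil {
		fmt.Println("UpdateEnvironment failed:", err)
		return
	}
	fmt.Println("deployment status:", aws.StringValue(out.Environment.DeploymentStatus))
}
```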
In an environment account, update an environment account connection to use a new IAM role.
For more information, see Environment account connections in the AWS Proton Administrator guide.
", "UpdateEnvironmentTemplate": "Update an environment template.
", "UpdateEnvironmentTemplateVersion": "Update a major or minor version of an environment template.
", "UpdateService": "Edit a service description or use a spec to add and delete service instances.
Existing service instances and the service pipeline can't be edited using this API. They can only be deleted.
Use the description
parameter to modify the description.
Edit the spec
parameter to add or delete instances.
The Amazon Resource Name (ARN) of the IAM service role that's created in the environment account. AWS Proton uses this role to provision infrastructure resources in the associated environment account.
", "CreateEnvironmentInput$protonServiceRoleArn": "The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf. You must include either the environmentAccountConnectionId
or protonServiceRoleArn
parameter and value.
A customer provided encryption key that AWS Proton uses to encrypt data.
", - "CreateServiceInput$repositoryConnectionArn": "The ARN of the repository connection. For more information, see Set up repository connection in the AWS Proton Administration Guide and Getting started in the AWS Proton User Guide. Don't include this parameter if your service template doesn't include a service pipeline.
", + "CreateServiceInput$repositoryConnectionArn": "The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up repository connection in the AWS Proton Administrator Guide and Setting up with AWS Proton in the AWS Proton User Guide. Don't include this parameter if your service template doesn't include a service pipeline.
", "CreateServiceTemplateInput$encryptionKey": "A customer provided encryption key that's used to encrypt data.
", - "Environment$protonServiceRoleArn": "The ARN of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.
", + "Environment$protonServiceRoleArn": "The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.
", "EnvironmentAccountConnection$roleArn": "The IAM service role that's associated with the environment account connection.
", "EnvironmentAccountConnectionSummary$roleArn": "The IAM service role that's associated with the environment account connection.
", - "EnvironmentSummary$protonServiceRoleArn": "The ARN of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.
", + "EnvironmentSummary$protonServiceRoleArn": "The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.
", "EnvironmentTemplate$encryptionKey": "The customer provided encryption key for the environment template.
", - "ListTagsForResourceInput$resourceArn": "The ARN of the resource for the listed tags.
", - "Service$repositoryConnectionArn": "The ARN of the repository connection. For more information, see Set up a repository connection in the AWS Proton Administration Guide and Getting started in the AWS Proton User Guide.
", + "ListTagsForResourceInput$resourceArn": "The Amazon Resource Name (ARN) of the resource for the listed tags.
", + "Service$repositoryConnectionArn": "The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up a repository connection in the AWS Proton Administrator Guide and Setting up with AWS Proton in the AWS Proton User Guide.
", "ServicePipeline$arn": "The Amazon Resource Name (ARN) of the service pipeline.
", "ServiceTemplate$encryptionKey": "The customer provided service template encryption key that's used to encrypt data.
", "TagResourceInput$resourceArn": "The Amazon Resource Name (ARN) of the resource that the resource tag is applied to.
", "UntagResourceInput$resourceArn": "The Amazon Resource Name (ARN) of the resource that the tag is to be removed from.
", "UpdateAccountSettingsInput$pipelineServiceRoleArn": "The Amazon Resource Name (ARN) of the AWS Proton pipeline service role.
", "UpdateEnvironmentAccountConnectionInput$roleArn": "The Amazon Resource Name (ARN) of the IAM service role that is associated with the environment account connection to update.
", - "UpdateEnvironmentInput$protonServiceRoleArn": "The ARN of the AWS Proton service role that allows AWS Proton to make API calls to other services your behalf.
" + "UpdateEnvironmentInput$protonServiceRoleArn": "The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make API calls to other services your behalf.
" } }, "AwsAccountId": { @@ -412,7 +412,7 @@ "base": null, "refs": { "AcceptEnvironmentAccountConnectionInput$id": "The ID of the environment account connection.
", - "CreateEnvironmentInput$environmentAccountConnectionId": "The ID of the environment account connection that you provide if you're provisioning your environment infrastructure resources to an environment account. You must include either the environmentAccountConnectionId
or protonServiceRoleArn
parameter and value. For more information, see Environment account connections in the AWS Proton Administration guide.
The ID of the environment account connection that you provide if you're provisioning your environment infrastructure resources to an environment account. You must include either the environmentAccountConnectionId
or protonServiceRoleArn
parameter and value. For more information, see Environment account connections in the AWS Proton Administrator guide.
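To make the either/or rule concrete, a minimal fragment (same client setup as the proton sketch above; every identifier is a placeholder) that provisions into an environment account through a connection ID rather than a service role ARN:

// Exactly one of EnvironmentAccountConnectionId or ProtonServiceRoleArn
// may be supplied; here the environment account connection is used.
env, err := svc.CreateEnvironment(&proton.CreateEnvironmentInput{
	Name:                           aws.String("my-env"),
	TemplateName:                   aws.String("my-env-template"),
	TemplateMajorVersion:           aws.String("1"),
	Spec:                           aws.String("proton: EnvironmentSpec\n"), // placeholder spec
	EnvironmentAccountConnectionId: aws.String("aec-1234567890"),
})
if err != nil {
	fmt.Println("CreateEnvironment failed:", err)
} else {
	fmt.Println(env.Environment)
}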
The ID of the environment account connection to delete.
", "Environment$environmentAccountConnectionId": "The ID of the environment account connection that's used to provision infrastructure resources in an environment account.
", "EnvironmentAccountConnection$id": "The ID of the environment account connection.
", @@ -421,7 +421,7 @@ "GetEnvironmentAccountConnectionInput$id": "The ID of the environment account connection.
", "RejectEnvironmentAccountConnectionInput$id": "The ID of the environment account connection to reject.
", "UpdateEnvironmentAccountConnectionInput$id": "The ID of the environment account connection to update.
", - "UpdateEnvironmentInput$environmentAccountConnectionId": "The ID of the environment account connection.
You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and associated with the current environment.
" + "UpdateEnvironmentInput$environmentAccountConnectionId": "The ID of the environment account connection.
You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and is associated with the current environment.
" } }, "EnvironmentAccountConnectionRequesterAccountType": { @@ -795,7 +795,7 @@ "base": null, "refs": { "CreateEnvironmentTemplateInput$provisioning": "When included, indicates that the environment template is for customer provisioned and managed infrastructure.
", - "CreateServiceTemplateInput$pipelineProvisioning": "AWS Proton includes a service pipeline for your service by default. When included, this parameter indicates that an AWS Proton service pipeline won't be included for your service. Once specified, this parameter can't be changed. For more information, see Service template bundles in the AWS Proton Administration Guide.
", + "CreateServiceTemplateInput$pipelineProvisioning": "AWS Proton includes a service pipeline for your service by default. When included, this parameter indicates that an AWS Proton service pipeline won't be included for your service. Once specified, this parameter can't be changed. For more information, see Service template bundles in the AWS Proton Administrator Guide.
", "Environment$provisioning": "When included, indicates that the environment template is for customer provisioned and managed infrastructure.
", "EnvironmentSummary$provisioning": "When included, indicates that the environment template is for customer provisioned and managed infrastructure.
", "EnvironmentTemplate$provisioning": "When included, indicates that the environment template is for customer provisioned and managed infrastructure.
", @@ -832,7 +832,7 @@ "CompatibleEnvironmentTemplateInput$templateName": "The compatible environment template name.
", "CreateEnvironmentAccountConnectionInput$environmentName": "The name of the AWS Proton environment that's created in the associated management account.
", "CreateEnvironmentInput$name": "The name of the environment.
", - "CreateEnvironmentInput$templateName": "The name of the environment template. For more information, see Environment Templates in the AWS Proton Administration Guide.
", + "CreateEnvironmentInput$templateName": "The name of the environment template. For more information, see Environment Templates in the AWS Proton Administrator Guide.
", "CreateEnvironmentTemplateInput$name": "The name of the environment template.
", "CreateEnvironmentTemplateVersionInput$templateName": "The name of the environment template.
", "CreateServiceInput$name": "The service name.
", @@ -846,7 +846,7 @@ "DeleteServiceTemplateInput$name": "The name of the service template to delete.
", "DeleteServiceTemplateVersionInput$templateName": "The name of the service template.
", "Environment$name": "The name of the environment.
", - "Environment$templateName": "The ARN of the environment template.
", + "Environment$templateName": "The Amazon Resource Name (ARN) of the environment template.
", "EnvironmentAccountConnection$environmentName": "The name of the environment that's associated with the environment account connection.
", "EnvironmentAccountConnectionSummary$environmentName": "The name of the environment that's associated with the environment account connection.
", "EnvironmentSummary$name": "The name of the environment.
", @@ -971,7 +971,7 @@ } }, "ServiceQuotaExceededException": { - "base": "A quota was exceeded. For more information, see AWS Proton Quotas in the AWS Proton Administration Guide.
", + "base": "A quota was exceeded. For more information, see AWS Proton Quotas in the AWS Proton Administrator Guide.
", "refs": { } }, @@ -1053,14 +1053,14 @@ "SpecContents": { "base": null, "refs": { - "CreateEnvironmentInput$spec": "A link to a YAML formatted spec file that provides inputs as defined in the environment template bundle schema file. For more information, see Environments in the AWS Proton Administration Guide.
", - "CreateServiceInput$spec": "A link to a spec file that provides inputs as defined in the service template bundle schema file. The spec file is in YAML format. Don’t include pipeline inputs in the spec if your service template doesn’t include a service pipeline. For more information, see Create a service in the AWS Proton Administration Guide and Create a service in the AWS Proton User Guide.
", + "CreateEnvironmentInput$spec": "A link to a YAML formatted spec file that provides inputs as defined in the environment template bundle schema file. For more information, see Environments in the AWS Proton Administrator Guide.
", + "CreateServiceInput$spec": "A link to a spec file that provides inputs as defined in the service template bundle schema file. The spec file is in YAML format. Don’t include pipeline inputs in the spec if your service template doesn’t include a service pipeline. For more information, see Create a service in the AWS Proton Administrator Guide and Create a service in the AWS Proton User Guide.
", "Environment$spec": "The environment spec.
", "Service$spec": "The formatted specification that defines the service.
", "ServiceInstance$spec": "The service spec that was used to create the service instance.
", "ServicePipeline$spec": "The service spec that was used to create the service pipeline.
", "UpdateEnvironmentInput$spec": "The formatted specification that defines the update.
", - "UpdateServiceInput$spec": "Lists the service instances to add and the existing service instances to remain. Omit the existing service instances to delete from the list. Don't include edits to the existing service instances or pipeline. For more information, see Edit a service in the AWS Proton Administration Guide or the AWS Proton User Guide.
", + "UpdateServiceInput$spec": "Lists the service instances to add and the existing service instances to remain. Omit the existing service instances to delete from the list. Don't include edits to the existing service instances or pipeline. For more information, see Edit a service in the AWS Proton Administrator Guide or the AWS Proton User Guide.
", "UpdateServiceInstanceInput$spec": "The formatted specification that defines the service instance update.
", "UpdateServicePipelineInput$spec": "The spec for the service pipeline to update.
" } @@ -1110,11 +1110,11 @@ "TagList": { "base": null, "refs": { - "CreateEnvironmentInput$tags": "Create tags for your environment. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.
", - "CreateEnvironmentTemplateInput$tags": "Create tags for your environment template. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.
", + "CreateEnvironmentInput$tags": "Create tags for your environment. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.
", + "CreateEnvironmentTemplateInput$tags": "Create tags for your environment template. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.
", "CreateEnvironmentTemplateVersionInput$tags": "Create tags for a new version of an environment template.
", - "CreateServiceInput$tags": "Create tags for your service. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.
", - "CreateServiceTemplateInput$tags": "Create tags for your service template. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.
", + "CreateServiceInput$tags": "Create tags for your service. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.
", + "CreateServiceTemplateInput$tags": "Create tags for your service template. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.
", "CreateServiceTemplateVersionInput$tags": "Create tags for a new version of a service template.
", "ListTagsForResourceOutput$tags": "An array of resource tags with detail data.
", "TagResourceInput$tags": "An array of resource tags to apply to a resource.
" @@ -1150,7 +1150,7 @@ "CompatibleEnvironmentTemplateInput$majorVersion": "The major version of the compatible environment template.
", "CreateEnvironmentInput$templateMajorVersion": "The ID of the major version of the environment template.
", "CreateEnvironmentInput$templateMinorVersion": "The ID of the minor version of the environment template.
", - "CreateEnvironmentTemplateVersionInput$majorVersion": "To create a new minor version of the environment template, include a majorVersion
.
To create a new major and minor version of the environment template, exclude majorVersion
.
To create a new minor version of the environment template, include a majorVersion
.
To create a new major and minor version of the environment template, exclude majorVersion
.
The ID of the major version of the service template that was used to create the service.
", "CreateServiceInput$templateMinorVersion": "The ID of the minor version of the service template that was used to create the service.
", "CreateServiceTemplateVersionInput$majorVersion": "To create a new minor version of the service template, include a majorVersion
.
To create a new major and minor version of the service template, exclude majorVersion
.
This parameter isn't currently supported.
", "DescribeDBClusterParametersMessage$Filters": "This parameter isn't currently supported.
", "DescribeDBClusterSnapshotsMessage$Filters": "A filter that specifies one or more DB cluster snapshots to describe.
Supported filters:
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs).
db-cluster-snapshot-id
- Accepts DB cluster snapshot identifiers.
snapshot-type
- Accepts types of DB cluster snapshots.
engine
- Accepts names of database engines.
A filter that specifies one or more DB clusters to describe.
Supported filters:
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB clusters identified by these ARNs.
A filter that specifies one or more DB clusters to describe.
Supported filters:
clone-group-id
- Accepts clone group identifiers. The results list will only include information about the DB clusters associated with these clone groups.
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB clusters identified by these ARNs.
domain
- Accepts Active Directory directory IDs. The results list will only include information about the DB clusters associated with these domains.
engine
- Accepts engine names. The results list will only include information about the DB clusters for these engines.
This parameter isn't currently supported.
", "DescribeDBInstanceAutomatedBackupsMessage$Filters": "A filter that specifies which resources to return based on status.
Supported filters are the following:
status
active
- automated backups for current instances
retained
- automated backups for deleted instances and after backup replication is stopped
creating
- automated backups that are waiting for the first automated snapshot to be available
db-instance-id
- Accepts DB instance identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance automated backups identified by these ARNs.
dbi-resource-id
- Accepts DB resource identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance resources identified by these ARNs.
Returns all resources by default. The status for each resource is specified in the response.
", "DescribeDBInstancesMessage$Filters": "A filter that specifies one or more DB instances to describe.
Supported filters:
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB instances associated with the DB clusters identified by these ARNs.
db-instance-id
- Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list will only include information about the DB instances identified by these ARNs.
dbi-resource-id
- Accepts DB instance resource identifiers. The results list will only include information about the DB instances identified by these DB instance resource identifiers.
domain
- Accepts Active Directory directory IDs. The results list will only include information about the DB instances associated with these domains.
engine
- Accepts engine names. The results list will only include information about the DB instances for these engines.
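As a hedged example of these filters through this SDK's rds client (the engine value is arbitrary):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	// List only PostgreSQL instances by using the engine filter.
	out, err := svc.DescribeDBInstances(&rds.DescribeDBInstancesInput{
		Filters: []*rds.Filter{{
			Name:   aws.String("engine"),
			Values: []*string{aws.String("postgres")},
		}},
	})
	if err != nil {
		fmt.Println("DescribeDBInstances failed:", err)
		return
	}
	for _, db := range out.DBInstances {
		fmt.Println(aws.StringValue(db.DBInstanceIdentifier))
	}
}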
This parameter isn't currently supported.
", "DescribeEventsMessage$Filters": "This parameter isn't currently supported.
", "DescribeExportTasksMessage$Filters": "Filters specify one or more snapshot exports to describe. The filters are specified as name-value pairs that define what to include in the output. Filter names and values are case-sensitive.
Supported filters include the following:
export-task-identifier
- An identifier for the snapshot export task.
s3-bucket
- The Amazon S3 bucket the snapshot is exported to.
source-arn
- The Amazon Resource Name (ARN) of the snapshot exported to Amazon S3.
status
- The status of the export task. Must be lowercase, for example, complete.
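A short fragment (reusing the rds client from the previous sketch) that narrows export tasks with the lowercase status filter:

// Only completed snapshot exports; note the lowercase filter value.
tasks, err := svc.DescribeExportTasks(&rds.DescribeExportTasksInput{
	Filters: []*rds.Filter{{
		Name:   aws.String("status"),
		Values: []*string{aws.String("complete")},
	}},
})
if err == nil {
	for _, t := range tasks.ExportTasks {
		fmt.Println(aws.StringValue(t.ExportTaskIdentifier), aws.StringValue(t.Status))
	}
}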
A filter that specifies one or more global DB clusters to describe.
Supported filters:
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB clusters identified by these ARNs.
This parameter isn't currently supported.
", "DescribeInstallationMediaMessage$Filters": "A filter that specifies one or more installation media to describe. Supported filters include the following:
custom-availability-zone-id
- Accepts custom Availability Zone (AZ) identifiers. The results list includes information about only the custom AZs identified by these identifiers.
engine
- Accepts database engines. The results list includes information about only the database engines identified by these identifiers.
For more information about the valid engines for installation media, see ImportInstallationMedia.
This parameter isn't currently supported.
", "DescribeOptionGroupsMessage$Filters": "This parameter isn't currently supported.
", @@ -3965,7 +3965,7 @@ "DBSnapshot$SnapshotType": "Provides the type of the DB snapshot.
", "DBSnapshot$OptionGroupName": "Provides the option group name for the DB snapshot.
", "DBSnapshot$SourceRegion": "The Amazon Web Services Region that the DB snapshot was created in or copied from.
", - "DBSnapshot$SourceDBSnapshotIdentifier": "The DB snapshot Amazon Resource Name (ARN) that the DB snapshot was copied from. It only has value in case of cross-customer or cross-region copy.
", + "DBSnapshot$SourceDBSnapshotIdentifier": "The DB snapshot Amazon Resource Name (ARN) that the DB snapshot was copied from. It only has a value in the case of a cross-account or cross-Region copy.
", "DBSnapshot$StorageType": "Specifies the storage type associated with DB snapshot.
", "DBSnapshot$TdeCredentialArn": "The ARN from the key store with which to associate the instance for TDE encryption.
", "DBSnapshot$KmsKeyId": " If Encrypted
is true, the Amazon Web Services KMS key identifier for the encrypted DB snapshot.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the Amazon Web Services KMS customer master key (CMK).
", @@ -4633,8 +4633,9 @@ "DBProxyEndpoint$CreatedDate": "The date and time when the DB proxy endpoint was first created.
", "DBProxyTargetGroup$CreatedDate": "The date and time when the target group was first created.
", "DBProxyTargetGroup$UpdatedDate": "The date and time when the target group was last updated.
", - "DBSnapshot$SnapshotCreateTime": "Specifies when the snapshot was taken in Coordinated Universal Time (UTC).
", + "DBSnapshot$SnapshotCreateTime": "Specifies when the snapshot was taken in Coordinated Universal Time (UTC). Changes for the copy when the snapshot is copied.
", "DBSnapshot$InstanceCreateTime": "Specifies the time in Coordinated Universal Time (UTC) when the DB instance, from which the snapshot was taken, was created.
", + "DBSnapshot$OriginalSnapshotCreateTime": "Specifies the time of the CreateDBSnapshot operation in Coordinated Universal Time (UTC). Doesn't change when the snapshot is copied.
", "DescribeEventsMessage$StartTime": "The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.
Example: 2009-07-08T18:00Z
", "DescribeEventsMessage$EndTime": "The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.
Example: 2009-07-08T18:00Z
", "Event$Date": "Specifies the date and time of the event.
", diff --git a/service/codebuild/api.go b/service/codebuild/api.go index cb2fd11f24..c8c16b8606 100644 --- a/service/codebuild/api.go +++ b/service/codebuild/api.go @@ -545,12 +545,11 @@ func (c *CodeBuild) CreateProjectRequest(input *CreateProjectInput) (req *reques // The input value that was provided is not valid. // // * ResourceAlreadyExistsException -// The specified Amazon Web Services resource cannot be created, because an -// Amazon Web Services resource with the same settings already exists. +// The specified AWS resource cannot be created, because an AWS resource with +// the same settings already exists. // // * AccountLimitExceededException -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/CreateProject func (c *CodeBuild) CreateProject(input *CreateProjectInput) (*CreateProjectOutput, error) { @@ -632,12 +631,11 @@ func (c *CodeBuild) CreateReportGroupRequest(input *CreateReportGroupInput) (req // The input value that was provided is not valid. // // * ResourceAlreadyExistsException -// The specified Amazon Web Services resource cannot be created, because an -// Amazon Web Services resource with the same settings already exists. +// The specified AWS resource cannot be created, because an AWS resource with +// the same settings already exists. // // * AccountLimitExceededException -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/CreateReportGroup func (c *CodeBuild) CreateReportGroup(input *CreateReportGroupInput) (*CreateReportGroupOutput, error) { @@ -705,17 +703,18 @@ func (c *CodeBuild) CreateWebhookRequest(input *CreateWebhookInput) (req *reques // CreateWebhook API operation for AWS CodeBuild. // -// For an existing CodeBuild build project that has its source code stored in -// a GitHub or Bitbucket repository, enables CodeBuild to start rebuilding the -// source code every time a code change is pushed to the repository. +// For an existing AWS CodeBuild build project that has its source code stored +// in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding +// the source code every time a code change is pushed to the repository. // -// If you enable webhooks for an CodeBuild project, and the project is used -// as a build step in CodePipeline, then two identical builds are created for -// each commit. One build is triggered through webhooks, and one through CodePipeline. -// Because billing is on a per-build basis, you are billed for both builds. -// Therefore, if you are using CodePipeline, we recommend that you disable webhooks -// in CodeBuild. In the CodeBuild console, clear the Webhook box. For more information, -// see step 5 in Change a Build Project's Settings (https://docs.aws.amazon.com/codebuild/latest/userguide/change-project.html#change-project-console). +// If you enable webhooks for an AWS CodeBuild project, and the project is used +// as a build step in AWS CodePipeline, then two identical builds are created +// for each commit. One build is triggered through webhooks, and one through +// AWS CodePipeline. Because billing is on a per-build basis, you are billed +// for both builds. 
Therefore, if you are using AWS CodePipeline, we recommend +// that you disable webhooks in AWS CodeBuild. In the AWS CodeBuild console, +// clear the Webhook box. For more information, see step 5 in Change a Build +// Project's Settings (https://docs.aws.amazon.com/codebuild/latest/userguide/change-project.html#change-project-console). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -732,11 +731,11 @@ func (c *CodeBuild) CreateWebhookRequest(input *CreateWebhookInput) (req *reques // There was a problem with the underlying OAuth provider. // // * ResourceAlreadyExistsException -// The specified Amazon Web Services resource cannot be created, because an -// Amazon Web Services resource with the same settings already exists. +// The specified AWS resource cannot be created, because an AWS resource with +// the same settings already exists. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/CreateWebhook func (c *CodeBuild) CreateWebhook(input *CreateWebhookInput) (*CreateWebhookOutput, error) { @@ -1218,7 +1217,7 @@ func (c *CodeBuild) DeleteSourceCredentialsRequest(input *DeleteSourceCredential // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/DeleteSourceCredentials func (c *CodeBuild) DeleteSourceCredentials(input *DeleteSourceCredentialsInput) (*DeleteSourceCredentialsOutput, error) { @@ -1287,9 +1286,9 @@ func (c *CodeBuild) DeleteWebhookRequest(input *DeleteWebhookInput) (req *reques // DeleteWebhook API operation for AWS CodeBuild. // -// For an existing CodeBuild build project that has its source code stored in -// a GitHub or Bitbucket repository, stops CodeBuild from rebuilding the source -// code every time a code change is pushed to the repository. +// For an existing AWS CodeBuild build project that has its source code stored +// in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding +// the source code every time a code change is pushed to the repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1303,7 +1302,7 @@ func (c *CodeBuild) DeleteWebhookRequest(input *DeleteWebhookInput) (req *reques // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * OAuthProviderException // There was a problem with the underlying OAuth provider. @@ -1531,7 +1530,7 @@ func (c *CodeBuild) DescribeTestCasesRequest(input *DescribeTestCasesInput) (req // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/DescribeTestCases func (c *CodeBuild) DescribeTestCases(input *DescribeTestCasesInput) (*DescribeTestCasesOutput, error) { @@ -1665,7 +1664,7 @@ func (c *CodeBuild) GetReportGroupTrendRequest(input *GetReportGroupTrendInput) // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/GetReportGroupTrend func (c *CodeBuild) GetReportGroupTrend(input *GetReportGroupTrendInput) (*GetReportGroupTrendOutput, error) { @@ -1744,7 +1743,7 @@ func (c *CodeBuild) GetResourcePolicyRequest(input *GetResourcePolicyInput) (req // // Returned Error Types: // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * InvalidInputException // The input value that was provided is not valid. @@ -1815,8 +1814,8 @@ func (c *CodeBuild) ImportSourceCredentialsRequest(input *ImportSourceCredential // ImportSourceCredentials API operation for AWS CodeBuild. // -// Imports the source repository credentials for an CodeBuild project that has -// its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository. +// Imports the source repository credentials for an AWS CodeBuild project that +// has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1830,12 +1829,11 @@ func (c *CodeBuild) ImportSourceCredentialsRequest(input *ImportSourceCredential // The input value that was provided is not valid. // // * AccountLimitExceededException -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. // // * ResourceAlreadyExistsException -// The specified Amazon Web Services resource cannot be created, because an -// Amazon Web Services resource with the same settings already exists. +// The specified AWS resource cannot be created, because an AWS resource with +// the same settings already exists. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ImportSourceCredentials func (c *CodeBuild) ImportSourceCredentials(input *ImportSourceCredentialsInput) (*ImportSourceCredentialsOutput, error) { @@ -1918,7 +1916,7 @@ func (c *CodeBuild) InvalidateProjectCacheRequest(input *InvalidateProjectCacheI // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/InvalidateProjectCache func (c *CodeBuild) InvalidateProjectCache(input *InvalidateProjectCacheInput) (*InvalidateProjectCacheOutput, error) { @@ -2143,7 +2141,7 @@ func (c *CodeBuild) ListBuildBatchesForProjectRequest(input *ListBuildBatchesFor // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuildBatchesForProject func (c *CodeBuild) ListBuildBatchesForProject(input *ListBuildBatchesForProjectInput) (*ListBuildBatchesForProjectOutput, error) { @@ -2421,7 +2419,7 @@ func (c *CodeBuild) ListBuildsForProjectRequest(input *ListBuildsForProjectInput // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuildsForProject func (c *CodeBuild) ListBuildsForProject(input *ListBuildsForProjectInput) (*ListBuildsForProjectOutput, error) { @@ -2541,7 +2539,7 @@ func (c *CodeBuild) ListCuratedEnvironmentImagesRequest(input *ListCuratedEnviro // ListCuratedEnvironmentImages API operation for AWS CodeBuild. // -// Gets information about Docker images that are managed by CodeBuild. +// Gets information about Docker images that are managed by AWS CodeBuild. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2759,8 +2757,7 @@ func (c *CodeBuild) ListReportGroupsRequest(input *ListReportGroupsInput) (req * // ListReportGroups API operation for AWS CodeBuild. // -// Gets a list ARNs for the report groups in the current Amazon Web Services -// account. +// Gets a list ARNs for the report groups in the current AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2897,8 +2894,7 @@ func (c *CodeBuild) ListReportsRequest(input *ListReportsInput) (req *request.Re // ListReports API operation for AWS CodeBuild. // -// Returns a list of ARNs for the reports in the current Amazon Web Services -// account. +// Returns a list of ARNs for the reports in the current AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3049,7 +3045,7 @@ func (c *CodeBuild) ListReportsForReportGroupRequest(input *ListReportsForReport // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListReportsForReportGroup func (c *CodeBuild) ListReportsForReportGroup(input *ListReportsForReportGroupInput) (*ListReportsForReportGroupOutput, error) { @@ -3175,8 +3171,7 @@ func (c *CodeBuild) ListSharedProjectsRequest(input *ListSharedProjectsInput) (r // ListSharedProjects API operation for AWS CodeBuild. // -// Gets a list of projects that are shared with other Amazon Web Services accounts -// or users. +// Gets a list of projects that are shared with other AWS accounts or users. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3313,8 +3308,7 @@ func (c *CodeBuild) ListSharedReportGroupsRequest(input *ListSharedReportGroupsI // ListSharedReportGroups API operation for AWS CodeBuild. // -// Gets a list of report groups that are shared with other Amazon Web Services -// accounts or users. 
+// Gets a list of report groups that are shared with other AWS accounts or users. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3535,7 +3529,7 @@ func (c *CodeBuild) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req // // Returned Error Types: // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * InvalidInputException // The input value that was provided is not valid. @@ -3620,11 +3614,10 @@ func (c *CodeBuild) RetryBuildRequest(input *RetryBuildInput) (req *request.Requ // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * AccountLimitExceededException -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/RetryBuild func (c *CodeBuild) RetryBuild(input *RetryBuildInput) (*RetryBuildOutput, error) { @@ -3707,7 +3700,7 @@ func (c *CodeBuild) RetryBuildBatchRequest(input *RetryBuildBatchInput) (req *re // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/RetryBuildBatch func (c *CodeBuild) RetryBuildBatch(input *RetryBuildBatchInput) (*RetryBuildBatchOutput, error) { @@ -3789,11 +3782,10 @@ func (c *CodeBuild) StartBuildRequest(input *StartBuildInput) (req *request.Requ // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * AccountLimitExceededException -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StartBuild func (c *CodeBuild) StartBuild(input *StartBuildInput) (*StartBuildOutput, error) { @@ -3875,7 +3867,7 @@ func (c *CodeBuild) StartBuildBatchRequest(input *StartBuildBatchInput) (req *re // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StartBuildBatch func (c *CodeBuild) StartBuildBatch(input *StartBuildBatchInput) (*StartBuildBatchOutput, error) { @@ -3957,7 +3949,7 @@ func (c *CodeBuild) StopBuildRequest(input *StopBuildInput) (req *request.Reques // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StopBuild func (c *CodeBuild) StopBuild(input *StopBuildInput) (*StopBuildOutput, error) { @@ -4039,7 +4031,7 @@ func (c *CodeBuild) StopBuildBatchRequest(input *StopBuildBatchInput) (req *requ // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StopBuildBatch func (c *CodeBuild) StopBuildBatch(input *StopBuildBatchInput) (*StopBuildBatchOutput, error) { @@ -4121,7 +4113,7 @@ func (c *CodeBuild) UpdateProjectRequest(input *UpdateProjectInput) (req *reques // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateProject func (c *CodeBuild) UpdateProject(input *UpdateProjectInput) (*UpdateProjectOutput, error) { @@ -4203,7 +4195,7 @@ func (c *CodeBuild) UpdateReportGroupRequest(input *UpdateReportGroupInput) (req // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateReportGroup func (c *CodeBuild) UpdateReportGroup(input *UpdateReportGroupInput) (*UpdateReportGroupOutput, error) { @@ -4271,7 +4263,7 @@ func (c *CodeBuild) UpdateWebhookRequest(input *UpdateWebhookInput) (req *reques // UpdateWebhook API operation for AWS CodeBuild. // -// Updates the webhook associated with an CodeBuild build project. +// Updates the webhook associated with an AWS CodeBuild build project. // // If you use Bitbucket for your repository, rotateSecret is ignored. // @@ -4287,7 +4279,7 @@ func (c *CodeBuild) UpdateWebhookRequest(input *UpdateWebhookInput) (req *reques // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * OAuthProviderException // There was a problem with the underlying OAuth provider. @@ -4314,8 +4306,7 @@ func (c *CodeBuild) UpdateWebhookWithContext(ctx aws.Context, input *UpdateWebho return out, req.Send() } -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. type AccountLimitExceededException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -4591,8 +4582,8 @@ type BatchGetProjectsInput struct { _ struct{} `type:"structure"` // The names or ARNs of the build projects. To get information about a project - // shared with your Amazon Web Services account, its ARN must be specified. - // You cannot specify a shared project using its name. + // shared with your AWS account, its ARN must be specified. You cannot specify + // a shared project using its name. // // Names is a required field Names []*string `locationName:"names" min:"1" type:"list" required:"true"` @@ -4816,7 +4807,7 @@ type BatchRestrictions struct { // An array of strings that specify the compute types that are allowed for the // batch build. 
See Build environment compute types (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) - // in the CodeBuild User Guide for these values. + // in the AWS CodeBuild User Guide for these values. ComputeTypesAllowed []*string `locationName:"computeTypesAllowed" type:"list"` // Specifies the maximum number of builds allowed. @@ -4890,8 +4881,8 @@ type Build struct { // Contains information about the debug session for this build. DebugSession *DebugSession `locationName:"debugSession" type:"structure"` - // The Key Management Service customer master key (CMK) to be used for encrypting - // the build output artifacts. + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the build output artifacts. // // You can use a cross-account KMS key to encrypt the build output artifacts // if your service role has permission to that key. @@ -4908,11 +4899,11 @@ type Build struct { // A list of exported environment variables for this build. // - // Exported environment variables are used in conjunction with CodePipeline + // Exported environment variables are used in conjunction with AWS CodePipeline // to export environment variables from the current build stage to subsequent // stages in the pipeline. For more information, see Working with variables // (https://docs.aws.amazon.com/codepipeline/latest/userguide/actions-variables.html) - // in the CodePipeline User Guide. + // in the AWS CodePipeline User Guide. ExportedEnvironmentVariables []*ExportedEnvironmentVariable `locationName:"exportedEnvironmentVariables" type:"list"` // An array of ProjectFileSystemLocation objects for a CodeBuild build project. @@ -4925,16 +4916,17 @@ type Build struct { // The entity that started the build. Valid values include: // - // * If CodePipeline started the build, the pipeline's name (for example, + // * If AWS CodePipeline started the build, the pipeline's name (for example, // codepipeline/my-demo-pipeline). // - // * If an Identity and Access Management user started the build, the user's - // name (for example, MyUserName). + // * If an AWS Identity and Access Management (IAM) user started the build, + // the user's name (for example, MyUserName). // - // * If the Jenkins plugin for CodeBuild started the build, the string CodeBuild-Jenkins-Plugin. + // * If the Jenkins plugin for AWS CodeBuild started the build, the string + // CodeBuild-Jenkins-Plugin. Initiator *string `locationName:"initiator" type:"string"` - // Information about the build's logs in CloudWatch Logs. + // Information about the build's logs in Amazon CloudWatch Logs. Logs *LogsLocation `locationName:"logs" type:"structure"` // Describes a network interface. @@ -4944,7 +4936,7 @@ type Build struct { // about any current build phase that is not yet complete. Phases []*BuildPhase `locationName:"phases" type:"list"` - // The name of the CodeBuild project. + // The name of the AWS CodeBuild project. ProjectName *string `locationName:"projectName" min:"1" type:"string"` // The number of minutes a build is allowed to be queued before it times out. @@ -4955,10 +4947,10 @@ type Build struct { // An identifier for the version of this build's source code. // - // * For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit + // * For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit // ID. // - // * For CodePipeline, the source revision provided by CodePipeline. + // * For AWS CodePipeline, the source revision provided by AWS CodePipeline. 
// // * For Amazon S3, this does not apply. ResolvedSourceVersion *string `locationName:"resolvedSourceVersion" min:"1" type:"string"` @@ -4969,7 +4961,7 @@ type Build struct { // An array of ProjectSourceVersion objects. Each ProjectSourceVersion must // be one of: // - // * For CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // // * For GitHub: the commit ID, pull request ID, branch name, or tag name // that corresponds to the version of the source code you want to build. @@ -5001,17 +4993,17 @@ type Build struct { // (at the build level) takes precedence. // // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. SourceVersion *string `locationName:"sourceVersion" min:"1" type:"string"` // When the build process started, expressed in Unix time format. StartTime *time.Time `locationName:"startTime" type:"timestamp"` - // How long, in minutes, for CodeBuild to wait before timing out this build + // How long, in minutes, for AWS CodeBuild to wait before timing out this build // if it does not get marked as completed. TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" type:"integer"` - // If your CodeBuild project accesses resources in an Amazon VPC, you provide + // If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide // this parameter that identifies the VPC ID and the list of security group // IDs and subnet IDs. The security groups and subnets must belong to the same // VPC. You must provide at least one security group and one subnet ID. @@ -5227,6 +5219,38 @@ type BuildArtifacts struct { // An identifier for this artifact definition. ArtifactIdentifier *string `locationName:"artifactIdentifier" type:"string"` + // Specifies the access for objects that are uploaded to an Amazon S3 bucket + // that is owned by another account. + // + // By default, only the account that uploads the objects to the bucket has access + // to these objects. This property allows you to give the bucket owner access + // to these objects. + // + // NONE + // + // The bucket owner does not have access to the objects. This is the default. + // + // READ_ONLY + // + // The bucket owner has read only access to the objects. The uploading account + // retains ownership of the objects. + // + // FULL + // + // The bucket owner has full access to the objects. Object ownership is determined + // by the following criteria: + // + // * If the bucket is configured with the Bucket owner preferred setting, + // the bucket owner owns the objects. The uploading account will have object + // access as specified by the bucket's policy. + // + // * Otherwise, the uploading account retains ownership of the objects. + // + // For more information about Amazon S3 object ownership, see Controlling ownership + // of uploaded objects using S3 Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon Simple Storage Service User Guide. + BucketOwnerAccess *string `locationName:"bucketOwnerAccess" type:"string" enum:"BucketOwnerAccess"` + // Information that tells you if encryption for build artifacts is disabled. 
EncryptionDisabled *bool `locationName:"encryptionDisabled" type:"boolean"` @@ -5274,6 +5298,12 @@ func (s *BuildArtifacts) SetArtifactIdentifier(v string) *BuildArtifacts { return s } +// SetBucketOwnerAccess sets the BucketOwnerAccess field's value. +func (s *BuildArtifacts) SetBucketOwnerAccess(v string) *BuildArtifacts { + s.BucketOwnerAccess = &v + return s +} + // SetEncryptionDisabled sets the EncryptionDisabled field's value. func (s *BuildArtifacts) SetEncryptionDisabled(v bool) *BuildArtifacts { s.EncryptionDisabled = &v @@ -5348,8 +5378,8 @@ type BuildBatch struct { // Batch session debugging is not supported for matrix batch builds. DebugSessionEnabled *bool `locationName:"debugSessionEnabled" type:"boolean"` - // The Key Management Service customer master key (CMK) to be used for encrypting - // the batch build output artifacts. + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the batch build output artifacts. // // You can use a cross-account KMS key to encrypt the build output artifacts // if your service role has permission to that key. @@ -5374,16 +5404,17 @@ type BuildBatch struct { // The entity that started the batch build. Valid values include: // - // * If CodePipeline started the build, the pipeline's name (for example, + // * If AWS CodePipeline started the build, the pipeline's name (for example, // codepipeline/my-demo-pipeline). // - // * If an Identity and Access Management user started the build, the user's - // name. + // * If an AWS Identity and Access Management (IAM) user started the build, + // the user's name. // - // * If the Jenkins plugin for CodeBuild started the build, the string CodeBuild-Jenkins-Plugin. + // * If the Jenkins plugin for AWS CodeBuild started the build, the string + // CodeBuild-Jenkins-Plugin. Initiator *string `locationName:"initiator" type:"string"` - // Information about logs for a build project. These can be logs in CloudWatch + // Information about logs for a build project. These can be logs in Amazon CloudWatch // Logs, built in a specified S3 bucket, or both. LogConfig *LogsConfig `locationName:"logConfig" type:"structure"` @@ -5399,10 +5430,10 @@ type BuildBatch struct { // The identifier of the resolved version of this batch build's source code. // - // * For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit + // * For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit // ID. // - // * For CodePipeline, the source revision provided by CodePipeline. + // * For AWS CodePipeline, the source revision provided by AWS CodePipeline. // // * For Amazon S3, this does not apply. ResolvedSourceVersion *string `locationName:"resolvedSourceVersion" min:"1" type:"string"` @@ -5414,7 +5445,7 @@ type BuildBatch struct { // An array of ProjectSourceVersion objects. Each ProjectSourceVersion must // be one of: // - // * For CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // // * For GitHub: the commit ID, pull request ID, branch name, or tag name // that corresponds to the version of the source code you want to build. @@ -5447,7 +5478,7 @@ type BuildBatch struct { // The date and time that the batch build started. StartTime *time.Time `locationName:"startTime" type:"timestamp"` - // Information about the VPC configuration that CodeBuild accesses. + // Information about the VPC configuration that AWS CodeBuild accesses. 
VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` } @@ -6013,7 +6044,7 @@ func (s *BuildPhase) SetStartTime(v time.Time) *BuildPhase { return s } -// Contains information that defines how the CodeBuild build project reports +// Contains information that defines how the AWS CodeBuild build project reports // the build status to the source provider. type BuildStatusConfig struct { _ struct{} `type:"structure"` @@ -6159,25 +6190,25 @@ func (s *BuildSummary) SetSecondaryArtifacts(v []*ResolvedArtifact) *BuildSummar return s } -// Information about CloudWatch Logs for a build project. +// Information about Amazon CloudWatch Logs for a build project. type CloudWatchLogsConfig struct { _ struct{} `type:"structure"` - // The group name of the logs in CloudWatch Logs. For more information, see - // Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html). + // The group name of the logs in Amazon CloudWatch Logs. For more information, + // see Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html). GroupName *string `locationName:"groupName" type:"string"` - // The current status of the logs in CloudWatch Logs for a build project. Valid - // values are: + // The current status of the logs in Amazon CloudWatch Logs for a build project. + // Valid values are: // - // * ENABLED: CloudWatch Logs are enabled for this build project. + // * ENABLED: Amazon CloudWatch Logs are enabled for this build project. // - // * DISABLED: CloudWatch Logs are not enabled for this build project. + // * DISABLED: Amazon CloudWatch Logs are not enabled for this build project. // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"LogsConfigStatusType"` - // The prefix of the stream name of the CloudWatch Logs. For more information, + // The prefix of the stream name of the Amazon CloudWatch Logs. For more information, // see Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html). StreamName *string `locationName:"streamName" type:"string"` } @@ -6439,8 +6470,8 @@ type CreateProjectInput struct { // A description that makes the build project easy to identify. Description *string `locationName:"description" type:"string"` - // The Key Management Service customer master key (CMK) to be used for encrypting - // the build output artifacts. + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the build output artifacts. // // You can use a cross-account KMS key to encrypt the build output artifacts // if your service role has permission to that key. @@ -6459,8 +6490,8 @@ type CreateProjectInput struct { // mountPoint, and type of a file system created using Amazon Elastic File System. FileSystemLocations []*ProjectFileSystemLocation `locationName:"fileSystemLocations" type:"list"` - // Information about logs for the build project. These can be logs in CloudWatch - // Logs, logs uploaded to a specified S3 bucket, or both. + // Information about logs for the build project. These can be logs in Amazon + // CloudWatch Logs, logs uploaded to a specified S3 bucket, or both. LogsConfig *LogsConfig `locationName:"logsConfig" type:"structure"` // The name of the build project. @@ -6482,9 +6513,9 @@ type CreateProjectInput struct { // An array of ProjectSource objects. 
SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` - // The ARN of the Identity and Access Management role that enables CodeBuild - // to interact with dependent Amazon Web Services services on behalf of the - // Amazon Web Services account. + // The ARN of the AWS Identity and Access Management (IAM) role that enables + // AWS CodeBuild to interact with dependent AWS services on behalf of the AWS + // account. // // ServiceRole is a required field ServiceRole *string `locationName:"serviceRole" min:"1" type:"string" required:"true"` @@ -6497,7 +6528,7 @@ type CreateProjectInput struct { // A version of the build input to be built for this project. If not specified, // the latest version is used. If specified, it must be one of: // - // * For CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // // * For GitHub: the commit ID, pull request ID, branch name, or tag name // that corresponds to the version of the source code you want to build. @@ -6518,21 +6549,21 @@ type CreateProjectInput struct { // precedence over this sourceVersion (at the project level). // // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. SourceVersion *string `locationName:"sourceVersion" type:"string"` // A list of tag key and value pairs associated with this build project. // - // These tags are available for use by Amazon Web Services services that support - // CodeBuild build project tags. + // These tags are available for use by AWS services that support AWS CodeBuild + // build project tags. Tags []*Tag `locationName:"tags" type:"list"` - // How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before - // it times out any build that has not been marked as completed. The default - // is 60 minutes. + // How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait + // before it times out any build that has not been marked as completed. The + // default is 60 minutes. TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" min:"5" type:"integer"` - // VpcConfig enables CodeBuild to access resources in an Amazon VPC. + // VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC. VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` } @@ -6826,8 +6857,8 @@ type CreateReportGroupInput struct { // A list of tag key and value pairs associated with this report group. // - // These tags are available for use by Amazon Web Services services that support - // CodeBuild report group tags. + // These tags are available for use by AWS services that support AWS CodeBuild + // report group tags. Tags []*Tag `locationName:"tags" type:"list"` // The type of report group. @@ -6952,7 +6983,7 @@ type CreateWebhookInput struct { // array must pass. For a filter group to pass, each of its filters must pass. FilterGroups [][]*WebhookFilter `locationName:"filterGroups" type:"list"` - // The name of the CodeBuild project. + // The name of the AWS CodeBuild project. // // ProjectName is a required field ProjectName *string `locationName:"projectName" min:"2" type:"string" required:"true"` @@ -7012,7 +7043,7 @@ type CreateWebhookOutput struct { _ struct{} `type:"structure"` // Information about a webhook that connects repository events to a build project - // in CodeBuild. + // in AWS CodeBuild. 
+ // in AWS CodeBuild.
Webhook *Webhook `locationName:"webhook" type:"structure"` } @@ -7455,7 +7486,7 @@ func (s *DeleteSourceCredentialsOutput) SetArn(v string) *DeleteSourceCredential type DeleteWebhookInput struct { _ struct{} `type:"structure"` - // The name of the CodeBuild project. + // The name of the AWS CodeBuild project. // // ProjectName is a required field ProjectName *string `locationName:"projectName" min:"2" type:"string" required:"true"` @@ -7762,7 +7793,7 @@ func (s *DescribeTestCasesOutput) SetTestCases(v []*TestCase) *DescribeTestCases return s } -// Information about a Docker image that is managed by CodeBuild. +// Information about a Docker image that is managed by AWS CodeBuild. type EnvironmentImage struct { _ struct{} `type:"structure"` @@ -7805,7 +7836,7 @@ func (s *EnvironmentImage) SetVersions(v []*string) *EnvironmentImage { } // A set of Docker images that are related by programming language and are managed -// by CodeBuild. +// by AWS CodeBuild. type EnvironmentLanguage struct { _ struct{} `type:"structure"` @@ -7838,7 +7869,8 @@ func (s *EnvironmentLanguage) SetLanguage(v string) *EnvironmentLanguage { return s } -// A set of Docker images that are related by platform and are managed by CodeBuild. +// A set of Docker images that are related by platform and are managed by AWS +// CodeBuild. type EnvironmentPlatform struct { _ struct{} `type:"structure"` @@ -7882,28 +7914,27 @@ type EnvironmentVariable struct { // The type of environment variable. Valid values include: // - // * PARAMETER_STORE: An environment variable stored in Systems Manager Parameter - // Store. To learn how to specify a parameter store environment variable, - // see env/parameter-store (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec.env.parameter-store) - // in the CodeBuild User Guide. + // * PARAMETER_STORE: An environment variable stored in Amazon EC2 Systems + // Manager Parameter Store. To learn how to specify a parameter store environment + // variable, see env/parameter-store (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec.env.parameter-store) + // in the AWS CodeBuild User Guide. // // * PLAINTEXT: An environment variable in plain text format. This is the // default value. // - // * SECRETS_MANAGER: An environment variable stored in Secrets Manager. + // * SECRETS_MANAGER: An environment variable stored in AWS Secrets Manager. // To learn how to specify a secrets manager environment variable, see env/secrets-manager // (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec.env.secrets-manager) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. Type *string `locationName:"type" type:"string" enum:"EnvironmentVariableType"` // The value of the environment variable. // // We strongly discourage the use of PLAINTEXT environment variables to store - // sensitive values, especially Amazon Web Services secret key IDs and secret - // access keys. PLAINTEXT environment variables can be displayed in plain text - // using the CodeBuild console and the AWS Command Line Interface (AWS CLI). - // For sensitive values, we recommend you use an environment variable of type - // PARAMETER_STORE or SECRETS_MANAGER. + // sensitive values, especially AWS secret key IDs and secret access keys. PLAINTEXT + // environment variables can be displayed in plain text using the AWS CodeBuild + // console and the AWS Command Line Interface (AWS CLI). 
For sensitive values, + // we recommend you use an environment variable of type PARAMETER_STORE or SECRETS_MANAGER. // // Value is a required field Value *string `locationName:"value" type:"string" required:"true"` @@ -7958,11 +7989,11 @@ func (s *EnvironmentVariable) SetValue(v string) *EnvironmentVariable { // Contains information about an exported environment variable. // -// Exported environment variables are used in conjunction with CodePipeline +// Exported environment variables are used in conjunction with AWS CodePipeline // to export environment variables from the current build stage to subsequent // stages in the pipeline. For more information, see Working with variables // (https://docs.aws.amazon.com/codepipeline/latest/userguide/actions-variables.html) -// in the CodePipeline User Guide. +// in the AWS CodePipeline User Guide. // // During a build, the value of a variable is available starting with the install // phase. It can be updated between the start of the install phase and the end @@ -8206,12 +8237,12 @@ func (s *GetResourcePolicyOutput) SetPolicy(v string) *GetResourcePolicyOutput { return s } -// Information about the Git submodules configuration for an CodeBuild build +// Information about the Git submodules configuration for an AWS CodeBuild build // project. type GitSubmodulesConfig struct { _ struct{} `type:"structure"` - // Set to true to fetch Git submodules for your CodeBuild build project. + // Set to true to fetch Git submodules for your AWS CodeBuild build project. // // FetchSubmodules is a required field FetchSubmodules *bool `locationName:"fetchSubmodules" type:"boolean" required:"true"` @@ -8251,7 +8282,7 @@ type ImportSourceCredentialsInput struct { // The type of authentication used to connect to a GitHub, GitHub Enterprise, // or Bitbucket repository. An OAUTH connection is not supported by the API - // and must be created using the CodeBuild console. + // and must be created using the AWS CodeBuild console. // // AuthType is a required field AuthType *string `locationName:"authType" type:"string" required:"true" enum:"AuthType"` @@ -8424,7 +8455,7 @@ func (s *InvalidInputException) RequestID() string { type InvalidateProjectCacheInput struct { _ struct{} `type:"structure"` - // The name of the CodeBuild build project that the cache is reset for. + // The name of the AWS CodeBuild build project that the cache is reset for. // // ProjectName is a required field ProjectName *string `locationName:"projectName" min:"1" type:"string" required:"true"` @@ -8706,7 +8737,7 @@ type ListBuildsForProjectInput struct { // more next tokens are returned. NextToken *string `locationName:"nextToken" type:"string"` - // The name of the CodeBuild project. + // The name of the AWS CodeBuild project. // // ProjectName is a required field ProjectName *string `locationName:"projectName" min:"1" type:"string" required:"true"` @@ -8899,7 +8930,7 @@ type ListCuratedEnvironmentImagesOutput struct { _ struct{} `type:"structure"` // Information about supported platforms for Docker images that are managed - // by CodeBuild. + // by AWS CodeBuild. Platforms []*EnvironmentPlatform `locationName:"platforms" type:"list"` } @@ -9120,8 +9151,7 @@ type ListReportGroupsOutput struct { // returned. NextToken *string `locationName:"nextToken" type:"string"` - // The list of ARNs for the report groups in the current Amazon Web Services - // account. + // The list of ARNs for the report groups in the current AWS account. 
ReportGroups []*string `locationName:"reportGroups" min:"1" type:"list"` } @@ -9360,8 +9390,7 @@ type ListReportsOutput struct { // returned. NextToken *string `locationName:"nextToken" type:"string"` - // The list of returned ARNs for the reports in the current Amazon Web Services - // account. + // The list of returned ARNs for the reports in the current AWS account. Reports []*string `locationName:"reports" min:"1" type:"list"` } @@ -9404,8 +9433,8 @@ type ListSharedProjectsInput struct { // returned. NextToken *string `locationName:"nextToken" min:"1" type:"string"` - // The criterion to be used to list build projects shared with the current Amazon - // Web Services account or user. Valid values include: + // The criterion to be used to list build projects shared with the current AWS + // account or user. Valid values include: // // * ARN: List based on the ARN. // @@ -9483,8 +9512,8 @@ type ListSharedProjectsOutput struct { // returned. NextToken *string `locationName:"nextToken" type:"string"` - // The list of ARNs for the build projects shared with the current Amazon Web - // Services account or user. + // The list of ARNs for the build projects shared with the current AWS account + // or user. Projects []*string `locationName:"projects" min:"1" type:"list"` } @@ -9527,8 +9556,8 @@ type ListSharedReportGroupsInput struct { // returned. NextToken *string `locationName:"nextToken" type:"string"` - // The criterion to be used to list report groups shared with the current Amazon - // Web Services account or user. Valid values include: + // The criterion to be used to list report groups shared with the current AWS + // account or user. Valid values include: // // * ARN: List based on the ARN. // @@ -9603,8 +9632,8 @@ type ListSharedReportGroupsOutput struct { // returned. NextToken *string `locationName:"nextToken" type:"string"` - // The list of ARNs for the report groups shared with the current Amazon Web - // Services account or user. + // The list of ARNs for the report groups shared with the current AWS account + // or user. ReportGroups []*string `locationName:"reportGroups" min:"1" type:"list"` } @@ -9669,13 +9698,13 @@ func (s *ListSourceCredentialsOutput) SetSourceCredentialsInfos(v []*SourceCrede return s } -// Information about logs for a build project. These can be logs in CloudWatch +// Information about logs for a build project. These can be logs in Amazon CloudWatch // Logs, built in a specified S3 bucket, or both. type LogsConfig struct { _ struct{} `type:"structure"` - // Information about CloudWatch Logs for a build project. CloudWatch Logs are - // enabled by default. + // Information about Amazon CloudWatch Logs for a build project. Amazon CloudWatch + // Logs are enabled by default. CloudWatchLogs *CloudWatchLogsConfig `locationName:"cloudWatchLogs" type:"structure"` // Information about logs built to an S3 bucket for a build project. S3 logs @@ -9725,21 +9754,21 @@ func (s *LogsConfig) SetS3Logs(v *S3LogsConfig) *LogsConfig { return s } -// Information about build logs in CloudWatch Logs. +// Information about build logs in Amazon CloudWatch Logs. type LogsLocation struct { _ struct{} `type:"structure"` - // Information about CloudWatch Logs for a build project. + // Information about Amazon CloudWatch Logs for a build project. CloudWatchLogs *CloudWatchLogsConfig `locationName:"cloudWatchLogs" type:"structure"` - // The ARN of CloudWatch Logs for a build project. 
Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}.
- // For more information, see Resources Defined by CloudWatch Logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatchlogs.html#amazoncloudwatchlogs-resources-for-iam-policies).
+ // The ARN of Amazon CloudWatch Logs for a build project. Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}.
+ // For more information, see Resources Defined by Amazon CloudWatch Logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatchlogs.html#amazoncloudwatchlogs-resources-for-iam-policies).
 CloudWatchLogsArn *string `locationName:"cloudWatchLogsArn" type:"string"`

- // The URL to an individual build log in CloudWatch Logs.
+ // The URL to an individual build log in Amazon CloudWatch Logs.
 DeepLink *string `locationName:"deepLink" type:"string"`

- // The name of the CloudWatch Logs group for the build logs.
+ // The name of the Amazon CloudWatch Logs group for the build logs.
 GroupName *string `locationName:"groupName" type:"string"`

 // The URL to a build log in an S3 bucket.
@@ -9752,7 +9781,7 @@ type LogsLocation struct {
 // For more information, see Resources Defined by Amazon S3 (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html#amazons3-resources-for-iam-policies).
 S3LogsArn *string `locationName:"s3LogsArn" type:"string"`

- // The name of the CloudWatch Logs stream for the build logs.
+ // The name of the Amazon CloudWatch Logs stream for the build logs.
 StreamName *string `locationName:"streamName" type:"string"`
 }

@@ -9971,16 +10000,14 @@ type Project struct {
 // A description that makes the build project easy to identify.
 Description *string `locationName:"description" type:"string"`

- // The Key Management Service customer master key (CMK) to be used for encrypting
- // the build output artifacts.
+ // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be
+ // used for encrypting the build output artifacts.
 //
 // You can use a cross-account KMS key to encrypt the build output artifacts
 // if your service role has permission to that key.
 //
 // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available,
- // the CMK's alias (using the format alias/<alias-name> ).
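// A minimal sketch of using the bucketOwnerAccess artifact setting added in
// this release, which grants the owner of a cross-account artifact bucket
// access to the uploaded objects. The project name, role ARN, repository URL,
// and bucket name are placeholders, and codebuild.BucketOwnerAccessFull is
// assumed from this model's NONE | READ_ONLY | FULL enum; this is an
// illustration, not part of the generated service code.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := codebuild.New(sess)

	out, err := svc.CreateProject(&codebuild.CreateProjectInput{
		Name:        aws.String("my-demo-project"),                               // placeholder
		ServiceRole: aws.String("arn:aws:iam::123456789012:role/CodeBuildRole"), // placeholder
		Source: &codebuild.ProjectSource{
			Type:     aws.String(codebuild.SourceTypeGithub),
			Location: aws.String("https://github.com/my-org/my-repo.git"), // placeholder
		},
		Environment: &codebuild.ProjectEnvironment{
			ComputeType: aws.String(codebuild.ComputeTypeBuildGeneral1Small),
			Image:       aws.String("aws/codebuild/standard:5.0"),
			Type:        aws.String(codebuild.EnvironmentTypeLinuxContainer),
		},
		Artifacts: &codebuild.ProjectArtifacts{
			Type:     aws.String(codebuild.ArtifactsTypeS3),
			Location: aws.String("artifact-bucket-owned-by-another-account"), // placeholder
			// New in this release: give the S3 bucket owner full access to
			// the uploaded build artifacts.
			BucketOwnerAccess: aws.String(codebuild.BucketOwnerAccessFull),
		},
	})
	if err != nil {
		fmt.Println("CreateProject failed:", err)
		return
	}
	fmt.Println("created project:", aws.StringValue(out.Project.Arn))
}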
-// Idempotent delete APIs
-//
-// Given a request action that has succeeded:
-//
-// When you retry the request with an API from this group and the resource
-// was deleted, its metadata is returned in the response.
-//
-// If you retry and the resource doesn't exist, the response is empty.
-//
-// In both cases, the retry succeeds.
-//
-// Idempotent delete APIs:
-//
-//    * DeleteEnvironmentTemplate
-//
-//    * DeleteEnvironmentTemplateVersion
-//
-//    * DeleteServiceTemplate
-//
-//    * DeleteServiceTemplateVersion
-//
-//    * DeleteEnvironmentAccountConnection
-//
-// Asynchronous idempotent delete APIs
-//
-// Given a request action that has succeeded:
-//
-// If you retry the request with an API from this group, if the original request
-// delete operation status is DELETE_IN_PROGRESS, the retry returns the resource
-// detail data in the response without performing any further actions.
-//
-// If the original request delete operation is complete, a retry returns an
-// empty response.
-//
-// Asynchronous idempotent delete APIs:
-//
-//    * DeleteEnvironment
-//
-//    * DeleteService
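// A minimal sketch of the asynchronous idempotent delete behavior described
// above. The service name is a placeholder; whether the response carries the
// service's detail data depends on whether the original delete has already
// completed, so this is an illustration of the documented retry semantics,
// not part of the generated service code.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/proton"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := proton.New(sess)

	// Retrying DeleteService is safe: while the original delete is still
	// DELETE_IN_PROGRESS, the retry returns the service's detail data; once
	// the delete has completed, the retry returns an empty response.
	out, err := svc.DeleteService(&proton.DeleteServiceInput{
		Name: aws.String("my-service"), // placeholder
	})
	if err != nil {
		fmt.Println("DeleteService failed:", err)
		return
	}
	if out.Service != nil {
		fmt.Println("delete status:", aws.StringValue(out.Service.Status))
	} else {
		fmt.Println("service already deleted; empty response")
	}
}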