From 625f24b7d45371da2ce5739758e96a2c4edb3738 Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Thu, 14 Jul 2022 11:24:57 -0700 Subject: [PATCH] Release v1.44.55 (2022-07-14) (#4479) Release v1.44.55 (2022-07-14) === ### Service Client Updates * `service/athena`: Updates service API and documentation * This release updates data types that contain either QueryExecutionId, NamedQueryId or ExpectedBucketOwner. Ids must be between 1 and 128 characters and contain only non-whitespace characters. ExpectedBucketOwner must be 12-digit string. * `service/codeartifact`: Updates service API and documentation * `service/config`: Updates service API and documentation * `service/ec2`: Updates service API * This release adds flow logs for Transit Gateway to allow customers to gain deeper visibility and insights into network traffic through their Transit Gateways. * `service/fms`: Updates service API and documentation * `service/glue`: Updates service API and documentation * This release adds an additional worker type for Glue Streaming jobs. * `service/inspector2`: Updates service API and documentation * `service/kendra`: Updates service API, documentation, and paginators * This release adds AccessControlConfigurations which allow you to redefine your document level access control without the need for content re-indexing. 
* `service/nimble`: Updates service API and documentation * `service/outposts`: Updates service API and documentation * `service/sagemaker`: Updates service API and documentation * This release adds support for G5, P4d, and C6i instance types in Amazon SageMaker Inference and increases the number of hyperparameters that can be searched from 20 to 30 in Amazon SageMaker Automatic Model Tuning --- CHANGELOG.md | 21 + aws/endpoints/defaults.go | 81 + aws/version.go | 2 +- models/apis/athena/2017-05-18/api-2.json | 29 +- models/apis/athena/2017-05-18/docs-2.json | 24 +- .../apis/codeartifact/2018-09-22/api-2.json | 236 ++- .../apis/codeartifact/2018-09-22/docs-2.json | 180 +- models/apis/config/2014-11-12/api-2.json | 17 +- models/apis/config/2014-11-12/docs-2.json | 6 +- models/apis/ec2/2016-11-15/api-2.json | 7 +- models/apis/fms/2018-01-01/api-2.json | 21 +- models/apis/fms/2018-01-01/docs-2.json | 33 +- models/apis/glue/2017-03-31/api-2.json | 3 +- models/apis/glue/2017-03-31/docs-2.json | 76 +- models/apis/inspector2/2020-06-08/api-2.json | 115 +- models/apis/inspector2/2020-06-08/docs-2.json | 60 +- models/apis/kendra/2019-02-03/api-2.json | 216 +- models/apis/kendra/2019-02-03/docs-2.json | 382 ++-- .../apis/kendra/2019-02-03/paginators-1.json | 5 + models/apis/nimble/2020-08-01/api-2.json | 145 +- models/apis/nimble/2020-08-01/docs-2.json | 144 +- models/apis/outposts/2019-12-03/api-2.json | 51 +- models/apis/outposts/2019-12-03/docs-2.json | 43 + models/apis/sagemaker/2017-07-24/api-2.json | 28 +- models/apis/sagemaker/2017-07-24/docs-2.json | 4 +- models/endpoints/endpoints.json | 39 + service/athena/api.go | 74 +- service/codeartifact/api.go | 1343 ++++++++++-- .../codeartifactiface/interface.go | 8 + service/codeartifact/doc.go | 7 + service/configservice/api.go | 80 +- service/ec2/api.go | 15 +- service/fms/api.go | 248 ++- service/glue/api.go | 142 +- service/glue/errors.go | 2 + service/inspector2/api.go | 554 ++++- 
.../inspector2/inspector2iface/interface.go | 8 + service/kendra/api.go | 1816 ++++++++++++++--- service/kendra/kendraiface/interface.go | 23 + service/nimblestudio/api.go | 94 +- service/outposts/api.go | 124 ++ service/sagemaker/api.go | 88 +- 42 files changed, 5492 insertions(+), 1102 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ce8189d509c..29ab5d06d75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,24 @@ +Release v1.44.55 (2022-07-14) +=== + +### Service Client Updates +* `service/athena`: Updates service API and documentation + * This release updates data types that contain either QueryExecutionId, NamedQueryId or ExpectedBucketOwner. Ids must be between 1 and 128 characters and contain only non-whitespace characters. ExpectedBucketOwner must be 12-digit string. +* `service/codeartifact`: Updates service API and documentation +* `service/config`: Updates service API and documentation +* `service/ec2`: Updates service API + * This release adds flow logs for Transit Gateway to allow customers to gain deeper visibility and insights into network traffic through their Transit Gateways. +* `service/fms`: Updates service API and documentation +* `service/glue`: Updates service API and documentation + * This release adds an additional worker type for Glue Streaming jobs. +* `service/inspector2`: Updates service API and documentation +* `service/kendra`: Updates service API, documentation, and paginators + * This release adds AccessControlConfigurations which allow you to redefine your document level access control without the need for content re-indexing. 
+* `service/nimble`: Updates service API and documentation +* `service/outposts`: Updates service API and documentation +* `service/sagemaker`: Updates service API and documentation + * This release adds support for G5, P4d, and C6i instance types in Amazon SageMaker Inference and increases the number of hyperparameters that can be searched from 20 to 30 in Amazon SageMaker Automatic Model Tuning + Release v1.44.54 (2022-07-13) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index a5b051ff774..b054dd32f60 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -21058,6 +21058,67 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "states": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30192,6 +30253,26 @@ var awsusgovPartition = partition{ }, }, }, + "sso": service{ + Endpoints: serviceEndpoints{ 
+ endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "states": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/aws/version.go b/aws/version.go index 504aded0328..2cbf88585ee 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.54" +const SDKVersion = "1.44.55" diff --git a/models/apis/athena/2017-05-18/api-2.json b/models/apis/athena/2017-05-18/api-2.json index e4fbf15c7b8..1230522bd56 100644 --- a/models/apis/athena/2017-05-18/api-2.json +++ b/models/apis/athena/2017-05-18/api-2.json @@ -520,6 +520,12 @@ "ErrorMessage":{"shape":"String"} } }, + "AwsAccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^[0-9]+$" + }, "BatchGetNamedQueryInput":{ "type":"structure", "required":["NamedQueryIds"], @@ -1270,7 +1276,12 @@ "max":1024, "min":0 }, - "NamedQueryId":{"type":"string"}, + "NamedQueryId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"\\S+" + }, "NamedQueryIdList":{ "type":"list", "member":{"shape":"NamedQueryId"}, @@ -1343,7 +1354,12 @@ "Catalog":{"shape":"CatalogNameString"} } }, - "QueryExecutionId":{"type":"string"}, + "QueryExecutionId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"\\S+" + }, "QueryExecutionIdList":{ "type":"list", "member":{"shape":"QueryExecutionId"}, @@ -1402,25 +1418,26 @@ "ResultConfiguration":{ "type":"structure", "members":{ - "OutputLocation":{"shape":"String"}, + "OutputLocation":{"shape":"ResultOutputLocation"}, "EncryptionConfiguration":{"shape":"EncryptionConfiguration"}, - "ExpectedBucketOwner":{"shape":"String"}, + 
"ExpectedBucketOwner":{"shape":"AwsAccountId"}, "AclConfiguration":{"shape":"AclConfiguration"} } }, "ResultConfigurationUpdates":{ "type":"structure", "members":{ - "OutputLocation":{"shape":"String"}, + "OutputLocation":{"shape":"ResultOutputLocation"}, "RemoveOutputLocation":{"shape":"BoxedBoolean"}, "EncryptionConfiguration":{"shape":"EncryptionConfiguration"}, "RemoveEncryptionConfiguration":{"shape":"BoxedBoolean"}, - "ExpectedBucketOwner":{"shape":"String"}, + "ExpectedBucketOwner":{"shape":"AwsAccountId"}, "RemoveExpectedBucketOwner":{"shape":"BoxedBoolean"}, "AclConfiguration":{"shape":"AclConfiguration"}, "RemoveAclConfiguration":{"shape":"BoxedBoolean"} } }, + "ResultOutputLocation":{"type":"string"}, "ResultSet":{ "type":"structure", "members":{ diff --git a/models/apis/athena/2017-05-18/docs-2.json b/models/apis/athena/2017-05-18/docs-2.json index 94c5e83cc2b..da171605efc 100644 --- a/models/apis/athena/2017-05-18/docs-2.json +++ b/models/apis/athena/2017-05-18/docs-2.json @@ -62,8 +62,15 @@ "QueryExecutionStatus$AthenaError": "
Provides information about an Athena query error.
" } }, - "BatchGetNamedQueryInput": { + "AwsAccountId": { "base": null, + "refs": { + "ResultConfiguration$ExpectedBucketOwner": "The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set, Athena uses the value for ExpectedBucketOwner
when it makes Amazon S3 calls to your specified output location. If the ExpectedBucketOwner
Amazon Web Services account ID does not match the actual owner of the Amazon S3 bucket, the call fails with a permissions error.
This is a client-side setting. If workgroup settings override client-side settings, then the query uses the ExpectedBucketOwner
setting that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.
The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set, Athena uses the value for ExpectedBucketOwner
when it makes Amazon S3 calls to your specified output location. If the ExpectedBucketOwner
Amazon Web Services account ID does not match the actual owner of the Amazon S3 bucket, the call fails with a permissions error.
If workgroup settings override client-side settings, then the query uses the ExpectedBucketOwner
setting that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.
Contains an array of named query IDs.
", "refs": { } }, @@ -83,7 +90,7 @@ } }, "BatchGetQueryExecutionInput": { - "base": null, + "base": "Contains an array of query execution IDs.
", "refs": { } }, @@ -885,6 +892,13 @@ "WorkGroupConfigurationUpdates$ResultConfigurationUpdates": "The result configuration information about the queries in this workgroup that will be updated. Includes the updated results location and an updated option for encrypting query results.
" } }, + "ResultOutputLocation": { + "base": null, + "refs": { + "ResultConfiguration$OutputLocation": "The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/
. To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration. If none of them is set, Athena issues an error that no output location is provided. For more information, see Query Results. If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.
The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/
. For more information, see Query Results If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration
(true/false) in the WorkGroupConfiguration
. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.
The metadata and rows that make up a query result set. The metadata describes the column structure and data types. To return a ResultSet
object, use GetQueryResults.
The data type of the column.
", "EncryptionConfiguration$KmsKey": "For SSE_KMS
and CSE_KMS
, this is the KMS key ARN or ID.
The location and file name of a data manifest file. The manifest file is saved to the Athena query results location in Amazon S3. The manifest file tracks files that the query wrote to Amazon S3. If the query fails, the manifest file also tracks files that the query intended to write. The manifest is useful for identifying orphaned files resulting from a failed query. For more information, see Working with Query Results, Output Files, and Query History in the Amazon Athena User Guide.
", - "QueryExecutionStatus$StateChangeReason": "Further detail about the status of the query.
", - "ResultConfiguration$OutputLocation": "The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/
. To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration. If none of them is set, Athena issues an error that no output location is provided. For more information, see Query Results. If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.
The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set, Athena uses the value for ExpectedBucketOwner
when it makes Amazon S3 calls to your specified output location. If the ExpectedBucketOwner
Amazon Web Services account ID does not match the actual owner of the Amazon S3 bucket, the call fails with a permissions error.
This is a client-side setting. If workgroup settings override client-side settings, then the query uses the ExpectedBucketOwner
setting that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.
The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/
. For more information, see Query Results If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration
(true/false) in the WorkGroupConfiguration
. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.
The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set, Athena uses the value for ExpectedBucketOwner
when it makes Amazon S3 calls to your specified output location. If the ExpectedBucketOwner
Amazon Web Services account ID does not match the actual owner of the Amazon S3 bucket, the call fails with a permissions error.
If workgroup settings override client-side settings, then the query uses the ExpectedBucketOwner
setting that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.
Further detail about the status of the query.
" } }, "TableMetadata": { diff --git a/models/apis/codeartifact/2018-09-22/api-2.json b/models/apis/codeartifact/2018-09-22/api-2.json index d8923d66e01..58612adef0b 100644 --- a/models/apis/codeartifact/2018-09-22/api-2.json +++ b/models/apis/codeartifact/2018-09-22/api-2.json @@ -184,6 +184,22 @@ {"shape":"ValidationException"} ] }, + "DescribePackage":{ + "name":"DescribePackage", + "http":{ + "method":"GET", + "requestUri":"/v1/package" + }, + "input":{"shape":"DescribePackageRequest"}, + "output":{"shape":"DescribePackageResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ] + }, "DescribePackageVersion":{ "name":"DescribePackageVersion", "http":{ @@ -492,6 +508,22 @@ {"shape":"ValidationException"} ] }, + "PutPackageOriginConfiguration":{ + "name":"PutPackageOriginConfiguration", + "http":{ + "method":"POST", + "requestUri":"/v1/package" + }, + "input":{"shape":"PutPackageOriginConfigurationRequest"}, + "output":{"shape":"PutPackageOriginConfigurationResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ] + }, "PutRepositoryPermissionsPolicy":{ "name":"PutRepositoryPermissionsPolicy", "http":{ @@ -593,6 +625,20 @@ "min":12, "pattern":"[0-9]{12}" }, + "AllowPublish":{ + "type":"string", + "enum":[ + "ALLOW", + "BLOCK" + ] + }, + "AllowUpstream":{ + "type":"string", + "enum":[ + "ALLOW", + "BLOCK" + ] + }, "Arn":{ "type":"string", "max":1011, @@ -977,6 +1023,54 @@ "domain":{"shape":"DomainDescription"} } }, + "DescribePackageRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "location":"querystring", + "locationName":"domain" + }, + 
"domainOwner":{ + "shape":"AccountId", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "location":"querystring", + "locationName":"package" + } + } + }, + "DescribePackageResult":{ + "type":"structure", + "required":["package"], + "members":{ + "package":{"shape":"PackageDescription"} + } + }, "DescribePackageVersionRequest":{ "type":"structure", "required":[ @@ -1064,7 +1158,7 @@ "Description":{ "type":"string", "max":1000, - "pattern":"\\P{C}+" + "pattern":"\\P{C}*" }, "DisassociateExternalConnectionRequest":{ "type":"structure", @@ -1168,6 +1262,13 @@ "s3BucketArn":{"shape":"Arn"} } }, + "DomainEntryPoint":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "externalConnectionName":{"shape":"ExternalConnectionName"} + } + }, "DomainName":{ "type":"string", "max":50, @@ -1199,6 +1300,8 @@ "ErrorMessage":{"type":"string"}, "ExternalConnectionName":{ "type":"string", + "max":100, + "min":2, "pattern":"[A-Za-z0-9][A-Za-z0-9._\\-:]{1,99}" }, "ExternalConnectionStatus":{ @@ -1716,6 +1819,11 @@ "shape":"PaginationToken", "location":"querystring", "locationName":"next-token" + }, + "originType":{ + "shape":"PackageVersionOriginType", + "location":"querystring", + "locationName":"originType" } } }, @@ -1781,6 +1889,16 @@ "shape":"PaginationToken", "location":"querystring", "locationName":"next-token" + }, + "publish":{ + "shape":"AllowPublish", + "location":"querystring", + "locationName":"publish" + }, + "upstream":{ + "shape":"AllowUpstream", + "location":"querystring", + "locationName":"upstream" } } }, @@ -1903,6 +2021,15 @@ "type":"list", 
"member":{"shape":"PackageDependency"} }, + "PackageDescription":{ + "type":"structure", + "members":{ + "format":{"shape":"PackageFormat"}, + "namespace":{"shape":"PackageNamespace"}, + "name":{"shape":"PackageName"}, + "originConfiguration":{"shape":"PackageOriginConfiguration"} + } + }, "PackageFormat":{ "type":"string", "enum":[ @@ -1916,20 +2043,38 @@ "type":"string", "max":255, "min":1, - "pattern":"[^!#/\\s]+" + "pattern":"[^#/\\s]+" }, "PackageNamespace":{ "type":"string", "max":255, "min":1, - "pattern":"[^!#/\\s]+" + "pattern":"[^#/\\s]+" + }, + "PackageOriginConfiguration":{ + "type":"structure", + "members":{ + "restrictions":{"shape":"PackageOriginRestrictions"} + } + }, + "PackageOriginRestrictions":{ + "type":"structure", + "required":[ + "publish", + "upstream" + ], + "members":{ + "publish":{"shape":"AllowPublish"}, + "upstream":{"shape":"AllowUpstream"} + } }, "PackageSummary":{ "type":"structure", "members":{ "format":{"shape":"PackageFormat"}, "namespace":{"shape":"PackageNamespace"}, - "package":{"shape":"PackageName"} + "package":{"shape":"PackageName"}, + "originConfiguration":{"shape":"PackageOriginConfiguration"} } }, "PackageSummaryList":{ @@ -1940,7 +2085,7 @@ "type":"string", "max":255, "min":1, - "pattern":"[^!#/\\s]+" + "pattern":"[^#/\\s]+" }, "PackageVersionDescription":{ "type":"structure", @@ -1956,7 +2101,8 @@ "publishedTime":{"shape":"Timestamp"}, "licenses":{"shape":"LicenseInfoList"}, "revision":{"shape":"PackageVersionRevision"}, - "status":{"shape":"PackageVersionStatus"} + "status":{"shape":"PackageVersionStatus"}, + "origin":{"shape":"PackageVersionOrigin"} } }, "PackageVersionError":{ @@ -1987,6 +2133,21 @@ "member":{"shape":"PackageVersion"}, "max":100 }, + "PackageVersionOrigin":{ + "type":"structure", + "members":{ + "domainEntryPoint":{"shape":"DomainEntryPoint"}, + "originType":{"shape":"PackageVersionOriginType"} + } + }, + "PackageVersionOriginType":{ + "type":"string", + "enum":[ + "INTERNAL", + "EXTERNAL", + 
"UNKNOWN" + ] + }, "PackageVersionRevision":{ "type":"string", "max":50, @@ -2022,7 +2183,8 @@ "members":{ "version":{"shape":"PackageVersion"}, "revision":{"shape":"PackageVersionRevision"}, - "status":{"shape":"PackageVersionStatus"} + "status":{"shape":"PackageVersionStatus"}, + "origin":{"shape":"PackageVersionOrigin"} } }, "PackageVersionSummaryList":{ @@ -2037,8 +2199,9 @@ }, "PolicyDocument":{ "type":"string", - "max":5120, - "min":1 + "max":7168, + "min":1, + "pattern":"[\\P{C}\\s]+" }, "PolicyRevision":{ "type":"string", @@ -2065,6 +2228,55 @@ "policy":{"shape":"ResourcePolicy"} } }, + "PutPackageOriginConfigurationRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "restrictions" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "location":"querystring", + "locationName":"package" + }, + "restrictions":{"shape":"PackageOriginRestrictions"} + } + }, + "PutPackageOriginConfigurationResult":{ + "type":"structure", + "members":{ + "originConfiguration":{"shape":"PackageOriginConfiguration"} + } + }, "PutRepositoryPermissionsPolicyRequest":{ "type":"structure", "required":[ @@ -2217,7 +2429,8 @@ "TagKey":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":"\\P{C}+" }, "TagKeyList":{ "type":"list", @@ -2254,7 +2467,8 @@ "TagValue":{ "type":"string", "max":256, - "min":0 + "min":0, + "pattern":"\\P{C}*" }, "ThrottlingException":{ "type":"structure", diff --git 
a/models/apis/codeartifact/2018-09-22/docs-2.json b/models/apis/codeartifact/2018-09-22/docs-2.json index 1a3a293e092..17c304be56b 100644 --- a/models/apis/codeartifact/2018-09-22/docs-2.json +++ b/models/apis/codeartifact/2018-09-22/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, pip, and dotnet. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client.
CodeArtifact Components
Use the information in this guide to help you work with the following CodeArtifact components:
Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools like the npm
CLI, the Maven CLI ( mvn
), Python CLIs ( pip
and twine
), and NuGet CLIs (nuget
and dotnet
).
Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in Key Management Service (KMS).
Each repository is a member of a single domain and can't be moved to a different domain.
The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages.
Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization.
Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, Maven, and NuGet package formats.
In CodeArtifact, a package consists of:
A name (for example, webpack
is the name of a popular npm package)
An optional namespace (for example, @types
in @types/node
)
A set of versions (for example, 1.0.0
, 1.0.1
, 1.0.2
, etc.)
Package-level metadata (for example, npm tags)
Package version: A version of a package, such as @types/node 12.6.9
. The version number format and semantics vary for different package formats. For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets.
Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories.
Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz
file or Maven POM and JAR files.
CodeArtifact supports these operations:
AssociateExternalConnection
: Adds an existing external connection to a repository.
CopyPackageVersions
: Copies package versions from one repository to another repository in the same domain.
CreateDomain
: Creates a domain
CreateRepository
: Creates a CodeArtifact repository in a domain.
DeleteDomain
: Deletes a domain. You cannot delete a domain that contains repositories.
DeleteDomainPermissionsPolicy
: Deletes the resource policy that is set on a domain.
DeletePackageVersions
: Deletes versions of a package. After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage.
DeleteRepository
: Deletes a repository.
DeleteRepositoryPermissionsPolicy
: Deletes the resource policy that is set on a repository.
DescribeDomain
: Returns a DomainDescription
object that contains information about the requested domain.
DescribePackageVersion
: Returns a PackageVersionDescription object that contains details about a package version.
DescribeRepository
: Returns a RepositoryDescription
object that contains detailed information about the requested repository.
DisposePackageVersions
: Disposes versions of a package. A package version with the status Disposed
cannot be restored because they have been permanently removed from storage.
DisassociateExternalConnection
: Removes an existing external connection from a repository.
GetAuthorizationToken
: Generates a temporary authorization token for accessing repositories in the domain. The token expires the authorization period has passed. The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours.
GetDomainPermissionsPolicy
: Returns the policy of a resource that is attached to the specified domain.
GetPackageVersionAsset
: Returns the contents of an asset that is in a package version.
GetPackageVersionReadme
: Gets the readme file or descriptive text for a package version.
GetRepositoryEndpoint
: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:
maven
npm
nuget
pypi
GetRepositoryPermissionsPolicy
: Returns the resource policy that is set on a repository.
ListDomains
: Returns a list of DomainSummary
objects. Each returned DomainSummary
object contains information about a domain.
ListPackages
: Lists the packages in a repository.
ListPackageVersionAssets
: Lists the assets for a given package version.
ListPackageVersionDependencies
: Returns a list of the direct dependencies for a package version.
ListPackageVersions
: Returns a list of package versions for a specified package in a repository.
ListRepositories
: Returns a list of repositories owned by the Amazon Web Services account that called this method.
ListRepositoriesInDomain
: Returns a list of the repositories in a domain.
PutDomainPermissionsPolicy
: Attaches a resource policy to a domain.
PutRepositoryPermissionsPolicy
: Sets the resource policy on a repository that specifies permissions to access it.
UpdatePackageVersionsStatus
: Updates the status of one or more versions of a package.
UpdateRepository
: Updates the properties of a repository.
CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, pip, and dotnet. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client.
CodeArtifact Components
Use the information in this guide to help you work with the following CodeArtifact components:
Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools like the npm
CLI, the Maven CLI ( mvn
), Python CLIs ( pip
and twine
), and NuGet CLIs (nuget
and dotnet
).
Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in Key Management Service (KMS).
Each repository is a member of a single domain and can't be moved to a different domain.
The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages.
Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization.
Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, Maven, and NuGet package formats.
In CodeArtifact, a package consists of:
A name (for example, webpack
is the name of a popular npm package)
An optional namespace (for example, @types
in @types/node
)
A set of versions (for example, 1.0.0
, 1.0.1
, 1.0.2
, etc.)
Package-level metadata (for example, npm tags)
Package version: A version of a package, such as @types/node 12.6.9
. The version number format and semantics vary for different package formats. For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets.
Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories.
Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz
file or Maven POM and JAR files.
CodeArtifact supports these operations:
AssociateExternalConnection
: Adds an existing external connection to a repository.
CopyPackageVersions
: Copies package versions from one repository to another repository in the same domain.
CreateDomain
: Creates a domain.
CreateRepository
: Creates a CodeArtifact repository in a domain.
DeleteDomain
: Deletes a domain. You cannot delete a domain that contains repositories.
DeleteDomainPermissionsPolicy
: Deletes the resource policy that is set on a domain.
DeletePackageVersions
: Deletes versions of a package. After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage.
DeleteRepository
: Deletes a repository.
DeleteRepositoryPermissionsPolicy
: Deletes the resource policy that is set on a repository.
DescribeDomain
: Returns a DomainDescription
object that contains information about the requested domain.
DescribePackage
: Returns a PackageDescription object that contains details about a package.
DescribePackageVersion
: Returns a PackageVersionDescription object that contains details about a package version.
DescribeRepository
: Returns a RepositoryDescription
object that contains detailed information about the requested repository.
DisposePackageVersions
: Disposes versions of a package. A package version with the status Disposed
cannot be restored because its assets have been permanently removed from storage.
DisassociateExternalConnection
: Removes an existing external connection from a repository.
GetAuthorizationToken
: Generates a temporary authorization token for accessing repositories in the domain. The token expires after the authorization period has passed. The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours.
GetDomainPermissionsPolicy
: Returns the policy of a resource that is attached to the specified domain.
GetPackageVersionAsset
: Returns the contents of an asset that is in a package version.
GetPackageVersionReadme
: Gets the readme file or descriptive text for a package version.
GetRepositoryEndpoint
: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:
maven
npm
nuget
pypi
GetRepositoryPermissionsPolicy
: Returns the resource policy that is set on a repository.
ListDomains
: Returns a list of DomainSummary
objects. Each returned DomainSummary
object contains information about a domain.
ListPackages
: Lists the packages in a repository.
ListPackageVersionAssets
: Lists the assets for a given package version.
ListPackageVersionDependencies
: Returns a list of the direct dependencies for a package version.
ListPackageVersions
: Returns a list of package versions for a specified package in a repository.
ListRepositories
: Returns a list of repositories owned by the Amazon Web Services account that called this method.
ListRepositoriesInDomain
: Returns a list of the repositories in a domain.
PutDomainPermissionsPolicy
: Attaches a resource policy to a domain.
PutPackageOriginConfiguration
: Sets the package origin configuration for a package, which determines how new versions of the package can be added to a specific repository.
PutRepositoryPermissionsPolicy
: Sets the resource policy on a repository that specifies permissions to access it.
UpdatePackageVersionsStatus
: Updates the status of one or more versions of a package.
UpdateRepository
: Updates the properties of a repository.
Adds an existing external connection to a repository. One external connection is allowed per repository.
A repository can have one or more upstream repositories, or an external connection.
Copies package versions from one repository to another repository in the same domain.
You must specify versions
or versionRevisions
. You cannot specify both.
Deletes a repository.
", "DeleteRepositoryPermissionsPolicy": "Deletes the resource policy that is set on a repository. After a resource policy is deleted, the permissions allowed and denied by the deleted policy are removed. The effect of deleting a resource policy might not be immediate.
Use DeleteRepositoryPermissionsPolicy
with caution. After a policy is deleted, Amazon Web Services users, roles, and accounts lose permissions to perform the repository actions granted by the deleted policy.
Returns a DomainDescription object that contains information about the requested domain.
", + "DescribePackage": "Returns a PackageDescription object that contains information about the requested package.
", "DescribePackageVersion": "Returns a PackageVersionDescription object that contains information about the requested package version.
", "DescribeRepository": " Returns a RepositoryDescription
object that contains detailed information about the requested repository.
Removes an existing external connection from a repository.
", @@ -19,7 +20,7 @@ "GetAuthorizationToken": " Generates a temporary authorization token for accessing repositories in the domain. This API requires the codeartifact:GetAuthorizationToken
and sts:GetServiceBearerToken
permissions. For more information about authorization tokens, see CodeArtifact authentication and tokens.
CodeArtifact authorization tokens are valid for a period of 12 hours when created with the login
command. You can call login
periodically to refresh the token. When you create an authorization token with the GetAuthorizationToken
API, you can set a custom authorization period, up to a maximum of 12 hours, with the durationSeconds
parameter.
The authorization period begins after login
or GetAuthorizationToken
is called. If login
or GetAuthorizationToken
is called while assuming a role, the token lifetime is independent of the maximum session duration of the role. For example, if you call sts assume-role
and specify a session duration of 15 minutes, then generate a CodeArtifact authorization token, the token will be valid for the full authorization period even though this is longer than the 15-minute session duration.
See Using IAM Roles for more information on controlling session duration.
Returns the resource policy attached to the specified domain.
The policy is a resource-based policy, not an identity-based policy. For more information, see Identity-based policies and resource-based policies in the IAM User Guide.
Returns an asset (or file) that is in a package. For example, for a Maven package version, use GetPackageVersionAsset
to download a JAR
file, a POM
file, or any other assets in the package version.
Gets the readme file or descriptive text for a package version.
The returned text might contain formatting. For example, it might contain formatting for Markdown or reStructuredText.
", + "GetPackageVersionReadme": " Gets the readme file or descriptive text for a package version. For packages that do not contain a readme file, CodeArtifact extracts a description from a metadata file. For example, from the <description>
element in the pom.xml
file of a Maven package.
The returned text might contain formatting. For example, it might contain formatting for Markdown or reStructuredText.
", "GetRepositoryEndpoint": "Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:
maven
npm
nuget
pypi
Returns the resource policy that is set on a repository.
", "ListDomains": " Returns a list of DomainSummary objects for all domains owned by the Amazon Web Services account that makes this call. Each returned DomainSummary
object contains information about a domain.
Returns a list of RepositorySummary objects. Each RepositorySummary
contains information about a repository in the specified domain and that matches the input parameters.
Gets information about Amazon Web Services tags for a specified Amazon Resource Name (ARN) in CodeArtifact.
", "PutDomainPermissionsPolicy": "Sets a resource policy on a domain that specifies permissions to access it.
When you call PutDomainPermissionsPolicy
, the resource policy on the domain is ignored when evaluating permissions. This ensures that the owner of a domain cannot lock themselves out of the domain, which would prevent them from being able to update the resource policy.
Sets the package origin configuration for a package.
The package origin configuration determines how new versions of a package can be added to a repository. You can allow or block direct publishing of new package versions, or ingestion and retaining of new package versions from an external connection or upstream source. For more information about package origin controls and configuration, see Editing package origin controls in the CodeArtifact User Guide.
PutPackageOriginConfiguration
can be called on a package that doesn't yet exist in the repository. When called on a package that does not exist, a package is created in the repository with no versions and the requested restrictions are set on the package. This can be used to preemptively block ingesting or retaining any versions from external connections or upstream repositories, or to block publishing any versions of the package into the repository before connecting any package managers or publishers to the repository.
Sets the resource policy on a repository that specifies permissions to access it.
When you call PutRepositoryPermissionsPolicy
, the resource policy on the repository is ignored when evaluating permissions. This ensures that the owner of a repository cannot lock themselves out of the repository, which would prevent them from being able to update the resource policy.
Adds or updates tags for a resource in CodeArtifact.
", "UntagResource": "Removes tags from a resource in CodeArtifact.
", @@ -55,6 +57,7 @@ "DeleteRepositoryPermissionsPolicyRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
", "DeleteRepositoryRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
", "DescribeDomainRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
", + "DescribePackageRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
", "DescribePackageVersionRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
", "DescribeRepositoryRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
", "DisassociateExternalConnectionRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
", @@ -74,6 +77,7 @@ "ListRepositoriesInDomainRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
", "ListRepositoriesInDomainRequest$administratorAccount": "Filter the list of repositories to only include those that are managed by the Amazon Web Services account ID.
", "PutDomainPermissionsPolicyRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
", + "PutPackageOriginConfigurationRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
", "PutRepositoryPermissionsPolicyRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
", "RepositoryDescription$administratorAccount": "The 12-digit account number of the Amazon Web Services account that manages the repository.
", "RepositoryDescription$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain that contains the repository. It does not include dashes or spaces.
", @@ -83,6 +87,20 @@ "UpdateRepositoryRequest$domainOwner": "The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces.
" } }, + "AllowPublish": { + "base": null, + "refs": { + "ListPackagesRequest$publish": "The value of the Publish
package origin control restriction used to filter requested packages. Only packages with the provided restriction are returned. For more information, see PackageOriginRestrictions.
The package origin configuration that determines if new versions of the package can be published directly to the repository.
" + } + }, + "AllowUpstream": { + "base": null, + "refs": { + "ListPackagesRequest$upstream": "The value of the Upstream
package origin control restriction used to filter requested packages. Only packages with the provided restriction are returned. For more information, see PackageOriginRestrictions.
The package origin configuration that determines if new versions of the package can be added to the repository from an external connection or upstream source.
" + } + }, "Arn": { "base": null, "refs": { @@ -250,6 +268,16 @@ "refs": { } }, + "DescribePackageRequest": { + "base": null, + "refs": { + } + }, + "DescribePackageResult": { + "base": null, + "refs": { + } + }, "DescribePackageVersionRequest": { "base": null, "refs": { @@ -307,6 +335,12 @@ "DescribeDomainResult$domain": null } }, + "DomainEntryPoint": { + "base": "Information about how a package originally entered the CodeArtifact domain. For packages published directly to CodeArtifact, the entry point is the repository it was published to. For packages ingested from an external repository, the entry point is the external connection that it was ingested from. An external connection is a CodeArtifact repository that is connected to an external repository such as the npm registry or NuGet gallery.
", + "refs": { + "PackageVersionOrigin$domainEntryPoint": "A DomainEntryPoint object that contains information about from which repository or external connection the package version was added to the domain.
" + } + }, "DomainName": { "base": null, "refs": { @@ -320,6 +354,7 @@ "DeleteRepositoryPermissionsPolicyRequest$domain": "The name of the domain that contains the repository associated with the resource policy to be deleted.
", "DeleteRepositoryRequest$domain": "The name of the domain that contains the repository to delete.
", "DescribeDomainRequest$domain": "A string that specifies the name of the requested domain.
", + "DescribePackageRequest$domain": "The name of the domain that contains the repository that contains the package.
", "DescribePackageVersionRequest$domain": "The name of the domain that contains the repository that contains the package version.
", "DescribeRepositoryRequest$domain": "The name of the domain that contains the repository to describe.
", "DisassociateExternalConnectionRequest$domain": "The name of the domain that contains the repository from which to remove the external repository.
", @@ -334,10 +369,11 @@ "GetRepositoryPermissionsPolicyRequest$domain": "The name of the domain containing the repository whose associated resource policy is to be retrieved.
", "ListPackageVersionAssetsRequest$domain": "The name of the domain that contains the repository associated with the package version assets.
", "ListPackageVersionDependenciesRequest$domain": "The name of the domain that contains the repository that contains the requested package version dependencies.
", - "ListPackageVersionsRequest$domain": "The name of the domain that contains the repository that contains the returned package versions.
", - "ListPackagesRequest$domain": "The name of the domain that contains the repository that contains the requested list of packages.
", + "ListPackageVersionsRequest$domain": "The name of the domain that contains the repository that contains the requested package versions.
", + "ListPackagesRequest$domain": "The name of the domain that contains the repository that contains the requested packages.
", "ListRepositoriesInDomainRequest$domain": "The name of the domain that contains the returned list of repositories.
", "PutDomainPermissionsPolicyRequest$domain": "The name of the domain on which to set the resource policy.
", + "PutPackageOriginConfigurationRequest$domain": "The name of the domain that contains the repository that contains the package.
", "PutRepositoryPermissionsPolicyRequest$domain": "The name of the domain containing the repository to set the resource policy on.
", "RepositoryDescription$domainName": "The name of the domain that contains the repository.
", "RepositorySummary$domainName": "The name of the domain that contains the repository.
", @@ -373,8 +409,9 @@ "ExternalConnectionName": { "base": null, "refs": { - "AssociateExternalConnectionRequest$externalConnection": "The name of the external connection to add to the repository. The following values are supported:
public:npmjs
- for the npm public repository.
public:nuget-org
- for the NuGet Gallery.
public:pypi
- for the Python Package Index.
public:maven-central
- for Maven Central.
public:maven-googleandroid
- for the Google Android repository.
public:maven-gradleplugins
- for the Gradle plugins repository.
public:maven-commonsware
- for the CommonsWare Android repository.
The name of the external connection to add to the repository. The following values are supported:
public:npmjs
- for the npm public repository.
public:pypi
- for the Python Package Index.
public:maven-central
- for Maven Central.
public:maven-googleandroid
- for the Google Android repository.
public:maven-gradleplugins
- for the Gradle plugins repository.
public:maven-commonsware
- for the CommonsWare Android repository.
The name of the external connection to be removed from the repository.
", + "DomainEntryPoint$externalConnectionName": "The name of the external connection that a package was ingested from.
", "RepositoryExternalConnectionInfo$externalConnectionName": "The name of the external connection associated with a repository.
" } }, @@ -619,26 +656,35 @@ "ListPackageVersionDependenciesResult$dependencies": "The returned list of PackageDependency objects.
" } }, + "PackageDescription": { + "base": "Details about a package.
", + "refs": { + "DescribePackageResult$package": "A PackageDescription object that contains information about the requested package.
" + } + }, "PackageFormat": { "base": null, "refs": { - "CopyPackageVersionsRequest$format": "The format of the package that is copied.
", + "CopyPackageVersionsRequest$format": "The format of the package versions to be copied.
", "DeletePackageVersionsRequest$format": "The format of the package versions to delete.
", + "DescribePackageRequest$format": "A format that specifies the type of the requested package.
", "DescribePackageVersionRequest$format": "A format that specifies the type of the requested package version.
", "DisposePackageVersionsRequest$format": "A format that specifies the type of package versions you want to dispose.
", "GetPackageVersionAssetRequest$format": "A format that specifies the type of the package version with the requested asset file.
", - "GetPackageVersionReadmeRequest$format": "A format that specifies the type of the package version with the requested readme file.
Although maven
is listed as a valid value, CodeArtifact does not support displaying readme files for Maven packages.
A format that specifies the type of the package version with the requested readme file.
", "GetPackageVersionReadmeResult$format": "The format of the package with the requested readme file.
", "GetRepositoryEndpointRequest$format": "Returns which endpoint of a repository to return. A repository has one endpoint for each package format.
", - "ListPackageVersionAssetsRequest$format": "The format of the package that contains the returned package version assets.
", - "ListPackageVersionAssetsResult$format": "The format of the package that contains the returned package version assets.
", + "ListPackageVersionAssetsRequest$format": "The format of the package that contains the requested package version assets.
", + "ListPackageVersionAssetsResult$format": "The format of the package that contains the requested package version assets.
", "ListPackageVersionDependenciesRequest$format": "The format of the package with the requested dependencies.
", "ListPackageVersionDependenciesResult$format": "A format that specifies the type of the package that contains the returned dependencies.
", - "ListPackageVersionsRequest$format": "The format of the returned packages.
", + "ListPackageVersionsRequest$format": "The format of the returned package versions.
", "ListPackageVersionsResult$format": "A format of the package.
", - "ListPackagesRequest$format": "The format of the packages.
", + "ListPackagesRequest$format": "The format used to filter requested packages. Only packages from the provided format will be returned.
", + "PackageDescription$format": "A format that specifies the type of the package.
", "PackageSummary$format": "The format of the package.
", "PackageVersionDescription$format": "The format of the package version.
", + "PutPackageOriginConfigurationRequest$format": "A format that specifies the type of the package to be updated.
", "RepositoryExternalConnectionInfo$packageFormat": "The package format associated with a repository's external connection. The valid package formats are:
npm
: A Node Package Manager (npm) package.
pypi
: A Python Package Index (PyPI) package.
maven
: A Maven package that contains compiled code in a distributable format, such as a JAR file.
nuget
: A NuGet package.
A format that specifies the type of the package with the statuses to update.
" } @@ -646,47 +692,68 @@ "PackageName": { "base": null, "refs": { - "CopyPackageVersionsRequest$package": "The name of the package that is copied.
", + "CopyPackageVersionsRequest$package": "The name of the package that contains the versions to be copied.
", "DeletePackageVersionsRequest$package": "The name of the package with the versions to delete.
", + "DescribePackageRequest$package": "The name of the requested package.
", "DescribePackageVersionRequest$package": "The name of the requested package version.
", "DisposePackageVersionsRequest$package": "The name of the package with the versions you want to dispose.
", "GetPackageVersionAssetRequest$package": "The name of the package that contains the requested asset.
", "GetPackageVersionReadmeRequest$package": "The name of the package version that contains the requested readme file.
", "GetPackageVersionReadmeResult$package": "The name of the package that contains the returned readme file.
", - "ListPackageVersionAssetsRequest$package": "The name of the package that contains the returned package version assets.
", - "ListPackageVersionAssetsResult$package": "The name of the package that contains the returned package version assets.
", + "ListPackageVersionAssetsRequest$package": "The name of the package that contains the requested package version assets.
", + "ListPackageVersionAssetsResult$package": "The name of the package that contains the requested package version assets.
", "ListPackageVersionDependenciesRequest$package": "The name of the package versions' package.
", "ListPackageVersionDependenciesResult$package": "The name of the package that contains the returned package versions dependencies.
", - "ListPackageVersionsRequest$package": "The name of the package for which you want to return a list of package versions.
", + "ListPackageVersionsRequest$package": "The name of the package for which you want to request package versions.
", "ListPackageVersionsResult$package": "The name of the package.
", - "ListPackagesRequest$packagePrefix": " A prefix used to filter returned packages. Only packages with names that start with packagePrefix
are returned.
A prefix used to filter requested packages. Only packages with names that start with packagePrefix
are returned.
The name of the package that this package depends on.
", + "PackageDescription$name": "The name of the package.
", "PackageSummary$package": "The name of the package.
", "PackageVersionDescription$packageName": "The name of the requested package.
", + "PutPackageOriginConfigurationRequest$package": "The name of the package to be updated.
", "UpdatePackageVersionsStatusRequest$package": "The name of the package with the version statuses to update.
" } }, "PackageNamespace": { "base": null, "refs": { - "CopyPackageVersionsRequest$namespace": "The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
A Python package does not contain a corresponding component, so Python packages do not have a namespace.
The namespace of the package versions to be copied. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
. The namespace is required when copying Maven package versions.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the package versions to be deleted. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
. The namespace is required when deleting Maven package versions.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the requested package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
. The namespace is required when requesting Maven packages.
The namespace of an npm package is its scope
.
Python and NuGet packages do not contain a corresponding component, packages of those formats do not have a namespace.
The namespace of the requested package version. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the package versions to be disposed. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the package version with the requested asset file. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the package version with the requested readme file. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the package version with the requested readme file. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the package version that contains the requested package version assets. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the package version that contains the requested package version assets. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the package version with the requested dependencies. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the package version that contains the returned dependencies. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
Python and NuGet packages do not contain a corresponding component, packages of those formats do not have a namespace.
The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
Python and NuGet packages do not contain a corresponding component, packages of those formats do not have a namespace.
The namespace used to filter requested packages. Only packages with the provided namespace will be returned. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
Python and NuGet packages do not contain a corresponding component, packages of those formats do not have a namespace.
The namespace of the package that this package depends on. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
Python and NuGet packages do not contain a corresponding component, packages of those formats do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
Python and NuGet packages do not contain a corresponding component, packages of those formats do not have a namespace.
The namespace of the package. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
Python and NuGet packages do not contain a corresponding component, packages of those formats do not have a namespace.
The namespace of the package version. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
The namespace of the package to be updated. The package component that specifies its namespace depends on its type. For example:
The namespace of a Maven package is its groupId
.
The namespace of an npm package is its scope
.
Python and NuGet packages do not contain a corresponding component, packages of those formats do not have a namespace.
The namespace of the package version to be updated. The package version component that specifies its namespace depends on its type. For example:
The namespace of a Maven package version is its groupId
.
The namespace of an npm package version is its scope
.
Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace.
Details about the package origin configuration of a package.
", + "refs": { + "PackageDescription$originConfiguration": "The package origin configuration for the package.
", + "PackageSummary$originConfiguration": "A PackageOriginConfiguration object that contains a PackageOriginRestrictions object that contains information about the upstream and publish package origin restrictions.
", + "PutPackageOriginConfigurationResult$originConfiguration": "A PackageOriginConfiguration object that describes the origin configuration set for the package. It contains a PackageOriginRestrictions object that describes how new versions of the package can be introduced to the repository.
" + } + }, + "PackageOriginRestrictions": { + "base": "Details about the origin restrictions set on the package. The package origin restrictions determine how new versions of a package can be added to a specific repository.
", + "refs": { + "PackageOriginConfiguration$restrictions": "A PackageOriginRestrictions
object that contains information about the upstream and publish package origin configuration for the package.
A PackageOriginRestrictions object that contains information about the upstream
and publish
package origin restrictions. The upstream
restriction determines if new package versions can be ingested or retained from external connections or upstream repositories. The publish
restriction determines if new package versions can be published directly to the repository.
You must include both the desired upstream
and publish
restrictions.
A string that contains the package version (for example, 3.5.2
).
The version of the package with the requested readme file.
", "ListPackageVersionAssetsRequest$packageVersion": " A string that contains the package version (for example, 3.5.2
).
The version of the package associated with the returned assets.
", + "ListPackageVersionAssetsResult$version": "The version of the package associated with the requested assets.
", "ListPackageVersionDependenciesRequest$packageVersion": " A string that contains the package version (for example, 3.5.2
).
The version of the package that is specified in the request.
", "ListPackageVersionsResult$defaultDisplayVersion": "The default package version to display. This depends on the package format:
For Maven and PyPI packages, it's the most recently published package version.
For npm packages, it's the version referenced by the latest
tag. If the latest
tag is not set, it's the most recently published package version.
An error associated with package.
", + "base": "l An error associated with package.
", "refs": { "PackageVersionErrorMap$value": null } @@ -752,12 +819,26 @@ "PackageVersionList": { "base": null, "refs": { - "CopyPackageVersionsRequest$versions": "The versions of the package to copy.
You must specify versions
or versionRevisions
. You cannot specify both.
The versions of the package to be copied.
You must specify versions
or versionRevisions
. You cannot specify both.
An array of strings that specify the versions of the package to delete.
", "DisposePackageVersionsRequest$versions": "The versions of the package you want to dispose.
", "UpdatePackageVersionsStatusRequest$versions": "An array of strings that specify the versions of the package with the statuses to update.
" } }, + "PackageVersionOrigin": { + "base": "Information about how a package version was added to a repository.
", + "refs": { + "PackageVersionDescription$origin": "A PackageVersionOrigin object that contains information about how the package version was added to the repository.
", + "PackageVersionSummary$origin": "A PackageVersionOrigin object that contains information about how the package version was added to the repository.
" + } + }, + "PackageVersionOriginType": { + "base": null, + "refs": { + "ListPackageVersionsRequest$originType": "The originType
used to filter package versions. Only package versions with the provided originType
will be returned.
Describes how the package version was originally added to the domain. An INTERNAL
origin type means the package version was published directly to a repository in the domain. An EXTERNAL
origin type means the package version was ingested from an external connection.
How to sort the returned list of package versions.
" + "ListPackageVersionsRequest$sortBy": "How to sort the requested list of package versions.
" } }, "PackageVersionStatus": { @@ -790,7 +871,7 @@ "refs": { "DeletePackageVersionsRequest$expectedStatus": "The expected status of the package version to delete.
", "DisposePackageVersionsRequest$expectedStatus": "The expected status of the package version to dispose.
", - "ListPackageVersionsRequest$status": "A string that specifies the status of the package versions to include in the returned list.
", + "ListPackageVersionsRequest$status": "A string that filters the requested package versions by status.
", "PackageVersionDescription$status": "A string that contains the status of the package version.
", "PackageVersionSummary$status": "A string that contains the status of the package version. It can be one of the following:
", "SuccessfulPackageVersionInfo$status": "The status of a package version.
", @@ -857,6 +938,16 @@ "refs": { } }, + "PutPackageOriginConfigurationRequest": { + "base": null, + "refs": { + } + }, + "PutPackageOriginConfigurationResult": { + "base": null, + "refs": { + } + }, "PutRepositoryPermissionsPolicyRequest": { "base": null, "refs": { @@ -894,26 +985,29 @@ "base": null, "refs": { "AssociateExternalConnectionRequest$repository": "The name of the repository to which the external connection is added.
", - "CopyPackageVersionsRequest$sourceRepository": "The name of the repository that contains the package versions to copy.
", + "CopyPackageVersionsRequest$sourceRepository": "The name of the repository that contains the package versions to be copied.
", "CopyPackageVersionsRequest$destinationRepository": "The name of the repository into which package versions are copied.
", "CreateRepositoryRequest$repository": "The name of the repository to create.
", "DeletePackageVersionsRequest$repository": "The name of the repository that contains the package versions to delete.
", "DeleteRepositoryPermissionsPolicyRequest$repository": "The name of the repository that is associated with the resource policy to be deleted
", "DeleteRepositoryRequest$repository": "The name of the repository to delete.
", + "DescribePackageRequest$repository": "The name of the repository that contains the requested package.
", "DescribePackageVersionRequest$repository": "The name of the repository that contains the package version.
", "DescribeRepositoryRequest$repository": "A string that specifies the name of the requested repository.
", "DisassociateExternalConnectionRequest$repository": "The name of the repository from which the external connection will be removed.
", "DisposePackageVersionsRequest$repository": "The name of the repository that contains the package versions you want to dispose.
", + "DomainEntryPoint$repositoryName": "The name of the repository that a package was originally published to.
", "GetPackageVersionAssetRequest$repository": "The repository that contains the package version with the requested asset.
", "GetPackageVersionReadmeRequest$repository": "The repository that contains the package with the requested readme file.
", "GetRepositoryEndpointRequest$repository": "The name of the repository.
", "GetRepositoryPermissionsPolicyRequest$repository": "The name of the repository whose associated resource policy is to be retrieved.
", - "ListPackageVersionAssetsRequest$repository": "The name of the repository that contains the package that contains the returned package version assets.
", + "ListPackageVersionAssetsRequest$repository": "The name of the repository that contains the package that contains the requested package version assets.
", "ListPackageVersionDependenciesRequest$repository": "The name of the repository that contains the requested package version.
", - "ListPackageVersionsRequest$repository": "The name of the repository that contains the package.
", - "ListPackagesRequest$repository": "The name of the repository from which packages are to be listed.
", + "ListPackageVersionsRequest$repository": "The name of the repository that contains the requested package versions.
", + "ListPackagesRequest$repository": "The name of the repository that contains the requested packages.
", "ListRepositoriesInDomainRequest$repositoryPrefix": " A prefix used to filter returned repositories. Only repositories with names that start with repositoryPrefix
are returned.
A prefix used to filter returned repositories. Only repositories with names that start with repositoryPrefix
are returned.
The name of the repository that contains the package.
", "PutRepositoryPermissionsPolicyRequest$repository": "The name of the repository to set the resource policy on.
", "RepositoryDescription$name": "The name of the repository.
", "RepositorySummary$name": "The name of the repository.
", diff --git a/models/apis/config/2014-11-12/api-2.json b/models/apis/config/2014-11-12/api-2.json index 091df65c339..4410254df27 100644 --- a/models/apis/config/2014-11-12/api-2.json +++ b/models/apis/config/2014-11-12/api-2.json @@ -4509,7 +4509,22 @@ "AWS::ECR::PublicRepository", "AWS::GuardDuty::Detector", "AWS::EMR::SecurityConfiguration", - "AWS::SageMaker::CodeRepository" + "AWS::SageMaker::CodeRepository", + "AWS::Route53Resolver::ResolverEndpoint", + "AWS::Route53Resolver::ResolverRule", + "AWS::Route53Resolver::ResolverRuleAssociation", + "AWS::DMS::ReplicationSubnetGroup", + "AWS::DMS::EventSubscription", + "AWS::MSK::Cluster", + "AWS::StepFunctions::Activity", + "AWS::WorkSpaces::Workspace", + "AWS::WorkSpaces::ConnectionAlias", + "AWS::SageMaker::Model", + "AWS::ElasticLoadBalancingV2::Listener", + "AWS::StepFunctions::StateMachine", + "AWS::Batch::JobQueue", + "AWS::Batch::ComputeEnvironment", + "AWS::AccessAnalyzer::Analyzer" ] }, "ResourceTypeList":{ diff --git a/models/apis/config/2014-11-12/docs-2.json b/models/apis/config/2014-11-12/docs-2.json index 572df67876e..e8c26ea261b 100644 --- a/models/apis/config/2014-11-12/docs-2.json +++ b/models/apis/config/2014-11-12/docs-2.json @@ -68,14 +68,14 @@ "ListStoredQueries": "Lists the stored queries for a single Amazon Web Services account and a single Amazon Web Services Region. The default is 100.
", "ListTagsForResource": "List the tags for Config resource.
", "PutAggregationAuthorization": "Authorizes the aggregator account and region to collect data from the source account and region.
", - "PutConfigRule": "Adds or updates an Config rule for evaluating whether your Amazon Web Services resources comply with your desired configurations.
You can use this action for Config custom rules and Config managed rules. A Config custom rule is a rule that you develop and maintain. An Config managed rule is a customizable, predefined rule that Config provides.
If you are adding a new Config custom rule, you must first create the Lambda function that the rule invokes to evaluate your resources. When you use the PutConfigRule
action to add the rule to Config, you must specify the Amazon Resource Name (ARN) that Lambda assigns to the function. Specify the ARN for the SourceIdentifier
key. This key is part of the Source
object, which is part of the ConfigRule
object.
If you are adding an Config managed rule, specify the rule's identifier for the SourceIdentifier
key. To reference Config managed rule identifiers, see About Config managed rules.
For any new rule that you add, specify the ConfigRuleName
in the ConfigRule
object. Do not specify the ConfigRuleArn
or the ConfigRuleId
. These values are generated by Config for new rules.
If you are updating a rule that you added previously, you can specify the rule by ConfigRuleName
, ConfigRuleId
, or ConfigRuleArn
in the ConfigRule
data type that you use in this request.
The maximum number of rules that Config supports is 150.
For information about requesting a rule limit increase, see Config Limits in the Amazon Web Services General Reference Guide.
For more information about developing and using Config rules, see Evaluating Amazon Web Services resource Configurations with Config in the Config Developer Guide.
", + "PutConfigRule": "Adds or updates an Config rule for evaluating whether your Amazon Web Services resources comply with your desired configurations.
You can use this action for Config custom rules and Config managed rules. A Config custom rule is a rule that you develop and maintain. An Config managed rule is a customizable, predefined rule that Config provides.
If you are adding a new Config custom rule, you must first create the Lambda function that the rule invokes to evaluate your resources. When you use the PutConfigRule
action to add the rule to Config, you must specify the Amazon Resource Name (ARN) that Lambda assigns to the function. Specify the ARN for the SourceIdentifier
key. This key is part of the Source
object, which is part of the ConfigRule
object.
If you are adding an Config managed rule, specify the rule's identifier for the SourceIdentifier
key. To reference Config managed rule identifiers, see About Config managed rules.
For any new rule that you add, specify the ConfigRuleName
in the ConfigRule
object. Do not specify the ConfigRuleArn
or the ConfigRuleId
. These values are generated by Config for new rules.
If you are updating a rule that you added previously, you can specify the rule by ConfigRuleName
, ConfigRuleId
, or ConfigRuleArn
in the ConfigRule
data type that you use in this request.
For information on how many Config rules you can have per account, see Service Limits in the Config Developer Guide.
For more information about developing and using Config rules, see Evaluating Amazon Web Services resource Configurations with Config in the Config Developer Guide.
", "PutConfigurationAggregator": "Creates and updates the configuration aggregator with the selected source accounts and regions. The source account can be individual account(s) or an organization.
accountIds
that are passed will be replaced with existing accounts. If you want to add additional accounts into the aggregator, call DescribeConfigurationAggregators
to get the previous accounts and then append new ones.
Config should be enabled in source accounts and regions you want to aggregate.
If your source type is an organization, you must be signed in to the management account or a registered delegated administrator and all the features must be enabled in your organization. If the caller is a management account, Config calls EnableAwsServiceAccess
API to enable integration between Config and Organizations. If the caller is a registered delegated administrator, Config calls ListDelegatedAdministrators
API to verify whether the caller is a valid delegated administrator.
To register a delegated administrator, see Register a Delegated Administrator in the Config developer guide.
Creates a new configuration recorder to record the selected resource configurations.
You can use this action to change the role roleARN
or the recordingGroup
of an existing recorder. To change the role, call the action on the existing configuration recorder and specify a role.
Currently, you can specify only one configuration recorder per region in your account.
If ConfigurationRecorder
does not have the recordingGroup parameter specified, the default is to record all supported resource types.
Creates or updates a conformance pack. A conformance pack is a collection of Config rules that can be easily deployed in an account and a region and across Amazon Web Services Organization.
This API creates a service linked role AWSServiceRoleForConfigConforms
in your account. The service linked role is created only when the role does not exist in your account.
You must specify either the TemplateS3Uri
or the TemplateBody
parameter, but not both. If you provide both Config uses the TemplateS3Uri
parameter and ignores the TemplateBody
parameter.
Creates or updates a conformance pack. A conformance pack is a collection of Config rules that can be easily deployed in an account and a region and across Amazon Web Services Organization. For information on how many conformance packs you can have per account, see Service Limits in the Config Developer Guide.
This API creates a service linked role AWSServiceRoleForConfigConforms
in your account. The service linked role is created only when the role does not exist in your account.
You must specify either the TemplateS3Uri
or the TemplateBody
parameter, but not both. If you provide both Config uses the TemplateS3Uri
parameter and ignores the TemplateBody
parameter.
Creates a delivery channel object to deliver configuration information to an Amazon S3 bucket and Amazon SNS topic.
Before you can create a delivery channel, you must create a configuration recorder.
You can use this action to change the Amazon S3 bucket or an Amazon SNS topic of the existing delivery channel. To change the Amazon S3 bucket or an Amazon SNS topic, call this action and specify the changed values for the S3 bucket and the SNS topic. If you specify a different value for either the S3 bucket or the SNS topic, this action will keep the existing value for the parameter that is not changed.
You can have only one delivery channel per region in your account.
Used by an Lambda function to deliver evaluation results to Config. This action is required in every Lambda function that is invoked by an Config rule.
", "PutExternalEvaluation": "Add or updates the evaluations for process checks. This API checks if the rule is a process check when the name of the Config rule is provided.
", - "PutOrganizationConfigRule": "Adds or updates organization Config rule for your entire organization evaluating whether your Amazon Web Services resources comply with your desired configurations.
Only a master account and a delegated administrator can create or update an organization Config rule. When calling this API with a delegated administrator, you must ensure Organizations ListDelegatedAdministrator
permissions are added.
This API enables organization service access through the EnableAWSServiceAccess
action and creates a service linked role AWSServiceRoleForConfigMultiAccountSetup
in the master or delegated administrator account of your organization. The service linked role is created only when the role does not exist in the caller account. Config verifies the existence of role with GetRole
action.
To use this API with delegated administrator, register a delegated administrator by calling Amazon Web Services Organization register-delegated-administrator
for config-multiaccountsetup.amazonaws.com
.
You can use this action to create both Config custom rules and Config managed rules. If you are adding a new Config custom rule, you must first create Lambda function in the master account or a delegated administrator that the rule invokes to evaluate your resources. You also need to create an IAM role in the managed-account that can be assumed by the Lambda function. When you use the PutOrganizationConfigRule
action to add the rule to Config, you must specify the Amazon Resource Name (ARN) that Lambda assigns to the function. If you are adding an Config managed rule, specify the rule's identifier for the RuleIdentifier
key.
The maximum number of organization Config rules that Config supports is 150 and 3 delegated administrator per organization.
Prerequisite: Ensure you call EnableAllFeatures
API to enable all features in an organization.
Specify either OrganizationCustomRuleMetadata
or OrganizationManagedRuleMetadata
.
Adds or updates organization Config rule for your entire organization evaluating whether your Amazon Web Services resources comply with your desired configurations. For information on how many organization Config rules you can have per account, see Service Limits in the Config Developer Guide.
Only a master account and a delegated administrator can create or update an organization Config rule. When calling this API with a delegated administrator, you must ensure Organizations ListDelegatedAdministrator
permissions are added. An organization can have up to 3 delegated administrators.
This API enables organization service access through the EnableAWSServiceAccess
action and creates a service linked role AWSServiceRoleForConfigMultiAccountSetup
in the master or delegated administrator account of your organization. The service linked role is created only when the role does not exist in the caller account. Config verifies the existence of role with GetRole
action.
To use this API with delegated administrator, register a delegated administrator by calling Amazon Web Services Organization register-delegated-administrator
for config-multiaccountsetup.amazonaws.com
.
You can use this action to create both Config custom rules and Config managed rules. If you are adding a new Config custom rule, you must first create Lambda function in the master account or a delegated administrator that the rule invokes to evaluate your resources. You also need to create an IAM role in the managed-account that can be assumed by the Lambda function. When you use the PutOrganizationConfigRule
action to add the rule to Config, you must specify the Amazon Resource Name (ARN) that Lambda assigns to the function. If you are adding an Config managed rule, specify the rule's identifier for the RuleIdentifier
key.
Prerequisite: Ensure you call EnableAllFeatures
API to enable all features in an organization.
Specify either OrganizationCustomRuleMetadata
or OrganizationManagedRuleMetadata
.
Deploys conformance packs across member accounts in an Amazon Web Services Organization. For information on how many organization conformance packs and how many Config rules you can have per account, see Service Limits in the Config Developer Guide.
Only a master account and a delegated administrator can call this API. When calling this API with a delegated administrator, you must ensure Organizations ListDelegatedAdministrator
permissions are added. An organization can have up to 3 delegated administrators.
This API enables organization service access for config-multiaccountsetup.amazonaws.com
through the EnableAWSServiceAccess
action and creates a service linked role AWSServiceRoleForConfigMultiAccountSetup
in the master or delegated administrator account of your organization. The service linked role is created only when the role does not exist in the caller account. To use this API with delegated administrator, register a delegated administrator by calling Amazon Web Services Organization register-delegate-admin
for config-multiaccountsetup.amazonaws.com
.
Prerequisite: Ensure you call EnableAllFeatures
API to enable all features in an organization.
You must specify either the TemplateS3Uri
or the TemplateBody
parameter, but not both. If you provide both, Config uses the TemplateS3Uri
parameter and ignores the TemplateBody
parameter.
Config sets the state of a conformance pack to CREATE_IN_PROGRESS and UPDATE_IN_PROGRESS until the conformance pack is created or updated. You cannot update a conformance pack while it is in this state.
Adds or updates the remediation configuration with a specific Config rule with the selected target or action. The API creates the RemediationConfiguration
object for the Config rule. The Config rule must already exist for you to add a remediation configuration. The target (SSM document) must exist and have permissions to use the target.
If you make backward incompatible changes to the SSM document, you must call this again to ensure the remediations can run.
This API does not support adding remediation configurations for service-linked Config Rules such as Organization Config rules, the rules deployed by conformance packs, and rules deployed by Amazon Web Services Security Hub.
For manual remediation configuration, you need to provide a value for automationAssumeRole
or use a value in the assumeRole
field to remediate your resources. The SSM automation document can use either as long as it maps to a valid parameter.
However, for automatic remediation configuration, the only valid assumeRole
field value is AutomationAssumeRole
and you need to provide a value for AutomationAssumeRole
to remediate your resources.
A remediation exception is when a specific resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specific resource with a specific Config rule.
Config generates a remediation exception when a problem occurs executing a remediation action to a specific resource. Remediation exceptions block auto-remediation until the exception is cleared.
Defines the deployment model to use for the firewall policy. To use a distributed model, set PolicyOption to NULL
.
Defines the deployment model to use for the third-party firewall.
" + "ThirdPartyFirewallPolicy$FirewallDeploymentModel": "Defines the deployment model to use for the third-party firewall policy.
" } }, "FirewallPolicyId": { @@ -703,13 +703,13 @@ "base": null, "refs": { "FMSPolicyUpdateFirewallCreationConfigAction$FirewallCreationConfig": "A FirewallCreationConfig
that you can copy into your current policy's SecurityServiceData in order to remedy scope violations.
Details about the service that are specific to the service type, in JSON format.
Example: DNS_FIREWALL
\"{\\\"type\\\":\\\"DNS_FIREWALL\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupId\\\":\\\"rslvr-frg-1\\\",\\\"priority\\\":10}],\\\"postProcessRuleGroups\\\":[{\\\"ruleGroupId\\\":\\\"rslvr-frg-2\\\",\\\"priority\\\":9911}]}\"
Valid values for preProcessRuleGroups
are between 1 and 99. Valid values for postProcessRuleGroups
are between 9901 and 10000.
Example: DNS_FIREWALL
\"{\\\"type\\\":\\\"DNS_FIREWALL\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupId\\\":\\\"rslvr-frg-1\\\",\\\"priority\\\":10}],\\\"postProcessRuleGroups\\\":[{\\\"ruleGroupId\\\":\\\"rslvr-frg-2\\\",\\\"priority\\\":9911}]}\"
Valid values for preProcessRuleGroups
are between 1 and 99. Valid values for postProcessRuleGroups
are between 9901 and 10000.
Example: NETWORK_FIREWALL
- Distributed deployment model with automatic Availability Zone configuration. With automatic Availability Zone configuration, Firewall Manager chooses which Availability Zones to create the endpoints in.
\"{ \\\"type\\\": \\\"NETWORK_FIREWALL\\\", \\\"networkFirewallStatelessRuleGroupReferences\\\": [ { \\\"resourceARN\\\": \\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\", \\\"priority\\\": 1 } ], \\\"networkFirewallStatelessDefaultActions\\\": [ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessFragmentDefaultActions\\\": [ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessCustomActions\\\": [ { \\\"actionName\\\": \\\"customActionName\\\", \\\"actionDefinition\\\": { \\\"publishMetricAction\\\": { \\\"dimensions\\\": [ { \\\"value\\\": \\\"metricdimensionvalue\\\" } ] } } } ], \\\"networkFirewallStatefulRuleGroupReferences\\\": [ { \\\"resourceARN\\\": \\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\" } ], \\\"networkFirewallOrchestrationConfig\\\": { \\\"singleFirewallEndpointPerVPC\\\": false, \\\"allowedIPV4CidrList\\\": [ \\\"10.0.0.0/28\\\", \\\"192.168.0.0/28\\\" ], \\\"routeManagementAction\\\": \\\"OFF\\\" }, \\\"networkFirewallLoggingConfiguration\\\": { \\\"logDestinationConfigs\\\": [ { \\\"logDestinationType\\\": \\\"S3\\\", \\\"logType\\\": \\\"ALERT\\\", \\\"logDestination\\\": { \\\"bucketName\\\": \\\"s3-bucket-name\\\" } }, { \\\"logDestinationType\\\": \\\"S3\\\", \\\"logType\\\": \\\"FLOW\\\", \\\"logDestination\\\": { \\\"bucketName\\\": \\\"s3-bucket-name\\\" } } ], \\\"overrideExistingConfig\\\": true } }\"
To use the distributed deployment model, you must set PolicyOption to NULL
.
Example: NETWORK_FIREWALL
- Distributed deployment model with automatic Availability Zone configuration, and route management.
\"{ \\\"type\\\": \\\"NETWORK_FIREWALL\\\", \\\"networkFirewallStatelessRuleGroupReferences\\\": [ { \\\"resourceARN\\\": \\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\", \\\"priority\\\": 1 } ], \\\"networkFirewallStatelessDefaultActions\\\": [ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessFragmentDefaultActions\\\": [ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessCustomActions\\\": [ { \\\"actionName\\\": \\\"customActionName\\\", \\\"actionDefinition\\\": { \\\"publishMetricAction\\\": { \\\"dimensions\\\": [ { \\\"value\\\": \\\"metricdimensionvalue\\\" } ] } } } ], \\\"networkFirewallStatefulRuleGroupReferences\\\": [ { \\\"resourceARN\\\": \\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\" } ], \\\"networkFirewallOrchestrationConfig\\\": { \\\"singleFirewallEndpointPerVPC\\\": false, \\\"allowedIPV4CidrList\\\": [ \\\"10.0.0.0/28\\\", \\\"192.168.0.0/28\\\" ], \\\"routeManagementAction\\\": \\\"MONITOR\\\", \\\"routeManagementTargetTypes\\\": [ \\\"InternetGateway\\\" ] }, \\\"networkFirewallLoggingConfiguration\\\": { \\\"logDestinationConfigs\\\": [ { \\\"logDestinationType\\\": \\\"S3\\\", \\\"logType\\\": \\\"ALERT\\\", \\\"logDestination\\\": { \\\"bucketName\\\": \\\"s3-bucket-name\\\" } }, { \\\"logDestinationType\\\": \\\"S3\\\", \\\"logType\\\": \\\"FLOW\\\", \\\"logDestination\\\": { \\\"bucketName\\\": \\\"s3-bucket-name\\\" } } ], \\\"overrideExistingConfig\\\": true } }\"
Example: NETWORK_FIREWALL
- Distributed deployment model with custom Availability Zone configuration. With custom Availability Zone configuration, you define which specific Availability Zones to create endpoints in by configuring firewallCreationConfig
.
\"{ \\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\",\\\"priority\\\":1}], \\\"networkFirewallStatelessDefaultActions\\\":[ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessFragmentDefaultActions\\\":[ \\\"aws:forward_to_sfe\\\", \\\"fragmentcustomactionname\\\" ], \\\"networkFirewallStatelessCustomActions\\\":[ { \\\"actionName\\\":\\\"customActionName\\\", \\\"actionDefinition\\\":{ \\\"publishMetricAction\\\":{ \\\"dimensions\\\":[ { \\\"value\\\":\\\"metricdimensionvalue\\\" } ] } } }, { \\\"actionName\\\":\\\"fragmentcustomactionname\\\", \\\"actionDefinition\\\":{ \\\"publishMetricAction\\\":{ \\\"dimensions\\\":[ { \\\"value\\\":\\\"fragmentmetricdimensionvalue\\\" } ] } } } ], \\\"networkFirewallStatefulRuleGroupReferences\\\":[ { \\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\" } ], \\\"networkFirewallOrchestrationConfig\\\":{ \\\"firewallCreationConfig\\\":{ \\\"endpointLocation\\\":{ \\\"availabilityZoneConfigList\\\":[ { \\\"availabilityZoneId\\\":null, \\\"availabilityZoneName\\\":\\\"us-east-1a\\\", \\\"allowedIPV4CidrList\\\":[ \\\"10.0.0.0/28\\\" ] }, { \\\"availabilityZoneId\\\":null, \\\"availabilityZoneName\\\":\\\"us-east-1b\\\", \\\"allowedIPV4CidrList\\\":[ \\\"10.0.0.0/28\\\" ] } ] } }, \\\"singleFirewallEndpointPerVPC\\\":false, \\\"allowedIPV4CidrList\\\":null, \\\"routeManagementAction\\\":\\\"OFF\\\", \\\"networkFirewallLoggingConfiguration\\\":{ \\\"logDestinationConfigs\\\":[ { \\\"logDestinationType\\\":\\\"S3\\\", \\\"logType\\\":\\\"ALERT\\\", \\\"logDestination\\\":{ \\\"bucketName\\\":\\\"s3-bucket-name\\\" } }, { \\\"logDestinationType\\\":\\\"S3\\\", \\\"logType\\\":\\\"FLOW\\\", \\\"logDestination\\\":{ \\\"bucketName\\\":\\\"s3-bucket-name\\\" } } ], \\\"overrideExistingConfig\\\":boolean } }\"
Example: NETWORK_FIREWALL
- Distributed deployment model with custom Availability Zone configuration, and route management.
\"{ \\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\",\\\"priority\\\":1}], \\\"networkFirewallStatelessDefaultActions\\\":[ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessFragmentDefaultActions\\\":[ \\\"aws:forward_to_sfe\\\", \\\"fragmentcustomactionname\\\" ], \\\"networkFirewallStatelessCustomActions\\\":[ { \\\"actionName\\\":\\\"customActionName\\\", \\\"actionDefinition\\\":{ \\\"publishMetricAction\\\":{ \\\"dimensions\\\":[ { \\\"value\\\":\\\"metricdimensionvalue\\\" } ] } } }, { \\\"actionName\\\":\\\"fragmentcustomactionname\\\", \\\"actionDefinition\\\":{ \\\"publishMetricAction\\\":{ \\\"dimensions\\\":[ { \\\"value\\\":\\\"fragmentmetricdimensionvalue\\\" } ] } } } ], \\\"networkFirewallStatefulRuleGroupReferences\\\":[ { \\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\" } ], \\\"networkFirewallOrchestrationConfig\\\":{ \\\"firewallCreationConfig\\\":{ \\\"endpointLocation\\\":{ \\\"availabilityZoneConfigList\\\":[ { \\\"availabilityZoneId\\\":null, \\\"availabilityZoneName\\\":\\\"us-east-1a\\\", \\\"allowedIPV4CidrList\\\":[ \\\"10.0.0.0/28\\\" ] }, { \\\"availabilityZoneId\\\":null, \\\"availabilityZoneName\\\":\\\"us-east-1b\\\", \\\"allowedIPV4CidrList\\\":[ \\\"10.0.0.0/28\\\" ] } ] } }, \\\"singleFirewallEndpointPerVPC\\\":false, \\\"allowedIPV4CidrList\\\":null, \\\"routeManagementAction\\\":\\\"MONITOR\\\", \\\"routeManagementTargetTypes\\\":[ \\\"InternetGateway\\\" ], \\\"routeManagementConfig\\\":{ \\\"allowCrossAZTrafficIfNoEndpoint\\\":true } }, \\\"networkFirewallLoggingConfiguration\\\":{ \\\"logDestinationConfigs\\\":[ { \\\"logDestinationType\\\":\\\"S3\\\", \\\"logType\\\":\\\"ALERT\\\", \\\"logDestination\\\":{ \\\"bucketName\\\":\\\"s3-bucket-name\\\" } }, { \\\"logDestinationType\\\":\\\"S3\\\", 
\\\"logType\\\":\\\"FLOW\\\", \\\"logDestination\\\":{ \\\"bucketName\\\":\\\"s3-bucket-name\\\" } } ], \\\"overrideExistingConfig\\\":boolean } }\"
Example: PARTNER_FIREWALL
for Firewall Manager
\"{\\\"type\\\":\\\"THIRD_PARTY_FIREWALL\\\",\\\"thirdPartyFirewall\\\":\\\"PALO_ALTO_NETWORKS_CLOUD_NGFW\\\",\\\"thirdPartyFirewallConfig\\\":{\\\"thirdPartyFirewallPolicyList\\\":[\\\"global-123456789012-1\\\"],\\\"networkFirewallLoggingConfiguration\\\":null},\\\"firewallDeploymentModel\\\":{\\\"distributedFirewallDeploymentModel\\\":{\\\"distributedFirewallOrchestrationConfig\\\":{\\\"firewallCreationConfig\\\":{\\\"endpointLocation\\\":{\\\"availabilityZoneConfigList\\\":[{\\\"availabilityZoneId\\\":null,\\\"availabilityZoneName\\\":\\\"us-east-1a\\\",\\\"allowedIPV4CidrList\\\":[\\\"10.0.1.0/28\\\"]}]}},\\\"allowedIPV4CidrList\\\":null},\\\"distributedRouteManagementConfig\\\":null},\\\"centralizedFirewallDeploymentModel\\\":null}}\"\"
Specification for SHIELD_ADVANCED
for Amazon CloudFront distributions
\"{\\\"type\\\":\\\"SHIELD_ADVANCED\\\",\\\"automaticResponseConfiguration\\\": {\\\"automaticResponseStatus\\\":\\\"ENABLED|IGNORED|DISABLED\\\", \\\"automaticResponseAction\\\":\\\"BLOCK|COUNT\\\"}, \\\"overrideCustomerWebaclClassic\\\":true|false}\"
For example: \"{\\\"type\\\":\\\"SHIELD_ADVANCED\\\",\\\"automaticResponseConfiguration\\\": {\\\"automaticResponseStatus\\\":\\\"ENABLED\\\", \\\"automaticResponseAction\\\":\\\"COUNT\\\"}}\"
The default value for automaticResponseStatus
is IGNORED
. The value for automaticResponseAction
is only required when automaticResponseStatus
is set to ENABLED
. The default value for overrideCustomerWebaclClassic
is false
.
For other resource types that you can protect with a Shield Advanced policy, this ManagedServiceData
configuration is an empty string.
Example: WAFV2
\"{\\\"type\\\":\\\"WAFV2\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupArn\\\":null,\\\"overrideAction\\\":{\\\"type\\\":\\\"NONE\\\"},\\\"managedRuleGroupIdentifier\\\":{\\\"version\\\":null,\\\"vendorName\\\":\\\"AWS\\\",\\\"managedRuleGroupName\\\":\\\"AWSManagedRulesAmazonIpReputationList\\\"},\\\"ruleGroupType\\\":\\\"ManagedRuleGroup\\\",\\\"excludeRules\\\":[{\\\"name\\\":\\\"NoUserAgent_HEADER\\\"}]}],\\\"postProcessRuleGroups\\\":[],\\\"defaultAction\\\":{\\\"type\\\":\\\"ALLOW\\\"},\\\"overrideCustomerWebACLAssociation\\\":false,\\\"loggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[\\\"arn:aws:firehose:us-west-2:12345678912:deliverystream/aws-waf-logs-fms-admin-destination\\\"],\\\"redactedFields\\\":[{\\\"redactedFieldType\\\":\\\"SingleHeader\\\",\\\"redactedFieldValue\\\":\\\"Cookies\\\"},{\\\"redactedFieldType\\\":\\\"Method\\\"}]}}\"
In the loggingConfiguration
, you can specify one logDestinationConfigs
, you can optionally provide up to 20 redactedFields
, and the RedactedFieldType
must be one of URI
, QUERY_STRING
, HEADER
, or METHOD
.
Example: WAF Classic
\"{\\\"type\\\": \\\"WAF\\\", \\\"ruleGroups\\\": [{\\\"id\\\":\\\"12345678-1bcd-9012-efga-0987654321ab\\\", \\\"overrideAction\\\" : {\\\"type\\\": \\\"COUNT\\\"}}], \\\"defaultAction\\\": {\\\"type\\\": \\\"BLOCK\\\"}}\"
Example: WAFV2
- Firewall Manager support for WAF managed rule group versioning
\"{\\\"type\\\":\\\"WAFV2\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupArn\\\":null,\\\"overrideAction\\\":{\\\"type\\\":\\\"NONE\\\"},\\\"managedRuleGroupIdentifier\\\":{\\\"versionEnabled\\\":true,\\\"version\\\":\\\"Version_2.0\\\",\\\"vendorName\\\":\\\"AWS\\\",\\\"managedRuleGroupName\\\":\\\"AWSManagedRulesCommonRuleSet\\\"},\\\"ruleGroupType\\\":\\\"ManagedRuleGroup\\\",\\\"excludeRules\\\":[{\\\"name\\\":\\\"NoUserAgent_HEADER\\\"}]}],\\\"postProcessRuleGroups\\\":[],\\\"defaultAction\\\":{\\\"type\\\":\\\"ALLOW\\\"},\\\"overrideCustomerWebACLAssociation\\\":false,\\\"loggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[\\\"arn:aws:firehose:us-west-2:12345678912:deliverystream/aws-waf-logs-fms-admin-destination\\\"],\\\"redactedFields\\\":[{\\\"redactedFieldType\\\":\\\"SingleHeader\\\",\\\"redactedFieldValue\\\":\\\"Cookies\\\"},{\\\"redactedFieldType\\\":\\\"Method\\\"}]}}\"
To use a specific version of a WAF managed rule group in your Firewall Manager policy, you must set versionEnabled
to true
, and set version
to the version you'd like to use. If you don't set versionEnabled
to true
, or if you omit versionEnabled
, then Firewall Manager uses the default version of the WAF managed rule group.
Example: SECURITY_GROUPS_COMMON
\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false, \\\"applyToAllEC2InstanceENIs\\\":false,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"
Example: Shared VPCs. Apply the preceding policy to resources in shared VPCs as well as to those in VPCs that the account owns
\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false, \\\"applyToAllEC2InstanceENIs\\\":false,\\\"includeSharedVPC\\\":true,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"
Example: SECURITY_GROUPS_CONTENT_AUDIT
\"{\\\"type\\\":\\\"SECURITY_GROUPS_CONTENT_AUDIT\\\",\\\"securityGroups\\\":[{\\\"id\\\":\\\"sg-000e55995d61a06bd\\\"}],\\\"securityGroupAction\\\":{\\\"type\\\":\\\"ALLOW\\\"}}\"
The security group action for content audit can be ALLOW
or DENY
. For ALLOW
, all in-scope security group rules must be within the allowed range of the policy's security group rules. For DENY
, all in-scope security group rules must not contain a value or a range that matches a rule value or range in the policy security group.
Example: SECURITY_GROUPS_USAGE_AUDIT
\"{\\\"type\\\":\\\"SECURITY_GROUPS_USAGE_AUDIT\\\",\\\"deleteUnusedSecurityGroups\\\":true,\\\"coalesceRedundantSecurityGroups\\\":true}\"
Details about the service that are specific to the service type, in JSON format.
Example: DNS_FIREWALL
\"{\\\"type\\\":\\\"DNS_FIREWALL\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupId\\\":\\\"rslvr-frg-1\\\",\\\"priority\\\":10}],\\\"postProcessRuleGroups\\\":[{\\\"ruleGroupId\\\":\\\"rslvr-frg-2\\\",\\\"priority\\\":9911}]}\"
Valid values for preProcessRuleGroups
are between 1 and 99. Valid values for postProcessRuleGroups
are between 9901 and 10000.
Example: NETWORK_FIREWALL
- Centralized deployment model
\"{\\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"awsNetworkFirewallConfig\\\":{\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\",\\\"priority\\\":1}],\\\"networkFirewallStatelessDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"customActionName\\\"],\\\"networkFirewallStatelessFragmentDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"customActionName\\\"],\\\"networkFirewallStatelessCustomActions\\\":[{\\\"actionName\\\":\\\"customActionName\\\",\\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"metricdimensionvalue\\\"}]}}}],\\\"networkFirewallStatefulRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\"}],\\\"networkFirewallLoggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\":\\\"ALERT\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}},{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\":\\\"FLOW\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}}],\\\"overrideExistingConfig\\\":true}},\\\"firewallDeploymentModel\\\":{\\\"centralizedFirewallDeploymentModel\\\":{\\\"centralizedFirewallOrchestrationConfig\\\":{\\\"inspectionVpcIds\\\":[{\\\"resourceId\\\":\\\"vpc-1234\\\",\\\"accountId\\\":\\\"123456789011\\\"}],\\\"firewallCreationConfig\\\":{\\\"endpointLocation\\\":{\\\"availabilityZoneConfigList\\\":[{\\\"availabilityZoneId\\\":null,\\\"availabilityZoneName\\\":\\\"us-east-1a\\\",\\\"allowedIPV4CidrList\\\":[\\\"10.0.0.0/28\\\"]}]}},\\\"allowedIPV4CidrList\\\":[]}}}}\"
To use the centralized deployment model, you must set PolicyOption to CENTRALIZED
.
Example: NETWORK_FIREWALL
- Distributed deployment model with automatic Availability Zone configuration
\"{\\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\",\\\"priority\\\":1}],\\\"networkFirewallStatelessDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"customActionName\\\"],\\\"networkFirewallStatelessFragmentDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"customActionName\\\"],\\\"networkFirewallStatelessCustomActions\\\":[{\\\"actionName\\\":\\\"customActionName\\\",\\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"metricdimensionvalue\\\"}]}}}],\\\"networkFirewallStatefulRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\"}],\\\"networkFirewallOrchestrationConfig\\\":{\\\"singleFirewallEndpointPerVPC\\\":false,\\\"allowedIPV4CidrList\\\":[\\\"10.0.0.0/28\\\",\\\"192.168.0.0/28\\\"],\\\"routeManagementAction\\\":\\\"OFF\\\"},\\\"networkFirewallLoggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\":\\\"ALERT\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}},{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\":\\\"FLOW\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}}],\\\"overrideExistingConfig\\\":true}}\"
With automatic Availability Zone configuration, Firewall Manager chooses which Availability Zones to create the endpoints in. To use the distributed deployment model, you must set PolicyOption to NULL
.
Example: NETWORK_FIREWALL
- Distributed deployment model with automatic Availability Zone configuration and route management
\"{\\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\",\\\"priority\\\":1}],\\\"networkFirewallStatelessDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"customActionName\\\"],\\\"networkFirewallStatelessFragmentDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"customActionName\\\"],\\\"networkFirewallStatelessCustomActions\\\":[{\\\"actionName\\\":\\\"customActionName\\\",\\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"metricdimensionvalue\\\"}]}}}],\\\"networkFirewallStatefulRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\"}],\\\"networkFirewallOrchestrationConfig\\\":{\\\"singleFirewallEndpointPerVPC\\\":false,\\\"allowedIPV4CidrList\\\":[\\\"10.0.0.0/28\\\",\\\"192.168.0.0/28\\\"],\\\"routeManagementAction\\\":\\\"MONITOR\\\",\\\"routeManagementTargetTypes\\\":[\\\"InternetGateway\\\"]},\\\"networkFirewallLoggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\":\\\"ALERT\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}},{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\": \\\"FLOW\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}}],\\\"overrideExistingConfig\\\":true}}\"
To use the distributed deployment model, you must set PolicyOption to NULL
.
Example: NETWORK_FIREWALL
- Distributed deployment model with custom Availability Zone configuration
\"{\\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\",\\\"priority\\\":1}],\\\"networkFirewallStatelessDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"customActionName\\\"],\\\"networkFirewallStatelessFragmentDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"fragmentcustomactionname\\\"],\\\"networkFirewallStatelessCustomActions\\\":[{\\\"actionName\\\":\\\"customActionName\\\", \\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"metricdimensionvalue\\\"}]}}},{\\\"actionName\\\":\\\"fragmentcustomactionname\\\",\\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"fragmentmetricdimensionvalue\\\"}]}}}],\\\"networkFirewallStatefulRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\"}],\\\"networkFirewallOrchestrationConfig\\\":{\\\"firewallCreationConfig\\\":{ \\\"endpointLocation\\\":{\\\"availabilityZoneConfigList\\\":[{\\\"availabilityZoneName\\\":\\\"us-east-1a\\\",\\\"allowedIPV4CidrList\\\":[\\\"10.0.0.0/28\\\"]},{\\\"availabilityZoneName\\\":\\\"us-east-1b\\\",\\\"allowedIPV4CidrList\\\":[ \\\"10.0.0.0/28\\\"]}]} },\\\"singleFirewallEndpointPerVPC\\\":false,\\\"allowedIPV4CidrList\\\":null,\\\"routeManagementAction\\\":\\\"OFF\\\",\\\"networkFirewallLoggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\":\\\"ALERT\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}},{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\":\\\"FLOW\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}}],\\\"overrideExistingConfig\\\":boolean}}\"
With custom Availability Zone configuration, you define which specific Availability Zones to create endpoints in by configuring firewallCreationConfig
. To configure the Availability Zones in firewallCreationConfig
, specify either the availabilityZoneName
or availabilityZoneId
parameter, not both parameters.
To use the distributed deployment model, you must set PolicyOption to NULL
.
Example: NETWORK_FIREWALL
- Distributed deployment model with custom Availability Zone configuration and route management
\"{\\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\",\\\"priority\\\":1}],\\\"networkFirewallStatelessDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"customActionName\\\"],\\\"networkFirewallStatelessFragmentDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"fragmentcustomactionname\\\"],\\\"networkFirewallStatelessCustomActions\\\":[{\\\"actionName\\\":\\\"customActionName\\\",\\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"metricdimensionvalue\\\"}]}}},{\\\"actionName\\\":\\\"fragmentcustomactionname\\\",\\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"fragmentmetricdimensionvalue\\\"}]}}}],\\\"networkFirewallStatefulRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\"}],\\\"networkFirewallOrchestrationConfig\\\":{\\\"firewallCreationConfig\\\":{\\\"endpointLocation\\\":{\\\"availabilityZoneConfigList\\\":[{\\\"availabilityZoneName\\\":\\\"us-east-1a\\\",\\\"allowedIPV4CidrList\\\":[\\\"10.0.0.0/28\\\"]},{\\\"availabilityZoneName\\\":\\\"us-east-1b\\\",\\\"allowedIPV4CidrList\\\":[\\\"10.0.0.0/28\\\"]}]}},\\\"singleFirewallEndpointPerVPC\\\":false,\\\"allowedIPV4CidrList\\\":null,\\\"routeManagementAction\\\":\\\"MONITOR\\\",\\\"routeManagementTargetTypes\\\":[\\\"InternetGateway\\\"],\\\"routeManagementConfig\\\":{\\\"allowCrossAZTrafficIfNoEndpoint\\\":true}},\\\"networkFirewallLoggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\":\\\"ALERT\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}},{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\":\\\"FLOW\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}}],\\\"overrideExistingConfig\\\":boolean}}\"
To use the distributed deployment model, you must set PolicyOption to NULL
.
Example: THIRD_PARTY_FIREWALL
\"{ \"type\":\"THIRD_PARTY_FIREWALL\", \"thirdPartyFirewall\":\"PALO_ALTO_NETWORKS_CLOUD_NGFW\", \"thirdPartyFirewallConfig\":{ \"thirdPartyFirewallPolicyList\":[\"global-1\"] }, \"firewallDeploymentModel\":{ \"distributedFirewallDeploymentModel\":{ \"distributedFirewallOrchestrationConfig\":{ \"firewallCreationConfig\":{ \"endpointLocation\":{ \"availabilityZoneConfigList\":[ { \"availabilityZoneName\":\"${AvailabilityZone}\" } ] } }, \"allowedIPV4CidrList\":[ ] } } } }\"
Specification for SHIELD_ADVANCED
for Amazon CloudFront distributions
\"{\\\"type\\\":\\\"SHIELD_ADVANCED\\\",\\\"automaticResponseConfiguration\\\": {\\\"automaticResponseStatus\\\":\\\"ENABLED|IGNORED|DISABLED\\\", \\\"automaticResponseAction\\\":\\\"BLOCK|COUNT\\\"}, \\\"overrideCustomerWebaclClassic\\\":true|false}\"
For example: \"{\\\"type\\\":\\\"SHIELD_ADVANCED\\\",\\\"automaticResponseConfiguration\\\": {\\\"automaticResponseStatus\\\":\\\"ENABLED\\\", \\\"automaticResponseAction\\\":\\\"COUNT\\\"}}\"
The default value for automaticResponseStatus
is IGNORED
. The value for automaticResponseAction
is only required when automaticResponseStatus
is set to ENABLED
. The default value for overrideCustomerWebaclClassic
is false
.
For other resource types that you can protect with a Shield Advanced policy, this ManagedServiceData
configuration is an empty string.
Example: WAFV2
\"{\\\"type\\\":\\\"WAFV2\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupArn\\\":null,\\\"overrideAction\\\":{\\\"type\\\":\\\"NONE\\\"},\\\"managedRuleGroupIdentifier\\\":{\\\"version\\\":null,\\\"vendorName\\\":\\\"AWS\\\",\\\"managedRuleGroupName\\\":\\\"AWSManagedRulesAmazonIpReputationList\\\"},\\\"ruleGroupType\\\":\\\"ManagedRuleGroup\\\",\\\"excludeRules\\\":[{\\\"name\\\":\\\"NoUserAgent_HEADER\\\"}]}],\\\"postProcessRuleGroups\\\":[],\\\"defaultAction\\\":{\\\"type\\\":\\\"ALLOW\\\"},\\\"overrideCustomerWebACLAssociation\\\":false,\\\"loggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[\\\"arn:aws:firehose:us-west-2:12345678912:deliverystream/aws-waf-logs-fms-admin-destination\\\"],\\\"redactedFields\\\":[{\\\"redactedFieldType\\\":\\\"SingleHeader\\\",\\\"redactedFieldValue\\\":\\\"Cookies\\\"},{\\\"redactedFieldType\\\":\\\"Method\\\"}]}}\"
In the loggingConfiguration
, you can specify one logDestinationConfigs
, you can optionally provide up to 20 redactedFields
, and the RedactedFieldType
must be one of URI
, QUERY_STRING
, HEADER
, or METHOD
.
Example: WAF Classic
\"{\\\"type\\\": \\\"WAF\\\", \\\"ruleGroups\\\": [{\\\"id\\\":\\\"12345678-1bcd-9012-efga-0987654321ab\\\", \\\"overrideAction\\\" : {\\\"type\\\": \\\"COUNT\\\"}}], \\\"defaultAction\\\": {\\\"type\\\": \\\"BLOCK\\\"}}\"
Example: WAFV2
- Firewall Manager support for WAF managed rule group versioning
\"{\\\"type\\\":\\\"WAFV2\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupArn\\\":null,\\\"overrideAction\\\":{\\\"type\\\":\\\"NONE\\\"},\\\"managedRuleGroupIdentifier\\\":{\\\"versionEnabled\\\":true,\\\"version\\\":\\\"Version_2.0\\\",\\\"vendorName\\\":\\\"AWS\\\",\\\"managedRuleGroupName\\\":\\\"AWSManagedRulesCommonRuleSet\\\"},\\\"ruleGroupType\\\":\\\"ManagedRuleGroup\\\",\\\"excludeRules\\\":[{\\\"name\\\":\\\"NoUserAgent_HEADER\\\"}]}],\\\"postProcessRuleGroups\\\":[],\\\"defaultAction\\\":{\\\"type\\\":\\\"ALLOW\\\"},\\\"overrideCustomerWebACLAssociation\\\":false,\\\"loggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[\\\"arn:aws:firehose:us-west-2:12345678912:deliverystream/aws-waf-logs-fms-admin-destination\\\"],\\\"redactedFields\\\":[{\\\"redactedFieldType\\\":\\\"SingleHeader\\\",\\\"redactedFieldValue\\\":\\\"Cookies\\\"},{\\\"redactedFieldType\\\":\\\"Method\\\"}]}}\"
To use a specific version of a WAF managed rule group in your Firewall Manager policy, you must set versionEnabled
to true
, and set version
to the version you'd like to use. If you don't set versionEnabled
to true
, or if you omit versionEnabled
, then Firewall Manager uses the default version of the WAF managed rule group.
Example: SECURITY_GROUPS_COMMON
\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false, \\\"applyToAllEC2InstanceENIs\\\":false,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"
Example: SECURITY_GROUPS_COMMON
- Security group tag distribution
\"\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"securityGroups\\\":[{\\\"id\\\":\\\"sg-000e55995d61a06bd\\\"}],\\\"revertManualSecurityGroupChanges\\\":true,\\\"exclusiveResourceSecurityGroupManagement\\\":false,\\\"applyToAllEC2InstanceENIs\\\":false,\\\"includeSharedVPC\\\":false,\\\"enableTagDistribution\\\":true}\"\"
Firewall Manager automatically distributes tags from the primary group to the security groups created by this policy. To use security group tag distribution, you must also set revertManualSecurityGroupChanges
to true
, otherwise Firewall Manager won't be able to create the policy. When you enable revertManualSecurityGroupChanges
, Firewall Manager identifies and reports when the security groups created by this policy become non-compliant.
Firewall Manager won't distribute system tags added by Amazon Web Services services into the replica security groups. System tags begin with the aws:
prefix.
Example: Shared VPCs. Apply the preceding policy to resources in shared VPCs as well as to those in VPCs that the account owns
\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false, \\\"applyToAllEC2InstanceENIs\\\":false,\\\"includeSharedVPC\\\":true,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"
Example: SECURITY_GROUPS_CONTENT_AUDIT
\"{\\\"type\\\":\\\"SECURITY_GROUPS_CONTENT_AUDIT\\\",\\\"securityGroups\\\":[{\\\"id\\\":\\\"sg-000e55995d61a06bd\\\"}],\\\"securityGroupAction\\\":{\\\"type\\\":\\\"ALLOW\\\"}}\"
The security group action for content audit can be ALLOW
or DENY
. For ALLOW
, all in-scope security group rules must be within the allowed range of the policy's security group rules. For DENY
, all in-scope security group rules must not contain a value or a range that matches a rule value or range in the policy security group.
Example: SECURITY_GROUPS_USAGE_AUDIT
\"{\\\"type\\\":\\\"SECURITY_GROUPS_USAGE_AUDIT\\\",\\\"deleteUnusedSecurityGroups\\\":true,\\\"coalesceRedundantSecurityGroups\\\":true}\"
The status for subscribing to the third-party firewall vendor in the AWS Marketplace.
NO_SUBSCRIPTION
- The Firewall Manager policy administrator isn't subscribed to the third-party firewall service in the AWS Marketplace.
NOT_COMPLETE
- The Firewall Manager policy administrator is in the process of subscribing to the third-party firewall service in the Amazon Web Services Marketplace, but doesn't yet have an active subscription.
COMPLETE
- The Firewall Manager policy administrator has an active subscription to the third-party firewall service in the Amazon Web Services Marketplace.
The status for subscribing to the third-party firewall vendor in the Amazon Web Services Marketplace.
NO_SUBSCRIPTION
- The Firewall Manager policy administrator isn't subscribed to the third-party firewall service in the Amazon Web Services Marketplace.
NOT_COMPLETE
- The Firewall Manager policy administrator is in the process of subscribing to the third-party firewall service in the Amazon Web Services Marketplace, but doesn't yet have an active subscription.
COMPLETE
- The Firewall Manager policy administrator has an active subscription to the third-party firewall service in the Amazon Web Services Marketplace.
The actions to take on packets that don't match any of the stateless rule groups.
", "NetworkFirewallPolicyDescription$StatelessFragmentDefaultActions": "The actions to take on packet fragments that don't match any of the stateless rule groups.
", - "NetworkFirewallPolicyDescription$StatelessCustomActions": "Names of custom actions that are available for use in the stateless default actions settings.
" + "NetworkFirewallPolicyDescription$StatelessCustomActions": "Names of custom actions that are available for use in the stateless default actions settings.
", + "NetworkFirewallPolicyDescription$StatefulDefaultActions": "The default actions to take on a packet that doesn't match any stateful rules. The stateful default action is optional, and is only valid when using the strict rule order.
Valid values of the stateful default action:
aws:drop_strict
aws:drop_established
aws:alert_strict
aws:alert_established
Contains the Network Firewall firewall policy options to configure a centralized deployment model.
", + "base": "Contains the Network Firewall firewall policy options to configure the policy's deployment model and third-party firewall policy settings.
", "refs": { "SecurityServicePolicyData$PolicyOption": "Contains the Network Firewall firewall policy options to configure a centralized deployment model.
" } @@ -971,6 +972,12 @@ "ProtocolsListData$PreviousProtocolsList": "A map of previous version numbers to their corresponding protocol arrays.
" } }, + "PriorityNumber": { + "base": null, + "refs": { + "StatefulRuleGroup$Priority": "An integer setting that indicates the order in which to run the stateful rule groups in a single Network Firewall firewall policy. This setting only applies to firewall policies that specify the STRICT_ORDER
rule order in the stateful engine options settings.
Network Firewall evaluates each stateful rule group against a packet starting with the group that has the lowest priority setting. You must ensure that the priority settings are unique within each policy. For information about
You can change the priority settings of your rule groups at any time. To make it easier to insert rule groups later, number them so there's a wide range in between, for example use 100, 200, and so on.
" + } + }, "ProtectionData": { "base": null, "refs": { @@ -1278,6 +1285,12 @@ "RouteHasOutOfScopeEndpointViolation$InternetGatewayRoutes": "The routes in the route table associated with the Internet Gateway.
" } }, + "RuleOrder": { + "base": null, + "refs": { + "StatefulEngineOptions$RuleOrder": "Indicates how to manage the order of stateful rule evaluation for the policy. DEFAULT_ACTION_ORDER
is the default behavior. Stateful rules are provided to the rule engine as Suricata compatible strings, and Suricata evaluates them based on certain settings. For more information, see Evaluation order for stateful rules in the Network Firewall Developer Guide.
Remediation option for the rule specified in the ViolationTarget
.
The service that the policy is using to protect the resources. This specifies the type of policy that is created, either an WAF policy, a Shield Advanced policy, or a security group policy. For security group policies, Firewall Manager supports one security group for each common policy and for each content audit policy. This is an adjustable limit that you can increase by contacting Amazon Web Services Support.
" } }, + "StatefulEngineOptions": { + "base": "Configuration settings for the handling of the stateful rule groups in a Network Firewall firewall policy.
", + "refs": { + "NetworkFirewallPolicyDescription$StatefulEngineOptions": "Additional options governing how Network Firewall handles stateful rules. The stateful rule groups that you use in your policy must have stateful rule options settings that are compatible with these settings.
" + } + }, "StatefulRuleGroup": { "base": "Network Firewall stateful rule group, used in a NetworkFirewallPolicyDescription.
", "refs": { @@ -1432,7 +1451,7 @@ } }, "ThirdPartyFirewallFirewallPolicy": { - "base": "Configures the firewall policy deployment model for a third-party firewall. The deployment model can either be distributed or centralized.
", + "base": "Configures the third-party firewall's firewall policy.
", "refs": { "ThirdPartyFirewallFirewallPolicies$member": null } @@ -1456,7 +1475,7 @@ } }, "ThirdPartyFirewallPolicy": { - "base": "Configures the policy for the third-party firewall.
", + "base": "Configures the deployment model for the third-party firewall.
", "refs": { "PolicyOption$ThirdPartyFirewallPolicy": "Defines the policy options for a third-party firewall policy.
" } diff --git a/models/apis/glue/2017-03-31/api-2.json b/models/apis/glue/2017-03-31/api-2.json index 0c31f7f089d..38430908526 100644 --- a/models/apis/glue/2017-03-31/api-2.json +++ b/models/apis/glue/2017-03-31/api-2.json @@ -11051,7 +11051,8 @@ "enum":[ "Standard", "G.1X", - "G.2X" + "G.2X", + "G.025X" ] }, "Workflow":{ diff --git a/models/apis/glue/2017-03-31/docs-2.json b/models/apis/glue/2017-03-31/docs-2.json index 615ab285359..e073d3b5738 100644 --- a/models/apis/glue/2017-03-31/docs-2.json +++ b/models/apis/glue/2017-03-31/docs-2.json @@ -18,7 +18,7 @@ "BatchStopJobRun": "Stops one or more job runs for a specified job definition.
", "BatchUpdatePartition": "Updates one or more partitions in a batch operation.
", "CancelMLTaskRun": "Cancels (stops) a task run. Machine learning task runs are asynchronous tasks that Glue runs on your behalf as part of various machine learning workflows. You can cancel a machine learning task run at any time by calling CancelMLTaskRun
with a task run's parent transform's TransformID
and the task run's TaskRunId
.
Cancels the statement..
", + "CancelStatement": "Cancels the statement.
", "CheckSchemaVersionValidity": "Validates the supplied schema. This call has no side effects, it simply validates using the supplied schema using DataFormat
as the format. Since it does not take a schema set name, no compatibility checks are performed.
Registers a blueprint with Glue.
", "CreateClassifier": "Creates a classifier in the user's account. This can be a GrokClassifier
, an XMLClassifier
, a JsonClassifier
, or a CsvClassifier
, depending on which field of the request is present.
Returns a list of registries that you have created, with minimal registry information. Registries in the Deleting
status will not be included in the results. Empty results will be returned if there are no registries available.
Returns a list of schema versions that you have created, with minimal information. Schema versions in Deleted status will not be included in the results. Empty results will be returned if there are no schema versions available.
", "ListSchemas": "Returns a list of schemas with minimal details. Schemas in Deleting status will not be included in the results. Empty results will be returned if there are no schemas available.
When the RegistryId
is not provided, all the schemas across registries will be part of the API response.
Retrieve a session..
", + "ListSessions": "Retrieve a list of sessions.
", "ListStatements": "Lists statements for the session.
", "ListTriggers": "Retrieves the names of all trigger resources in this Amazon Web Services account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags
field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.
Lists names of workflows created in the account.
", @@ -177,7 +177,7 @@ "UpdateCrawlerSchedule": "Updates the schedule of a crawler using a cron
expression.
Updates an existing database definition in a Data Catalog.
", "UpdateDevEndpoint": "Updates a specified development endpoint.
", - "UpdateJob": "Updates an existing job definition.
", + "UpdateJob": "Updates an existing job definition. The previous job definition is completely overwritten by this information.
", "UpdateMLTransform": "Updates an existing machine learning transform. Call this operation to tune the algorithm parameters to achieve better results.
After calling this operation, you can call the StartMLEvaluationTaskRun
operation to assess how well your new parameters achieved your goals (such as improving the quality of your machine learning transform, or making it more cost-effective).
Updates a partition.
", "UpdateRegistry": "Updates an existing registry which is used to hold a collection of schemas. The updated properties relate to the registry, and do not modify any of the schemas within the registry.
", @@ -2315,7 +2315,7 @@ } }, "Edge": { - "base": "An edge represents a directed connection between two components on a workflow graph.
", + "base": "An edge represents a directed connection between two Glue components that are part of the workflow the edge belongs to.
", "refs": { "EdgeList$member": null } @@ -2739,14 +2739,14 @@ "base": null, "refs": { "Action$Arguments": "The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
", - "CreateJobRequest$DefaultArguments": "The default arguments for this job.
You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
", + "CreateJobRequest$DefaultArguments": "The default arguments for this job.
You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.
Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
", "CreateJobRequest$NonOverridableArguments": "Non-overridable arguments for this job, specified as name-value pairs.
", "Job$DefaultArguments": "The default arguments for this job, specified as name-value pairs.
You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
", "Job$NonOverridableArguments": "Non-overridable arguments for this job, specified as name-value pairs.
", "JobRun$Arguments": "The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
", "JobUpdate$DefaultArguments": "The default arguments for this job.
You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
", "JobUpdate$NonOverridableArguments": "Non-overridable arguments for this job, specified as name-value pairs.
", - "StartJobRunRequest$Arguments": "The job arguments specifically for this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
" + "StartJobRunRequest$Arguments": "The job arguments specifically for this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.
Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
" } }, "GenericString": { @@ -3600,14 +3600,14 @@ "CreateDevEndpointResponse$GlueVersion": "Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
", "CreateJobRequest$GlueVersion": "Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
", "CreateMLTransformRequest$GlueVersion": "This value determines which version of Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see Glue Versions in the developer guide.
", - "CreateSessionRequest$GlueVersion": "The Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The GlueVersion must be greater than 2.0.
", + "CreateSessionRequest$GlueVersion": "The Glue version determines the versions of Apache Spark and Python that Glue supports. The GlueVersion must be greater than 2.0.
", "DevEndpoint$GlueVersion": "Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Development endpoints that are created without specifying a Glue version default to Glue 0.9.
You can specify a version of Python support for development endpoints by using the Arguments
parameter in the CreateDevEndpoint
or UpdateDevEndpoint
APIs. If no arguments are provided, the version defaults to Python 2.
This value determines which version of Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see Glue Versions in the developer guide.
", "Job$GlueVersion": "Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
", "JobRun$GlueVersion": "Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
", "JobUpdate$GlueVersion": "Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
", "MLTransform$GlueVersion": "This value determines which version of Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see Glue Versions in the developer guide.
", - "Session$GlueVersion": "The Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The GlueVersion must be greater than 2.0.
", + "Session$GlueVersion": "The Glue version determines the versions of Apache Spark and Python that Glue supports. The GlueVersion must be greater than 2.0.
", "TransformFilterCriteria$GlueVersion": "This value determines which version of Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see Glue Versions in the developer guide.
", "UpdateMLTransformRequest$GlueVersion": "This value determines which version of Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see Glue Versions in the developer guide.
" } @@ -3713,7 +3713,7 @@ } }, "IllegalBlueprintStateException": { - "base": null, + "base": "The blueprint is in an invalid state to perform a requested operation.
", "refs": { } }, @@ -3764,18 +3764,18 @@ "CreateDevEndpointRequest$NumberOfNodes": "The number of Glue Data Processing Units (DPUs) to allocate to this DevEndpoint
.
The Apache Zeppelin port for the remote Apache Spark interpreter.
", "CreateDevEndpointResponse$NumberOfNodes": "The number of Glue Data Processing Units (DPUs) allocated to this DevEndpoint.
", - "CreateJobRequest$AllocatedCapacity": "This parameter is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this Job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
", + "CreateJobRequest$AllocatedCapacity": "This parameter is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this Job. You can allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
", "DevEndpoint$ZeppelinRemoteSparkInterpreterPort": "The Apache Zeppelin port for the remote Apache Spark interpreter.
", "DevEndpoint$NumberOfNodes": "The number of Glue Data Processing Units (DPUs) allocated to this DevEndpoint
.
The Id of the statement.
", - "Job$AllocatedCapacity": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
", + "Job$AllocatedCapacity": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) allocated to runs of this job. You can allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
", "JobBookmarkEntry$Version": "The version of the job.
", "JobBookmarkEntry$Run": "The run ID number.
", "JobBookmarkEntry$Attempt": "The attempt ID number.
", "JobRun$AllocatedCapacity": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
", - "JobUpdate$AllocatedCapacity": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
", + "JobUpdate$AllocatedCapacity": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
", "RunStatementResponse$Id": "Returns the Id of the statement that was run.
", - "StartJobRunRequest$AllocatedCapacity": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
", + "StartJobRunRequest$AllocatedCapacity": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this JobRun. You can allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
", "Statement$Id": "The ID of the statement.
", "StatementOutput$ExecutionCount": "The execution count of the output.
", "WorkflowRunStatistics$TotalActions": "Total number of Actions in the workflow run.
", @@ -3936,7 +3936,7 @@ "JobUpdate": { "base": "Specifies information used to update an existing job definition. The previous job definition is completely overwritten by this information.
", "refs": { - "UpdateJobRequest$JobUpdate": "Specifies the values with which to update the job definition.
" + "UpdateJobRequest$JobUpdate": "Specifies the values with which to update the job definition. Unspecified configuration is removed or reset to default values.
" } }, "Join": { @@ -4448,7 +4448,7 @@ "EntityNotFoundException$Message": "A message describing the problem.
", "GlueEncryptionException$Message": "The message describing the problem.
", "IdempotentParameterMismatchException$Message": "A message describing the problem.
", - "IllegalBlueprintStateException$Message": null, + "IllegalBlueprintStateException$Message": "A message describing the problem.
", "IllegalSessionStateException$Message": "A message describing the problem.
", "IllegalWorkflowStateException$Message": "A message describing the problem.
", "InternalServiceException$Message": "A message describing the problem.
", @@ -4772,7 +4772,7 @@ "SerDeInfo$SerializationLibrary": "Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
.
The ID of the session.
", "Session$SecurityConfiguration": "The name of the SecurityConfiguration structure to be used with the session.
", - "SessionCommand$Name": "Specifies the name of the SessionCommand.Can be 'glueetl' or 'gluestreaming'.
", + "SessionCommand$Name": "Specifies the name of the SessionCommand. Can be 'glueetl' or 'gluestreaming'.
", "SessionIdList$member": null, "StartCrawlerRequest$Name": "Name of the crawler to start.
", "StartCrawlerScheduleRequest$CrawlerName": "Name of the crawler to schedule.
", @@ -5060,18 +5060,18 @@ "NullableDouble": { "base": null, "refs": { - "CreateJobRequest$MaxCapacity": "For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
Do not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
Do not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
MaxCapacity
and NumberOfWorkers
must both be at least 1.
When the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
When the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
The number of AWS Glue data processing units (DPUs) that can be allocated when the job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB memory.
", + "CreateSessionRequest$MaxCapacity": "The number of Glue data processing units (DPUs) that can be allocated when the job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB memory.
", "DynamoDBTarget$scanRate": "The percentage of the configured read capacity units to use by the Glue crawler. Read capacity units is a term defined by DynamoDB, and is a numeric value that acts as rate limiter for the number of reads that can be performed on that table per second.
The valid values are null or a value between 0.1 to 1.5. A null value is used when user does not provide a value, and defaults to 0.5 of the configured Read Capacity Unit (for provisioned tables), or 0.25 of the max configured Read Capacity Unit (for tables using on-demand mode).
", "GetMLTransformResponse$MaxCapacity": "The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
When the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
Do not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
Do not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
Do not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
Do not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
This field populates only when an Auto Scaling job run completes, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X
and 2 for G.2X
workers). This value may be different than the executionEngineRuntime
* MaxCapacity
as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity
. Therefore, it is possible that the value of DPUSeconds
is less than executionEngineRuntime
* MaxCapacity
.
For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
Do not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
Do not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
nor WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
MaxCapacity
and NumberOfWorkers
must both be at least 1.
When the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
The number of AWS Glue data processing units (DPUs) that can be allocated when the job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB memory.
", - "StartJobRunRequest$MaxCapacity": "The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
Do not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell job, or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
The number of Glue data processing units (DPUs) that can be allocated when the job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB memory.
", + "StartJobRunRequest$MaxCapacity": "The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
Do not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are running a Python shell job, or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
When the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
The number of workers of a defined workerType
that are allocated to the development endpoint.
The maximum number of workers you can define is 299 for G.1X
, and 149 for G.2X
.
The number of workers of a defined workerType
that are allocated to the development endpoint.
The number of workers of a defined workerType
that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X
, and 149 for G.2X
.
The number of workers of a defined workerType
that are allocated when a job runs.
The number of workers of a defined workerType
that are allocated when this task runs.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
The maximum number of times to retry a task for this transform after a task run fails.
", - "CreateSessionRequest$NumberOfWorkers": "The number of workers to use for the session.
", + "CreateSessionRequest$NumberOfWorkers": "The number of workers of a defined WorkerType
to use for the session.
You can use this parameter to prevent unwanted multiple updates to data, to control costs, or in some cases, to prevent exceeding the maximum number of concurrent runs of any of the component jobs. If you leave this parameter blank, there is no limit to the number of concurrent workflow runs.
", "DevEndpoint$NumberOfWorkers": "The number of workers of a defined workerType
that are allocated to the development endpoint.
The maximum number of workers you can define is 299 for G.1X
, and 149 for G.2X
.
The number of workers of a defined workerType
that are allocated when this task runs.
The maximum number of times to retry a task for this transform after a task run fails.
", - "Job$NumberOfWorkers": "The number of workers of a defined workerType
that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X
, and 149 for G.2X
.
The number of workers of a defined workerType
that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X
, and 149 for G.2X
.
The number of workers of a defined workerType
that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X
, and 149 for G.2X
.
The number of workers of a defined workerType
that are allocated when a job runs.
The number of workers of a defined workerType
that are allocated when a job runs.
The number of workers of a defined workerType
that are allocated when a job runs.
The number of workers of a defined workerType
that are allocated when a task of the transform runs.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
The maximum number of times to retry after an MLTaskRun
of the machine learning transform fails.
Sets the number of files in each leaf folder to be crawled when crawling sample files in a dataset. If not set, all the files are crawled. A valid value is an integer between 1 and 249.
", - "StartJobRunRequest$NumberOfWorkers": "The number of workers of a defined workerType
that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X
, and 149 for G.2X
.
The number of workers of a defined workerType
that are allocated when a job runs.
Number of events in the batch.
", "StartingEventBatchCondition$BatchWindow": "Duration of the batch window in seconds.
", "UpdateMLTransformRequest$NumberOfWorkers": "The number of workers of a defined workerType
that are allocated when this task runs.
The token for the next set of results, or null if there are no more results.
", "ListSessionsResponse$NextToken": "The token for the next set of results, or null if there are no more results.
", - "ListStatementsRequest$NextToken": null, - "ListStatementsResponse$NextToken": null + "ListStatementsRequest$NextToken": "A continuation token, if this is a continuation call.
", + "ListStatementsResponse$NextToken": "A continuation token, if not all statements have yet been returned.
" } }, "Order": { @@ -6287,7 +6287,7 @@ "SessionIdList": { "base": null, "refs": { - "ListSessionsResponse$Ids": "Returns the Id of the session.
" + "ListSessionsResponse$Ids": "Returns the ID of the session.
" } }, "SessionList": { @@ -7518,16 +7518,16 @@ "refs": { "CreateDevEndpointRequest$WorkerType": "The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X
WorkerType
configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
The type of predefined worker that is allocated to the development endpoint. May be a value of Standard, G.1X, or G.2X.
", - "CreateJobRequest$WorkerType": "The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
MaxCapacity
and NumberOfWorkers
must both be at least 1.
The Worker Type. Can be one of G.1X, G.2X, Standard
", + "CreateSessionRequest$WorkerType": "The type of predefined worker that is allocated to use for the session. Accepts a value of Standard, G.1X, G.2X, or G.025X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X
WorkerType
configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
MaxCapacity
and NumberOfWorkers
must both be at least 1.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
Disassociates a member account from an Amazon Inspector delegated administrator.
", "Enable": "Enables Amazon Inspector scans for one or more Amazon Web Services accounts.
", "EnableDelegatedAdminAccount": "Enables the Amazon Inspector delegated administrator for your Organizations organization.
", + "GetConfiguration": "Retrieves setting configurations for Inspector scans.
", "GetDelegatedAdminAccount": "Retrieves information about the Amazon Inspector delegated administrator for your organization.
", "GetFindingsReportStatus": "Gets the status of a findings report.
", "GetMember": "Gets member information for your organization.
", @@ -30,6 +31,7 @@ "ListUsageTotals": "Lists the Amazon Inspector usage totals over the last 30 days.
", "TagResource": "Adds tags to a resource.
", "UntagResource": "Removes tags from a resource.
", + "UpdateConfiguration": "Updates setting configurations for your Amazon Inspector account. When you use this API as an Amazon Inspector delegated administrator this updates the setting for all accounts you manage. Member accounts in an organization cannot update this setting.
", "UpdateFilter": "Specifies the action that is to be applied to the findings that match the filter.
", "UpdateOrganizationConfiguration": "Updates the configurations for your Amazon Inspector organization.
" }, @@ -386,7 +388,7 @@ "CoverageFilterCriteria$ecrImageTags": "The Amazon ECR image tags to filter on.
", "CoverageFilterCriteria$ecrRepositoryName": "The Amazon ECR repository name to filter on.
", "CoverageFilterCriteria$resourceId": "An array of Amazon Web Services resource IDs to return coverage statistics for.
", - "CoverageFilterCriteria$resourceType": "An array of Amazon Web Services resource types to return coverage statistics for.
", + "CoverageFilterCriteria$resourceType": "An array of Amazon Web Services resource types to return coverage statistics for. The values can be AWS_EC2_INSTANCE
or AWS_ECR_REPOSITORY
.
The scan status code to filter on.
", "CoverageFilterCriteria$scanStatusReason": "The scan status reason to filter on.
", "CoverageFilterCriteria$scanType": "An array of Amazon Inspector scan types to return coverage statistics for.
" @@ -486,6 +488,7 @@ "refs": { "AwsEc2InstanceDetails$launchedAt": "The date and time the Amazon EC2 instance was launched at.
", "AwsEcrContainerImageDetails$pushedAt": "The date and time the Amazon ECR container image was pushed.
", + "EcrRescanDurationState$updatedAt": "A timestamp representing when the last time the ECR scan duration setting was changed.
", "Filter$createdAt": "The date and time this filter was created at.
", "Filter$updatedAt": "The date and time the filter was last updated at.
", "Finding$firstObservedAt": "The date and time that the finding was first observed.
", @@ -623,6 +626,18 @@ "Ec2Metadata$platform": "The platform of the instance.
" } }, + "EcrConfiguration": { + "base": "Details about the ECR automated re-scan duration setting for your environment
", + "refs": { + "UpdateConfigurationRequest$ecrConfiguration": "Specifies how the ECR automated re-scan will be updated for your environment.
" + } + }, + "EcrConfigurationState": { + "base": "Details about the state of the ECR scans for your environment.
", + "refs": { + "GetConfigurationResponse$ecrConfiguration": "Specifies how the ECR automated re-scan duration is currently configured for your environment.
" + } + }, "EcrContainerImageMetadata": { "base": "Information on the Amazon ECR image metadata associated with a finding.
", "refs": { @@ -635,6 +650,25 @@ "ResourceScanMetadata$ecrRepository": "An object that contains details about the repository an Amazon ECR image resides in.
" } }, + "EcrRescanDuration": { + "base": null, + "refs": { + "EcrConfiguration$rescanDuration": "The ECR automated re-scan duration defines how long an ECR image will be actively scanned by Amazon Inspector. When the number of days since an image was last pushed exceeds the automated re-scan duration the monitoring state of that image becomes inactive
and all associated findings are scheduled for closure.
The ECR automated re-scan duration defines how long an ECR image will be actively scanned by Amazon Inspector. When the number of days since an image was last pushed exceeds the automated re-scan duration the monitoring state of that image becomes inactive
and all associated findings are scheduled for closure.
Details about the state of any changes to the ECR automated re-scan duration setting.
", + "refs": { + "EcrConfigurationState$rescanDurationState": "An object that contains details about the state of the ECR automated re-scan setting.
" + } + }, + "EcrRescanDurationStatus": { + "base": null, + "refs": { + "EcrRescanDurationState$status": "The status of changes to the ECR automated re-scan duration.
" + } + }, "EcrScanFrequency": { "base": null, "refs": { @@ -775,7 +809,9 @@ "FilterReason": { "base": null, "refs": { - "Filter$reason": "The reason for the filter.
" + "CreateFilterRequest$reason": "The reason for creating the filter.
", + "Filter$reason": "The reason for the filter.
", + "UpdateFilterRequest$reason": "The reason the filter was updated.
" } }, "Finding": { @@ -892,6 +928,16 @@ "FreeTrialInfo$type": "The type of scan covered by the Amazon Inspector free trail.
" } }, + "GetConfigurationRequest": { + "base": null, + "refs": { + } + }, + "GetConfigurationResponse": { + "base": null, + "refs": { + } + }, "GetDelegatedAdminAccountRequest": { "base": null, "refs": { @@ -1890,6 +1936,16 @@ "refs": { } }, + "UpdateConfigurationRequest": { + "base": null, + "refs": { + } + }, + "UpdateConfigurationResponse": { + "base": null, + "refs": { + } + }, "UpdateFilterRequest": { "base": null, "refs": { diff --git a/models/apis/kendra/2019-02-03/api-2.json b/models/apis/kendra/2019-02-03/api-2.json index de8f185c222..309aa33276f 100644 --- a/models/apis/kendra/2019-02-03/api-2.json +++ b/models/apis/kendra/2019-02-03/api-2.json @@ -116,6 +116,24 @@ {"shape":"InternalServerException"} ] }, + "CreateAccessControlConfiguration":{ + "name":"CreateAccessControlConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAccessControlConfigurationRequest"}, + "output":{"shape":"CreateAccessControlConfigurationResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ] + }, "CreateDataSource":{ "name":"CreateDataSource", "http":{ @@ -225,6 +243,23 @@ {"shape":"InternalServerException"} ] }, + "DeleteAccessControlConfiguration":{ + "name":"DeleteAccessControlConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAccessControlConfigurationRequest"}, + "output":{"shape":"DeleteAccessControlConfigurationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, "DeleteDataSource":{ "name":"DeleteDataSource", "http":{ @@ -338,6 +373,22 @@ 
{"shape":"InternalServerException"} ] }, + "DescribeAccessControlConfiguration":{ + "name":"DescribeAccessControlConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccessControlConfigurationRequest"}, + "output":{"shape":"DescribeAccessControlConfigurationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, "DescribeDataSource":{ "name":"DescribeDataSource", "http":{ @@ -531,6 +582,22 @@ {"shape":"InternalServerException"} ] }, + "ListAccessControlConfigurations":{ + "name":"ListAccessControlConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAccessControlConfigurationsRequest"}, + "output":{"shape":"ListAccessControlConfigurationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, "ListDataSourceSyncJobs":{ "name":"ListDataSourceSyncJobs", "http":{ @@ -824,6 +891,24 @@ {"shape":"InternalServerException"} ] }, + "UpdateAccessControlConfiguration":{ + "name":"UpdateAccessControlConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAccessControlConfigurationRequest"}, + "output":{"shape":"UpdateAccessControlConfigurationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ] + }, "UpdateDataSource":{ "name":"UpdateDataSource", "http":{ @@ -923,6 +1008,29 @@ } }, "shapes":{ + "AccessControlConfigurationId":{ + "type":"string", + "max":36, + "min":1, + "pattern":"[a-zA-Z0-9-]+" 
+ }, + "AccessControlConfigurationName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[\\S\\s]*" + }, + "AccessControlConfigurationSummary":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{"shape":"AccessControlConfigurationId"} + } + }, + "AccessControlConfigurationSummaryList":{ + "type":"list", + "member":{"shape":"AccessControlConfigurationSummary"} + }, "AccessControlListConfiguration":{ "type":"structure", "members":{ @@ -1542,6 +1650,31 @@ "max":10, "min":0 }, + "CreateAccessControlConfigurationRequest":{ + "type":"structure", + "required":[ + "IndexId", + "Name" + ], + "members":{ + "IndexId":{"shape":"IndexId"}, + "Name":{"shape":"AccessControlConfigurationName"}, + "Description":{"shape":"Description"}, + "AccessControlList":{"shape":"PrincipalList"}, + "HierarchicalAccessControlList":{"shape":"HierarchicalPrincipalList"}, + "ClientToken":{ + "shape":"ClientTokenName", + "idempotencyToken":true + } + } + }, + "CreateAccessControlConfigurationResponse":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{"shape":"AccessControlConfigurationId"} + } + }, "CreateDataSourceRequest":{ "type":"structure", "required":[ @@ -1970,6 +2103,22 @@ "max":65535, "min":1 }, + "DeleteAccessControlConfigurationRequest":{ + "type":"structure", + "required":[ + "IndexId", + "Id" + ], + "members":{ + "IndexId":{"shape":"IndexId"}, + "Id":{"shape":"AccessControlConfigurationId"} + } + }, + "DeleteAccessControlConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteDataSourceRequest":{ "type":"structure", "required":[ @@ -2050,6 +2199,28 @@ "IndexId":{"shape":"IndexId"} } }, + "DescribeAccessControlConfigurationRequest":{ + "type":"structure", + "required":[ + "IndexId", + "Id" + ], + "members":{ + "IndexId":{"shape":"IndexId"}, + "Id":{"shape":"AccessControlConfigurationId"} + } + }, + "DescribeAccessControlConfigurationResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + 
"Name":{"shape":"AccessControlConfigurationName"}, + "Description":{"shape":"Description"}, + "ErrorMessage":{"shape":"ErrorMessage"}, + "AccessControlList":{"shape":"PrincipalList"}, + "HierarchicalAccessControlList":{"shape":"HierarchicalPrincipalList"} + } + }, "DescribeDataSourceRequest":{ "type":"structure", "required":[ @@ -2323,7 +2494,8 @@ "Attributes":{"shape":"DocumentAttributeList"}, "AccessControlList":{"shape":"PrincipalList"}, "HierarchicalAccessControlList":{"shape":"HierarchicalPrincipalList"}, - "ContentType":{"shape":"ContentType"} + "ContentType":{"shape":"ContentType"}, + "AccessControlConfigurationId":{"shape":"AccessControlConfigurationId"} } }, "DocumentAttribute":{ @@ -3276,6 +3448,23 @@ "min":2, "pattern":"[a-zA-Z-]*" }, + "ListAccessControlConfigurationsRequest":{ + "type":"structure", + "required":["IndexId"], + "members":{ + "IndexId":{"shape":"IndexId"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"MaxResultsIntegerForListAccessControlConfigurationsRequest"} + } + }, + "ListAccessControlConfigurationsResponse":{ + "type":"structure", + "required":["AccessControlConfigurations"], + "members":{ + "NextToken":{"shape":"String"}, + "AccessControlConfigurations":{"shape":"AccessControlConfigurationSummaryList"} + } + }, "ListDataSourceSyncJobsRequest":{ "type":"structure", "required":[ @@ -3485,6 +3674,11 @@ "max":1000, "min":1 }, + "MaxResultsIntegerForListAccessControlConfigurationsRequest":{ + "type":"integer", + "max":100, + "min":1 + }, "MaxResultsIntegerForListDataSourceSyncJobsRequest":{ "type":"integer", "max":10, @@ -4823,6 +5017,26 @@ "members":{ } }, + "UpdateAccessControlConfigurationRequest":{ + "type":"structure", + "required":[ + "IndexId", + "Id" + ], + "members":{ + "IndexId":{"shape":"IndexId"}, + "Id":{"shape":"AccessControlConfigurationId"}, + "Name":{"shape":"AccessControlConfigurationName"}, + "Description":{"shape":"Description"}, + "AccessControlList":{"shape":"PrincipalList"}, + 
"HierarchicalAccessControlList":{"shape":"HierarchicalPrincipalList"} + } + }, + "UpdateAccessControlConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateDataSourceRequest":{ "type":"structure", "required":[ diff --git a/models/apis/kendra/2019-02-03/docs-2.json b/models/apis/kendra/2019-02-03/docs-2.json index 17ba42d2181..3b93d2fb69e 100644 --- a/models/apis/kendra/2019-02-03/docs-2.json +++ b/models/apis/kendra/2019-02-03/docs-2.json @@ -8,12 +8,14 @@ "BatchGetDocumentStatus": "Returns the indexing status for one or more documents submitted with the BatchPutDocument API.
When you use the BatchPutDocument
API, documents are indexed asynchronously. You can use the BatchGetDocumentStatus
API to get the current status of a list of documents so that you can determine if they have been successfully indexed.
You can also use the BatchGetDocumentStatus
API to check the status of the BatchDeleteDocument API. When a document is deleted from the index, Amazon Kendra returns NOT_FOUND
as the status.
Adds one or more documents to an index.
The BatchPutDocument
API enables you to ingest inline documents or a set of documents stored in an Amazon S3 bucket. Use this API to ingest your text and unstructured text into an index, add custom attributes to the documents, and to attach an access control list to the documents added to the index.
The documents are indexed asynchronously. You can see the progress of the batch using Amazon Web Services CloudWatch. Any error messages related to processing the batch are sent to your Amazon Web Services CloudWatch log.
For an example of ingesting inline documents using Python and Java SDKs, see Adding files directly to an index.
", "ClearQuerySuggestions": "Clears existing query suggestions from an index.
This deletes existing suggestions only, not the queries in the query log. After you clear suggestions, Amazon Kendra learns new suggestions based on new queries added to the query log from the time you cleared suggestions. If you do not see any new suggestions, then please allow Amazon Kendra to collect enough queries to learn new suggestions.
ClearQuerySuggestions
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Creates an access configuration for your documents. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
You can use this to re-configure your existing document level access control without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. One of these users leaves the company or switches to a team that should be blocked from access to top-secret documents. Your documents in your index still give this user access to top-secret documents due to the user having access at the time your documents were indexed. You can create a specific access control configuration for this user with deny access. You can later update the access control configuration to allow access in the case the user returns to the company and re-joins the 'top-secret' team. You can re-configure access control for your documents as circumstances change.
To apply your access control configuration to certain documents, you call the BatchPutDocument API with the AccessControlConfigurationId
included in the Document object. If you use an S3 bucket as a data source, you update the .metadata.json
with the AccessControlConfigurationId
and synchronize your data source. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument
API.
Creates a data source that you want to use with an Amazon Kendra index.
You specify a name, data source connector type and description for your data source. You also specify configuration information for the data source connector.
CreateDataSource
is a synchronous operation. The operation returns 200 if the data source was successfully created. Otherwise, an exception is raised.
Amazon S3 and custom data sources are the only supported data sources in the Amazon Web Services GovCloud (US-West) region.
For an example of creating an index and data source using the Python SDK, see Getting started with Python SDK. For an example of creating an index and data source using the Java SDK, see Getting started with Java SDK.
", "CreateExperience": "Creates an Amazon Kendra experience such as a search application. For more information on creating a search application experience, including using the Python and Java SDKs, see Building a search experience with no code.
", - "CreateFaq": "Creates an new set of frequently asked question (FAQ) questions and answers.
Adding FAQs to an index is an asynchronous operation.
For an example of adding an FAQ to an index using Python and Java SDKs, see Using you FAQ file.
", - "CreateIndex": "Creates a new Amazon Kendra index. Index creation is an asynchronous API. To determine if index creation has completed, check the Status
field returned from a call to DescribeIndex
. The Status
field is set to ACTIVE
when the index is ready to use.
Once the index is active you can index your documents using the BatchPutDocument
API or using one of the supported data sources.
For an example of creating an index and data source using the Python SDK, see Getting started with Python SDK. For an example of creating an index and data source using the Java SDK, see Getting started with Java SDK.
", + "CreateFaq": "Creates an new set of frequently asked question (FAQ) questions and answers.
Adding FAQs to an index is an asynchronous operation.
For an example of adding an FAQ to an index using Python and Java SDKs, see Using your FAQ file.
", + "CreateIndex": "Creates an Amazon Kendra index. Index creation is an asynchronous API. To determine if index creation has completed, check the Status
field returned from a call to DescribeIndex
. The Status
field is set to ACTIVE
when the index is ready to use.
Once the index is active you can index your documents using the BatchPutDocument
API or using one of the supported data sources.
For an example of creating an index and data source using the Python SDK, see Getting started with Python SDK. For an example of creating an index and data source using the Java SDK, see Getting started with Java SDK.
", "CreateQuerySuggestionsBlockList": "Creates a block list to exlcude certain queries from suggestions.
Any query that contains words or phrases specified in the block list is blocked or filtered out from being shown as a suggestion.
You need to provide the file location of your block list text file in your S3 bucket. In your text file, enter each block word or phrase on a separate line.
For information on the current quota limits for block lists, see Quotas for Amazon Kendra.
CreateQuerySuggestionsBlockList
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
For an example of creating a block list for query suggestions using the Python SDK, see Query suggestions block list.
", "CreateThesaurus": "Creates a thesaurus for an index. The thesaurus contains a list of synonyms in Solr format.
For an example of adding a thesaurus file to an index, see Adding custom synonyms to an index.
", + "DeleteAccessControlConfiguration": "Deletes an access control configuration that you created for your documents in an index. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
", "DeleteDataSource": "Deletes an Amazon Kendra data source. An exception is not thrown if the data source is already being deleted. While the data source is being deleted, the Status
field returned by a call to the DescribeDataSource
API is set to DELETING
. For more information, see Deleting Data Sources.
Deletes your Amazon Kendra experience such as a search application. For more information on creating a search application experience, see Building a search experience with no code.
", "DeleteFaq": "Removes an FAQ from an index.
", @@ -21,18 +23,20 @@ "DeletePrincipalMapping": "Deletes a group so that all users and sub groups that belong to the group can no longer access documents only available to that group.
For example, after deleting the group \"Summer Interns\", all interns who belonged to that group no longer see intern-only documents in their search results.
If you want to delete or replace users or sub groups of a group, you need to use the PutPrincipalMapping
operation. For example, if a user in the group \"Engineering\" leaves the engineering team and another user takes their place, you provide an updated list of users or sub groups that belong to the \"Engineering\" group when calling PutPrincipalMapping
. You can update your internal list of users or sub groups and input this list when calling PutPrincipalMapping
.
DeletePrincipalMapping
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Deletes a block list used for query suggestions for an index.
A deleted block list might not take effect right away. Amazon Kendra needs to refresh the entire suggestions list to add back the queries that were previously blocked.
DeleteQuerySuggestionsBlockList
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Deletes an existing Amazon Kendra thesaurus.
", + "DescribeAccessControlConfiguration": "Gets information about an access control configuration that you created for your documents in an index. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
", "DescribeDataSource": "Gets information about an Amazon Kendra data source.
", "DescribeExperience": "Gets information about your Amazon Kendra experience such as a search application. For more information on creating a search application experience, see Building a search experience with no code.
", "DescribeFaq": "Gets information about an FAQ list.
", - "DescribeIndex": "Describes an existing Amazon Kendra index.
", + "DescribeIndex": "Gets information about an existing Amazon Kendra index.
", "DescribePrincipalMapping": "Describes the processing of PUT
and DELETE
actions for mapping users to their groups. This includes information on the status of actions currently processing or yet to be processed, when actions were last updated, when actions were received by Amazon Kendra, the latest action that should process and apply after other actions, and useful error messages if an action could not be processed.
DescribePrincipalMapping
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Describes a block list used for query suggestions for an index.
This is used to check the current settings that are applied to a block list.
DescribeQuerySuggestionsBlockList
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Describes the settings of query suggestions for an index.
This is used to check the current settings applied to query suggestions.
DescribeQuerySuggestionsConfig
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Describes an existing Amazon Kendra thesaurus.
", + "DescribeQuerySuggestionsBlockList": "Gets information about a block list used for query suggestions for an index.
This is used to check the current settings that are applied to a block list.
DescribeQuerySuggestionsBlockList
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Gets information on the settings of query suggestions for an index.
This is used to check the current settings applied to query suggestions.
DescribeQuerySuggestionsConfig
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Gets information about an existing Amazon Kendra thesaurus.
", "DisassociateEntitiesFromExperience": "Prevents users or groups in your Amazon Web Services SSO identity source from accessing your Amazon Kendra experience. You can create an Amazon Kendra experience such as a search application. For more information on creating a search application experience, see Building a search experience with no code.
", "DisassociatePersonasFromEntities": "Removes the specific permissions of users or groups in your Amazon Web Services SSO identity source with access to your Amazon Kendra experience. You can create an Amazon Kendra experience such as a search application. For more information on creating a search application experience, see Building a search experience with no code.
", "GetQuerySuggestions": "Fetches the queries that are suggested to your users.
GetQuerySuggestions
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Retrieves search metrics data. The data provides a snapshot of how your users interact with your search application and how effective the application is.
", + "ListAccessControlConfigurations": "Lists one or more access control configurations for an index. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
", "ListDataSourceSyncJobs": "Gets statistics about synchronizing Amazon Kendra with a data source.
", "ListDataSources": "Lists the data sources that you have created.
", "ListEntityPersonas": "Lists specific permissions of users and groups with access to your Amazon Kendra experience.
", @@ -43,22 +47,54 @@ "ListIndices": "Lists the Amazon Kendra indexes that you created.
", "ListQuerySuggestionsBlockLists": "Lists the block lists used for query suggestions for an index.
For information on the current quota limits for block lists, see Quotas for Amazon Kendra.
ListQuerySuggestionsBlockLists
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Gets a list of tags associated with a specified resource. Indexes, FAQs, and data sources can have tags associated with them.
", - "ListThesauri": "Lists the Amazon Kendra thesauri associated with an index.
", - "PutPrincipalMapping": "Maps users to their groups so that you only need to provide the user ID when you issue the query.
You can also map sub groups to groups. For example, the group \"Company Intellectual Property Teams\" includes sub groups \"Research\" and \"Engineering\". These sub groups include their own list of users or people who work in these teams. Only users who work in research and engineering, and therefore belong in the intellectual property group, can see top-secret company documents in their search results.
You map users to their groups when you want to filter search results for different users based on their group’s access to documents. For more information on filtering search results for different users, see Filtering on user context.
If more than five PUT
actions for a group are currently processing, a validation exception is thrown.
PutPrincipalMapping
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Lists the thesauri for an index.
", + "PutPrincipalMapping": "Maps users to their groups so that you only need to provide the user ID when you issue the query.
You can also map sub groups to groups. For example, the group \"Company Intellectual Property Teams\" includes sub groups \"Research\" and \"Engineering\". These sub groups include their own list of users or people who work in these teams. Only users who work in research and engineering, and therefore belong in the intellectual property group, can see top-secret company documents in their search results.
This is useful for user context filtering, where search results are filtered based on the user or their group access to documents. For more information, see Filtering on user context.
If more than five PUT
actions for a group are currently processing, a validation exception is thrown.
PutPrincipalMapping
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Searches an active index. Use this API to search your documents using query. The Query
API enables to do faceted search and to filter results based on document attributes.
It also enables you to provide user context that Amazon Kendra uses to enforce document access control in the search results.
Amazon Kendra searches your index for text content and question and answer (FAQ) content. By default the response contains three types of results.
Relevant passages
Matching FAQs
Relevant documents
You can specify that the query return only one type of result using the QueryResultTypeConfig
parameter.
Each query returns the 100 most relevant results.
", "StartDataSourceSyncJob": "Starts a synchronization job for a data source. If a synchronization job is already in progress, Amazon Kendra returns a ResourceInUseException
exception.
Stops a synchronization job that is currently running. You can't stop a scheduled synchronization job.
", "SubmitFeedback": "Enables you to provide feedback to Amazon Kendra to improve the performance of your index.
SubmitFeedback
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Adds the specified tag to the specified index, FAQ, or data source resource. If the tag already exists, the existing value is replaced with the new value.
", "UntagResource": "Removes a tag from an index, FAQ, or a data source.
", + "UpdateAccessControlConfiguration": "Updates an access control configuration for your documents in an index. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
You can update an access control configuration you created without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. You created an 'allow' access control configuration for one user who recently joined the 'top-secret' team, switching from a team with 'deny' access to top-secret documents. However, the user suddenly returns to their previous team and should no longer have access to top secret documents. You can update the access control configuration to re-configure access control for your documents as circumstances change.
You call the BatchPutDocument API to apply the updated access control configuration, with the AccessControlConfigurationId
included in the Document object. If you use an S3 bucket as a data source, you synchronize your data source to apply the AccessControlConfigurationId
in the .metadata.json
file. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument
API.
Updates an existing Amazon Kendra data source.
", "UpdateExperience": "Updates your Amazon Kendra experience such as a search application. For more information on creating a search application experience, see Building a search experience with no code.
", "UpdateIndex": "Updates an existing Amazon Kendra index.
", "UpdateQuerySuggestionsBlockList": "Updates a block list used for query suggestions for an index.
Updates to a block list might not take effect right away. Amazon Kendra needs to refresh the entire suggestions list to apply any updates to the block list. Other changes not related to the block list apply immediately.
If a block list is updating, then you need to wait for the first update to finish before submitting another update.
Amazon Kendra supports partial updates, so you only need to provide the fields you want to update.
UpdateQuerySuggestionsBlockList
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Updates the settings of query suggestions for an index.
Amazon Kendra supports partial updates, so you only need to provide the fields you want to update.
If an update is currently processing (i.e. 'happening'), you need to wait for the update to finish before making another update.
Updates to query suggestions settings might not take effect right away. The time for your updated settings to take effect depends on the updates made and the number of search queries in your index.
You can still enable/disable query suggestions at any time.
UpdateQuerySuggestionsConfig
is currently not supported in the Amazon Web Services GovCloud (US-West) region.
Updates a thesaurus file associated with an index.
" + "UpdateThesaurus": "Updates a thesaurus for an index.
" }, "shapes": { + "AccessControlConfigurationId": { + "base": null, + "refs": { + "AccessControlConfigurationSummary$Id": "The identifier of the access control configuration.
", + "CreateAccessControlConfigurationResponse$Id": "The identifier of the access control configuration for your documents in an index.
", + "DeleteAccessControlConfigurationRequest$Id": "The identifier of the access control configuration you want to delete.
", + "DescribeAccessControlConfigurationRequest$Id": "The identifier of the access control configuration you want to get information on.
", + "Document$AccessControlConfigurationId": "The identifier of the access control configuration that you want to apply to the document.
", + "UpdateAccessControlConfigurationRequest$Id": "The identifier of the access control configuration you want to update.
" + } + }, + "AccessControlConfigurationName": { + "base": null, + "refs": { + "CreateAccessControlConfigurationRequest$Name": "A name for the access control configuration.
", + "DescribeAccessControlConfigurationResponse$Name": "The name for the access control configuration.
", + "UpdateAccessControlConfigurationRequest$Name": "A new name for the access control configuration.
" + } + }, + "AccessControlConfigurationSummary": { + "base": "Summary information on an access control configuration that you created for your documents in an index.
", + "refs": { + "AccessControlConfigurationSummaryList$member": null + } + }, + "AccessControlConfigurationSummaryList": { + "base": null, + "refs": { + "ListAccessControlConfigurationsResponse$AccessControlConfigurations": "The details of your access control configurations.
" + } + }, "AccessControlListConfiguration": { "base": "Access Control List files for the documents in a data source. For the format of the file, see Access control for S3 data sources.
", "refs": { @@ -101,7 +137,7 @@ } }, "AlfrescoConfiguration": { - "base": "Provides the configuration information to connect to Alfresco as your data source.
", + "base": "Provides the configuration information to connect to Alfresco as your data source.
Alfresco data source connector is currently in preview mode. Basic authentication is currently supported. If you would like to use Alfresco connector in production, contact Support.
Provides the configuration information to connect to Alfresco as your data source.
" } @@ -170,7 +206,7 @@ "AuthenticationConfiguration": { "base": "Provides the configuration information to connect to websites that require user authentication.
", "refs": { - "WebCrawlerConfiguration$AuthenticationConfiguration": "Configuration information required to connect to websites using authentication.
You can connect to websites using basic authentication of user name and password.
You must provide the website host name and port number. For example, the host name of https://a.example.com/page1.html is \"a.example.com\" and the port is 443, the standard port for HTTPS. You use a secret in Secrets Manager to store your authentication credentials.
" + "WebCrawlerConfiguration$AuthenticationConfiguration": "Configuration information required to connect to websites using authentication.
You can connect to websites using basic authentication of user name and password. You use a secret in Secrets Manager to store your authentication credentials.
You must provide the website host name and port number. For example, the host name of https://a.example.com/page1.html is \"a.example.com\" and the port is 443, the standard port for HTTPS.
" } }, "BasicAuthenticationConfiguration": { @@ -261,7 +297,7 @@ "base": null, "refs": { "AlfrescoConfiguration$CrawlSystemFolders": " TRUE
to index shared files.
TRUE
to index comments of wikis and blogs.
TRUE
to index comments of blogs and other content.
TRUE
to use the Slack change log to determine which documents require updating in the index. Depending on the data source change log's size, it may take longer for Amazon Kendra to use the change log than to scan all of your documents.
TRUE
to index comments.
TRUE
to index the contents of tasks.
Determines whether the field is used in the search. If the Searchable
field is true
, you can use relevance tuning to manually tune how Amazon Kendra weights the field in the search. The default is true
for string fields and false
for number and date fields.
Determines whether the field is returned in the query response. The default is true
.
Determines whether the field can be used to sort the results of a query. If you specify sorting on a field that does not have Sortable
set to true
, Amazon Kendra returns an exception. The default is false
.
Indicates whether Amazon Kendra should index attachments to knowledge articles.
", - "ServiceNowServiceCatalogConfiguration$CrawlAttachments": "Indicates whether Amazon Kendra should crawl attachments to the service catalog items.
", + "ServiceNowKnowledgeArticleConfiguration$CrawlAttachments": " TRUE
to index attachments to knowledge articles.
TRUE
to index attachments to service catalog items.
TRUE
to index document attachments.
TRUE
to use the SharePoint change log to determine which documents require updating in the index. Depending on the change log's size, it may take longer for Amazon Kendra to use the change log than to scan all of your documents in SharePoint.
TRUE
to disable local groups information.
A token that you provide to identify the request to create a data source. Multiple calls to the CreateDataSource
API with the same client token will create only one data source.
A token that you provide to identify the request to create an access control configuration. Multiple calls to the CreateAccessControlConfiguration
API with the same client token will create only one access control configuration.
A token that you provide to identify the request to create a data source connector. Multiple calls to the CreateDataSource
API with the same client token will create only one data source connector.
A token that you provide to identify the request to create your Amazon Kendra experience. Multiple calls to the CreateExperience
API with the same client token creates only one Amazon Kendra experience.
A token that you provide to identify the request to create a FAQ. Multiple calls to the CreateFaqRequest
API with the same client token will create only one FAQ.
A token that you provide to identify the request to create an index. Multiple calls to the CreateIndex
API with the same client token will create only one index.
The version or the type of the Confluence installation to connect to.
" + "ConfluenceConfiguration$Version": "The version or the type of Confluence installation to connect to.
" } }, "ConnectionConfiguration": { @@ -541,6 +578,16 @@ "WebCrawlerConfiguration$CrawlDepth": "Specifies the number of levels in a website that you want to crawl.
The first level begins from the website seed or starting point URL. For example, if a website has 3 levels – index level (i.e. seed in this example), sections level, and subsections level – and you are only interested in crawling information up to the sections level (i.e. levels 0-1), you can set your depth to 1.
The default crawl depth is set to 2.
" } }, + "CreateAccessControlConfigurationRequest": { + "base": null, + "refs": { + } + }, + "CreateAccessControlConfigurationResponse": { + "base": null, + "refs": { + } + }, "CreateDataSourceRequest": { "base": null, "refs": { @@ -605,17 +652,17 @@ "base": "Provides the configuration information for altering document metadata and content during the document ingestion process.
For more information, see Customizing document metadata during the ingestion process.
", "refs": { "BatchPutDocumentRequest$CustomDocumentEnrichmentConfiguration": "Configuration information for altering your document metadata and content during the document ingestion process when you use the BatchPutDocument
API.
For more information on how to create, modify and delete document metadata, or make other content alterations when you ingest documents into Amazon Kendra, see Customizing document metadata during the ingestion process.
", - "CreateDataSourceRequest$CustomDocumentEnrichmentConfiguration": "Configuration information for altering document metadata and content during the document ingestion process when you create a data source.
For more information on how to create, modify and delete document metadata, or make other content alterations when you ingest documents into Amazon Kendra, see Customizing document metadata during the ingestion process.
", + "CreateDataSourceRequest$CustomDocumentEnrichmentConfiguration": "Configuration information for altering document metadata and content during the document ingestion process.
For more information on how to create, modify and delete document metadata, or make other content alterations when you ingest documents into Amazon Kendra, see Customizing document metadata during the ingestion process.
", "DescribeDataSourceResponse$CustomDocumentEnrichmentConfiguration": "Configuration information for altering document metadata and content during the document ingestion process when you describe a data source.
For more information on how to create, modify and delete document metadata, or make other content alterations when you ingest documents into Amazon Kendra, see Customizing document metadata during the ingestion process.
", - "UpdateDataSourceRequest$CustomDocumentEnrichmentConfiguration": "Configuration information for altering document metadata and content during the document ingestion process when you update a data source.
For more information on how to create, modify and delete document metadata, or make other content alterations when you ingest documents into Amazon Kendra, see Customizing document metadata during the ingestion process.
" + "UpdateDataSourceRequest$CustomDocumentEnrichmentConfiguration": "Configuration information you want to update for altering document metadata and content during the document ingestion process.
For more information on how to create, modify and delete document metadata, or make other content alterations when you ingest documents into Amazon Kendra, see Customizing document metadata during the ingestion process.
" } }, "DataSourceConfiguration": { "base": "Provides the configuration information for an Amazon Kendra data source.
", "refs": { - "CreateDataSourceRequest$Configuration": "Configuration information that is required to access the data source repository.
You can't specify the Configuration
parameter when the Type
parameter is set to CUSTOM
. If you do, you receive a ValidationException
exception.
The Configuration
parameter is required for all other data sources.
Describes how the data source is configured. The specific information in the description depends on the data source provider.
", - "UpdateDataSourceRequest$Configuration": "Configuration information for an Amazon Kendra data source you want to update.
" + "CreateDataSourceRequest$Configuration": "Configuration information to connect to your data source repository.
You can't specify the Configuration
parameter when the Type
parameter is set to CUSTOM
. If you do, you receive a ValidationException
exception.
The Configuration
parameter is required for all other data sources.
Configuration details for the data source. This shows how the data source is configured. The configuration options for a data source depend on the data source provider.
", + "UpdateDataSourceRequest$Configuration": "Configuration information you want to update for the data source connector.
" } }, "DataSourceDateFieldFormat": { @@ -663,14 +710,14 @@ "DataSourceId": { "base": null, "refs": { - "CreateDataSourceResponse$Id": "A unique identifier for the data source.
", + "CreateDataSourceResponse$Id": "The identifier of the data source connector.
", "DataSourceGroup$DataSourceId": "The identifier of the data source group you want to add to your list of data source groups. This is for filtering search results based on the groups' access to documents in that data source.
", "DataSourceIdList$member": null, "DataSourceSummary$Id": "The unique identifier for the data source.
", "DataSourceSyncJobMetricTarget$DataSourceId": "The ID of the data source that is running the sync job.
", - "DeleteDataSourceRequest$Id": "The unique identifier of the data source to delete.
", - "DeletePrincipalMappingRequest$DataSourceId": "The identifier of the data source you want to delete a group from.
This is useful if a group is tied to multiple data sources and you want to delete a group from accessing documents in a certain data source. For example, the groups \"Research\", \"Engineering\", and \"Sales and Marketing\" are all tied to the company's documents stored in the data sources Confluence and Salesforce. You want to delete \"Research\" and \"Engineering\" groups from Salesforce, so that these groups cannot access customer-related documents stored in Salesforce. Only \"Sales and Marketing\" should access documents in the Salesforce data source.
", - "DescribeDataSourceRequest$Id": "The unique identifier of the data source to describe.
", + "DeleteDataSourceRequest$Id": "The identifier of the data source you want to delete.
", + "DeletePrincipalMappingRequest$DataSourceId": "The identifier of the data source you want to delete a group from.
A group can be tied to multiple data sources. You can delete a group from accessing documents in a certain data source. For example, the groups \"Research\", \"Engineering\", and \"Sales and Marketing\" are all tied to the company's documents stored in the data sources Confluence and Salesforce. You want to delete \"Research\" and \"Engineering\" groups from Salesforce, so that these groups cannot access customer-related documents stored in Salesforce. Only \"Sales and Marketing\" should access documents in the Salesforce data source.
", + "DescribeDataSourceRequest$Id": "The identifier of the data source.
", "DescribeDataSourceResponse$Id": "The identifier of the data source.
", "DescribePrincipalMappingRequest$DataSourceId": "The identifier of the data source to check the processing of PUT
and DELETE
actions for mapping users to their groups.
Shows the identifier of the data source to see information on the processing of PUT
and DELETE
actions for mapping users to their groups.
The identifier of the data source you want to map users to their groups.
This is useful if a group is tied to multiple data sources, but you only want the group to access documents of a certain data source. For example, the groups \"Research\", \"Engineering\", and \"Sales and Marketing\" are all tied to the company's documents stored in the data sources Confluence and Salesforce. However, \"Sales and Marketing\" team only needs access to customer-related documents stored in Salesforce.
", "StartDataSourceSyncJobRequest$Id": "The identifier of the data source to synchronize.
", "StopDataSourceSyncJobRequest$Id": "The identifier of the data source for which to stop the synchronization jobs.
", - "UpdateDataSourceRequest$Id": "The unique identifier of the data source to update.
" + "UpdateDataSourceRequest$Id": "The identifier of the data source you want to update.
" } }, "DataSourceIdList": { @@ -737,10 +784,10 @@ "DataSourceName": { "base": null, "refs": { - "CreateDataSourceRequest$Name": "A unique name for the data source. A data source name can't be changed without deleting and recreating the data source.
", + "CreateDataSourceRequest$Name": "A unique name for the data source connector. A data source name can't be changed without deleting and recreating the data source connector.
", "DataSourceSummary$Name": "The name of the data source.
", "DescribeDataSourceResponse$Name": "The name that you gave the data source when it was created.
", - "UpdateDataSourceRequest$Name": "The name of the data source to update. The name of the data source can't be updated. To rename a data source you must delete the data source and re-create it.
" + "UpdateDataSourceRequest$Name": "A new name for the data source connector. You must first delete the data source and re-create it to change the name of the data source.
" } }, "DataSourceStatus": { @@ -850,7 +897,7 @@ "DataSourceType": { "base": null, "refs": { - "CreateDataSourceRequest$Type": "The type of repository that contains the data source.
", + "CreateDataSourceRequest$Type": "The type of data source repository. For example, SHAREPOINT
.
The type of the data source.
", "DescribeDataSourceResponse$Type": "The type of the data source.
" } @@ -900,6 +947,16 @@ "ConnectionConfiguration$DatabasePort": "The port that the database uses for connections.
" } }, + "DeleteAccessControlConfigurationRequest": { + "base": null, + "refs": { + } + }, + "DeleteAccessControlConfigurationResponse": { + "base": null, + "refs": { + } + }, "DeleteDataSourceRequest": { "base": null, "refs": { @@ -940,6 +997,16 @@ "refs": { } }, + "DescribeAccessControlConfigurationRequest": { + "base": null, + "refs": { + } + }, + "DescribeAccessControlConfigurationResponse": { + "base": null, + "refs": { + } + }, "DescribeDataSourceRequest": { "base": null, "refs": { @@ -1023,23 +1090,26 @@ "Description": { "base": null, "refs": { - "CreateDataSourceRequest$Description": "A description for the data source.
", + "CreateAccessControlConfigurationRequest$Description": "A description for the access control configuration.
", + "CreateDataSourceRequest$Description": "A description for the data source connector.
", "CreateExperienceRequest$Description": "A description for your Amazon Kendra experience.
", - "CreateFaqRequest$Description": "A description of the FAQ.
", + "CreateFaqRequest$Description": "A description for the FAQ.
", "CreateIndexRequest$Description": "A description for the index.
", "CreateQuerySuggestionsBlockListRequest$Description": "A user-friendly description for the block list.
For example, the description \"List of all offensive words that can appear in user queries and need to be blocked from suggestions.\"
", - "CreateThesaurusRequest$Description": "The description for the new thesaurus.
", - "DescribeDataSourceResponse$Description": "The description of the data source.
", + "CreateThesaurusRequest$Description": "A description for the thesaurus.
", + "DescribeAccessControlConfigurationResponse$Description": "The description for the access control configuration.
", + "DescribeDataSourceResponse$Description": "The description for the data source.
", "DescribeExperienceResponse$Description": "Shows the description for your Amazon Kendra experience.
", "DescribeFaqResponse$Description": "The description of the FAQ that you provided when it was created.
", "DescribeIndexResponse$Description": "The description for the index.
", - "DescribeQuerySuggestionsBlockListResponse$Description": "Shows the description for the block list.
", + "DescribeQuerySuggestionsBlockListResponse$Description": "The description for the block list.
", "DescribeThesaurusResponse$Description": "The thesaurus description.
", - "UpdateDataSourceRequest$Description": "The new description for the data source.
", - "UpdateExperienceRequest$Description": "The description of your Amazon Kendra experience you want to update.
", + "UpdateAccessControlConfigurationRequest$Description": "A new description for the access control configuration.
", + "UpdateDataSourceRequest$Description": "A new description for the data source connector.
", + "UpdateExperienceRequest$Description": "A new description for your Amazon Kendra experience.
", "UpdateIndexRequest$Description": "A new description for the index.
", - "UpdateQuerySuggestionsBlockListRequest$Description": "The description for a block list.
", - "UpdateThesaurusRequest$Description": "The updated description of the thesaurus.
" + "UpdateQuerySuggestionsBlockListRequest$Description": "A new description for the block list.
", + "UpdateThesaurusRequest$Description": "A new description for the thesaurus.
" } }, "DisassociateEntitiesFromExperienceRequest": { @@ -1210,7 +1280,7 @@ } }, "DocumentMetadataConfiguration": { - "base": "Specifies the properties of a custom index field.
", + "base": "Specifies the properties, such as relevance tuning and searchability, of an index field.
", "refs": { "DocumentMetadataConfigurationList$member": null } @@ -1218,15 +1288,15 @@ "DocumentMetadataConfigurationList": { "base": null, "refs": { - "DescribeIndexResponse$DocumentMetadataConfigurations": "Configuration settings for any metadata applied to the documents in the index.
", - "UpdateIndexRequest$DocumentMetadataConfigurationUpdates": "The document metadata you want to update.
" + "DescribeIndexResponse$DocumentMetadataConfigurations": "Configuration information for document metadata or fields. Document metadata are fields or attributes associated with your documents. For example, the company department name associated with each document.
", + "UpdateIndexRequest$DocumentMetadataConfigurationUpdates": "The document metadata configuration you want to update for the index. Document metadata are fields or attributes associated with your documents. For example, the company department name associated with each document.
" } }, "DocumentMetadataConfigurationName": { "base": null, "refs": { "DocumentMetadataConfiguration$Name": "The name of the index field.
", - "DocumentRelevanceConfiguration$Name": "The name of the tuning configuration to override document relevance at the index level.
" + "DocumentRelevanceConfiguration$Name": "The name of the index field.
" } }, "DocumentRelevanceConfiguration": { @@ -1362,11 +1432,12 @@ "BatchPutDocumentResponseFailedDocument$ErrorMessage": "A description of the reason why the document could not be indexed.
", "ConflictException$Message": null, "DataSourceSyncJob$ErrorMessage": "If the Status
field is set to ERROR
, the ErrorMessage
field contains a description of the error that caused the synchronization to fail.
The error message containing details if there are issues processing the access control configuration.
", "DescribeDataSourceResponse$ErrorMessage": "When the Status
field value is FAILED
, the ErrorMessage
field contains a description of the error that caused the data source to fail.
The reason your Amazon Kendra experience could not properly process.
", "DescribeFaqResponse$ErrorMessage": "If the Status
field is FAILED
, the ErrorMessage
field contains the reason why the FAQ failed.
When the Status
field value is FAILED
, the ErrorMessage
field contains a message that explains why.
Shows the error message with details when there are issues in processing the block list.
", + "DescribeQuerySuggestionsBlockListResponse$ErrorMessage": "The error message containing details if there are issues processing the block list.
", "DescribeThesaurusResponse$ErrorMessage": "When the Status
field value is FAILED
, the ErrorMessage
field provides more information.
The reason the user or group in your Amazon Web Services SSO identity source failed to properly configure with your Amazon Kendra experience.
", "InternalServerException$Message": null, @@ -1403,7 +1474,7 @@ "refs": { "CreateExperienceRequest$Configuration": "Configuration information for your Amazon Kendra experience. This includes ContentSourceConfiguration
, which specifies the data source IDs and/or FAQ IDs, and UserIdentityConfiguration
, which specifies the user or group information to grant access to your Amazon Kendra experience.
Shows the configuration information for your Amazon Kendra experience. This includes ContentSourceConfiguration
, which specifies the data source IDs and/or FAQ IDs, and UserIdentityConfiguration
, which specifies the user or group information to grant access to your Amazon Kendra experience.
Configuration information for your Amazon Kendra you want to update.
" + "UpdateExperienceRequest$Configuration": "Configuration information you want to update for your Amazon Kendra experience.
" } }, "ExperienceEndpoint": { @@ -1454,7 +1525,7 @@ "CreateExperienceRequest$Name": "A name for your Amazon Kendra experience.
", "DescribeExperienceResponse$Name": "Shows the name of your Amazon Kendra experience.
", "ExperiencesSummary$Name": "The name of your Amazon Kendra experience.
", - "UpdateExperienceRequest$Name": "The name of your Amazon Kendra experience you want to update.
" + "UpdateExperienceRequest$Name": "A new name for your Amazon Kendra experience.
" } }, "ExperienceStatus": { @@ -1526,7 +1597,7 @@ "FaqFileFormat": { "base": null, "refs": { - "CreateFaqRequest$FileFormat": "The format of the input file. You can choose between a basic CSV format, a CSV format that includes customs attributes in a header, and a JSON format that includes custom attributes.
The format must match the format of the file stored in the S3 bucket identified in the S3Path
parameter.
For more information, see Adding questions and answers.
", + "CreateFaqRequest$FileFormat": "The format of the FAQ input file. You can choose between a basic CSV format, a CSV format that includes custom attributes in a header, and a JSON format that includes custom attributes.
The format must match the format of the file stored in the S3 bucket identified in the S3Path
parameter.
For more information, see Adding questions and answers.
", "DescribeFaqResponse$FileFormat": "The file format used by the input files for the FAQ.
", "FaqSummary$FileFormat": "The file type used to create the FAQ.
" } @@ -1535,8 +1606,8 @@ "base": null, "refs": { "CreateFaqResponse$Id": "The unique identifier of the FAQ.
", - "DeleteFaqRequest$Id": "The identifier of the FAQ to remove.
", - "DescribeFaqRequest$Id": "The unique identifier of the FAQ.
", + "DeleteFaqRequest$Id": "The identifier of the FAQ you want to remove.
", + "DescribeFaqRequest$Id": "The identifier of the FAQ you want to get information on.
", "DescribeFaqResponse$Id": "The identifier of the FAQ.
", "FaqIdsList$member": null, "FaqSummary$Id": "The unique identifier of the FAQ.
" @@ -1551,7 +1622,7 @@ "FaqName": { "base": null, "refs": { - "CreateFaqRequest$Name": "The name that should be associated with the FAQ.
", + "CreateFaqRequest$Name": "A name for the FAQ.
", "DescribeFaqResponse$Name": "The name that you gave the FAQ when it was created.
", "FaqSummary$Name": "The name that you assigned the FAQ when you created or updated the FAQ.
" } @@ -1570,7 +1641,7 @@ } }, "FaqSummary": { - "base": "Provides information about a frequently asked questions and answer contained in an index.
", + "base": "Summary information for frequently asked questions and answers included in an index.
", "refs": { "FaqSummaryItems$member": null } @@ -1602,7 +1673,7 @@ "FolderIdList": { "base": null, "refs": { - "QuipConfiguration$FolderIds": "The identifier of the Quip folders you want to index.
" + "QuipConfiguration$FolderIds": "The identifiers of the Quip folders you want to index.
" } }, "FsxConfiguration": { @@ -1667,13 +1738,13 @@ "DeletePrincipalMappingRequest$GroupId": "The identifier of the group you want to delete.
", "DescribePrincipalMappingRequest$GroupId": "The identifier of the group required to check the processing of PUT
and DELETE
actions for mapping users to their groups.
Shows the identifier of the group to see information on the processing of PUT
and DELETE
actions for mapping users to their groups.
The identifier of the group you want group summary information on.
", + "GroupSummary$GroupId": "The identifier of the group you want group summary information on.
", "MemberGroup$GroupId": "The identifier of the sub group you want to map to a group.
", "PutPrincipalMappingRequest$GroupId": "The identifier of the group you want to map its users to.
" } }, "GroupMembers": { - "base": "A list of users or sub groups that belong to a group. Users and groups are useful for filtering search results to different users based on their group's access to documents.
", + "base": "A list of users or sub groups that belong to a group. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
", "refs": { "PutPrincipalMappingRequest$GroupMembers": "The list that contains your users or sub groups that belong the same group.
For example, the group \"Company\" includes the user \"CEO\" and the sub groups \"Research\", \"Engineering\", and \"Sales and Marketing\".
If you have more than 1000 users and/or sub groups for a single group, you need to provide the path to the S3 file that lists your users and sub groups for a group. Your sub groups can contain more than 1000 users, but the list of sub groups that belong to a group (and/or users) must be no more than 1000.
" } @@ -1685,13 +1756,13 @@ } }, "GroupOrderingIdSummary": { - "base": "Information on the processing of PUT
and DELETE
actions for mapping users to their groups.
Summary information on the processing of PUT
and DELETE
actions for mapping users to their groups.
Group summary information.
", + "base": "Summary information for groups.
", "refs": { "ListOfGroupSummaries$member": null } @@ -1711,7 +1782,10 @@ "HierarchicalPrincipalList": { "base": "A list of principal lists that define the hierarchy for which documents users should have access to. Each hierarchical list specifies which user or group has allow or deny access for each document.
", "refs": { - "Document$HierarchicalAccessControlList": "The list of principal lists that define the hierarchy for which documents users should have access to.
" + "CreateAccessControlConfigurationRequest$HierarchicalAccessControlList": "The list of principal lists that define the hierarchy for which documents users should have access to.
", + "DescribeAccessControlConfigurationResponse$HierarchicalAccessControlList": "The list of principal lists that define the hierarchy for which documents users should have access to.
", + "Document$HierarchicalAccessControlList": "The list of principal lists that define the hierarchy for which documents users should have access to.
", + "UpdateAccessControlConfigurationRequest$HierarchicalAccessControlList": "The updated list of principal lists that define the hierarchy for which documents users should have access to.
" } }, "Highlight": { @@ -1760,7 +1834,7 @@ } }, "IndexConfigurationSummary": { - "base": "A summary of information on the configuration of an index.
", + "base": "Summary information on the configuration of an index.
", "refs": { "IndexConfigurationSummaryList$member": null } @@ -1798,68 +1872,73 @@ "BatchGetDocumentStatusRequest$IndexId": "The identifier of the index to add documents to. The index ID is returned by the CreateIndex API.
", "BatchPutDocumentRequest$IndexId": "The identifier of the index to add the documents to. You need to create the index first using the CreateIndex
API.
The identifier of the index you want to clear query suggestions from.
", - "CreateDataSourceRequest$IndexId": "The identifier of the index that should be associated with this data source.
", + "CreateAccessControlConfigurationRequest$IndexId": "The identifier of the index to create an access control configuration for your documents.
", + "CreateDataSourceRequest$IndexId": "The identifier of the index you want to use with the data source connector.
", "CreateExperienceRequest$IndexId": "The identifier of the index for your Amazon Kendra experience.
", - "CreateFaqRequest$IndexId": "The identifier of the index that contains the FAQ.
", + "CreateFaqRequest$IndexId": "The identifier of the index for the FAQ.
", "CreateIndexResponse$Id": "The unique identifier of the index. Use this identifier when you query an index, set up a data source, or index a document.
", "CreateQuerySuggestionsBlockListRequest$IndexId": "The identifier of the index you want to create a query suggestions block list for.
", - "CreateThesaurusRequest$IndexId": "The unique identifier of the index for the new thesaurus.
", - "DeleteDataSourceRequest$IndexId": "The unique identifier of the index associated with the data source.
", - "DeleteExperienceRequest$IndexId": "The identifier of the index for your Amazon Kendra experience you want to delete.
", - "DeleteFaqRequest$IndexId": "The index to remove the FAQ from.
", - "DeleteIndexRequest$Id": "The identifier of the index to delete.
", + "CreateThesaurusRequest$IndexId": "The identifier of the index for the thesaurus.
", + "DeleteAccessControlConfigurationRequest$IndexId": "The identifier of the index for an access control configuration.
", + "DeleteDataSourceRequest$IndexId": "The identifier of the index used with the data source.
", + "DeleteExperienceRequest$IndexId": "The identifier of the index for your Amazon Kendra experience.
", + "DeleteFaqRequest$IndexId": "The identifier of the index for the FAQ.
", + "DeleteIndexRequest$Id": "The identifier of the index you want to delete.
", "DeletePrincipalMappingRequest$IndexId": "The identifier of the index you want to delete a group from.
", - "DeleteQuerySuggestionsBlockListRequest$IndexId": "The identifier of the you want to delete a block list from.
", - "DeleteThesaurusRequest$IndexId": "The identifier of the index associated with the thesaurus to delete.
", - "DescribeDataSourceRequest$IndexId": "The identifier of the index that contains the data source.
", + "DeleteQuerySuggestionsBlockListRequest$IndexId": "The identifier of the index for the block list.
", + "DeleteThesaurusRequest$IndexId": "The identifier of the index for the thesaurus.
", + "DescribeAccessControlConfigurationRequest$IndexId": "The identifier of the index for an access control configuration.
", + "DescribeDataSourceRequest$IndexId": "The identifier of the index used with the data source.
", "DescribeDataSourceResponse$IndexId": "The identifier of the index that contains the data source.
", - "DescribeExperienceRequest$IndexId": "The identifier of the index for your Amazon Kendra experience you want to get information on.
", + "DescribeExperienceRequest$IndexId": "The identifier of the index for your Amazon Kendra experience.
", "DescribeExperienceResponse$IndexId": "Shows the identifier of the index for your Amazon Kendra experience.
", - "DescribeFaqRequest$IndexId": "The identifier of the index that contains the FAQ.
", - "DescribeFaqResponse$IndexId": "The identifier of the index that contains the FAQ.
", - "DescribeIndexRequest$Id": "The identifier of the index to describe.
", + "DescribeFaqRequest$IndexId": "The identifier of the index for the FAQ.
", + "DescribeFaqResponse$IndexId": "The identifier of the index for the FAQ.
", + "DescribeIndexRequest$Id": "The identifier of the index you want to get information on.
", "DescribeIndexResponse$Id": "The identifier of the index.
", "DescribePrincipalMappingRequest$IndexId": "The identifier of the index required to check the processing of PUT
and DELETE
actions for mapping users to their groups.
Shows the identifier of the index to see information on the processing of PUT
and DELETE
actions for mapping users to their groups.
The identifier of the index for the block list.
", - "DescribeQuerySuggestionsBlockListResponse$IndexId": "Shows the identifier of the index for the block list.
", - "DescribeQuerySuggestionsConfigRequest$IndexId": "The identifier of the index you want to describe query suggestions settings for.
", - "DescribeThesaurusRequest$IndexId": "The identifier of the index associated with the thesaurus to describe.
", - "DescribeThesaurusResponse$IndexId": "The identifier of the index associated with the thesaurus to describe.
", + "DescribeQuerySuggestionsBlockListResponse$IndexId": "The identifier of the index for the block list.
", + "DescribeQuerySuggestionsConfigRequest$IndexId": "The identifier of the index with query suggestions that you want to get information on.
", + "DescribeThesaurusRequest$IndexId": "The identifier of the index for the thesaurus.
", + "DescribeThesaurusResponse$IndexId": "The identifier of the index for the thesaurus.
", "DisassociateEntitiesFromExperienceRequest$IndexId": "The identifier of the index for your Amazon Kendra experience.
", "DisassociatePersonasFromEntitiesRequest$IndexId": "The identifier of the index for your Amazon Kendra experience.
", "GetQuerySuggestionsRequest$IndexId": "The identifier of the index you want to get query suggestions from.
", "GetSnapshotsRequest$IndexId": "The identifier of the index to get search metrics data.
", "IndexConfigurationSummary$Id": "A unique identifier for the index. Use this to identify the index when you are using APIs such as Query
, DescribeIndex
, UpdateIndex
, and DeleteIndex
.
The identifier of the index that contains the data source.
", - "ListDataSourcesRequest$IndexId": "The identifier of the index that contains the data source.
", + "ListAccessControlConfigurationsRequest$IndexId": "The identifier of the index for the access control configuration.
", + "ListDataSourceSyncJobsRequest$IndexId": "The identifier of the index used with the data source.
", + "ListDataSourcesRequest$IndexId": "The identifier of the index used with one or more data sources.
", "ListEntityPersonasRequest$IndexId": "The identifier of the index for your Amazon Kendra experience.
", "ListExperienceEntitiesRequest$IndexId": "The identifier of the index for your Amazon Kendra experience.
", "ListExperiencesRequest$IndexId": "The identifier of the index for your Amazon Kendra experience.
", "ListFaqsRequest$IndexId": "The index that contains the FAQ lists.
", "ListGroupsOlderThanOrderingIdRequest$IndexId": "The identifier of the index for getting a list of groups mapped to users before a given ordering or timestamp identifier.
", "ListQuerySuggestionsBlockListsRequest$IndexId": "The identifier of the index for a list of all block lists that exist for that index.
For information on the current quota limits for block lists, see Quotas for Amazon Kendra.
", - "ListThesauriRequest$IndexId": "The identifier of the index associated with the thesaurus to list.
", + "ListThesauriRequest$IndexId": "The identifier of the index with one or more thesauri.
", "PutPrincipalMappingRequest$IndexId": "The identifier of the index you want to map users to their groups.
", "QueryRequest$IndexId": "The unique identifier of the index to search. The identifier is returned in the response from the CreateIndex
API.
The identifier of the index that contains the data source.
", "StopDataSourceSyncJobRequest$IndexId": "The identifier of the index that contains the data source.
", "SubmitFeedbackRequest$IndexId": "The identifier of the index that was queried.
", - "UpdateDataSourceRequest$IndexId": "The identifier of the index that contains the data source to update.
", - "UpdateExperienceRequest$IndexId": "The identifier of the index for your Amazon Kendra experience you want to update.
", - "UpdateIndexRequest$Id": "The identifier of the index to update.
", - "UpdateQuerySuggestionsBlockListRequest$IndexId": "The identifier of the index for a block list.
", - "UpdateQuerySuggestionsConfigRequest$IndexId": "The identifier of the index you want to update query suggestions settings for.
", - "UpdateThesaurusRequest$IndexId": "The identifier of the index associated with the thesaurus to update.
" + "UpdateAccessControlConfigurationRequest$IndexId": "The identifier of the index for an access control configuration.
", + "UpdateDataSourceRequest$IndexId": "The identifier of the index used with the data source connector.
", + "UpdateExperienceRequest$IndexId": "The identifier of the index for your Amazon Kendra experience.
", + "UpdateIndexRequest$Id": "The identifier of the index you want to update.
", + "UpdateQuerySuggestionsBlockListRequest$IndexId": "The identifier of the index for the block list.
", + "UpdateQuerySuggestionsConfigRequest$IndexId": "The identifier of the index with query suggestions you want to update.
", + "UpdateThesaurusRequest$IndexId": "The identifier of the index for the thesaurus.
" } }, "IndexName": { "base": null, "refs": { - "CreateIndexRequest$Name": "The name for the new index.
", + "CreateIndexRequest$Name": "A name for the index.
", "DescribeIndexResponse$Name": "The name of the index.
", "IndexConfigurationSummary$Name": "The identifier of the index.
", - "UpdateIndexRequest$Name": "The name of the index to update.
" + "UpdateIndexRequest$Name": "The name of the index you want to update.
" } }, "IndexStatistics": { @@ -1910,9 +1989,9 @@ "refs": { "Correction$BeginOffset": "The zero-based location in the response string or text where the corrected word starts.
", "Correction$EndOffset": "The zero-based location in the response string or text where the corrected word ends.
", - "DescribeQuerySuggestionsBlockListResponse$ItemCount": "Shows the current number of valid, non-empty words or phrases in the block list text file.
", - "DescribeQuerySuggestionsConfigResponse$QueryLogLookBackWindowInDays": "Shows how recent your queries are in your query log time window (in days).
", - "DescribeQuerySuggestionsConfigResponse$TotalSuggestionsCount": "Shows the current total count of query suggestions for an index.
This count can change when you update your query suggestions settings, if you filter out certain queries from suggestions using a block list, and as the query log accumulates more queries for Amazon Kendra to learn from.
", + "DescribeQuerySuggestionsBlockListResponse$ItemCount": "The current number of valid, non-empty words or phrases in the block list text file.
", + "DescribeQuerySuggestionsConfigResponse$QueryLogLookBackWindowInDays": "How recent your queries are in your query log time window (in days).
", + "DescribeQuerySuggestionsConfigResponse$TotalSuggestionsCount": "The current total count of query suggestions for an index.
This count can change when you update your query suggestions settings, if you filter out certain queries from suggestions using a block list, and as the query log accumulates more queries for Amazon Kendra to learn from.
", "DocumentAttributeValueCountPair$Count": "The number of documents in the response that have the attribute value for the key.
", "GetQuerySuggestionsRequest$MaxSuggestionsCount": "The maximum number of query suggestions you want to show to your users.
", "GetSnapshotsRequest$MaxResults": "The maximum number of returned data for the metric.
", @@ -2018,13 +2097,23 @@ "LanguageCode": { "base": "The code for a language. The default language is English. For more information on supported languages, including their codes, see Adding documents in languages other than English.
", "refs": { - "CreateDataSourceRequest$LanguageCode": "The code for a language. This allows you to support a language for all documents when creating the data source. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English.
", + "CreateDataSourceRequest$LanguageCode": "The code for a language. This allows you to support a language for all documents when creating the data source connector. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English.
", "CreateFaqRequest$LanguageCode": "The code for a language. This allows you to support a language for the FAQ document. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English.
", "DataSourceSummary$LanguageCode": "The code for a language. This shows a supported language for all documents in the data source. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English.
", "DescribeDataSourceResponse$LanguageCode": "The code for a language. This shows a supported language for all documents in the data source. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English.
", "DescribeFaqResponse$LanguageCode": "The code for a language. This shows a supported language for the FAQ document. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English.
", "FaqSummary$LanguageCode": "The code for a language. This shows a supported language for the FAQ document as part of the summary information for FAQs. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English.
", - "UpdateDataSourceRequest$LanguageCode": "The code for a language. This allows you to support a language for all documents when updating the data source. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English.
" + "UpdateDataSourceRequest$LanguageCode": "The code for a language you want to update for the data source connector. This allows you to support a language for all documents when updating the data source. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English.
" + } + }, + "ListAccessControlConfigurationsRequest": { + "base": null, + "refs": { + } + }, + "ListAccessControlConfigurationsResponse": { + "base": null, + "refs": { } }, "ListDataSourceSyncJobsRequest": { @@ -2146,7 +2235,7 @@ "Long": { "base": null, "refs": { - "DescribeQuerySuggestionsBlockListResponse$FileSizeBytes": "Shows the current size of the block list text file in S3.
", + "DescribeQuerySuggestionsBlockListResponse$FileSizeBytes": "The current size of the block list text file in S3.
", "DescribeThesaurusResponse$FileSizeBytes": "The size of the thesaurus file in bytes.
", "DescribeThesaurusResponse$TermCount": "The number of unique terms in the thesaurus file. For example, the synonyms a,b,c
and a=>d
, the term count would be 4.
The number of synonym rules in the thesaurus file.
", @@ -2171,6 +2260,12 @@ "WebCrawlerConfiguration$MaxLinksPerPage": "The maximum number of URLs on a webpage to include when crawling a website. This number is per webpage.
As a website’s webpages are crawled, any URLs the webpages link to are also crawled. URLs on a webpage are crawled in order of appearance.
The default maximum links per page is 100.
" } }, + "MaxResultsIntegerForListAccessControlConfigurationsRequest": { + "base": null, + "refs": { + "ListAccessControlConfigurationsRequest$MaxResults": "The maximum number of access control configurations to return.
" + } + }, "MaxResultsIntegerForListDataSourceSyncJobsRequest": { "base": null, "refs": { @@ -2280,21 +2375,21 @@ "MinimumNumberOfQueryingUsers": { "base": null, "refs": { - "DescribeQuerySuggestionsConfigResponse$MinimumNumberOfQueryingUsers": "Shows the minimum number of unique users who must search a query in order for the query to be eligible to suggest to your users.
", + "DescribeQuerySuggestionsConfigResponse$MinimumNumberOfQueryingUsers": "The minimum number of unique users who must search a query in order for the query to be eligible to suggest to your users.
", "UpdateQuerySuggestionsConfigRequest$MinimumNumberOfQueryingUsers": "The minimum number of unique users who must search a query in order for the query to be eligible to suggest to your users.
Increasing this number might decrease the number of suggestions. However, this ensures a query is searched by many users and is truly popular to suggest to users.
How you tune this setting depends on your specific needs.
" } }, "MinimumQueryCount": { "base": null, "refs": { - "DescribeQuerySuggestionsConfigResponse$MinimumQueryCount": "Shows the minimum number of times a query must be searched in order for the query to be eligible to suggest to your users.
", + "DescribeQuerySuggestionsConfigResponse$MinimumQueryCount": "The minimum number of times a query must be searched in order for the query to be eligible to suggest to your users.
", "UpdateQuerySuggestionsConfigRequest$MinimumQueryCount": "The the minimum number of times a query must be searched in order to be eligible to suggest to your users.
Decreasing this number increases the number of suggestions. However, this affects the quality of suggestions as it sets a low bar for a query to be considered popular to suggest to users.
How you tune this setting depends on your specific needs.
" } }, "Mode": { "base": null, "refs": { - "DescribeQuerySuggestionsConfigResponse$Mode": "Shows whether query suggestions are currently in ENABLED
mode or LEARN_ONLY
mode.
By default, Amazon Kendra enables query suggestions.LEARN_ONLY
turns off query suggestions for your users. You can change the mode using the UpdateQuerySuggestionsConfig API.
Whether query suggestions are currently in ENABLED
mode or LEARN_ONLY
mode.
By default, Amazon Kendra enables query suggestions.LEARN_ONLY
turns off query suggestions for your users. You can change the mode using the UpdateQuerySuggestionsConfig API.
Set the mode to ENABLED
or LEARN_ONLY
.
By default, Amazon Kendra enables query suggestions. LEARN_ONLY
mode allows you to turn off query suggestions. You can to update this at any time.
In LEARN_ONLY
mode, Amazon Kendra continues to learn from new queries to keep suggestions up to date for when you are ready to switch to ENABLED mode again.
Shows whether Amazon Kendra uses all queries or only uses queries that include user information to generate query suggestions.
", + "DescribeQuerySuggestionsConfigResponse$IncludeQueriesWithoutUserInformation": " TRUE
to use all queries, otherwise use only queries that include user information to generate the query suggestions.
TRUE
to include queries without user information (i.e. all queries, irrespective of the user), otherwise FALSE
to only include queries with user information.
If you pass user information to Amazon Kendra along with the queries, you can set this flag to FALSE
and instruct Amazon Kendra to only consider queries with user information.
If you set to FALSE
, Amazon Kendra only considers queries searched at least MinimumQueryCount
times across MinimumNumberOfQueryingUsers
unique users for suggestions.
If you set to TRUE
, Amazon Kendra ignores all user information and learns from all queries.
Provides user and group information for document access filtering.
", + "base": "Provides user and group information for user context filtering.
", "refs": { "PrincipalList$member": null } @@ -2426,8 +2521,11 @@ "PrincipalList": { "base": null, "refs": { - "Document$AccessControlList": "Information on user and group access rights, which is used for user context filtering.
", - "HierarchicalPrincipal$PrincipalList": "A list of principal lists that define the hierarchy for which documents users should have access to. Each hierarchical list specifies which user or group has allow or deny access for each document.
" + "CreateAccessControlConfigurationRequest$AccessControlList": "Information on principals (users and/or groups) and which documents they should have access to. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
", + "DescribeAccessControlConfigurationResponse$AccessControlList": "Information on principals (users and/or groups) and which documents they should have access to. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
", + "Document$AccessControlList": "Information on principals (users and/or groups) and which documents they should have access to. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
", + "HierarchicalPrincipal$PrincipalList": "A list of principal lists that define the hierarchy for which documents users should have access to. Each hierarchical list specifies which user or group has allow or deny access for each document.
", + "UpdateAccessControlConfigurationRequest$AccessControlList": "Information you want to update on principals (users and/or groups) and which documents they should have access to. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.
" } }, "PrincipalMappingStatus": { @@ -2450,7 +2548,7 @@ "refs": { "DeletePrincipalMappingRequest$OrderingId": "The timestamp identifier you specify to ensure Amazon Kendra does not override the latest DELETE
action with previous actions. The highest number ID, which is the ordering ID, is the latest action you want to process and apply on top of other actions with lower number IDs. This prevents previous actions with lower number IDs from possibly overriding the latest action.
The ordering ID can be the UNIX time of the last update you made to a group members list. You would then provide this list when calling PutPrincipalMapping
. This ensures your DELETE
action for that updated group with the latest members list doesn't get overwritten by earlier DELETE
actions for the same group which are yet to be processed.
The default ordering ID is the current UNIX time in milliseconds that the action was received by Amazon Kendra.
", "GroupOrderingIdSummary$OrderingId": "The order in which actions should complete processing. An action can be a PUT
or DELETE
action for mapping users to their groups.
The timestamp identifier used for the latest PUT
or DELETE
action.
The timestamp identifier used for the latest PUT
or DELETE
action.
The timestamp identifier used for the latest PUT
or DELETE
action for mapping users to their groups.
The timestamp identifier you specify to ensure Amazon Kendra does not override the latest PUT
action with previous actions. The highest number ID, which is the ordering ID, is the latest action you want to process and apply on top of other actions with lower number IDs. This prevents previous actions with lower number IDs from possibly overriding the latest action.
The ordering ID can be the UNIX time of the last update you made to a group members list. You would then provide this list when calling PutPrincipalMapping
. This ensures your PUT
action for that updated group with the latest members list doesn't get overwritten by earlier PUT
actions for the same group which are yet to be processed.
The default ordering ID is the current UNIX time in milliseconds that the action was received by Amazon Kendra.
" } @@ -2542,26 +2640,26 @@ "base": null, "refs": { "CreateQuerySuggestionsBlockListResponse$Id": "The unique identifier of the created block list.
", - "DeleteQuerySuggestionsBlockListRequest$Id": "The unique identifier of the block list that needs to be deleted.
", - "DescribeQuerySuggestionsBlockListRequest$Id": "The unique identifier of the block list.
", - "DescribeQuerySuggestionsBlockListResponse$Id": "Shows the unique identifier of the block list.
", + "DeleteQuerySuggestionsBlockListRequest$Id": "The identifier of the block list you want to delete.
", + "DescribeQuerySuggestionsBlockListRequest$Id": "The identifier of the block list you want to get information on.
", + "DescribeQuerySuggestionsBlockListResponse$Id": "The identifier of the block list.
", "QuerySuggestionsBlockListSummary$Id": "The identifier of a block list.
", - "UpdateQuerySuggestionsBlockListRequest$Id": "The unique identifier of a block list.
" + "UpdateQuerySuggestionsBlockListRequest$Id": "The identifier of the block list you want to update.
" } }, "QuerySuggestionsBlockListName": { "base": null, "refs": { "CreateQuerySuggestionsBlockListRequest$Name": "A user friendly name for the block list.
For example, the block list named 'offensive-words' includes all offensive words that could appear in user queries and need to be blocked from suggestions.
", - "DescribeQuerySuggestionsBlockListResponse$Name": "Shows the name of the block list.
", + "DescribeQuerySuggestionsBlockListResponse$Name": "The name of the block list.
", "QuerySuggestionsBlockListSummary$Name": "The name of the block list.
", - "UpdateQuerySuggestionsBlockListRequest$Name": "The name of a block list.
" + "UpdateQuerySuggestionsBlockListRequest$Name": "A new name for the block list.
" } }, "QuerySuggestionsBlockListStatus": { "base": null, "refs": { - "DescribeQuerySuggestionsBlockListResponse$Status": "Shows whether the current status of the block list is ACTIVE
or INACTIVE
.
The current status of the block list. When the value is ACTIVE
, the block list is ready for use.
The status of the block list.
" } }, @@ -2586,7 +2684,7 @@ "QuerySuggestionsStatus": { "base": null, "refs": { - "DescribeQuerySuggestionsConfigResponse$Status": "Shows whether the status of query suggestions settings is currently Active or Updating.
Active means the current settings apply and Updating means your changed settings are in the process of applying.
" + "DescribeQuerySuggestionsConfigResponse$Status": "Whether the status of query suggestions settings is currently ACTIVE
or UPDATING
.
Active means the current settings apply and Updating means your changed settings are in the process of applying.
" } }, "QueryText": { @@ -2604,14 +2702,14 @@ "ReadAccessType": { "base": null, "refs": { - "Principal$Access": "Whether to allow or deny access to the principal.
" + "Principal$Access": "Whether to allow or deny document access to the principal.
" } }, "Relevance": { - "base": "Provides information for manually tuning the relevance of a field in a search. When a query includes terms that match the field, the results are given a boost in the response based on these tuning parameters.
", + "base": "Provides information for tuning the relevance of a field in a search. When a query includes terms that match the field, the results are given a boost in the response based on these tuning parameters.
", "refs": { - "DocumentMetadataConfiguration$Relevance": "Provides manual tuning parameters to determine how the field affects the search results.
", - "DocumentRelevanceConfiguration$Relevance": null + "DocumentMetadataConfiguration$Relevance": "Provides tuning parameters to determine how the field affects the search results.
", + "DocumentRelevanceConfiguration$Relevance": "Provides information for tuning the relevance of a field in a search. When a query includes terms that match the field, the results are given a boost in the response based on these tuning parameters.
" } }, "RelevanceFeedback": { @@ -2677,7 +2775,7 @@ "base": null, "refs": { "BatchPutDocumentRequest$RoleArn": "The Amazon Resource Name (ARN) of a role that is allowed to run the BatchPutDocument
API. For more information, see IAM Roles for Amazon Kendra.
The Amazon Resource Name (ARN) of a role with permission to access the data source. For more information, see IAM Roles for Amazon Kendra.
You can't specify the RoleArn
parameter when the Type
parameter is set to CUSTOM
. If you do, you receive a ValidationException
exception.
The RoleArn
parameter is required for all other data sources.
The Amazon Resource Name (ARN) of a role with permission to access the data source connector. For more information, see IAM Roles for Amazon Kendra.
You can't specify the RoleArn
parameter when the Type
parameter is set to CUSTOM
. If you do, you receive a ValidationException
exception.
The RoleArn
parameter is required for all other data sources.
The Amazon Resource Name (ARN) of a role with permission to access Query
API, QuerySuggestions
API, SubmitFeedback
API, and Amazon Web Services SSO that stores your user and group information. For more information, see IAM roles for Amazon Kendra.
The Amazon Resource Name (ARN) of a role with permission to access the S3 bucket that contains the FAQs. For more information, see IAM Roles for Amazon Kendra.
", "CreateIndexRequest$RoleArn": "An Identity and Access Management (IAM) role that gives Amazon Kendra permissions to access your Amazon CloudWatch logs and metrics. This is also the role you use when you call the BatchPutDocument
API to index documents from an Amazon S3 bucket.
Shows the Amazon Resource Name (ARN) of a role with permission to access Query
API, QuerySuggestions
API, SubmitFeedback
API, and Amazon Web Services SSO that stores your user and group information.
The Amazon Resource Name (ARN) of the role that provides access to the S3 bucket containing the input files for the FAQ.
", "DescribeIndexResponse$RoleArn": "The Amazon Resource Name (ARN) of the IAM role that gives Amazon Kendra permission to write to your Amazon Cloudwatch logs.
", - "DescribeQuerySuggestionsBlockListResponse$RoleArn": "Shows the current IAM (Identity and Access Management) role used by Amazon Kendra to access the block list text file in S3.
The role needs S3 read permissions to your file in S3 and needs to give STS (Security Token Service) assume role permissions to Amazon Kendra.
", + "DescribeQuerySuggestionsBlockListResponse$RoleArn": "The IAM (Identity and Access Management) role used by Amazon Kendra to access the block list text file in S3.
The role needs S3 read permissions to your file in S3 and needs to give STS (Security Token Service) assume role permissions to Amazon Kendra.
", "DescribeThesaurusResponse$RoleArn": "An IAM role that gives Amazon Kendra permissions to access thesaurus file specified in SourceS3Path
.
The Amazon Resource Name (ARN) of the secret.
", "PutPrincipalMappingRequest$RoleArn": "The Amazon Resource Name (ARN) of a role that has access to the S3 file that contains your list of users or sub groups that belong to a group.
For more information, see IAM roles for Amazon Kendra.
", - "UpdateDataSourceRequest$RoleArn": "The Amazon Resource Name (ARN) of the new role to use when the data source is accessing resources on your behalf.
", + "UpdateDataSourceRequest$RoleArn": "The Amazon Resource Name (ARN) of a role with permission to access the data source. For more information, see IAM Roles for Amazon Kendra.
", "UpdateExperienceRequest$RoleArn": "The Amazon Resource Name (ARN) of a role with permission to access Query
API, QuerySuggestions
API, SubmitFeedback
API, and Amazon Web Services SSO that stores your user and group information. For more information, see IAM roles for Amazon Kendra.
A new IAM role that gives Amazon Kendra permission to access your Amazon CloudWatch logs.
", + "UpdateIndexRequest$RoleArn": "An Identity and Access Management (IAM) role that gives Amazon Kendra permission to access Amazon CloudWatch logs and metrics.
", "UpdateQuerySuggestionsBlockListRequest$RoleArn": "The IAM (Identity and Access Management) role used to access the block list text file in S3.
", - "UpdateThesaurusRequest$RoleArn": "The updated role ARN of the thesaurus.
" + "UpdateThesaurusRequest$RoleArn": "An IAM role that gives Amazon Kendra permissions to access thesaurus file specified in SourceS3Path
.
Information required to find a specific file in an Amazon S3 bucket.
", "refs": { "AlfrescoConfiguration$SslCertificateS3Path": "The path to the SSL certificate stored in an Amazon S3 bucket. You use this to connect to Alfresco.
", - "CreateFaqRequest$S3Path": "The S3 location of the FAQ input data.
", + "CreateFaqRequest$S3Path": "The path to the FAQ file in S3.
", "CreateQuerySuggestionsBlockListRequest$SourceS3Path": "The S3 path to your block list text file in your S3 bucket.
Each block word or phrase should be on a separate line in a text file.
For information on the current quota limits for block lists, see Quotas for Amazon Kendra.
", - "CreateThesaurusRequest$SourceS3Path": "The thesaurus file Amazon S3 source path.
", + "CreateThesaurusRequest$SourceS3Path": "The path to the thesaurus file in S3.
", "DescribeFaqResponse$S3Path": null, "DescribeQuerySuggestionsBlockListResponse$SourceS3Path": "Shows the current S3 path to your block list text file in your S3 bucket.
Each block word or phrase should be on a separate line in a text file.
For information on the current quota limits for block lists, see Quotas for Amazon Kendra.
", "DescribeThesaurusResponse$SourceS3Path": null, @@ -2839,9 +2937,9 @@ "ScanSchedule": { "base": null, "refs": { - "CreateDataSourceRequest$Schedule": "Sets the frequency for Amazon Kendra to check the documents in your repository and update the index. If you don't set a schedule Amazon Kendra will not periodically update the index. You can call the StartDataSourceSyncJob
API to update the index.
You can't specify the Schedule
parameter when the Type
parameter is set to CUSTOM
. If you do, you receive a ValidationException
exception.
Sets the frequency for Amazon Kendra to check the documents in your data source repository and update the index. If you don't set a schedule Amazon Kendra will not periodically update the index. You can call the StartDataSourceSyncJob
API to update the index.
You can't specify the Schedule
parameter when the Type
parameter is set to CUSTOM
. If you do, you receive a ValidationException
exception.
The schedule for Amazon Kendra to update the index.
", - "UpdateDataSourceRequest$Schedule": "The new update schedule for the data source.
" + "UpdateDataSourceRequest$Schedule": "The sync schedule you want to update for the data source connector.
" } }, "ScoreAttributes": { @@ -2868,7 +2966,7 @@ "AlfrescoConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs required to connect to your Alfresco data source. The secret must contain a JSON structure with the following keys:
username—The user name of the Alfresco account.
password—The password of the Alfresco account.
Your secret ARN, which you can create in Secrets Manager
You use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password.
", "BoxConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs required to connect to your Box platform. The secret must contain a JSON structure with the following keys:
clientID—The identifier of the client OAuth 2.0 authentication application created in Box.
clientSecret—A set of characters known only to the OAuth 2.0 authentication application created in Box.
publicKeyId—The identifier of the public key contained within an identity certificate.
privateKey—A set of characters that make up an encryption key.
passphrase—A set of characters that act like a password.
You create an application in Box to generate the keys or credentials required for the secret. For more information, see Authentication for a Box data source.
", - "ConfluenceConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the user name and password required to connect to the Confluence instance. If you use Confluence cloud, you use a generated API token as the password. For more information, see Using a Confluemce data source.
", + "ConfluenceConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the user name and password required to connect to the Confluence instance. If you use Confluence cloud, you use a generated API token as the password. For more information, see Using a Confluence data source.
", "ConnectionConfiguration$SecretArn": "The Amazon Resource Name (ARN) of credentials stored in Secrets Manager. The credentials should be a user/password pair. For more information, see Using a Database Data Source. For more information about Secrets Manager, see What Is Secrets Manager in the Secrets Manager user guide.
", "FsxConfiguration$SecretArn": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs required to connect to your Amazon FSx file system. Windows is currently the only supported type. The secret must contain a JSON structure with the following keys:
username—The Active Directory user name, along with the Domain Name System (DNS) domain name. For example, user@corp.example.com. The Active Directory user account must have read and mounting access to the Amazon FSx file system for Windows.
password—The password of the Active Directory user account with read and mounting access to the Amazon FSx Windows file system.
The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs required to connect to your GitHub. The secret must contain a JSON structure with the following keys:
githubToken—The access token created in GitHub. For more information on creating a token in GitHub, see Authentication for a GitHub data source.
The type of authentication used to connect to the ServiceNow instance. If you choose HTTP_BASIC
, Amazon Kendra is authenticated using the user name and password provided in the Secrets Manager secret in the SecretArn
field. When you choose OAUTH2
, Amazon Kendra is authenticated using the OAuth token and secret provided in the Secrets Manager secret, and the user name and password are used to determine which information Amazon Kendra has access to.
When you use OAUTH2
authentication, you must generate a token and a client secret using the ServiceNow console. For more information, see Using a ServiceNow data source.
The type of authentication used to connect to the ServiceNow instance. If you choose HTTP_BASIC
, Amazon Kendra is authenticated using the user name and password provided in the Secrets Manager secret in the SecretArn
field. If you choose OAUTH2
, Amazon Kendra is authenticated using the credentials of client ID, client secret, user name and password.
When you use OAUTH2
authentication, you must generate a token and a client secret using the ServiceNow console. For more information, see Using a ServiceNow data source.
The user name attribute field.
", "JsonTokenTypeConfiguration$GroupAttributeField": "The group attribute field.
", + "ListAccessControlConfigurationsRequest$NextToken": "If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of access control configurations.
", + "ListAccessControlConfigurationsResponse$NextToken": "If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of access control configurations.
", "PrivateChannelFilter$member": null, "Project$member": null, "PublicChannelFilter$member": null, @@ -3250,7 +3350,7 @@ "TagList": { "base": null, "refs": { - "CreateDataSourceRequest$Tags": "A list of key-value pairs that identify the data source. You can use the tags to identify and organize your resources and to control access to resources.
", + "CreateDataSourceRequest$Tags": "A list of key-value pairs that identify the data source connector. You can use the tags to identify and organize your resources and to control access to resources.
", "CreateFaqRequest$Tags": "A list of key-value pairs that identify the FAQ. You can use the tags to identify and organize your resources and to control access to resources.
", "CreateIndexRequest$Tags": "A list of key-value pairs that identify the index. You can use the tags to identify and organize your resources and to control access to resources.
", "CreateQuerySuggestionsBlockListRequest$Tags": "A tag that you can assign to a block list that categorizes the block list.
", @@ -3305,20 +3405,20 @@ "base": null, "refs": { "CreateThesaurusResponse$Id": "The unique identifier of the thesaurus.
", - "DeleteThesaurusRequest$Id": "The identifier of the thesaurus to delete.
", - "DescribeThesaurusRequest$Id": "The identifier of the thesaurus to describe.
", + "DeleteThesaurusRequest$Id": "The identifier of the thesaurus you want to delete.
", + "DescribeThesaurusRequest$Id": "The identifier of the thesaurus you want to get information on.
", "DescribeThesaurusResponse$Id": "The identifier of the thesaurus.
", "ThesaurusSummary$Id": "The identifier of the thesaurus.
", - "UpdateThesaurusRequest$Id": "The identifier of the thesaurus to update.
" + "UpdateThesaurusRequest$Id": "The identifier of the thesaurus you want to update.
" } }, "ThesaurusName": { "base": null, "refs": { - "CreateThesaurusRequest$Name": "The name for the new thesaurus.
", + "CreateThesaurusRequest$Name": "A name for the thesaurus.
", "DescribeThesaurusResponse$Name": "The thesaurus name.
", "ThesaurusSummary$Name": "The name of the thesaurus.
", - "UpdateThesaurusRequest$Name": "The updated name of the thesaurus.
" + "UpdateThesaurusRequest$Name": "A new name for the thesaurus.
" } }, "ThesaurusStatus": { @@ -3368,10 +3468,10 @@ "DescribeFaqResponse$UpdatedAt": "The date and time that the FAQ was last updated.
", "DescribeIndexResponse$CreatedAt": "The Unix datetime that the index was created.
", "DescribeIndexResponse$UpdatedAt": "The Unix datetime that the index was last updated.
", - "DescribeQuerySuggestionsBlockListResponse$CreatedAt": "Shows the date-time a block list for query suggestions was created.
", - "DescribeQuerySuggestionsBlockListResponse$UpdatedAt": "Shows the date-time a block list for query suggestions was last updated.
", - "DescribeQuerySuggestionsConfigResponse$LastSuggestionsBuildTime": "Shows the date-time query suggestions for an index was last updated.
", - "DescribeQuerySuggestionsConfigResponse$LastClearTime": "Shows the date-time query suggestions for an index was last cleared.
After you clear suggestions, Amazon Kendra learns new suggestions based on new queries added to the query log from the time you cleared suggestions. Amazon Kendra only considers re-occurences of a query from the time you cleared suggestions.
", + "DescribeQuerySuggestionsBlockListResponse$CreatedAt": "The date-time a block list for query suggestions was created.
", + "DescribeQuerySuggestionsBlockListResponse$UpdatedAt": "The date-time a block list for query suggestions was last updated.
", + "DescribeQuerySuggestionsConfigResponse$LastSuggestionsBuildTime": "The date-time query suggestions for an index was last updated.
", + "DescribeQuerySuggestionsConfigResponse$LastClearTime": "The date-time query suggestions for an index was last cleared.
After you clear suggestions, Amazon Kendra learns new suggestions based on new queries added to the query log from the time you cleared suggestions. Amazon Kendra only considers re-occurrences of a query from the time you cleared suggestions.
", "DescribeThesaurusResponse$CreatedAt": "The Unix datetime that the thesaurus was created.
", "DescribeThesaurusResponse$UpdatedAt": "The Unix datetime that the thesaurus was last updated.
", "DocumentAttributeValue$DateValue": "A date expressed as an ISO 8601 string.
It is important for the time zone to be included in the ISO 8601 date-time format. For example, 2012-03-25T12:30:10+01:00 is the ISO 8601 date-time format for March 25th 2012 at 12:30PM (plus 10 seconds) in Central European Time.
", @@ -3426,6 +3526,16 @@ "refs": { } }, + "UpdateAccessControlConfigurationRequest": { + "base": null, + "refs": { + } + }, + "UpdateAccessControlConfigurationResponse": { + "base": null, + "refs": { + } + }, "UpdateDataSourceRequest": { "base": null, "refs": { @@ -3481,7 +3591,7 @@ } }, "UserContext": { - "base": "Provides information about the user context for an Amazon Kendra index.
This is used for filtering search results for different users based on their access to documents.
You provide one of the following:
User token
User ID, the groups the user belongs to, and any data sources the groups can access.
If you provide both, an exception is thrown.
", + "base": "Provides information about the user context for an Amazon Kendra index.
User context filtering is a kind of personalized search with the benefit of controlling access to documents. For example, not all teams that search the company portal for information should access top-secret company documents, nor are these documents relevant to all users. Only specific users or groups of teams given access to top-secret documents should see these documents in their search results.
You provide one of the following:
User token
User ID, the groups the user belongs to, and any data sources the groups can access.
If you provide both, an exception is thrown.
", "refs": { "QueryRequest$UserContext": "The user context token or user and group information.
" } @@ -3495,10 +3605,10 @@ } }, "UserGroupResolutionConfiguration": { - "base": "Provides the configuration information to fetch access levels of groups and users from an Amazon Web Services Single Sign On identity source. This is useful for setting up user context filtering, where Amazon Kendra filters search results for different users based on their group's access to documents. You can also map your users to their groups for user context filtering using the PutPrincipalMapping API.
To set up an Amazon Web Services SSO identity source in the console to use with Amazon Kendra, see Getting started with an Amazon Web Services SSO identity source. You must also grant the required permissions to use Amazon Web Services SSO with Amazon Kendra. For more information, see IAM roles for Amazon Web Services SSO.
Amazon Kendra currently does not support using UserGroupResolutionConfiguration
with an Amazon Web Services organization member account for your Amazon Web Services SSO identify source. You must create your index in the management account for the organization in order to use UserGroupResolutionConfiguration
.
Provides the configuration information to fetch access levels of groups and users from an Amazon Web Services Single Sign On identity source. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents. You can also use the PutPrincipalMapping API to map users to their groups so that you only need to provide the user ID when you issue the query.
To set up an Amazon Web Services SSO identity source in the console to use with Amazon Kendra, see Getting started with an Amazon Web Services SSO identity source. You must also grant the required permissions to use Amazon Web Services SSO with Amazon Kendra. For more information, see IAM roles for Amazon Web Services SSO.
Amazon Kendra currently does not support using UserGroupResolutionConfiguration
with an Amazon Web Services organization member account for your Amazon Web Services SSO identity source. You must create your index in the management account for the organization in order to use UserGroupResolutionConfiguration
.
Enables fetching access levels of groups and users from an Amazon Web Services Single Sign On identity source. To configure this, see UserGroupResolutionConfiguration.
", - "DescribeIndexResponse$UserGroupResolutionConfiguration": "Shows whether you have enabled the configuration for fetching access levels of groups and users from an Amazon Web Services Single Sign On identity source.
", + "DescribeIndexResponse$UserGroupResolutionConfiguration": "Whether you have enabled the configuration for fetching access levels of groups and users from an Amazon Web Services Single Sign On identity source.
", "UpdateIndexRequest$UserGroupResolutionConfiguration": "Enables fetching access levels of groups and users from an Amazon Web Services Single Sign On identity source. To configure this, see UserGroupResolutionConfiguration.
" } }, diff --git a/models/apis/kendra/2019-02-03/paginators-1.json b/models/apis/kendra/2019-02-03/paginators-1.json index c5e5cfcf10d..c9f25ff9d5d 100644 --- a/models/apis/kendra/2019-02-03/paginators-1.json +++ b/models/apis/kendra/2019-02-03/paginators-1.json @@ -5,6 +5,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListAccessControlConfigurations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListDataSourceSyncJobs": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/nimble/2020-08-01/api-2.json b/models/apis/nimble/2020-08-01/api-2.json index 012333718d7..6795ffcd13f 100644 --- a/models/apis/nimble/2020-08-01/api-2.json +++ b/models/apis/nimble/2020-08-01/api-2.json @@ -1009,7 +1009,7 @@ "type":"structure", "members":{ "activeDirectoryUser":{"shape":"String"}, - "endpoint":{"shape":"SyntheticComputeFarmConfigurationString"} + "endpoint":{"shape":"SensitiveString"} } }, "ConflictException":{ @@ -1076,9 +1076,9 @@ "location":"header", "locationName":"X-Amz-Client-Token" }, - "description":{"shape":"SyntheticCreateStreamingImageRequestStreamingImageDescription"}, + "description":{"shape":"StreamingImageDescription"}, "ec2ImageId":{"shape":"EC2ImageId"}, - "name":{"shape":"SyntheticCreateStreamingImageRequestStreamingImageName"}, + "name":{"shape":"StreamingImageName"}, "studioId":{ "shape":"String", "location":"uri", @@ -1172,7 +1172,9 @@ "ec2SecurityGroupIds":{"shape":"StudioComponentSecurityGroupIdList"}, "initializationScripts":{"shape":"StudioComponentInitializationScriptList"}, "name":{"shape":"StudioComponentName"}, + "runtimeRoleArn":{"shape":"RoleArn"}, "scriptParameters":{"shape":"StudioComponentScriptParameterKeyValueList"}, + "secureInitializationRoleArn":{"shape":"RoleArn"}, "studioId":{ "shape":"String", "location":"uri", @@ -1198,18 +1200,18 @@ "userRoleArn" ], "members":{ - "adminRoleArn":{"shape":"String"}, + 
"adminRoleArn":{"shape":"RoleArn"}, "clientToken":{ "shape":"ClientToken", "idempotencyToken":true, "location":"header", "locationName":"X-Amz-Client-Token" }, - "displayName":{"shape":"SyntheticCreateStudioRequestStudioDisplayName"}, + "displayName":{"shape":"StudioDisplayName"}, "studioEncryptionConfiguration":{"shape":"StudioEncryptionConfiguration"}, "studioName":{"shape":"StudioName"}, "tags":{"shape":"Tags"}, - "userRoleArn":{"shape":"String"} + "userRoleArn":{"shape":"RoleArn"} } }, "CreateStudioResponse":{ @@ -1865,7 +1867,9 @@ "LaunchProfileInitializationScript":{ "type":"structure", "members":{ + "runtimeRoleArn":{"shape":"RoleArn"}, "script":{"shape":"StudioComponentInitializationScriptContent"}, + "secureInitializationRoleArn":{"shape":"RoleArn"}, "studioComponentId":{"shape":"StudioComponentId"}, "studioComponentName":{"shape":"StudioComponentName"} } @@ -2009,7 +2013,7 @@ "LicenseServiceConfiguration":{ "type":"structure", "members":{ - "endpoint":{"shape":"SyntheticLicenseServiceConfigurationString"} + "endpoint":{"shape":"SensitiveString"} } }, "LinuxMountPoint":{ @@ -2430,6 +2434,11 @@ }, "exception":true }, + "RoleArn":{ + "type":"string", + "max":2048, + "min":0 + }, "ScriptParameterKey":{ "type":"string", "max":64, @@ -2449,6 +2458,10 @@ "min":1 }, "SecurityGroupId":{"type":"string"}, + "SensitiveString":{ + "type":"string", + "sensitive":true + }, "ServiceQuotaExceededException":{ "type":"structure", "members":{ @@ -2465,10 +2478,10 @@ "SharedFileSystemConfiguration":{ "type":"structure", "members":{ - "endpoint":{"shape":"SyntheticSharedFileSystemConfigurationString"}, + "endpoint":{"shape":"SensitiveString"}, "fileSystemId":{"shape":"String"}, "linuxMountPoint":{"shape":"LinuxMountPoint"}, - "shareName":{"shape":"SyntheticSharedFileSystemConfigurationString"}, + "shareName":{"shape":"SensitiveString"}, "windowsMountDrive":{"shape":"WindowsMountDrive"} } }, @@ -2619,11 +2632,11 @@ "type":"structure", "members":{ "arn":{"shape":"String"}, - 
"description":{"shape":"SyntheticStreamingImageStreamingImageDescription"}, + "description":{"shape":"StreamingImageDescription"}, "ec2ImageId":{"shape":"EC2ImageId"}, "encryptionConfiguration":{"shape":"StreamingImageEncryptionConfiguration"}, "eulaIds":{"shape":"EulaIdList"}, - "name":{"shape":"SyntheticStreamingImageStreamingImageName"}, + "name":{"shape":"StreamingImageName"}, "owner":{"shape":"StreamingImageOwner"}, "platform":{"shape":"StreamingImagePlatform"}, "state":{"shape":"StreamingImageState"}, @@ -2633,6 +2646,12 @@ "tags":{"shape":"Tags"} } }, + "StreamingImageDescription":{ + "type":"string", + "max":256, + "min":0, + "sensitive":true + }, "StreamingImageEncryptionConfiguration":{ "type":"structure", "required":["keyType"], @@ -2666,6 +2685,12 @@ "type":"list", "member":{"shape":"StreamingImage"} }, + "StreamingImageName":{ + "type":"string", + "max":64, + "min":0, + "sensitive":true + }, "StreamingImageOwner":{"type":"string"}, "StreamingImagePlatform":{ "type":"string", @@ -2820,7 +2845,7 @@ "state":{"shape":"StreamingSessionStreamState"}, "statusCode":{"shape":"StreamingSessionStreamStatusCode"}, "streamId":{"shape":"String"}, - "url":{"shape":"SyntheticStreamingSessionStreamString"} + "url":{"shape":"SensitiveString"} } }, "StreamingSessionStreamExpirationInSeconds":{ @@ -2858,10 +2883,10 @@ "Studio":{ "type":"structure", "members":{ - "adminRoleArn":{"shape":"String"}, + "adminRoleArn":{"shape":"RoleArn"}, "arn":{"shape":"String"}, "createdAt":{"shape":"Timestamp"}, - "displayName":{"shape":"SyntheticStudioStudioDisplayName"}, + "displayName":{"shape":"StudioDisplayName"}, "homeRegion":{"shape":"Region"}, "ssoClientId":{"shape":"String"}, "state":{"shape":"StudioState"}, @@ -2873,7 +2898,7 @@ "studioUrl":{"shape":"String"}, "tags":{"shape":"Tags"}, "updatedAt":{"shape":"Timestamp"}, - "userRoleArn":{"shape":"String"} + "userRoleArn":{"shape":"RoleArn"} } }, "StudioComponent":{ @@ -2887,7 +2912,9 @@ 
"ec2SecurityGroupIds":{"shape":"StudioComponentSecurityGroupIdList"}, "initializationScripts":{"shape":"StudioComponentInitializationScriptList"}, "name":{"shape":"StudioComponentName"}, + "runtimeRoleArn":{"shape":"RoleArn"}, "scriptParameters":{"shape":"StudioComponentScriptParameterKeyValueList"}, + "secureInitializationRoleArn":{"shape":"RoleArn"}, "state":{"shape":"StudioComponentState"}, "statusCode":{"shape":"StudioComponentStatusCode"}, "statusMessage":{"shape":"String"}, @@ -3045,6 +3072,12 @@ "type":"list", "member":{"shape":"StudioComponentType"} }, + "StudioDisplayName":{ + "type":"string", + "max":64, + "min":0, + "sensitive":true + }, "StudioEncryptionConfiguration":{ "type":"structure", "required":["keyType"], @@ -3130,76 +3163,6 @@ "AWS_SSO_CONFIGURATION_REPAIR_IN_PROGRESS" ] }, - "SyntheticComputeFarmConfigurationString":{ - "type":"string", - "sensitive":true - }, - "SyntheticCreateStreamingImageRequestStreamingImageDescription":{ - "type":"string", - "max":256, - "min":0, - "sensitive":true - }, - "SyntheticCreateStreamingImageRequestStreamingImageName":{ - "type":"string", - "max":64, - "min":0, - "sensitive":true - }, - "SyntheticCreateStudioRequestStudioDisplayName":{ - "type":"string", - "max":64, - "min":0, - "sensitive":true - }, - "SyntheticLicenseServiceConfigurationString":{ - "type":"string", - "sensitive":true - }, - "SyntheticSharedFileSystemConfigurationString":{ - "type":"string", - "sensitive":true - }, - "SyntheticStreamingImageStreamingImageDescription":{ - "type":"string", - "max":256, - "min":0, - "sensitive":true - }, - "SyntheticStreamingImageStreamingImageName":{ - "type":"string", - "max":64, - "min":0, - "sensitive":true - }, - "SyntheticStreamingSessionStreamString":{ - "type":"string", - "sensitive":true - }, - "SyntheticStudioStudioDisplayName":{ - "type":"string", - "max":64, - "min":0, - "sensitive":true - }, - "SyntheticUpdateStreamingImageRequestStreamingImageDescription":{ - "type":"string", - "max":256, - "min":0, 
- "sensitive":true - }, - "SyntheticUpdateStreamingImageRequestStreamingImageName":{ - "type":"string", - "max":64, - "min":0, - "sensitive":true - }, - "SyntheticUpdateStudioRequestStudioDisplayName":{ - "type":"string", - "max":64, - "min":0, - "sensitive":true - }, "TagResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -3352,8 +3315,8 @@ "location":"header", "locationName":"X-Amz-Client-Token" }, - "description":{"shape":"SyntheticUpdateStreamingImageRequestStreamingImageDescription"}, - "name":{"shape":"SyntheticUpdateStreamingImageRequestStreamingImageName"}, + "description":{"shape":"StreamingImageDescription"}, + "name":{"shape":"StreamingImageName"}, "streamingImageId":{ "shape":"String", "location":"uri", @@ -3390,7 +3353,9 @@ "ec2SecurityGroupIds":{"shape":"StudioComponentSecurityGroupIdList"}, "initializationScripts":{"shape":"StudioComponentInitializationScriptList"}, "name":{"shape":"StudioComponentName"}, + "runtimeRoleArn":{"shape":"RoleArn"}, "scriptParameters":{"shape":"StudioComponentScriptParameterKeyValueList"}, + "secureInitializationRoleArn":{"shape":"RoleArn"}, "studioComponentId":{ "shape":"String", "location":"uri", @@ -3415,20 +3380,20 @@ "type":"structure", "required":["studioId"], "members":{ - "adminRoleArn":{"shape":"String"}, + "adminRoleArn":{"shape":"RoleArn"}, "clientToken":{ "shape":"ClientToken", "idempotencyToken":true, "location":"header", "locationName":"X-Amz-Client-Token" }, - "displayName":{"shape":"SyntheticUpdateStudioRequestStudioDisplayName"}, + "displayName":{"shape":"StudioDisplayName"}, "studioId":{ "shape":"String", "location":"uri", "locationName":"studioId" }, - "userRoleArn":{"shape":"String"} + "userRoleArn":{"shape":"RoleArn"} } }, "UpdateStudioResponse":{ diff --git a/models/apis/nimble/2020-08-01/docs-2.json b/models/apis/nimble/2020-08-01/docs-2.json index cda382686a5..5d3487062dc 100644 --- a/models/apis/nimble/2020-08-01/docs-2.json +++ b/models/apis/nimble/2020-08-01/docs-2.json @@ 
-32,7 +32,7 @@ "ListLaunchProfileMembers": "Get all users in a given launch profile membership.
", "ListLaunchProfiles": "List all the launch profiles a studio.
", "ListStreamingImages": "List the streaming image resources available to this studio.
This list will contain both images provided by Amazon Web Services, as well as streaming images that you have created in your studio.
", - "ListStreamingSessions": "Lists the streaming image resources in a studio.
", + "ListStreamingSessions": "Lists the streaming sessions in a studio.
", "ListStudioComponents": "Lists the StudioComponents in a studio.
", "ListStudioMembers": "Get all users in a given studio membership.
ListStudioMembers
only returns admin members.
List studios in your Amazon Web Services account in the requested Amazon Web Services Region.
", @@ -842,6 +842,25 @@ "refs": { } }, + "RoleArn": { + "base": null, + "refs": { + "CreateStudioComponentRequest$runtimeRoleArn": "An IAM role attached to a Studio Component that gives the studio component access to AWS resources at anytime while the instance is running.
", + "CreateStudioComponentRequest$secureInitializationRoleArn": "An IAM role attached to Studio Component when the system initialization script runs which give the studio component access to AWS resources when the system initialization script runs.
", + "CreateStudioRequest$adminRoleArn": "The IAM role that Studio Admins will assume when logging in to the Nimble Studio portal.
", + "CreateStudioRequest$userRoleArn": "The IAM role that Studio Users will assume when logging in to the Nimble Studio portal.
", + "LaunchProfileInitializationScript$runtimeRoleArn": "An IAM role attached to a Studio Component that gives the studio component access to AWS resources at anytime while the instance is running.
", + "LaunchProfileInitializationScript$secureInitializationRoleArn": "An IAM role attached to Studio Component when the system initialization script runs which give the studio component access to AWS resources when the system initialization script runs.
", + "Studio$adminRoleArn": "The IAM role that studio admins assume when logging in to the Nimble Studio portal.
", + "Studio$userRoleArn": "The IAM role that studio users assume when logging in to the Nimble Studio portal.
", + "StudioComponent$runtimeRoleArn": "An IAM role attached to a Studio Component that gives the studio component access to AWS resources at anytime while the instance is running.
", + "StudioComponent$secureInitializationRoleArn": "An IAM role attached to Studio Component when the system initialization script runs which give the studio component access to AWS resources when the system initialization script runs.
", + "UpdateStudioComponentRequest$runtimeRoleArn": "An IAM role attached to a Studio Component that gives the studio component access to AWS resources at anytime while the instance is running.
", + "UpdateStudioComponentRequest$secureInitializationRoleArn": "An IAM role attached to Studio Component when the system initialization script runs which give the studio component access to AWS resources when the system initialization script runs.
", + "UpdateStudioRequest$adminRoleArn": "The IAM role that Studio Admins will assume when logging in to the Nimble Studio portal.
", + "UpdateStudioRequest$userRoleArn": "The IAM role that Studio Users will assume when logging in to the Nimble Studio portal.
" + } + }, "ScriptParameterKey": { "base": "A script parameter key.
", "refs": { @@ -867,6 +886,16 @@ "StudioComponentSecurityGroupIdList$member": null } }, + "SensitiveString": { + "base": null, + "refs": { + "ComputeFarmConfiguration$endpoint": "The endpoint of the ComputeFarm that is accessed by the studio component resource.
", + "LicenseServiceConfiguration$endpoint": "The endpoint of the license service that is accessed by the studio component resource.
", + "SharedFileSystemConfiguration$endpoint": "The endpoint of the shared file system that is accessed by the studio component resource.
", + "SharedFileSystemConfiguration$shareName": "The name of the file share.
", + "StreamingSessionStream$url": "The URL to connect to this stream using the DCV client.
" + } + }, "ServiceQuotaExceededException": { "base": "Your current quota does not allow you to perform the request action. You can request increases for some quotas, and other quotas cannot be increased.
Please use AWS Service Quotas to request an increase.
", "refs": { @@ -931,8 +960,8 @@ "StreamConfigurationMaxStoppedSessionLengthInMinutes": { "base": null, "refs": { - "StreamConfiguration$maxStoppedSessionLengthInMinutes": "Integer that determines if you can start and stop your sessions and how long a session can stay in the STOPPED state. The default value is 0. The maximum value is 5760.
If the value is missing or set to 0, your sessions can’t be stopped. If you then call StopStreamingSession
, the session fails. If the time that a session stays in the READY state exceeds the maxSessionLengthInMinutes
value, the session will automatically be terminated by AWS (instead of stopped).
If the value is set to a positive number, the session can be stopped. You can call StopStreamingSession
to stop sessions in the READY state. If the time that a session stays in the READY state exceeds the maxSessionLengthInMinutes
value, the session will automatically be stopped by AWS (instead of terminated).
Integer that determines if you can start and stop your sessions and how long a session can stay in the STOPPED state. The default value is 0. The maximum value is 5760.
If the value is missing or set to 0, your sessions can’t be stopped. If you then call StopStreamingSession
, the session fails. If the time that a session stays in the READY state exceeds the maxSessionLengthInMinutes
value, the session will automatically be terminated by AWS (instead of stopped).
If the value is set to a positive number, the session can be stopped. You can call StopStreamingSession
to stop sessions in the READY state. If the time that a session stays in the READY state exceeds the maxSessionLengthInMinutes
value, the session will automatically be stopped by AWS (instead of terminated).
Integer that determines if you can start and stop your sessions and how long a session can stay in the STOPPED state. The default value is 0. The maximum value is 5760.
If the value is missing or set to 0, your sessions can’t be stopped. If you then call StopStreamingSession
, the session fails. If the time that a session stays in the READY state exceeds the maxSessionLengthInMinutes
value, the session will automatically be terminated (instead of stopped).
If the value is set to a positive number, the session can be stopped. You can call StopStreamingSession
to stop sessions in the READY state. If the time that a session stays in the READY state exceeds the maxSessionLengthInMinutes
value, the session will automatically be stopped (instead of terminated).
Integer that determines if you can start and stop your sessions and how long a session can stay in the STOPPED state. The default value is 0. The maximum value is 5760.
If the value is missing or set to 0, your sessions can’t be stopped. If you then call StopStreamingSession
, the session fails. If the time that a session stays in the READY state exceeds the maxSessionLengthInMinutes
value, the session will automatically be terminated (instead of stopped).
If the value is set to a positive number, the session can be stopped. You can call StopStreamingSession
to stop sessions in the READY state. If the time that a session stays in the READY state exceeds the maxSessionLengthInMinutes
value, the session will automatically be stopped (instead of terminated).
The description.
", + "refs": { + "CreateStreamingImageRequest$description": "A human-readable description of the streaming image.
", + "StreamingImage$description": "A human-readable description of the streaming image.
", + "UpdateStreamingImageRequest$description": "The description.
" + } + }, "StreamingImageEncryptionConfiguration": { "base": "Specifies how a streaming image is encrypted.
", "refs": { @@ -1000,6 +1037,14 @@ "ListStreamingImagesResponse$streamingImages": "A collection of streaming images.
" } }, + "StreamingImageName": { + "base": "A friendly name for a streaming image resource.
", + "refs": { + "CreateStreamingImageRequest$name": "A friendly name for a streaming image resource.
", + "StreamingImage$name": "A friendly name for a streaming image resource.
", + "UpdateStreamingImageRequest$name": "The name for the streaming image.
" + } + }, "StreamingImageOwner": { "base": "StreamingImageOwner is the owner of a particular streaming image.
This string is either the studioId that contains the streaming image, or the word 'AMAZON' for images provided by Nimble Studio.
", "refs": { @@ -1145,8 +1190,6 @@ "CreateStreamingSessionStreamRequest$sessionId": "The streaming session ID.
", "CreateStreamingSessionStreamRequest$studioId": "The studio ID.
", "CreateStudioComponentRequest$studioId": "The studio ID.
", - "CreateStudioRequest$adminRoleArn": "The IAM role that Studio Admins will assume when logging in to the Nimble Studio portal.
", - "CreateStudioRequest$userRoleArn": "The IAM role that Studio Users will assume when logging in to the Nimble Studio portal.
", "DeleteLaunchProfileMemberRequest$launchProfileId": "The Launch Profile ID.
", "DeleteLaunchProfileMemberRequest$principalId": "The principal ID. This currently supports a Amazon Web Services SSO UserId.
", "DeleteLaunchProfileMemberRequest$studioId": "The studio ID.
", @@ -1266,13 +1309,11 @@ "StreamingSessionStream$ownedBy": "The user ID of the user that owns the streaming session. The user that owns the session will be logging into the session and interacting with the virtual workstation.
", "StreamingSessionStream$streamId": "The stream ID.
", "StringList$member": null, - "Studio$adminRoleArn": "The IAM role that studio admins assume when logging in to the Nimble Studio portal.
", "Studio$arn": "The Amazon Resource Name (ARN) that is assigned to a studio resource and uniquely identifies it. ARNs are unique across all Regions.
", "Studio$ssoClientId": "The Amazon Web Services SSO application client ID used to integrate with Amazon Web Services SSO to enable Amazon Web Services SSO users to log in to Nimble Studio portal.
", "Studio$statusMessage": "Additional detail on the studio state.
", "Studio$studioId": "The unique identifier for a studio resource. In Nimble Studio, all other resources are contained in a studio resource.
", "Studio$studioUrl": "The address of the web page for the studio.
", - "Studio$userRoleArn": "The IAM role that studio users assume when logging in to the Nimble Studio portal.
", "StudioComponent$arn": "The ARN of the resource.
", "StudioComponent$createdBy": "The user ID of the user that created the studio component.
", "StudioComponent$statusMessage": "The status message for the studio component.
", @@ -1297,9 +1338,7 @@ "UpdateStreamingImageRequest$studioId": "The studio ID.
", "UpdateStudioComponentRequest$studioComponentId": "The studio component ID.
", "UpdateStudioComponentRequest$studioId": "The studio ID.
", - "UpdateStudioRequest$adminRoleArn": "The IAM role that Studio Admins will assume when logging in to the Nimble Studio portal.
", "UpdateStudioRequest$studioId": "The studio ID.
", - "UpdateStudioRequest$userRoleArn": "The IAM role that Studio Users will assume when logging in to the Nimble Studio portal.
", "ValidationException$code": "A more specific error code.
", "ValidationException$message": "A human-readable description of the error.
" } @@ -1476,6 +1515,14 @@ "ListStudioComponentsRequest$types": "Filters the request to studio components that are of one of the given types.
" } }, + "StudioDisplayName": { + "base": null, + "refs": { + "CreateStudioRequest$displayName": "A friendly name for the studio.
", + "Studio$displayName": "A friendly name for the studio.
", + "UpdateStudioRequest$displayName": "A friendly name for the studio.
" + } + }, "StudioEncryptionConfiguration": { "base": "Configuration of the encryption method that is used for the studio.
", "refs": { @@ -1540,85 +1587,6 @@ "Studio$statusCode": "Status codes that provide additional detail on the studio state.
" } }, - "SyntheticComputeFarmConfigurationString": { - "base": null, - "refs": { - "ComputeFarmConfiguration$endpoint": "The endpoint of the ComputeFarm that is accessed by the studio component resource.
" - } - }, - "SyntheticCreateStreamingImageRequestStreamingImageDescription": { - "base": "The description.
", - "refs": { - "CreateStreamingImageRequest$description": "A human-readable description of the streaming image.
" - } - }, - "SyntheticCreateStreamingImageRequestStreamingImageName": { - "base": "A friendly name for a streaming image resource.
", - "refs": { - "CreateStreamingImageRequest$name": "A friendly name for a streaming image resource.
" - } - }, - "SyntheticCreateStudioRequestStudioDisplayName": { - "base": null, - "refs": { - "CreateStudioRequest$displayName": "A friendly name for the studio.
" - } - }, - "SyntheticLicenseServiceConfigurationString": { - "base": null, - "refs": { - "LicenseServiceConfiguration$endpoint": "The endpoint of the license service that is accessed by the studio component resource.
" - } - }, - "SyntheticSharedFileSystemConfigurationString": { - "base": null, - "refs": { - "SharedFileSystemConfiguration$endpoint": "The endpoint of the shared file system that is accessed by the studio component resource.
", - "SharedFileSystemConfiguration$shareName": "The name of the file share.
" - } - }, - "SyntheticStreamingImageStreamingImageDescription": { - "base": "The description.
", - "refs": { - "StreamingImage$description": "A human-readable description of the streaming image.
" - } - }, - "SyntheticStreamingImageStreamingImageName": { - "base": "A friendly name for a streaming image resource.
", - "refs": { - "StreamingImage$name": "A friendly name for a streaming image resource.
" - } - }, - "SyntheticStreamingSessionStreamString": { - "base": null, - "refs": { - "StreamingSessionStream$url": "The URL to connect to this stream using the DCV client.
" - } - }, - "SyntheticStudioStudioDisplayName": { - "base": null, - "refs": { - "Studio$displayName": "A friendly name for the studio.
" - } - }, - "SyntheticUpdateStreamingImageRequestStreamingImageDescription": { - "base": "The description.
", - "refs": { - "UpdateStreamingImageRequest$description": "The description.
" - } - }, - "SyntheticUpdateStreamingImageRequestStreamingImageName": { - "base": "A friendly name for a streaming image resource.
", - "refs": { - "UpdateStreamingImageRequest$name": "The name for the streaming image.
" - } - }, - "SyntheticUpdateStudioRequestStudioDisplayName": { - "base": null, - "refs": { - "UpdateStudioRequest$displayName": "A friendly name for the studio.
" - } - }, "TagResourceRequest": { "base": null, "refs": { diff --git a/models/apis/outposts/2019-12-03/api-2.json b/models/apis/outposts/2019-12-03/api-2.json index 585c5b2b6dc..7aec566142e 100644 --- a/models/apis/outposts/2019-12-03/api-2.json +++ b/models/apis/outposts/2019-12-03/api-2.json @@ -637,9 +637,9 @@ }, "ConnectionId":{ "type":"string", - "max":255, + "max":1024, "min":1, - "pattern":"^([\\w-]+)$" + "pattern":"^[a-zA-Z0-9+/=]{1,1024}$" }, "ContactName":{ "type":"string", @@ -994,9 +994,22 @@ "CatalogItemId":{"shape":"SkuCode"}, "LineItemId":{"shape":"LineItemId"}, "Quantity":{"shape":"LineItemQuantity"}, - "Status":{"shape":"LineItemStatus"} + "Status":{"shape":"LineItemStatus"}, + "ShipmentInformation":{"shape":"ShipmentInformation"}, + "AssetInformationList":{"shape":"LineItemAssetInformationList"} + } + }, + "LineItemAssetInformation":{ + "type":"structure", + "members":{ + "AssetId":{"shape":"AssetId"}, + "MacAddressList":{"shape":"MacAddressList"} } }, + "LineItemAssetInformationList":{ + "type":"list", + "member":{"shape":"LineItemAssetInformation"} + }, "LineItemId":{ "type":"string", "pattern":"ooi-[a-f0-9]{17}" @@ -1229,6 +1242,16 @@ "Tags":{"shape":"TagMap"} } }, + "MacAddress":{ + "type":"string", + "max":17, + "min":17, + "pattern":"^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$" + }, + "MacAddressList":{ + "type":"list", + "member":{"shape":"MacAddress"} + }, "MaxResults1000":{ "type":"integer", "box":true, @@ -1495,6 +1518,22 @@ "error":{"httpStatusCode":402}, "exception":true }, + "ShipmentCarrier":{ + "type":"string", + "enum":[ + "DHL", + "DBS", + "FEDEX", + "UPS" + ] + }, + "ShipmentInformation":{ + "type":"structure", + "members":{ + "ShipmentTrackingNumber":{"shape":"TrackingId"}, + "ShipmentCarrier":{"shape":"ShipmentCarrier"} + } + }, "Site":{ "type":"structure", "members":{ @@ -1658,6 +1697,12 @@ "min":1, "pattern":"^(\\d+)##(\\S+)$" }, + "TrackingId":{ + "type":"string", + "max":42, + "min":6, + "pattern":"^[a-zA-Z0-9]+$" + }, 
"UnderlayIpAddress":{ "type":"string", "max":15, diff --git a/models/apis/outposts/2019-12-03/docs-2.json b/models/apis/outposts/2019-12-03/docs-2.json index 3889e661d90..ca3a01fe654 100644 --- a/models/apis/outposts/2019-12-03/docs-2.json +++ b/models/apis/outposts/2019-12-03/docs-2.json @@ -90,6 +90,7 @@ "base": null, "refs": { "AssetInfo$AssetId": "The ID of the asset.
", + "LineItemAssetInformation$AssetId": "The ID of the asset.
", "StartConnectionRequest$AssetId": "The ID of the Outpost server.
" } }, @@ -516,6 +517,18 @@ "LineItemListDefinition$member": null } }, + "LineItemAssetInformation": { + "base": "Information about a line item asset.
", + "refs": { + "LineItemAssetInformationList$member": null + } + }, + "LineItemAssetInformationList": { + "base": null, + "refs": { + "LineItem$AssetInformationList": "Information about assets.
" + } + }, "LineItemId": { "base": null, "refs": { @@ -621,6 +634,18 @@ "refs": { } }, + "MacAddress": { + "base": null, + "refs": { + "MacAddressList$member": null + } + }, + "MacAddressList": { + "base": null, + "refs": { + "LineItemAssetInformation$MacAddressList": "MAC addresses of the asset.
" + } + }, "MaxResults1000": { "base": "The maximum page size.
", "refs": { @@ -863,6 +888,18 @@ "refs": { } }, + "ShipmentCarrier": { + "base": null, + "refs": { + "ShipmentInformation$ShipmentCarrier": "The carrier of the shipment.
" + } + }, + "ShipmentInformation": { + "base": "Information about a line item shipment.
", + "refs": { + "LineItem$ShipmentInformation": "Information about a line item shipment.
" + } + }, "Site": { "base": "Information about a site.
", "refs": { @@ -1048,6 +1085,12 @@ "ListSitesOutput$NextToken": null } }, + "TrackingId": { + "base": null, + "refs": { + "ShipmentInformation$ShipmentTrackingNumber": "The tracking number of the shipment.
" + } + }, "UnderlayIpAddress": { "base": null, "refs": { diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 1700eb4f8ab..8da11cef60d 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -4098,7 +4098,7 @@ "CategoricalParameterRanges":{ "type":"list", "member":{"shape":"CategoricalParameterRange"}, - "max":20, + "max":30, "min":0 }, "CategoricalParameters":{ @@ -4557,7 +4557,7 @@ "ContinuousParameterRanges":{ "type":"list", "member":{"shape":"ContinuousParameterRange"}, - "max":20, + "max":30, "min":0 }, "CreateActionRequest":{ @@ -9870,7 +9870,7 @@ "IntegerParameterRanges":{ "type":"list", "member":{"shape":"IntegerParameterRange"}, - "max":20, + "max":30, "min":0 }, "IntegerValue":{"type":"integer"}, @@ -13055,7 +13055,7 @@ "ParameterValues":{ "type":"list", "member":{"shape":"ParameterValue"}, - "max":20, + "max":30, "min":1 }, "Parent":{ @@ -13754,7 +13754,25 @@ "ml.inf1.xlarge", "ml.inf1.2xlarge", "ml.inf1.6xlarge", - "ml.inf1.24xlarge" + "ml.inf1.24xlarge", + "ml.c6i.large", + "ml.c6i.xlarge", + "ml.c6i.2xlarge", + "ml.c6i.4xlarge", + "ml.c6i.8xlarge", + "ml.c6i.12xlarge", + "ml.c6i.16xlarge", + "ml.c6i.24xlarge", + "ml.c6i.32xlarge", + "ml.g5.xlarge", + "ml.g5.2xlarge", + "ml.g5.4xlarge", + "ml.g5.8xlarge", + "ml.g5.12xlarge", + "ml.g5.16xlarge", + "ml.g5.24xlarge", + "ml.g5.48xlarge", + "ml.p4d.24xlarge" ] }, "ProductionVariantList":{ diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 3813961311f..3f53a1e781a 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -5324,7 +5324,7 @@ } }, "InstanceGroup": { - "base": "Defines an instance group for heterogeneous cluster training. When requesting a training job using the CreateTrainingJob API, you can configure up to 5 different ML training instance groups.
", + "base": "Defines an instance group for heterogeneous cluster training. When requesting a training job using the CreateTrainingJob API, you can configure multiple instance groups .
", "refs": { "InstanceGroups$member": null } @@ -8194,7 +8194,7 @@ } }, "ParameterRanges": { - "base": "Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job.
You can specify a maximum of 20 hyperparameters that a hyperparameter tuning job can search over. Every possible value of a categorical parameter range counts against this limit.
Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job.
The maximum number of items specified for Array Members
refers to the maximum number of hyperparameters for each range and also the maximum for the hyperparameter tuning job itself. That is, the sum of the number of hyperparameters for all the ranges can't exceed the maximum number specified.
The ParameterRanges object that specifies the ranges of hyperparameters that this tuning job searches.
" diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index e186122a2f2..54b7c6346b3 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -12246,6 +12246,29 @@ "us-west-2" : { } } }, + "sso" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "states" : { "endpoints" : { "af-south-1" : { }, @@ -18425,6 +18448,22 @@ } } }, + "sso" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "sso.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "sso.us-gov-west-1.amazonaws.com" + } + } + }, "states" : { "endpoints" : { "fips-us-gov-east-1" : { diff --git a/service/athena/api.go b/service/athena/api.go index 5a0cb6699da..42c8795eb49 100644 --- a/service/athena/api.go +++ b/service/athena/api.go @@ -3836,6 +3836,7 @@ func (s *AthenaError) SetRetryable(v bool) *AthenaError { return s } +// Contains an array of named query IDs. type BatchGetNamedQueryInput struct { _ struct{} `type:"structure"` @@ -4026,6 +4027,7 @@ func (s *BatchGetPreparedStatementOutput) SetUnprocessedPreparedStatementNames(v return s } +// Contains an array of query execution IDs. type BatchGetQueryExecutionInput struct { _ struct{} `type:"structure"` @@ -4565,7 +4567,7 @@ type CreateNamedQueryOutput struct { _ struct{} `type:"structure"` // The unique ID of the query. - NamedQueryId *string `type:"string"` + NamedQueryId *string `min:"1" type:"string"` } // String returns the string representation. 
@@ -5113,7 +5115,7 @@ type DeleteNamedQueryInput struct { _ struct{} `type:"structure"` // The unique ID of the query to delete. - NamedQueryId *string `type:"string" idempotencyToken:"true"` + NamedQueryId *string `min:"1" type:"string" idempotencyToken:"true"` } // String returns the string representation. @@ -5134,6 +5136,19 @@ func (s DeleteNamedQueryInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNamedQueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNamedQueryInput"} + if s.NamedQueryId != nil && len(*s.NamedQueryId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamedQueryId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetNamedQueryId sets the NamedQueryId field's value. func (s *DeleteNamedQueryInput) SetNamedQueryId(v string) *DeleteNamedQueryInput { s.NamedQueryId = &v @@ -5634,7 +5649,7 @@ type GetNamedQueryInput struct { // The unique ID of the query. Use ListNamedQueries to get query IDs. // // NamedQueryId is a required field - NamedQueryId *string `type:"string" required:"true"` + NamedQueryId *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -5661,6 +5676,9 @@ func (s *GetNamedQueryInput) Validate() error { if s.NamedQueryId == nil { invalidParams.Add(request.NewErrParamRequired("NamedQueryId")) } + if s.NamedQueryId != nil && len(*s.NamedQueryId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamedQueryId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5805,7 +5823,7 @@ type GetQueryExecutionInput struct { // The unique ID of the query execution. // // QueryExecutionId is a required field - QueryExecutionId *string `type:"string" required:"true"` + QueryExecutionId *string `min:"1" type:"string" required:"true"` } // String returns the string representation. 
@@ -5832,6 +5850,9 @@ func (s *GetQueryExecutionInput) Validate() error { if s.QueryExecutionId == nil { invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) } + if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5891,7 +5912,7 @@ type GetQueryResultsInput struct { // The unique ID of the query execution. // // QueryExecutionId is a required field - QueryExecutionId *string `type:"string" required:"true"` + QueryExecutionId *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -5924,6 +5945,9 @@ func (s *GetQueryResultsInput) Validate() error { if s.QueryExecutionId == nil { invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) } + if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7434,7 +7458,7 @@ type NamedQuery struct { Name *string `min:"1" type:"string" required:"true"` // The unique identifier of the query. - NamedQueryId *string `type:"string"` + NamedQueryId *string `min:"1" type:"string"` // The SQL statements that make up the query. // @@ -7626,7 +7650,7 @@ type QueryExecution struct { QueryExecutionContext *QueryExecutionContext `type:"structure"` // The unique identifier for each query execution. - QueryExecutionId *string `type:"string"` + QueryExecutionId *string `min:"1" type:"string"` // The location in Amazon S3 where query results were stored and the encryption // option, if any, used for query results. These are known as "client-side settings". @@ -8072,7 +8096,7 @@ type ResultConfiguration struct { // for the workgroup, and also uses the location for storing query results specified // in the workgroup. 
See WorkGroupConfiguration$EnforceWorkGroupConfiguration // and Workgroup Settings Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - ExpectedBucketOwner *string `type:"string"` + ExpectedBucketOwner *string `min:"12" type:"string"` // The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. // To run the query, you must specify the query results location using one of @@ -8106,6 +8130,9 @@ func (s ResultConfiguration) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ResultConfiguration) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ResultConfiguration"} + if s.ExpectedBucketOwner != nil && len(*s.ExpectedBucketOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("ExpectedBucketOwner", 12)) + } if s.AclConfiguration != nil { if err := s.AclConfiguration.Validate(); err != nil { invalidParams.AddNested("AclConfiguration", err.(request.ErrInvalidParams)) @@ -8170,7 +8197,7 @@ type ResultConfigurationUpdates struct { // also uses the location for storing query results specified in the workgroup. // See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - ExpectedBucketOwner *string `type:"string"` + ExpectedBucketOwner *string `min:"12" type:"string"` // The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. // For more information, see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html) @@ -8237,6 +8264,9 @@ func (s ResultConfigurationUpdates) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *ResultConfigurationUpdates) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ResultConfigurationUpdates"} + if s.ExpectedBucketOwner != nil && len(*s.ExpectedBucketOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("ExpectedBucketOwner", 12)) + } if s.AclConfiguration != nil { if err := s.AclConfiguration.Validate(); err != nil { invalidParams.AddNested("AclConfiguration", err.(request.ErrInvalidParams)) @@ -8537,7 +8567,7 @@ type StartQueryExecutionOutput struct { _ struct{} `type:"structure"` // The unique ID of the query that ran as a result of this request. - QueryExecutionId *string `type:"string"` + QueryExecutionId *string `min:"1" type:"string"` } // String returns the string representation. @@ -8568,7 +8598,7 @@ type StopQueryExecutionInput struct { _ struct{} `type:"structure"` // The unique ID of the query execution to stop. - QueryExecutionId *string `type:"string" idempotencyToken:"true"` + QueryExecutionId *string `min:"1" type:"string" idempotencyToken:"true"` } // String returns the string representation. @@ -8589,6 +8619,19 @@ func (s StopQueryExecutionInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopQueryExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopQueryExecutionInput"} + if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetQueryExecutionId sets the QueryExecutionId field's value. func (s *StopQueryExecutionInput) SetQueryExecutionId(v string) *StopQueryExecutionInput { s.QueryExecutionId = &v @@ -8952,7 +8995,7 @@ type UnprocessedNamedQueryId struct { ErrorMessage *string `type:"string"` // The unique identifier of the named query. 
- NamedQueryId *string `type:"string"` + NamedQueryId *string `min:"1" type:"string"` } // String returns the string representation. @@ -9063,7 +9106,7 @@ type UnprocessedQueryExecutionId struct { ErrorMessage *string `type:"string"` // The unique identifier of the query execution. - QueryExecutionId *string `type:"string"` + QueryExecutionId *string `min:"1" type:"string"` } // String returns the string representation. @@ -9324,7 +9367,7 @@ type UpdateNamedQueryInput struct { // The unique identifier (UUID) of the query. // // NamedQueryId is a required field - NamedQueryId *string `type:"string" required:"true"` + NamedQueryId *string `min:"1" type:"string" required:"true"` // The contents of the query with all query statements. // @@ -9362,6 +9405,9 @@ func (s *UpdateNamedQueryInput) Validate() error { if s.NamedQueryId == nil { invalidParams.Add(request.NewErrParamRequired("NamedQueryId")) } + if s.NamedQueryId != nil && len(*s.NamedQueryId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamedQueryId", 1)) + } if s.QueryString == nil { invalidParams.Add(request.NewErrParamRequired("QueryString")) } diff --git a/service/codeartifact/api.go b/service/codeartifact/api.go index bd869dba1d6..bda2eee536a 100644 --- a/service/codeartifact/api.go +++ b/service/codeartifact/api.go @@ -1010,6 +1010,100 @@ func (c *CodeArtifact) DescribeDomainWithContext(ctx aws.Context, input *Describ return out, req.Send() } +const opDescribePackage = "DescribePackage" + +// DescribePackageRequest generates a "aws/request.Request" representing the +// client's request for the DescribePackage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribePackage for more information on using the DescribePackage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePackageRequest method. +// req, resp := client.DescribePackageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codeartifact-2018-09-22/DescribePackage +func (c *CodeArtifact) DescribePackageRequest(input *DescribePackageInput) (req *request.Request, output *DescribePackageOutput) { + op := &request.Operation{ + Name: opDescribePackage, + HTTPMethod: "GET", + HTTPPath: "/v1/package", + } + + if input == nil { + input = &DescribePackageInput{} + } + + output = &DescribePackageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePackage API operation for CodeArtifact. +// +// Returns a PackageDescription (https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_PackageDescription.html) +// object that contains information about the requested package. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for CodeArtifact's +// API operation DescribePackage for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// The operation did not succeed because of an unauthorized access attempt. +// +// * InternalServerException +// The operation did not succeed because of an error that occurred inside CodeArtifact. +// +// * ResourceNotFoundException +// The operation did not succeed because the resource requested is not found +// in the service. 
+// +// * ThrottlingException +// The operation did not succeed because too many requests are sent to the service. +// +// * ValidationException +// The operation did not succeed because a parameter in the request was sent +// with an invalid value. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codeartifact-2018-09-22/DescribePackage +func (c *CodeArtifact) DescribePackage(input *DescribePackageInput) (*DescribePackageOutput, error) { + req, out := c.DescribePackageRequest(input) + return out, req.Send() +} + +// DescribePackageWithContext is the same as DescribePackage with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePackage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeArtifact) DescribePackageWithContext(ctx aws.Context, input *DescribePackageInput, opts ...request.Option) (*DescribePackageOutput, error) { + req, out := c.DescribePackageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribePackageVersion = "DescribePackageVersion" // DescribePackageVersionRequest generates a "aws/request.Request" representing the @@ -1760,7 +1854,10 @@ func (c *CodeArtifact) GetPackageVersionReadmeRequest(input *GetPackageVersionRe // GetPackageVersionReadme API operation for CodeArtifact. // -// Gets the readme file or descriptive text for a package version. +// Gets the readme file or descriptive text for a package version. For packages +// that do not contain a readme file, CodeArtifact extracts a description from +// a metadata file. For example, from the