From 17f96f9bbe14abad880ed6084262d468a95a445d Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Tue, 19 Sep 2023 11:48:47 -0700 Subject: [PATCH] Release v1.45.13 (2023-09-19) (#4992) Release v1.45.13 (2023-09-19) === ### Service Client Updates * `service/ec2`: Updates service API * This release adds support for C7i, and R7a instance types. * `service/outposts`: Updates service API and documentation * `service/sagemaker`: Updates service API and documentation * This release adds support for one-time model monitoring schedules that are executed immediately without delay, explicit data analysis windows for model monitoring schedules and exclude features attributes to remove features from model monitor analysis. --- CHANGELOG.md | 10 + aws/endpoints/defaults.go | 3 + aws/version.go | 2 +- models/apis/ec2/2016-11-15/api-2.json | 22 +- models/apis/outposts/2019-12-03/api-2.json | 13 +- models/apis/outposts/2019-12-03/docs-2.json | 20 +- .../2019-12-03/endpoint-rule-set-1.json | 362 ++++++++---------- models/apis/sagemaker/2017-07-24/api-2.json | 14 +- models/apis/sagemaker/2017-07-24/docs-2.json | 71 ++-- models/endpoints/endpoints.json | 1 + service/ec2/api.go | 80 ++++ service/outposts/api.go | 14 +- service/sagemaker/api.go | 177 ++++++--- 13 files changed, 488 insertions(+), 301 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index be4368cb227..c9903b477f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +Release v1.45.13 (2023-09-19) +=== + +### Service Client Updates +* `service/ec2`: Updates service API + * This release adds support for C7i, and R7a instance types. +* `service/outposts`: Updates service API and documentation +* `service/sagemaker`: Updates service API and documentation + * This release adds support for one-time model monitoring schedules that are executed immediately without delay, explicit data analysis windows for model monitoring schedules and exclude features attributes to remove features from model monitor analysis. 
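For context on the EC2 change above, here is a minimal sketch of launching one of the newly added instance types through this SDK once v1.45.13 is installed. The region and AMI ID are placeholders; substitute values valid for your account.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := ec2.New(sess)

	// Launch a single c7i.large instance using the enum value added in this release.
	out, err := svc.RunInstances(&ec2.RunInstancesInput{
		ImageId:      aws.String("ami-0123456789abcdef0"), // placeholder AMI
		InstanceType: aws.String(ec2.InstanceTypeC7iLarge),
		MinCount:     aws.Int64(1),
		MaxCount:     aws.Int64(1),
	})
	if err != nil {
		fmt.Println("RunInstances failed:", err)
		return
	}
	fmt.Println("launched:", aws.StringValue(out.Instances[0].InstanceId))
}
```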
+ Release v1.45.12 (2023-09-18) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 86082f706ad..2b7bdca86cf 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -27990,6 +27990,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, diff --git a/aws/version.go b/aws/version.go index 30d1c361228..a19163bd2c7 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.45.12" +const SDKVersion = "1.45.13" diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index 451281bb3ee..7793ea285e7 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -27476,7 +27476,27 @@ "r7gd.4xlarge", "r7gd.8xlarge", "r7gd.12xlarge", - "r7gd.16xlarge" + "r7gd.16xlarge", + "r7a.medium", + "r7a.large", + "r7a.xlarge", + "r7a.2xlarge", + "r7a.4xlarge", + "r7a.8xlarge", + "r7a.12xlarge", + "r7a.16xlarge", + "r7a.24xlarge", + "r7a.32xlarge", + "r7a.48xlarge", + "c7i.large", + "c7i.xlarge", + "c7i.2xlarge", + "c7i.4xlarge", + "c7i.8xlarge", + "c7i.12xlarge", + "c7i.16xlarge", + "c7i.24xlarge", + "c7i.48xlarge" ] }, "InstanceTypeHypervisor":{ diff --git a/models/apis/outposts/2019-12-03/api-2.json b/models/apis/outposts/2019-12-03/api-2.json index fd2cdd34cab..ff86a77f65e 100644 --- a/models/apis/outposts/2019-12-03/api-2.json +++ b/models/apis/outposts/2019-12-03/api-2.json @@ -628,7 +628,8 @@ "type":"structure", "members":{ "HostId":{"shape":"HostId"}, - "State":{"shape":"ComputeAssetState"} + "State":{"shape":"ComputeAssetState"}, + "InstanceFamilies":{"shape":"InstanceFamilies"} } }, "ConflictException":{ @@ -974,6 +975,16 @@ "member":{"shape":"HostId"} }, "ISO8601Timestamp":{"type":"timestamp"}, + "InstanceFamilies":{ + "type":"list", + "member":{"shape":"InstanceFamilyName"} + }, + "InstanceFamilyName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^(?:.{1,200}/)?(?:[a-z0-9-_A-Z])+$" + }, "InstanceType":{"type":"string"}, "InstanceTypeItem":{ "type":"structure", diff --git a/models/apis/outposts/2019-12-03/docs-2.json b/models/apis/outposts/2019-12-03/docs-2.json index 865537c7a1a..77312bc84da 100644 --- a/models/apis/outposts/2019-12-03/docs-2.json +++ b/models/apis/outposts/2019-12-03/docs-2.json @@ -487,6 +487,18 @@ "OrderSummary$OrderFulfilledDate": "

The fulfillment date for the order.

" } }, + "InstanceFamilies": { + "base": null, + "refs": { + "ComputeAttributes$InstanceFamilies": "

A list of the names of instance families that are currently associated with a given asset.
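The new InstanceFamilies field surfaces on assets returned by ListAssets. A sketch of reading it, assuming a placeholder Outpost identifier:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/outposts"
)

func main() {
	svc := outposts.New(session.Must(session.NewSession()))

	// "op-0123456789abcdef0" is a placeholder Outpost identifier.
	out, err := svc.ListAssets(&outposts.ListAssetsInput{
		OutpostIdentifier: aws.String("op-0123456789abcdef0"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, asset := range out.Assets {
		if asset.ComputeAttributes != nil {
			// InstanceFamilies is the field added in this release.
			fmt.Println(aws.StringValue(asset.AssetId),
				aws.StringValueSlice(asset.ComputeAttributes.InstanceFamilies))
		}
	}
}
```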

" + } + }, + "InstanceFamilyName": { + "base": null, + "refs": { + "InstanceFamilies$member": null + } + }, "InstanceType": { "base": "

The instance type.

", "refs": { @@ -719,7 +731,7 @@ "refs": { "CancelOrderInput$OrderId": "

The ID of the order.

", "GetOrderInput$OrderId": "

The ID of the order.

", - "LineItem$PreviousOrderId": "

The ID of the previous order item.

", + "LineItem$PreviousOrderId": "

The ID of the previous order.

", "Order$OrderId": "

The ID of the order.

", "OrderSummary$OrderId": "

The ID of the order.

" } @@ -746,8 +758,8 @@ "OrderType": { "base": null, "refs": { - "Order$OrderType": "

Type of order.

", - "OrderSummary$OrderType": "

The type of order.

" + "Order$OrderType": "

The type of order.

", + "OrderSummary$OrderType": "

The type of order.

" } }, "Outpost": { @@ -977,7 +989,7 @@ "refs": { "CatalogItem$CatalogItemId": "

The ID of the catalog item.

", "GetCatalogItemInput$CatalogItemId": "

The ID of the catalog item.

", - "LineItem$CatalogItemId": "

The ID of the catalog item.

", + "LineItem$CatalogItemId": "

The ID of the catalog item.

", "LineItemRequest$CatalogItemId": "

The ID of the catalog item.

" } }, diff --git a/models/apis/outposts/2019-12-03/endpoint-rule-set-1.json b/models/apis/outposts/2019-12-03/endpoint-rule-set-1.json index fb8b6059a98..bca1f55e1e5 100644 --- a/models/apis/outposts/2019-12-03/endpoint-rule-set-1.json +++ b/models/apis/outposts/2019-12-03/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://outposts-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://outposts-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 
@@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://outposts.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://outposts-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://outposts.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://outposts-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://outposts.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://outposts.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://outposts.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://outposts.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 603d20def65..32b63a2338d 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -4680,7 +4680,8 @@ "ProbabilityAttribute":{"shape":"String"}, "ProbabilityThresholdAttribute":{"shape":"ProbabilityThresholdAttribute"}, "StartTimeOffset":{"shape":"MonitoringTimeOffsetString"}, - 
"EndTimeOffset":{"shape":"MonitoringTimeOffsetString"} + "EndTimeOffset":{"shape":"MonitoringTimeOffsetString"}, + "ExcludeFeaturesAttribute":{"shape":"ExcludeFeaturesAttribute"} } }, "BestObjectiveNotImproving":{ @@ -10221,7 +10222,8 @@ "ProbabilityAttribute":{"shape":"String"}, "ProbabilityThresholdAttribute":{"shape":"ProbabilityThresholdAttribute"}, "StartTimeOffset":{"shape":"MonitoringTimeOffsetString"}, - "EndTimeOffset":{"shape":"MonitoringTimeOffsetString"} + "EndTimeOffset":{"shape":"MonitoringTimeOffsetString"}, + "ExcludeFeaturesAttribute":{"shape":"ExcludeFeaturesAttribute"} } }, "EndpointInputConfiguration":{ @@ -10389,6 +10391,10 @@ "max":1024, "pattern":"[\\S\\s]*" }, + "ExcludeFeaturesAttribute":{ + "type":"string", + "max":100 + }, "ExecutionRoleIdentityConfig":{ "type":"string", "enum":[ @@ -18591,7 +18597,9 @@ "type":"structure", "required":["ScheduleExpression"], "members":{ - "ScheduleExpression":{"shape":"ScheduleExpression"} + "ScheduleExpression":{"shape":"ScheduleExpression"}, + "DataAnalysisStartTime":{"shape":"String"}, + "DataAnalysisEndTime":{"shape":"String"} } }, "ScheduleExpression":{ diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 8b61bec1d21..bc4df109a80 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -11,8 +11,8 @@ "CreateApp": "

Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker Studio upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously.

", "CreateAppImageConfig": "

Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System (EFS) storage volume on the image, and a list of the kernels in the image.

", "CreateArtifact": "

Creates an artifact. An artifact is a lineage tracking entity that represents a URI addressable object or data. Some examples are the S3 URI of a dataset and the ECR registry path of an image. For more information, see Amazon SageMaker ML Lineage Tracking.

", - "CreateAutoMLJob": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.

CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, and non-tabular problem types such as image or text classification.

Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

", - "CreateAutoMLJobV2": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility.

CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, and non-tabular problem types such as image or text classification.

Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig.

You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

", + "CreateAutoMLJob": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.

CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as non-tabular problem types such as image or text classification.

Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

", + "CreateAutoMLJobV2": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility.

CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as non-tabular problem types such as image or text classification.

Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig.

You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.
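As a rough illustration of the V2 request shape for a tabular problem type, a sketch follows; the bucket, role ARN, job name, and target column are placeholders, not values from this patch.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// createAutoMLV2 submits a tabular AutoML V2 job; all identifiers are placeholders.
func createAutoMLV2() error {
	svc := sagemaker.New(session.Must(session.NewSession()))
	_, err := svc.CreateAutoMLJobV2(&sagemaker.CreateAutoMLJobV2Input{
		AutoMLJobName: aws.String("example-automl-v2-job"),
		AutoMLJobInputDataConfig: []*sagemaker.AutoMLJobChannel{{
			ChannelType: aws.String("training"),
			ContentType: aws.String("text/csv;header=present"),
			DataSource: &sagemaker.AutoMLDataSource{
				S3DataSource: &sagemaker.AutoMLS3DataSource{
					S3DataType: aws.String("S3Prefix"),
					S3Uri:      aws.String("s3://example-bucket/train/"),
				},
			},
		}},
		OutputDataConfig: &sagemaker.AutoMLOutputDataConfig{
			S3OutputPath: aws.String("s3://example-bucket/output/"),
		},
		AutoMLProblemTypeConfig: &sagemaker.AutoMLProblemTypeConfig{
			TabularJobConfig: &sagemaker.TabularJobConfig{
				TargetAttributeName: aws.String("label"), // placeholder target column
			},
		},
		RoleArn: aws.String("arn:aws:iam::123456789012:role/ExampleSageMakerRole"),
	})
	return err
}
```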

", "CreateCodeRepository": "

Creates a Git repository as a resource in your SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in Amazon Web Services CodeCommit or in any other Git repository.

", "CreateCompilationJob": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

", "CreateContext": "

Creates a context. A context is a lineage tracking entity that represents a logical grouping of other tracking or experiment entities. Some examples are an endpoint and a model package. For more information, see Amazon SageMaker ML Lineage Tracking.

", @@ -1423,7 +1423,7 @@ "Model$EnableNetworkIsolation": "

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

", "ModelDashboardIndicatorAction$Enabled": "

Indicates whether the alert action is turned on.

", "MonitoringCsvDatasetFormat$Header": "

Indicates if the CSV data has a header.

", - "MonitoringJsonDatasetFormat$Line": "

Indicates if the file should be read as a JSON object per line.

", + "MonitoringJsonDatasetFormat$Line": "

Indicates if the file should be read as a JSON object per line.

", "MonitoringNetworkConfig$EnableInterContainerTrafficEncryption": "

Whether to encrypt all communications between the instances used for the monitoring jobs. Choose True to encrypt communications. Encryption provides greater security for distributed jobs, but the processing might take longer.

", "MonitoringNetworkConfig$EnableNetworkIsolation": "

Whether to allow inbound and outbound network calls to and from the containers used for the monitoring job.

", "NetworkConfig$EnableInterContainerTrafficEncryption": "

Whether to encrypt all communications between distributed processing jobs. Choose True to encrypt communications. Encryption provides greater security for distributed processing jobs, but the processing might take longer.

", @@ -1575,7 +1575,7 @@ } }, "CaptureContentTypeHeader": { - "base": "

Configuration specifying how to treat different headers. If no headers are specified Amazon SageMaker will by default base64 encode when capturing the data.

", + "base": "

Configuration specifying how to treat different headers. If no headers are specified SageMaker will by default base64 encode when capturing the data.

", "refs": { "DataCaptureConfig$CaptureContentTypeHeader": "

Configuration specifying how to treat different headers. If no headers are specified SageMaker will by default base64 encode when capturing the data.

", "InferenceExperimentDataStorageConfig$ContentType": null @@ -2871,7 +2871,7 @@ "CsvContentTypes": { "base": null, "refs": { - "CaptureContentTypeHeader$CsvContentTypes": "

The list of all content type headers that Amazon SageMaker will treat as CSV and capture accordingly.

" + "CaptureContentTypeHeader$CsvContentTypes": "

The list of all content type headers that SageMaker will treat as CSV and capture accordingly.
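A minimal sketch of a data capture configuration that registers CSV and JSON content types so captured payloads are not base64 encoded by default; the bucket URI is a placeholder.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// exampleDataCapture treats the listed content types as CSV/JSON instead of
// falling back to base64 encoding. The destination bucket is a placeholder.
func exampleDataCapture() *sagemaker.DataCaptureConfig {
	return &sagemaker.DataCaptureConfig{
		EnableCapture:             aws.Bool(true),
		InitialSamplingPercentage: aws.Int64(100),
		DestinationS3Uri:          aws.String("s3://example-bucket/datacapture/"),
		CaptureOptions: []*sagemaker.CaptureOption{
			{CaptureMode: aws.String("Input")},
			{CaptureMode: aws.String("Output")},
		},
		CaptureContentTypeHeader: &sagemaker.CaptureContentTypeHeader{
			CsvContentTypes:  []*string{aws.String("text/csv")},
			JsonContentTypes: []*string{aws.String("application/json")},
		},
	}
}
```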

" } }, "CustomImage": { @@ -5013,6 +5013,13 @@ "EnvironmentMap$value": null } }, + "ExcludeFeaturesAttribute": { + "base": null, + "refs": { + "BatchTransformInput$ExcludeFeaturesAttribute": "

The attributes of the input data to exclude from the analysis.

", + "EndpointInput$ExcludeFeaturesAttribute": "

The attributes of the input data to exclude from the analysis.
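A sketch of the new field on a monitored endpoint input; the endpoint name, local path, and attribute value are illustrative assumptions, and the attribute format should follow your inference request schema.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// monitoredEndpointInput excludes two feature columns from monitor analysis.
// "2,5" is an illustrative value, not one taken from this patch.
func monitoredEndpointInput() *sagemaker.EndpointInput {
	return &sagemaker.EndpointInput{
		EndpointName:             aws.String("example-endpoint"),
		LocalPath:                aws.String("/opt/ml/processing/input"),
		ExcludeFeaturesAttribute: aws.String("2,5"),
	}
}
```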

" + } + }, "ExecutionRoleIdentityConfig": { "base": null, "refs": { @@ -5828,7 +5835,7 @@ "HolidayConfig": { "base": null, "refs": { - "TimeSeriesForecastingJobConfig$HolidayConfig": "

The collection of holiday featurization attributes used to incorporate national holiday information into your forecasting model.

" + "TimeSeriesForecastingJobConfig$HolidayConfig": "

The collection of holiday featurization attributes used to incorporate national holiday information into your forecasting model.

" } }, "HolidayConfigAttributes": { @@ -7155,7 +7162,7 @@ "CreateEndpointConfigInput$KmsKeyId": "

The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.

The KmsKeyId can be any of the following formats:

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint, UpdateEndpoint requests. For more information, refer to the Amazon Web Services Key Management Service section Using Key Policies in Amazon Web Services KMS

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use Nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any Nitro-based instances with local storage, the call to CreateEndpointConfig fails.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.
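A hedged sketch of passing a customer managed KMS key when creating an endpoint config; the key ARN, names, and instance type are placeholders, and ml.m5.large is chosen because it uses EBS-backed (not local) storage, so a KmsKeyId is allowed.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// createConfig creates an endpoint config encrypted with a customer managed
// KMS key. All identifiers are placeholders.
func createConfig(svc *sagemaker.SageMaker) error {
	_, err := svc.CreateEndpointConfig(&sagemaker.CreateEndpointConfigInput{
		EndpointConfigName: aws.String("example-endpoint-config"),
		KmsKeyId:           aws.String("arn:aws:kms:us-east-1:123456789012:key/example"),
		ProductionVariants: []*sagemaker.ProductionVariant{{
			VariantName:          aws.String("AllTraffic"),
			ModelName:            aws.String("example-model"),
			InstanceType:         aws.String("ml.m5.large"),
			InitialInstanceCount: aws.Int64(1),
		}},
	})
	return err
}
```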

", "CreateInferenceExperimentRequest$KmsKey": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. The KmsKey can be any of the following formats:

If you use a KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint and UpdateEndpoint requests. For more information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

", "CreateNotebookInstanceInput$KmsKeyId": "

The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide.

", - "DataCaptureConfig$KmsKeyId": "

The Amazon Resource Name (ARN) of an Key Management Service key that SageMaker uses to encrypt the captured data at rest using Amazon S3 server-side encryption.

The KmsKeyId can be any of the following formats:

", + "DataCaptureConfig$KmsKeyId": "

The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service key that SageMaker uses to encrypt the captured data at rest using Amazon S3 server-side encryption.

The KmsKeyId can be any of the following formats:

", "DataCaptureConfigSummary$KmsKeyId": "

The KMS key being used to encrypt the data in Amazon S3.

", "DescribeDomainResponse$HomeEfsFileSystemKmsKeyId": "

Use KmsKeyId.

", "DescribeDomainResponse$KmsKeyId": "

The Amazon Web Services KMS customer managed key used to encrypt the EFS volume attached to the domain.

", @@ -7170,8 +7177,8 @@ "LabelingJobOutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service ID of the key used to encrypt the output data, if any.

If you provide your own KMS key ID, you must add the required permissions to your KMS key described in Encrypt Output Data and Storage Volume with Amazon Web Services KMS.

If you don't provide a KMS key ID, Amazon SageMaker uses the default Amazon Web Services KMS key for Amazon S3 for your role's account to encrypt your output data.

If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

", "LabelingJobResourceConfig$VolumeKmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training and inference jobs used for automated data labeling.

You can only specify a VolumeKmsKeyId when you create a labeling job with automated data labeling enabled using the API operation CreateLabelingJob. You cannot specify an Amazon Web Services KMS key to encrypt the storage volume used for automated data labeling model training and inference when you create a labeling job using the console. To learn more, see Output Data and Storage Volume Encryption.

The VolumeKmsKeyId can be any of the following formats:

", "ModelCardSecurityConfig$KmsKeyId": "

A Key Management Service key ID to use for encrypting a model card.

", - "MonitoringClusterConfig$VolumeKmsKeyId": "

The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.

", - "MonitoringOutputConfig$KmsKeyId": "

The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.

", + "MonitoringClusterConfig$VolumeKmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.

", + "MonitoringOutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.

", "OnlineStoreSecurityConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (KMS) key ARN that SageMaker Feature Store uses to encrypt the Amazon S3 objects at rest using Amazon S3 server-side encryption.

The caller (either user or IAM role) of CreateFeatureGroup must have the following permissions to the OnlineStore KmsKeyId:

The caller (either user or IAM role) to all DataPlane operations (PutRecord, GetRecord, DeleteRecord) must have the following permissions to the KmsKeyId:

", "OutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KmsKeyId can be any of the following formats:

", "OutputDataConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your KMS key, the SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, SageMaker uses the default KMS key for Amazon S3 for your role's account. SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

", @@ -9272,7 +9279,7 @@ } }, "ModelQualityJobInput": { - "base": "

The input for the model quality monitoring job. Currently endpoints are supported for input for model quality monitoring jobs.

", + "base": "

The input for the model quality monitoring job. Currently, endpoints are supported as input for model quality monitoring jobs.

", "refs": { "CreateModelQualityJobDefinitionRequest$ModelQualityJobInput": "

A list of the inputs that are monitored. Currently endpoints are supported.

", "DescribeModelQualityJobDefinitionResponse$ModelQualityJobInput": "

Inputs for the model quality job.

" @@ -9498,7 +9505,7 @@ "MonitoringExecutionSortKey": { "base": null, "refs": { - "ListMonitoringExecutionsRequest$SortBy": "

Whether to sort the results by the Status, CreationTime, or ScheduledTime field. The default is CreationTime.

" + "ListMonitoringExecutionsRequest$SortBy": "

Whether to sort the results by the Status, CreationTime, or ScheduledTime field. The default is CreationTime.

" } }, "MonitoringExecutionSummary": { @@ -9648,7 +9655,7 @@ "DescribeModelBiasJobDefinitionResponse$ModelBiasJobOutputConfig": null, "DescribeModelExplainabilityJobDefinitionResponse$ModelExplainabilityJobOutputConfig": null, "DescribeModelQualityJobDefinitionResponse$ModelQualityJobOutputConfig": null, - "MonitoringJobDefinition$MonitoringOutputConfig": "

The array of outputs from the monitoring job to be uploaded to Amazon S3.

" + "MonitoringJobDefinition$MonitoringOutputConfig": "

The array of outputs from the monitoring job to be uploaded to Amazon Simple Storage Service (Amazon S3).

" } }, "MonitoringOutputs": { @@ -9756,7 +9763,7 @@ "MonitoringScheduleSortKey": { "base": null, "refs": { - "ListMonitoringSchedulesRequest$SortBy": "

Whether to sort the results by the Status, CreationTime, or ScheduledTime field. The default is CreationTime.

" + "ListMonitoringSchedulesRequest$SortBy": "

Whether to sort the results by the Status, CreationTime, or ScheduledTime field. The default is CreationTime.

" } }, "MonitoringScheduleSummary": { @@ -9989,7 +9996,7 @@ "ListLineageGroupsRequest$NextToken": "

If the response is truncated, SageMaker returns this token. To retrieve the next set of algorithms, use it in the subsequent request.

", "ListLineageGroupsResponse$NextToken": "

If the response is truncated, SageMaker returns this token. To retrieve the next set of algorithms, use it in the subsequent request.

", "ListModelBiasJobDefinitionsRequest$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", - "ListModelBiasJobDefinitionsResponse$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", + "ListModelBiasJobDefinitionsResponse$NextToken": "

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, use it in the subsequent request.

", "ListModelCardExportJobsRequest$NextToken": "

If the response to a previous ListModelCardExportJobs request was truncated, the response includes a NextToken. To retrieve the next set of model card export jobs, use the token in the next request.

", "ListModelCardExportJobsResponse$NextToken": "

If the response is truncated, SageMaker returns this token. To retrieve the next set of model card export jobs, use it in the subsequent request.

", "ListModelCardVersionsRequest$NextToken": "

If the response to a previous ListModelCardVersions request was truncated, the response includes a NextToken. To retrieve the next set of model card versions, use the token in the next request.

", @@ -9997,7 +10004,7 @@ "ListModelCardsRequest$NextToken": "

If the response to a previous ListModelCards request was truncated, the response includes a NextToken. To retrieve the next set of model cards, use the token in the next request.

", "ListModelCardsResponse$NextToken": "

If the response is truncated, SageMaker returns this token. To retrieve the next set of model cards, use it in the subsequent request.

", "ListModelExplainabilityJobDefinitionsRequest$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", - "ListModelExplainabilityJobDefinitionsResponse$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", + "ListModelExplainabilityJobDefinitionsResponse$NextToken": "

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, use it in the subsequent request.

", "ListModelMetadataRequest$NextToken": "

If the response to a previous ListModelMetadataResponse request was truncated, the response includes a NextToken. To retrieve the next set of model metadata, use the token in the next request.

", "ListModelMetadataResponse$NextToken": "

A token for getting the next set of recommendations, if there are any.

", "ListModelPackageGroupsInput$NextToken": "

If the result of the previous ListModelPackageGroups request was truncated, the response includes a NextToken. To retrieve the next set of model groups, use the token in the next request.

", @@ -10011,9 +10018,9 @@ "ListMonitoringAlertsRequest$NextToken": "

If the result of the previous ListMonitoringAlerts request was truncated, the response includes a NextToken. To retrieve the next set of alerts in the history, use the token in the next request.

", "ListMonitoringAlertsResponse$NextToken": "

If the response is truncated, SageMaker returns this token. To retrieve the next set of alerts, use it in the subsequent request.

", "ListMonitoringExecutionsRequest$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", - "ListMonitoringExecutionsResponse$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", + "ListMonitoringExecutionsResponse$NextToken": "

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, use it in the subsequent request.
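Rather than threading NextToken by hand, the SDK's generated paginator can walk every page. A small sketch:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// listAllExecutions walks every page of monitoring executions; the generated
// Pages method handles NextToken internally.
func listAllExecutions(svc *sagemaker.SageMaker) error {
	return svc.ListMonitoringExecutionsPages(&sagemaker.ListMonitoringExecutionsInput{},
		func(page *sagemaker.ListMonitoringExecutionsOutput, lastPage bool) bool {
			for _, s := range page.MonitoringExecutionSummaries {
				fmt.Println(aws.StringValue(s.MonitoringScheduleName),
					aws.StringValue(s.MonitoringExecutionStatus))
			}
			return true // continue to the next page
		})
}
```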

", "ListMonitoringSchedulesRequest$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", - "ListMonitoringSchedulesResponse$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", + "ListMonitoringSchedulesResponse$NextToken": "

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, use it in the subsequent request.

", "ListNotebookInstanceLifecycleConfigsInput$NextToken": "

If the result of a ListNotebookInstanceLifecycleConfigs request was truncated, the response includes a NextToken. To get the next set of lifecycle configurations, use the token in the next request.

", "ListNotebookInstanceLifecycleConfigsOutput$NextToken": "

If the response is truncated, SageMaker returns this token. To get the next set of lifecycle configurations, use it in the next request.

", "ListNotebookInstancesInput$NextToken": "

If the previous call to the ListNotebookInstances is truncated, the response includes a NextToken. You can use this token in your subsequent ListNotebookInstances request to fetch the next set of notebook instances.

You might specify a filter or a sort order in your request. When the response is truncated, you must use the same values for the filter and sort order in the next request.

", @@ -11023,7 +11030,7 @@ "base": null, "refs": { "BatchTransformInput$S3DataDistributionType": "

Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Defaults to FullyReplicated

", - "EndpointInput$S3DataDistributionType": "

Whether input data distributed in Amazon S3 is fully replicated or sharded by an Amazon S3 key. Defaults to FullyReplicated

", + "EndpointInput$S3DataDistributionType": "

Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Defaults to FullyReplicated

", "ProcessingS3Input$S3DataDistributionType": "

Whether to distribute the data from Amazon S3 to all processing instances with FullyReplicated, or whether the data from Amazon S3 is shared by Amazon S3 key, downloading one shard of data to each processing instance.

" } }, @@ -12078,8 +12085,8 @@ "DescribeInferenceExperimentResponse$RoleArn": "

The ARN of the IAM role that Amazon SageMaker can assume to access model artifacts and container images, and manage Amazon SageMaker Inference endpoints for model deployment.

", "DescribeInferenceRecommendationsJobResponse$RoleArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role you provided when you initiated the job.

", "DescribeLabelingJobResponse$RoleArn": "

The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your behalf during data labeling.

", - "DescribeModelBiasJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", - "DescribeModelExplainabilityJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", + "DescribeModelBiasJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", + "DescribeModelExplainabilityJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", "DescribeModelOutput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM role that you specified for the model.

", "DescribeModelQualityJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", "DescribeNotebookInstanceOutput$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role associated with the instance.

", @@ -12207,7 +12214,7 @@ "CheckpointConfig$S3Uri": "

Identifies the S3 path where you want SageMaker to store checkpoints. For example, s3://bucket-name/key-name-prefix.

", "CreateLabelingJobRequest$LabelCategoryConfigS3Uri": "

The S3 URI of the file, referred to as a label category configuration file, that defines the categories used to label the data objects.

For 3D point cloud and video frame task types, you can add label category attributes and frame attributes to your label category configuration file. To learn how, see Create a Labeling Category Configuration File for 3D Point Cloud Labeling Jobs.

For named entity recognition jobs, in addition to \"labels\", you must provide worker instructions in the label category configuration file using the \"instructions\" parameter: \"instructions\": {\"shortInstruction\":\"<h1>Add header</h1><p>Add Instructions</p>\", \"fullInstruction\":\"<p>Add additional instructions.</p>\"}. For details and an example, see Create a Named Entity Recognition Labeling Job (API) .

For all other built-in task types and custom tasks, your label category configuration file must be a JSON file in the following format. Identify the labels you want to use by replacing label_1, label_2,...,label_n with your label categories.

{

\"document-version\": \"2018-11-28\",

\"labels\": [{\"label\": \"label_1\"},{\"label\": \"label_2\"},...{\"label\": \"label_n\"}]

}

Note the following about the label category configuration file:

", "CreateModelPackageInput$SamplePayloadUrl": "

The Amazon Simple Storage Service (Amazon S3) path where the sample payload is stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). This archive can hold multiple files that are all equally used in the load test. Each file in the archive must satisfy the size constraints of the InvokeEndpoint call.

", - "DataQualityAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flattened JSON so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", + "DataQualityAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into flattened JSON so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", "DataQualityAppSpecification$PostAnalyticsProcessorSourceUri": "

An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers.

", "DebugHookConfig$S3OutputPath": "

Path to Amazon S3 storage location for metrics and tensors.

", "DebugRuleConfiguration$S3OutputPath": "

Path to Amazon S3 storage location for rules.

", @@ -12226,10 +12233,10 @@ "ModelBiasAppSpecification$ConfigUri": "

JSON formatted S3 file that defines bias parameters. For more information on this JSON configuration file, see Configure bias parameters.

", "ModelCardExportArtifacts$S3ExportArtifacts": "

The Amazon S3 URI of the exported model artifacts.

", "ModelCardExportOutputConfig$S3OutputPath": "

The Amazon S3 output path to export your model card PDF.

", - "ModelExplainabilityAppSpecification$ConfigUri": "

JSON formatted Amazon S3 file that defines explainability parameters. For more information on this JSON configuration file, see Configure model explainability parameters.

", - "ModelQualityAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flattened JSON so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", + "ModelExplainabilityAppSpecification$ConfigUri": "

JSON formatted S3 file that defines explainability parameters. For more information on this JSON configuration file, see Configure model explainability parameters.

", + "ModelQualityAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into flattened JSON so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", "ModelQualityAppSpecification$PostAnalyticsProcessorSourceUri": "

An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers.

", - "MonitoringAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flattened JSON so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", + "MonitoringAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into flattened JSON so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", "MonitoringAppSpecification$PostAnalyticsProcessorSourceUri": "

An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers.

", "MonitoringConstraintsResource$S3Uri": "

The Amazon S3 URI for the constraints resource.

", "MonitoringStatisticsResource$S3Uri": "

The Amazon S3 URI for the statistics resource.

", @@ -12328,7 +12335,7 @@ "ScheduleExpression": { "base": null, "refs": { - "ScheduleConfig$ScheduleExpression": "

A cron expression that describes details about the monitoring schedule.

Currently the only supported cron expressions are:

For example, the following are valid cron expressions:

To support running every 6, 12 hours, the following are also supported:

cron(0 [00-23]/[01-24] ? * * *)

For example, the following are valid cron expressions:

" + "ScheduleConfig$ScheduleExpression": "

A cron expression that describes details about the monitoring schedule.

The supported cron expressions are:

For example, the following are valid cron expressions:

To support running every 6, 12 hours, the following are also supported:

cron(0 [00-23]/[01-24] ? * * *)

For example, the following are valid cron expressions:

You can also specify the keyword NOW to run the monitoring job immediately, one time, without recurring.
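A sketch of a recurring schedule using the documented hourly cron form; the schedule and job definition names are placeholders for resources created earlier (for example with CreateDataQualityJobDefinition).

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// hourlySchedule creates a recurring monitoring schedule; names are placeholders.
func hourlySchedule(svc *sagemaker.SageMaker) error {
	_, err := svc.CreateMonitoringSchedule(&sagemaker.CreateMonitoringScheduleInput{
		MonitoringScheduleName: aws.String("example-hourly-schedule"),
		MonitoringScheduleConfig: &sagemaker.MonitoringScheduleConfig{
			MonitoringJobDefinitionName: aws.String("example-data-quality-job-def"),
			MonitoringType:              aws.String("DataQuality"),
			ScheduleConfig: &sagemaker.ScheduleConfig{
				ScheduleExpression: aws.String("cron(0 * ? * * *)"), // hourly
			},
		},
	})
	return err
}
```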

" } }, "ScheduleStatus": { @@ -12657,7 +12664,7 @@ "ListAssociationsRequest$SortOrder": "

The sort order. The default value is Descending.

", "ListCompilationJobsRequest$SortOrder": "

The sort order for results. The default is Ascending.

", "ListContextsRequest$SortOrder": "

The sort order. The default value is Descending.

", - "ListDataQualityJobDefinitionsRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", + "ListDataQualityJobDefinitionsRequest$SortOrder": "

The sort order for results. The default is Descending.

", "ListDeviceFleetsRequest$SortOrder": "

What direction to sort in.

", "ListEdgeDeploymentPlansRequest$SortOrder": "

The direction of the sorting (ascending or descending).

", "ListEdgePackagingJobsRequest$SortOrder": "

What direction to sort by.

", @@ -12677,7 +12684,7 @@ "ListModelExplainabilityJobDefinitionsRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", "ListModelPackageGroupsInput$SortOrder": "

The sort order for results. The default is Ascending.

", "ListModelPackagesInput$SortOrder": "

The sort order for the results. The default is Ascending.

", - "ListModelQualityJobDefinitionsRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", + "ListModelQualityJobDefinitionsRequest$SortOrder": "

The sort order for results. The default is Descending.

", "ListMonitoringAlertHistoryRequest$SortOrder": "

The sort order, whether Ascending or Descending, of the alert history. The default is Descending.

", "ListMonitoringExecutionsRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", "ListMonitoringSchedulesRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", @@ -13092,6 +13099,8 @@ "RenderUiTemplateResponse$RenderedContent": "

A Liquid template that renders the HTML for the worker UI.

", "RenderingError$Code": "

A unique identifier for a specific class of errors.

", "RenderingError$Message": "

A human-readable message describing the error.

", + "ScheduleConfig$DataAnalysisStartTime": "

Sets the start time for a monitoring job window. Express this time as an offset to the times that you schedule your monitoring jobs to run. You schedule monitoring jobs with the ScheduleExpression parameter. Specify this offset in ISO 8601 duration format. For example, if you want to monitor the five hours of data in your dataset that precede the start of each monitoring job, you would specify: \"-PT5H\".

The start time that you specify must not precede the end time that you specify by more than 24 hours. You specify the end time with the DataAnalysisEndTime parameter.

If you set ScheduleExpression to NOW, this parameter is required.

", + "ScheduleConfig$DataAnalysisEndTime": "

Sets the end time for a monitoring job window. Express this time as an offset to the times that you schedule your monitoring jobs to run. You schedule monitoring jobs with the ScheduleExpression parameter. Specify this offset in ISO 8601 duration format. For example, if you want to end the window one hour before the start of each monitoring job, you would specify: \"-PT1H\".

The end time that you specify must not follow the start time that you specify by more than 24 hours. You specify the start time with the DataAnalysisStartTime parameter.

If you set ScheduleExpression to NOW, this parameter is required.
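Putting the NOW keyword and the new analysis-window offsets together, a minimal sketch: a single monitoring job that runs immediately over the five-hour slice of data ending one hour before the job starts. The offset values mirror the examples in the doc strings above.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// oneTimeWindow runs one monitoring job immediately, analyzing data from
// five hours before the job start up to one hour before it.
func oneTimeWindow() *sagemaker.ScheduleConfig {
	return &sagemaker.ScheduleConfig{
		ScheduleExpression:    aws.String("NOW"),
		DataAnalysisStartTime: aws.String("-PT5H"),
		DataAnalysisEndTime:   aws.String("-PT1H"),
	}
}
```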

", "SubscribedWorkteam$SellerName": "

The name of the vendor in the Amazon Marketplace.

", "SubscribedWorkteam$ListingId": "

Marketplace product listing ID.

", "UserContext$UserProfileArn": "

The Amazon Resource Name (ARN) of the user's profile.

", @@ -13384,7 +13393,7 @@ "CreateCodeRepositoryInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreateCompilationJobRequest$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreateContextRequest$Tags": "

A list of tags to apply to the context.

", - "CreateDataQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateDataQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateDeviceFleetRequest$Tags": "

Creates tags for the specified fleet.

", "CreateDomainRequest$Tags": "

Tags to associate with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.

Tags that you specify for the Domain are also added to all Apps that the Domain launches.

", "CreateEdgeDeploymentPlanRequest$Tags": "

List of tags with which to tag the edge deployment plan.

", @@ -13401,13 +13410,13 @@ "CreateInferenceExperimentRequest$Tags": "

Array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging your Amazon Web Services Resources.

", "CreateInferenceRecommendationsJobRequest$Tags": "

The metadata that you apply to Amazon Web Services resources to help you categorize and organize them. Each tag consists of a key and a value, both of which you define. For more information, see Tagging Amazon Web Services Resources in the Amazon Web Services General Reference.

", "CreateLabelingJobRequest$Tags": "

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", - "CreateModelBiasJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateModelBiasJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateModelCardRequest$Tags": "

Key-value pairs used to manage metadata for model cards.

", - "CreateModelExplainabilityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateModelExplainabilityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateModelInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreateModelPackageGroupInput$Tags": "

A list of key value pairs associated with the model group. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

", "CreateModelPackageInput$Tags": "

A list of key value pairs associated with the model. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

If you supply ModelPackageGroupName, your model package belongs to the model group you specify and uses the tags associated with the model group. In this case, you cannot supply a tag argument.

", - "CreateModelQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateModelQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateMonitoringScheduleRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateNotebookInstanceInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreatePipelineRequest$Tags": "

A list of tags to apply to the created pipeline.

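For SDK callers, every `Tags` field documented above is a plain `[]*sagemaker.Tag` slice. A minimal sketch of attaching tags to a monitoring schedule request; the schedule name and tag values are placeholders, and a real call also needs `MonitoringScheduleConfig`, which is omitted here:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// monitoringTags builds cost-allocation tags in the shape the
// Create*JobDefinition and CreateMonitoringSchedule requests accept.
func monitoringTags() []*sagemaker.Tag {
	return []*sagemaker.Tag{
		{Key: aws.String("team"), Value: aws.String("ml-platform")},   // placeholder
		{Key: aws.String("cost-center"), Value: aws.String("1234")},   // placeholder
	}
}

func main() {
	input := &sagemaker.CreateMonitoringScheduleInput{
		MonitoringScheduleName: aws.String("my-schedule"), // hypothetical name
		Tags:                   monitoringTags(),
	}
	fmt.Println(input)
}
```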
", diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index ddaa415a1c8..3361daa110f 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -16084,6 +16084,7 @@ "ap-southeast-3" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, "eu-west-1" : { }, diff --git a/service/ec2/api.go b/service/ec2/api.go index 4bbf7a315ef..310584a5318 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -187865,6 +187865,66 @@ const ( // InstanceTypeR7gd16xlarge is a InstanceType enum value InstanceTypeR7gd16xlarge = "r7gd.16xlarge" + + // InstanceTypeR7aMedium is a InstanceType enum value + InstanceTypeR7aMedium = "r7a.medium" + + // InstanceTypeR7aLarge is a InstanceType enum value + InstanceTypeR7aLarge = "r7a.large" + + // InstanceTypeR7aXlarge is a InstanceType enum value + InstanceTypeR7aXlarge = "r7a.xlarge" + + // InstanceTypeR7a2xlarge is a InstanceType enum value + InstanceTypeR7a2xlarge = "r7a.2xlarge" + + // InstanceTypeR7a4xlarge is a InstanceType enum value + InstanceTypeR7a4xlarge = "r7a.4xlarge" + + // InstanceTypeR7a8xlarge is a InstanceType enum value + InstanceTypeR7a8xlarge = "r7a.8xlarge" + + // InstanceTypeR7a12xlarge is a InstanceType enum value + InstanceTypeR7a12xlarge = "r7a.12xlarge" + + // InstanceTypeR7a16xlarge is a InstanceType enum value + InstanceTypeR7a16xlarge = "r7a.16xlarge" + + // InstanceTypeR7a24xlarge is a InstanceType enum value + InstanceTypeR7a24xlarge = "r7a.24xlarge" + + // InstanceTypeR7a32xlarge is a InstanceType enum value + InstanceTypeR7a32xlarge = "r7a.32xlarge" + + // InstanceTypeR7a48xlarge is a InstanceType enum value + InstanceTypeR7a48xlarge = "r7a.48xlarge" + + // InstanceTypeC7iLarge is a InstanceType enum value + InstanceTypeC7iLarge = "c7i.large" + + // InstanceTypeC7iXlarge is a InstanceType enum value + InstanceTypeC7iXlarge = "c7i.xlarge" + + // InstanceTypeC7i2xlarge is a InstanceType enum value + InstanceTypeC7i2xlarge = "c7i.2xlarge" + + // InstanceTypeC7i4xlarge is a InstanceType enum value + InstanceTypeC7i4xlarge = "c7i.4xlarge" + + // InstanceTypeC7i8xlarge is a InstanceType enum value + InstanceTypeC7i8xlarge = "c7i.8xlarge" + + // InstanceTypeC7i12xlarge is a InstanceType enum value + InstanceTypeC7i12xlarge = "c7i.12xlarge" + + // InstanceTypeC7i16xlarge is a InstanceType enum value + InstanceTypeC7i16xlarge = "c7i.16xlarge" + + // InstanceTypeC7i24xlarge is a InstanceType enum value + InstanceTypeC7i24xlarge = "c7i.24xlarge" + + // InstanceTypeC7i48xlarge is a InstanceType enum value + InstanceTypeC7i48xlarge = "c7i.48xlarge" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -188590,6 +188650,26 @@ func InstanceType_Values() []string { InstanceTypeR7gd8xlarge, InstanceTypeR7gd12xlarge, InstanceTypeR7gd16xlarge, + InstanceTypeR7aMedium, + InstanceTypeR7aLarge, + InstanceTypeR7aXlarge, + InstanceTypeR7a2xlarge, + InstanceTypeR7a4xlarge, + InstanceTypeR7a8xlarge, + InstanceTypeR7a12xlarge, + InstanceTypeR7a16xlarge, + InstanceTypeR7a24xlarge, + InstanceTypeR7a32xlarge, + InstanceTypeR7a48xlarge, + InstanceTypeC7iLarge, + InstanceTypeC7iXlarge, + InstanceTypeC7i2xlarge, + InstanceTypeC7i4xlarge, + InstanceTypeC7i8xlarge, + InstanceTypeC7i12xlarge, + InstanceTypeC7i16xlarge, + InstanceTypeC7i24xlarge, + InstanceTypeC7i48xlarge, } } diff --git a/service/outposts/api.go b/service/outposts/api.go index a8f2fee9ff6..23c6ddfd607 100644 --- a/service/outposts/api.go +++ b/service/outposts/api.go 
diff --git a/service/outposts/api.go b/service/outposts/api.go
index a8f2fee9ff6..23c6ddfd607 100644
--- a/service/outposts/api.go
+++ b/service/outposts/api.go
@@ -3222,6 +3222,10 @@ type ComputeAttributes struct {
 	// The host ID of the Dedicated Host on the asset.
 	HostId *string `min:"1" type:"string"`
 
+	// A list of the names of instance families that are currently associated with
+	// a given asset.
+	InstanceFamilies []*string `type:"list"`
+
 	// The state.
 	//
 	//    * ACTIVE - The asset is available and can provide capacity for new compute
@@ -3261,6 +3265,12 @@ func (s *ComputeAttributes) SetHostId(v string) *ComputeAttributes {
 	return s
 }
 
+// SetInstanceFamilies sets the InstanceFamilies field's value.
+func (s *ComputeAttributes) SetInstanceFamilies(v []*string) *ComputeAttributes {
+	s.InstanceFamilies = v
+	return s
+}
+
 // SetState sets the State field's value.
 func (s *ComputeAttributes) SetState(v string) *ComputeAttributes {
 	s.State = &v
@@ -4811,7 +4821,7 @@ type LineItem struct {
 	// The ID of the previous line item.
 	PreviousLineItemId *string `type:"string"`
 
-	// The ID of the previous order item.
+	// The ID of the previous order.
 	PreviousOrderId *string `min:"1" type:"string"`
 
 	// The quantity of the line item.
@@ -5768,7 +5778,7 @@ type Order struct {
 	// The submission date for the order.
 	OrderSubmissionDate *time.Time `type:"timestamp"`
 
-	// Type of order.
+	// The type of order.
 	OrderType *string `type:"string" enum:"OrderType"`
 
 	// The ID of the Outpost in the order.
diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go
index 5945c7952de..b340c5cd045 100644
--- a/service/sagemaker/api.go
+++ b/service/sagemaker/api.go
@@ -811,8 +811,8 @@ func (c *SageMaker) CreateAutoMLJobRequest(input *CreateAutoMLJobInput) (req *re
 // which offer backward compatibility.
 //
 // CreateAutoMLJobV2 can manage tabular problem types identical to those of
-// its previous version CreateAutoMLJob, as well as time-series forecasting,
-// and non-tabular problem types such as image or text classification.
+// its previous version CreateAutoMLJob, as well as non-tabular problem types
+// such as image or text classification.
 //
 // Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2
 // in Migrate a CreateAutoMLJob to CreateAutoMLJobV2 (https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development-create-experiment-api.html#autopilot-create-experiment-api-migrate-v1-v2).
@@ -912,8 +912,8 @@ func (c *SageMaker) CreateAutoMLJobV2Request(input *CreateAutoMLJobV2Input) (req
 // which offer backward compatibility.
 //
 // CreateAutoMLJobV2 can manage tabular problem types identical to those of
-// its previous version CreateAutoMLJob, as well as time-series forecasting,
-// and non-tabular problem types such as image or text classification.
+// its previous version CreateAutoMLJob, as well as non-tabular problem types
+// such as image or text classification.
 //
 // Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2
 // in Migrate a CreateAutoMLJob to CreateAutoMLJobV2 (https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development-create-experiment-api.html#autopilot-create-experiment-api-migrate-v1-v2).
@@ -33783,6 +33783,9 @@ type BatchTransformInput_ struct {
 	// Monitoring Jobs (https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-model-quality-schedule.html).
 	EndTimeOffset *string `min:"1" type:"string"`
 
+	// The attributes of the input data to exclude from the analysis.
+	ExcludeFeaturesAttribute *string `type:"string"`
+
 	// The attributes of the input data that are the input features.
 	FeaturesAttribute *string `type:"string"`
@@ -33877,6 +33880,12 @@ func (s *BatchTransformInput_) SetEndTimeOffset(v string) *BatchTransformInput_
 	return s
 }
 
+// SetExcludeFeaturesAttribute sets the ExcludeFeaturesAttribute field's value.
+func (s *BatchTransformInput_) SetExcludeFeaturesAttribute(v string) *BatchTransformInput_ {
+	s.ExcludeFeaturesAttribute = &v
+	return s
+}
+
 // SetFeaturesAttribute sets the FeaturesAttribute field's value.
 func (s *BatchTransformInput_) SetFeaturesAttribute(v string) *BatchTransformInput_ {
 	s.FeaturesAttribute = &v
@@ -34550,13 +34559,12 @@ func (s *CapacitySize) SetValue(v int64) *CapacitySize {
 }
 
 // Configuration specifying how to treat different headers. If no headers are
-// specified Amazon SageMaker will by default base64 encode when capturing the
-// data.
+// specified SageMaker will by default base64 encode when capturing the data.
 type CaptureContentTypeHeader struct {
 	_ struct{} `type:"structure"`
 
-	// The list of all content type headers that Amazon SageMaker will treat as
-	// CSV and capture accordingly.
+	// The list of all content type headers that SageMaker will treat as CSV and
+	// capture accordingly.
 	CsvContentTypes []*string `min:"1" type:"list"`
 
 	// The list of all content type headers that SageMaker will treat as JSON and
@@ -47532,8 +47540,9 @@ type DataCaptureConfig struct {
 	// InitialSamplingPercentage is a required field
 	InitialSamplingPercentage *int64 `type:"integer" required:"true"`
 
-	// The Amazon Resource Name (ARN) of an Key Management Service key that SageMaker
-	// uses to encrypt the captured data at rest using Amazon S3 server-side encryption.
+	// The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service
+	// key that SageMaker uses to encrypt the captured data at rest using Amazon
+	// S3 server-side encryption.
 	//
 	// The KmsKeyId can be any of the following formats:
 	//
@@ -47912,9 +47921,9 @@ type DataQualityAppSpecification struct {
 	PostAnalyticsProcessorSourceUri *string `type:"string"`
 
 	// An Amazon S3 URI to a script that is called per row prior to running analysis.
-	// It can base64 decode the payload and convert it into a flatted json so that
-	// the built-in container can use the converted data. Applicable only for the
-	// built-in (first party) containers.
+	// It can base64 decode the payload and convert it into a flattened JSON so
+	// that the built-in container can use the converted data. Applicable only for
+	// the built-in (first party) containers.
 	RecordPreprocessorSourceUri *string `type:"string"`
 }
@@ -59301,9 +59310,9 @@ type DescribeModelBiasJobDefinitionOutput struct {
 	// Networking options for a model bias job.
 	NetworkConfig *MonitoringNetworkConfig `type:"structure"`
 
-	// The Amazon Resource Name (ARN) of the IAM role that has read permission to
-	// the input data location and write permission to the output data location
-	// in Amazon S3.
+	// The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access
+	// Management (IAM) role that has read permission to the input data location
+	// and write permission to the output data location in Amazon S3.
 	//
 	// RoleArn is a required field
 	RoleArn *string `min:"20" type:"string" required:"true"`
@@ -59899,9 +59908,9 @@ type DescribeModelExplainabilityJobDefinitionOutput struct {
 	// Networking options for a model explainability job.
 	NetworkConfig *MonitoringNetworkConfig `type:"structure"`
 
-	// The Amazon Resource Name (ARN) of the IAM role that has read permission to
-	// the input data location and write permission to the output data location
-	// in Amazon S3.
+	// The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access
+	// Management (IAM) role that has read permission to the input data location
+	// and write permission to the output data location in Amazon S3.
 	//
 	// RoleArn is a required field
 	RoleArn *string `min:"20" type:"string" required:"true"`
@@ -66948,6 +66957,9 @@ type EndpointInput struct {
 	// EndpointName is a required field
 	EndpointName *string `type:"string" required:"true"`
 
+	// The attributes of the input data to exclude from the analysis.
+	ExcludeFeaturesAttribute *string `type:"string"`
+
 	// The attributes of the input data that are the input features.
 	FeaturesAttribute *string `type:"string"`
 
@@ -66966,7 +66978,7 @@ type EndpointInput struct {
 	ProbabilityThresholdAttribute *float64 `type:"double"`
 
 	// Whether input data distributed in Amazon S3 is fully replicated or sharded
-	// by an Amazon S3 key. Defaults to FullyReplicated
+	// by an S3 key. Defaults to FullyReplicated
 	S3DataDistributionType *string `type:"string" enum:"ProcessingS3DataDistributionType"`
 
 	// Whether the Pipe or File is used as the input mode for transferring data
@@ -67032,6 +67044,12 @@ func (s *EndpointInput) SetEndpointName(v string) *EndpointInput {
 	return s
 }
 
+// SetExcludeFeaturesAttribute sets the ExcludeFeaturesAttribute field's value.
+func (s *EndpointInput) SetExcludeFeaturesAttribute(v string) *EndpointInput {
+	s.ExcludeFeaturesAttribute = &v
+	return s
+}
+
 // SetFeaturesAttribute sets the FeaturesAttribute field's value.
 func (s *EndpointInput) SetFeaturesAttribute(v string) *EndpointInput {
 	s.FeaturesAttribute = &v
@@ -74400,8 +74418,7 @@ type InferenceExperimentDataStorageConfig struct {
 	_ struct{} `type:"structure"`
 
 	// Configuration specifying how to treat different headers. If no headers are
-	// specified Amazon SageMaker will by default base64 encode when capturing the
-	// data.
+	// specified SageMaker will by default base64 encode when capturing the data.
 	ContentType *CaptureContentTypeHeader `type:"structure"`
 
 	// The Amazon S3 bucket where the inference request and response data is stored.
@@ -78960,8 +78977,7 @@ type ListDataQualityJobDefinitionsInput struct {
 	// The field to sort results by. The default is CreationTime.
 	SortBy *string `type:"string" enum:"MonitoringJobDefinitionSortKey"`
 
-	// Whether to sort the results in Ascending or Descending order. The default
-	// is Descending.
+	// The sort order for results. The default is Descending.
 	SortOrder *string `type:"string" enum:"SortOrder"`
 }
@@ -82795,8 +82811,8 @@ type ListModelBiasJobDefinitionsOutput struct {
 	// JobDefinitionSummaries is a required field
 	JobDefinitionSummaries []*MonitoringJobDefinitionSummary `type:"list" required:"true"`
 
-	// The token returned if the response is truncated. To retrieve the next set
-	// of job executions, use it in the next request.
+	// If the response is truncated, Amazon SageMaker returns this token. To retrieve
+	// the next set of jobs, use it in the subsequent request.
 	NextToken *string `type:"string"`
 }
@@ -83451,8 +83467,8 @@ type ListModelExplainabilityJobDefinitionsOutput struct {
 	// JobDefinitionSummaries is a required field
 	JobDefinitionSummaries []*MonitoringJobDefinitionSummary `type:"list" required:"true"`
 
-	// The token returned if the response is truncated. To retrieve the next set
-	// of job executions, use it in the next request.
+	// If the response is truncated, Amazon SageMaker returns this token. To retrieve
+	// the next set of jobs, use it in the subsequent request.
 	NextToken *string `type:"string"`
 }
@@ -83960,8 +83976,7 @@ type ListModelQualityJobDefinitionsInput struct {
 	// The field to sort results by. The default is CreationTime.
 	SortBy *string `type:"string" enum:"MonitoringJobDefinitionSortKey"`
 
-	// Whether to sort the results in Ascending or Descending order. The default
-	// is Descending.
+	// The sort order for results. The default is Descending.
 	SortOrder *string `type:"string" enum:"SortOrder"`
 }
@@ -84554,8 +84569,8 @@ type ListMonitoringExecutionsInput struct {
 	// Filter for jobs scheduled before a specified time.
 	ScheduledTimeBefore *time.Time `type:"timestamp"`
 
-	// Whether to sort the results by the Status, CreationTime, or ScheduledTime
-	// field. The default is CreationTime.
+	// Whether to sort results by Status, CreationTime, ScheduledTime field. The
+	// default is CreationTime.
 	SortBy *string `type:"string" enum:"MonitoringExecutionSortKey"`
 
 	// Whether to sort the results in Ascending or Descending order. The default
@@ -84701,8 +84716,8 @@ type ListMonitoringExecutionsOutput struct {
 	// MonitoringExecutionSummaries is a required field
 	MonitoringExecutionSummaries []*MonitoringExecutionSummary `type:"list" required:"true"`
 
-	// The token returned if the response is truncated. To retrieve the next set
-	// of job executions, use it in the next request.
+	// If the response is truncated, Amazon SageMaker returns this token. To retrieve
+	// the next set of jobs, use it in the subsequent request.
 	NextToken *string `type:"string"`
 }
@@ -84777,8 +84792,8 @@ type ListMonitoringSchedulesInput struct {
 	// of job executions, use it in the next request.
 	NextToken *string `type:"string"`
 
-	// Whether to sort the results by the Status, CreationTime, or ScheduledTime
-	// field. The default is CreationTime.
+	// Whether to sort results by Status, CreationTime, ScheduledTime field. The
+	// default is CreationTime.
 	SortBy *string `type:"string" enum:"MonitoringScheduleSortKey"`
 
 	// Whether to sort the results in Ascending or Descending order. The default
@@ -84910,8 +84925,8 @@ type ListMonitoringSchedulesOutput struct {
 	// MonitoringScheduleSummaries is a required field
 	MonitoringScheduleSummaries []*MonitoringScheduleSummary `type:"list" required:"true"`
 
-	// The token returned if the response is truncated. To retrieve the next set
-	// of job executions, use it in the next request.
+	// If the response is truncated, Amazon SageMaker returns this token. To retrieve
+	// the next set of jobs, use it in the subsequent request.
 	NextToken *string `type:"string"`
 }
@@ -90347,9 +90362,9 @@ func (s *ModelDigests) SetArtifactDigest(v string) *ModelDigests {
 type ModelExplainabilityAppSpecification struct {
 	_ struct{} `type:"structure"`
 
-	// JSON formatted Amazon S3 file that defines explainability parameters. For
-	// more information on this JSON configuration file, see Configure model explainability
-	// parameters (https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-config-json-monitor-model-explainability-parameters.html).
+	// JSON formatted S3 file that defines explainability parameters. For more information
+	// on this JSON configuration file, see Configure model explainability parameters
+	// (https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-config-json-monitor-model-explainability-parameters.html).
 	//
 	// ConfigUri is a required field
 	ConfigUri *string `type:"string" required:"true"`
@@ -92049,9 +92064,9 @@ type ModelQualityAppSpecification struct {
 	ProblemType *string `type:"string" enum:"MonitoringProblemType"`
 
 	// An Amazon S3 URI to a script that is called per row prior to running analysis.
-	// It can base64 decode the payload and convert it into a flatted json so that
-	// the built-in container can use the converted data. Applicable only for the
-	// built-in (first party) containers.
+	// It can base64 decode the payload and convert it into a flattened JSON so
+	// that the built-in container can use the converted data. Applicable only for
+	// the built-in (first party) containers.
 	RecordPreprocessorSourceUri *string `type:"string"`
 }
@@ -92190,7 +92205,7 @@ func (s *ModelQualityBaselineConfig) SetConstraintsResource(v *MonitoringConstra
 	return s
 }
 
-// The input for the model quality monitoring job. Currently endponts are supported
+// The input for the model quality monitoring job. Currently endpoints are supported
 // for input for model quality monitoring jobs.
 type ModelQualityJobInput struct {
 	_ struct{} `type:"structure"`
@@ -92800,9 +92815,9 @@ type MonitoringAppSpecification struct {
 	PostAnalyticsProcessorSourceUri *string `type:"string"`
 
 	// An Amazon S3 URI to a script that is called per row prior to running analysis.
-	// It can base64 decode the payload and convert it into a flatted json so that
-	// the built-in container can use the converted data. Applicable only for the
-	// built-in (first party) containers.
+	// It can base64 decode the payload and convert it into a flattened JSON so
+	// that the built-in container can use the converted data. Applicable only for
+	// the built-in (first party) containers.
 	RecordPreprocessorSourceUri *string `type:"string"`
 }
@@ -92956,9 +92971,9 @@ type MonitoringClusterConfig struct {
 	// InstanceType is a required field
 	InstanceType *string `type:"string" required:"true" enum:"ProcessingInstanceType"`
 
-	// The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt
-	// data on the storage volume attached to the ML compute instance(s) that run
-	// the model monitoring job.
+	// The Amazon Web Services Key Management Service (Amazon Web Services KMS)
+	// key that Amazon SageMaker uses to encrypt data on the storage volume attached
+	// to the ML compute instance(s) that run the model monitoring job.
 	VolumeKmsKeyId *string `type:"string"`
 
 	// The size of the ML storage volume, in gigabytes, that you want to provision.
@@ -93387,7 +93402,8 @@ type MonitoringJobDefinition struct {
 	// MonitoringInputs is a required field
 	MonitoringInputs []*MonitoringInput `min:"1" type:"list" required:"true"`
 
-	// The array of outputs from the monitoring job to be uploaded to Amazon S3.
+	// The array of outputs from the monitoring job to be uploaded to Amazon Simple
+	// Storage Service (Amazon S3).
 	//
 	// MonitoringOutputConfig is a required field
 	MonitoringOutputConfig *MonitoringOutputConfig `type:"structure" required:"true"`
@@ -93626,7 +93642,7 @@ func (s *MonitoringJobDefinitionSummary) SetMonitoringJobDefinitionName(v string
 type MonitoringJsonDatasetFormat struct {
 	_ struct{} `type:"structure"`
 
-	// Indicates if the file should be read as a json object per line.
+	// Indicates if the file should be read as a JSON object per line.
 	Line *bool `type:"boolean"`
 }
@@ -93783,8 +93799,9 @@ func (s *MonitoringOutput) SetS3Output(v *MonitoringS3Output) *MonitoringOutput
 type MonitoringOutputConfig struct {
 	_ struct{} `type:"structure"`
 
-	// The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt
-	// the model artifacts at rest using Amazon S3 server-side encryption.
+	// The Amazon Web Services Key Management Service (Amazon Web Services KMS)
+	// key that Amazon SageMaker uses to encrypt the model artifacts at rest using
+	// Amazon S3 server-side encryption.
 	KmsKeyId *string `type:"string"`
 
 	// Monitoring outputs for monitoring jobs. This is where the output of the periodic
@@ -104006,12 +104023,39 @@ func (s *ScalingPolicyObjective) SetMinInvocationsPerMinute(v int64) *ScalingPol
 type ScheduleConfig struct {
 	_ struct{} `type:"structure"`
 
+	// Sets the end time for a monitoring job window. Express this time as an offset
+	// to the times that you schedule your monitoring jobs to run. You schedule
+	// monitoring jobs with the ScheduleExpression parameter. Specify this offset
+	// in ISO 8601 duration format. For example, if you want to end the window one
+	// hour before the start of each monitoring job, you would specify: "-PT1H".
+	//
+	// The end time that you specify must not follow the start time that you specify
+	// by more than 24 hours. You specify the start time with the DataAnalysisStartTime
+	// parameter.
+	//
+	// If you set ScheduleExpression to NOW, this parameter is required.
+	DataAnalysisEndTime *string `type:"string"`
+
+	// Sets the start time for a monitoring job window. Express this time as an
+	// offset to the times that you schedule your monitoring jobs to run. You schedule
+	// monitoring jobs with the ScheduleExpression parameter. Specify this offset
+	// in ISO 8601 duration format. For example, if you want to monitor the five
+	// hours of data in your dataset that precede the start of each monitoring job,
+	// you would specify: "-PT5H".
+	//
+	// The start time that you specify must not precede the end time that you specify
+	// by more than 24 hours. You specify the end time with the DataAnalysisEndTime
+	// parameter.
+	//
+	// If you set ScheduleExpression to NOW, this parameter is required.
+	DataAnalysisStartTime *string `type:"string"`
+
 	// A cron expression that describes details about the monitoring schedule.
 	//
-	// Currently the only supported cron expressions are:
+	// The supported cron expressions are:
 	//
-	//    * If you want to set the job to start every hour, please use the following:
-	//    Hourly: cron(0 * ? * * *)
+	//    * If you want to set the job to start every hour, use the following: Hourly:
+	//    cron(0 * ? * * *)
 	//
 	//    * If you want to start the job daily: cron(0 [00-23] ? * * *)
 	//
@@ -104038,6 +104082,9 @@ type ScheduleConfig struct {
 	//    * We recommend that if you would like a daily schedule, you do not provide
 	//    this parameter. Amazon SageMaker will pick a time for running every day.
 	//
+	// You can also specify the keyword NOW to run the monitoring job immediately,
+	// one time, without recurring.
+	//
 	// ScheduleExpression is a required field
 	ScheduleExpression *string `min:"1" type:"string" required:"true"`
 }
@@ -104076,6 +104123,18 @@ func (s *ScheduleConfig) Validate() error {
 	return nil
 }
 
+// SetDataAnalysisEndTime sets the DataAnalysisEndTime field's value.
+func (s *ScheduleConfig) SetDataAnalysisEndTime(v string) *ScheduleConfig {
+	s.DataAnalysisEndTime = &v
+	return s
+}
+
+// SetDataAnalysisStartTime sets the DataAnalysisStartTime field's value.
+func (s *ScheduleConfig) SetDataAnalysisStartTime(v string) *ScheduleConfig {
+	s.DataAnalysisStartTime = &v
+	return s
+}
+
 // SetScheduleExpression sets the ScheduleExpression field's value.
 func (s *ScheduleConfig) SetScheduleExpression(v string) *ScheduleConfig {
 	s.ScheduleExpression = &v
@@ -108583,7 +108642,7 @@ type TimeSeriesForecastingJobConfig struct {
 	// is not provided, the AutoML job uses the quantiles p10, p50, and p90 as default.
 	ForecastQuantiles []*string `min:"1" type:"list"`
 
-	// The collection of holidays featurization attributes used to incorporate national
+	// The collection of holiday featurization attributes used to incorporate national
 	// holiday information into your forecasting model.
 	HolidayConfig []*HolidayConfigAttributes `min:"1" type:"list"`
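Finally, the holiday featurization attributes sit on `TimeSeriesForecastingJobConfig`. A sketch with assumed values; the frequency, horizon, and country code are placeholders, and a real job also requires `TimeSeriesConfig` to map the dataset columns:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// Daily forecasts two weeks out, with explicit quantiles and a
	// national holiday calendar feeding the model.
	cfg := &sagemaker.TimeSeriesForecastingJobConfig{
		ForecastFrequency: aws.String("D"),  // placeholder
		ForecastHorizon:   aws.Int64(14),    // placeholder
		ForecastQuantiles: aws.StringSlice([]string{"p10", "p50", "p90"}),
		HolidayConfig: []*sagemaker.HolidayConfigAttributes{
			{CountryCode: aws.String("US")}, // placeholder
		},
	}
	fmt.Println(cfg)
}
```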