diff --git a/clients/client-codebuild/src/commands/BatchGetBuildBatchesCommand.ts b/clients/client-codebuild/src/commands/BatchGetBuildBatchesCommand.ts index bc3ca3a66e5b6..e9ebff077bcdf 100644 --- a/clients/client-codebuild/src/commands/BatchGetBuildBatchesCommand.ts +++ b/clients/client-codebuild/src/commands/BatchGetBuildBatchesCommand.ts @@ -147,7 +147,13 @@ export interface BatchGetBuildBatchesCommandOutput extends BatchGetBuildBatchesO * // environment: { // ProjectEnvironment * // type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * // image: "STRING_VALUE", // required - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // fleet: { // ProjectFleet * // fleetArn: "STRING_VALUE", * // }, diff --git a/clients/client-codebuild/src/commands/BatchGetBuildsCommand.ts b/clients/client-codebuild/src/commands/BatchGetBuildsCommand.ts index 9f098114d328e..340bdf8773f1f 100644 --- a/clients/client-codebuild/src/commands/BatchGetBuildsCommand.ts +++ b/clients/client-codebuild/src/commands/BatchGetBuildsCommand.ts @@ -148,7 +148,13 @@ export interface BatchGetBuildsCommandOutput extends BatchGetBuildsOutput, __Met * // environment: { // ProjectEnvironment * // type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * // image: "STRING_VALUE", // required - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // fleet: { // ProjectFleet * // fleetArn: "STRING_VALUE", * // }, diff --git a/clients/client-codebuild/src/commands/BatchGetFleetsCommand.ts b/clients/client-codebuild/src/commands/BatchGetFleetsCommand.ts index e9bdbe7405326..700fa337968d8 100644 --- a/clients/client-codebuild/src/commands/BatchGetFleetsCommand.ts +++ b/clients/client-codebuild/src/commands/BatchGetFleetsCommand.ts @@ -57,7 +57,13 @@ export interface BatchGetFleetsCommandOutput extends 
BatchGetFleetsOutput, __Met * // }, * // baseCapacity: Number("int"), * // environmentType: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // scalingConfiguration: { // ScalingConfigurationOutput * // scalingType: "TARGET_TRACKING_SCALING", * // targetTrackingScalingConfigs: [ // TargetTrackingScalingConfigurations diff --git a/clients/client-codebuild/src/commands/BatchGetProjectsCommand.ts b/clients/client-codebuild/src/commands/BatchGetProjectsCommand.ts index 7fff1a5683f47..2a11bbe8112f7 100644 --- a/clients/client-codebuild/src/commands/BatchGetProjectsCommand.ts +++ b/clients/client-codebuild/src/commands/BatchGetProjectsCommand.ts @@ -133,7 +133,13 @@ export interface BatchGetProjectsCommandOutput extends BatchGetProjectsOutput, _ * // environment: { // ProjectEnvironment * // type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * // image: "STRING_VALUE", // required - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // fleet: { // ProjectFleet * // fleetArn: "STRING_VALUE", * // }, diff --git a/clients/client-codebuild/src/commands/CreateFleetCommand.ts b/clients/client-codebuild/src/commands/CreateFleetCommand.ts index d6dbb4b1319d8..a1eb0fd79f9bd 100644 --- a/clients/client-codebuild/src/commands/CreateFleetCommand.ts +++ b/clients/client-codebuild/src/commands/CreateFleetCommand.ts @@ -39,7 +39,13 @@ export interface CreateFleetCommandOutput extends CreateFleetOutput, __MetadataB * name: "STRING_VALUE", // required * baseCapacity: Number("int"), // required * environmentType: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required - * computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || 
"BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * computeConfiguration: { // ComputeConfiguration + * vCpu: Number("long"), + * memory: Number("long"), + * disk: Number("long"), + * machineType: "GENERAL" || "NVME", + * }, * scalingConfiguration: { // ScalingConfigurationInput * scalingType: "TARGET_TRACKING_SCALING", * targetTrackingScalingConfigs: [ // TargetTrackingScalingConfigurations @@ -97,7 +103,13 @@ export interface CreateFleetCommandOutput extends CreateFleetOutput, __MetadataB * // }, * // baseCapacity: Number("int"), * // environmentType: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // scalingConfiguration: { // ScalingConfigurationOutput * // scalingType: "TARGET_TRACKING_SCALING", * // targetTrackingScalingConfigs: [ // TargetTrackingScalingConfigurations diff --git a/clients/client-codebuild/src/commands/CreateProjectCommand.ts b/clients/client-codebuild/src/commands/CreateProjectCommand.ts index 1c211137792cb..8ebedec88ad5c 100644 --- a/clients/client-codebuild/src/commands/CreateProjectCommand.ts +++ b/clients/client-codebuild/src/commands/CreateProjectCommand.ts @@ -123,7 +123,13 @@ export interface CreateProjectCommandOutput extends CreateProjectOutput, __Metad * environment: { // ProjectEnvironment * type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * image: "STRING_VALUE", // required - * computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * computeConfiguration: { // ComputeConfiguration + * vCpu: Number("long"), + * memory: Number("long"), + * disk: Number("long"), + * 
machineType: "GENERAL" || "NVME", + * }, * fleet: { // ProjectFleet * fleetArn: "STRING_VALUE", * }, @@ -291,7 +297,13 @@ export interface CreateProjectCommandOutput extends CreateProjectOutput, __Metad * // environment: { // ProjectEnvironment * // type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * // image: "STRING_VALUE", // required - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // fleet: { // ProjectFleet * // fleetArn: "STRING_VALUE", * // }, diff --git a/clients/client-codebuild/src/commands/RetryBuildBatchCommand.ts b/clients/client-codebuild/src/commands/RetryBuildBatchCommand.ts index 97dc4ec687f00..3a01aeabb1455 100644 --- a/clients/client-codebuild/src/commands/RetryBuildBatchCommand.ts +++ b/clients/client-codebuild/src/commands/RetryBuildBatchCommand.ts @@ -146,7 +146,13 @@ export interface RetryBuildBatchCommandOutput extends RetryBuildBatchOutput, __M * // environment: { // ProjectEnvironment * // type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * // image: "STRING_VALUE", // required - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // fleet: { // ProjectFleet * // fleetArn: "STRING_VALUE", * // }, diff --git a/clients/client-codebuild/src/commands/RetryBuildCommand.ts b/clients/client-codebuild/src/commands/RetryBuildCommand.ts index db579b3366e06..f21ed75ce6061 100644 --- a/clients/client-codebuild/src/commands/RetryBuildCommand.ts +++ b/clients/client-codebuild/src/commands/RetryBuildCommand.ts @@ -146,7 +146,13 @@ export interface RetryBuildCommandOutput extends RetryBuildOutput, __MetadataBea * // environment: { // ProjectEnvironment * // type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // 
required * // image: "STRING_VALUE", // required - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // fleet: { // ProjectFleet * // fleetArn: "STRING_VALUE", * // }, diff --git a/clients/client-codebuild/src/commands/StartBuildBatchCommand.ts b/clients/client-codebuild/src/commands/StartBuildBatchCommand.ts index 5562d693c1141..7c2fb6eae697d 100644 --- a/clients/client-codebuild/src/commands/StartBuildBatchCommand.ts +++ b/clients/client-codebuild/src/commands/StartBuildBatchCommand.ts @@ -114,7 +114,7 @@ export interface StartBuildBatchCommandOutput extends StartBuildBatchOutput, __M * reportBuildBatchStatusOverride: true || false, * environmentTypeOverride: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", * imageOverride: "STRING_VALUE", - * computeTypeOverride: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", + * computeTypeOverride: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", * certificateOverride: "STRING_VALUE", * cacheOverride: { // ProjectCache * type: "NO_CACHE" || "S3" || "LOCAL", // required @@ -267,7 +267,13 @@ export interface StartBuildBatchCommandOutput extends StartBuildBatchOutput, __M * // environment: { // ProjectEnvironment * // type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * // image: "STRING_VALUE", // required - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // fleet: { // ProjectFleet * // fleetArn: "STRING_VALUE", * // }, diff --git 
a/clients/client-codebuild/src/commands/StartBuildCommand.ts b/clients/client-codebuild/src/commands/StartBuildCommand.ts index fd1350dbe33fa..07cfd9cb8413d 100644 --- a/clients/client-codebuild/src/commands/StartBuildCommand.ts +++ b/clients/client-codebuild/src/commands/StartBuildCommand.ts @@ -121,7 +121,7 @@ export interface StartBuildCommandOutput extends StartBuildOutput, __MetadataBea * }, * environmentTypeOverride: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", * imageOverride: "STRING_VALUE", - * computeTypeOverride: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", + * computeTypeOverride: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", * certificateOverride: "STRING_VALUE", * cacheOverride: { // ProjectCache * type: "NO_CACHE" || "S3" || "LOCAL", // required @@ -267,7 +267,13 @@ export interface StartBuildCommandOutput extends StartBuildOutput, __MetadataBea * // environment: { // ProjectEnvironment * // type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * // image: "STRING_VALUE", // required - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // fleet: { // ProjectFleet * // fleetArn: "STRING_VALUE", * // }, diff --git a/clients/client-codebuild/src/commands/StopBuildBatchCommand.ts b/clients/client-codebuild/src/commands/StopBuildBatchCommand.ts index 4f4bb8ca89524..f557a326373c2 100644 --- a/clients/client-codebuild/src/commands/StopBuildBatchCommand.ts +++ b/clients/client-codebuild/src/commands/StopBuildBatchCommand.ts @@ -144,7 +144,13 @@ export interface StopBuildBatchCommandOutput extends StopBuildBatchOutput, __Met * // environment: { // ProjectEnvironment * // type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * // image: "STRING_VALUE", // required - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", 
// required + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // fleet: { // ProjectFleet * // fleetArn: "STRING_VALUE", * // }, diff --git a/clients/client-codebuild/src/commands/StopBuildCommand.ts b/clients/client-codebuild/src/commands/StopBuildCommand.ts index 7cb48d32251f7..084cd9e99bd0f 100644 --- a/clients/client-codebuild/src/commands/StopBuildCommand.ts +++ b/clients/client-codebuild/src/commands/StopBuildCommand.ts @@ -145,7 +145,13 @@ export interface StopBuildCommandOutput extends StopBuildOutput, __MetadataBeare * // environment: { // ProjectEnvironment * // type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * // image: "STRING_VALUE", // required - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // fleet: { // ProjectFleet * // fleetArn: "STRING_VALUE", * // }, diff --git a/clients/client-codebuild/src/commands/UpdateFleetCommand.ts b/clients/client-codebuild/src/commands/UpdateFleetCommand.ts index df5ff61f79f01..9f3544b598c19 100644 --- a/clients/client-codebuild/src/commands/UpdateFleetCommand.ts +++ b/clients/client-codebuild/src/commands/UpdateFleetCommand.ts @@ -39,7 +39,13 @@ export interface UpdateFleetCommandOutput extends UpdateFleetOutput, __MetadataB * arn: "STRING_VALUE", // required * baseCapacity: Number("int"), * environmentType: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", - * computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", + * computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", + * computeConfiguration: { // ComputeConfiguration + * vCpu: Number("long"), + * memory: Number("long"), + * disk: Number("long"), + * machineType: "GENERAL" || "NVME", + * }, * scalingConfiguration: { 
// ScalingConfigurationInput * scalingType: "TARGET_TRACKING_SCALING", * targetTrackingScalingConfigs: [ // TargetTrackingScalingConfigurations @@ -97,7 +103,13 @@ export interface UpdateFleetCommandOutput extends UpdateFleetOutput, __MetadataB * // }, * // baseCapacity: Number("int"), * // environmentType: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // scalingConfiguration: { // ScalingConfigurationOutput * // scalingType: "TARGET_TRACKING_SCALING", * // targetTrackingScalingConfigs: [ // TargetTrackingScalingConfigurations diff --git a/clients/client-codebuild/src/commands/UpdateProjectCommand.ts b/clients/client-codebuild/src/commands/UpdateProjectCommand.ts index 3096961349918..1e65df74fba85 100644 --- a/clients/client-codebuild/src/commands/UpdateProjectCommand.ts +++ b/clients/client-codebuild/src/commands/UpdateProjectCommand.ts @@ -123,7 +123,13 @@ export interface UpdateProjectCommandOutput extends UpdateProjectOutput, __Metad * environment: { // ProjectEnvironment * type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * image: "STRING_VALUE", // required - * computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * computeConfiguration: { // ComputeConfiguration + * vCpu: Number("long"), + * memory: Number("long"), + * disk: Number("long"), + * machineType: "GENERAL" || "NVME", + * }, * fleet: { // ProjectFleet * fleetArn: "STRING_VALUE", * }, @@ -291,7 +297,13 @@ export interface UpdateProjectCommandOutput extends UpdateProjectOutput, __Metad * // environment: { // ProjectEnvironment * // type: "WINDOWS_CONTAINER" || "LINUX_CONTAINER" || "LINUX_GPU_CONTAINER" || "ARM_CONTAINER" || "WINDOWS_SERVER_2019_CONTAINER" || "LINUX_LAMBDA_CONTAINER" || "ARM_LAMBDA_CONTAINER" || "MAC_ARM", // required * // image: "STRING_VALUE", // required - * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || 
"BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB", // required + * // computeType: "BUILD_GENERAL1_SMALL" || "BUILD_GENERAL1_MEDIUM" || "BUILD_GENERAL1_LARGE" || "BUILD_GENERAL1_XLARGE" || "BUILD_GENERAL1_2XLARGE" || "BUILD_LAMBDA_1GB" || "BUILD_LAMBDA_2GB" || "BUILD_LAMBDA_4GB" || "BUILD_LAMBDA_8GB" || "BUILD_LAMBDA_10GB" || "ATTRIBUTE_BASED_COMPUTE", // required + * // computeConfiguration: { // ComputeConfiguration + * // vCpu: Number("long"), + * // memory: Number("long"), + * // disk: Number("long"), + * // machineType: "GENERAL" || "NVME", + * // }, * // fleet: { // ProjectFleet * // fleetArn: "STRING_VALUE", * // }, diff --git a/clients/client-codebuild/src/models/models_0.ts b/clients/client-codebuild/src/models/models_0.ts index a4f9bf4981f1d..49ba65d2d3aae 100644 --- a/clients/client-codebuild/src/models/models_0.ts +++ b/clients/client-codebuild/src/models/models_0.ts @@ -682,11 +682,56 @@ export interface ProjectCache { modes?: CacheMode[]; } +/** + * @public + * @enum + */ +export const MachineType = { + GENERAL: "GENERAL", + NVME: "NVME", +} as const; + +/** + * @public + */ +export type MachineType = (typeof MachineType)[keyof typeof MachineType]; + +/** + *
Contains compute attributes. These attributes only need to be specified when your project's or fleet's computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
The number of vCPUs of the instance type included in your fleet.
+ * @public + */ + vCpu?: number; + + /** + *The amount of memory of the instance type included in your fleet.
+ * @public + */ + memory?: number; + + /** + *The amount of disk space of the instance type included in your fleet.
+ * @public + */ + disk?: number; + + /** + *The machine type of the instance type included in your fleet.
+ * @public + */ + machineType?: MachineType; +} + /** * @public * @enum */ export const ComputeType = { + ATTRIBUTE_BASED_COMPUTE: "ATTRIBUTE_BASED_COMPUTE", BUILD_GENERAL1_2XLARGE: "BUILD_GENERAL1_2XLARGE", BUILD_GENERAL1_LARGE: "BUILD_GENERAL1_LARGE", BUILD_GENERAL1_MEDIUM: "BUILD_GENERAL1_MEDIUM", @@ -956,96 +1001,108 @@ export interface ProjectEnvironment { *
- * BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for
+ * ATTRIBUTE_BASED_COMPUTE
: Specify the amount of vCPUs, memory, disk space, and the type of machine.
If you use ATTRIBUTE_BASED_COMPUTE
, you must define your attributes by using computeConfiguration
. CodeBuild
+ * will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment
+ * types in the CodeBuild User Guide.
+ * BUILD_GENERAL1_SMALL
: Use up to 4 GiB memory and 2 vCPUs for
* builds.
- * BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for
+ * BUILD_GENERAL1_MEDIUM
: Use up to 8 GiB memory and 4 vCPUs for
* builds.
- * BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for
+ * BUILD_GENERAL1_LARGE
: Use up to 16 GiB memory and 8 vCPUs for
* builds, depending on your environment type.
- * BUILD_GENERAL1_XLARGE
: Use up to 70 GB memory and 36 vCPUs for
+ * BUILD_GENERAL1_XLARGE
: Use up to 72 GiB memory and 36 vCPUs for
* builds, depending on your environment type.
- * BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and
+ * BUILD_GENERAL1_2XLARGE
: Use up to 144 GiB memory, 72 vCPUs, and
* 824 GB of SSD storage for builds. This compute type supports Docker images up to
* 100 GB uncompressed.
- * BUILD_LAMBDA_1GB
: Use up to 1 GB memory for
+ * BUILD_LAMBDA_1GB
: Use up to 1 GiB memory for
* builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
- * BUILD_LAMBDA_2GB
: Use up to 2 GB memory for
+ * BUILD_LAMBDA_2GB
: Use up to 2 GiB memory for
* builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
- * BUILD_LAMBDA_4GB
: Use up to 4 GB memory for
+ * BUILD_LAMBDA_4GB
: Use up to 4 GiB memory for
* builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
- * BUILD_LAMBDA_8GB
: Use up to 8 GB memory for
+ * BUILD_LAMBDA_8GB
: Use up to 8 GiB memory for
* builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
- * BUILD_LAMBDA_10GB
: Use up to 10 GB memory for
+ * BUILD_LAMBDA_10GB
: Use up to 10 GiB memory for
* builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 3 GB
+ *
For environment type LINUX_CONTAINER
, you can use up to 4 GiB
* memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16
- * GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GB
+ *
For environment type ARM_CONTAINER
, you can use up to 4 GiB
* memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB
+ *
For environment type LINUX_CONTAINER
, you can use up to 16 GiB
* memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255
- * GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB
+ *
For environment type ARM_CONTAINER
, you can use up to 16 GiB
* memory and 8 vCPUs on ARM-based processors for builds.
If you're using compute fleets during project creation, computeType
will be ignored.
For more information, see Build Environment - * Compute Types in the CodeBuild User Guide. + *
For more information, see On-demand environment types + * in the CodeBuild User Guide. *
* @public */ computeType: ComputeType | undefined; + /** + *The compute configuration of the build project. This is only required if computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
A ProjectFleet object to use for this build project.
* @public @@ -3154,68 +3211,108 @@ export interface Fleet { *
- * BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for
+ * ATTRIBUTE_BASED_COMPUTE
: Specify the amount of vCPUs, memory, disk space, and the type of machine.
If you use ATTRIBUTE_BASED_COMPUTE
, you must define your attributes by using computeConfiguration
. CodeBuild
+ * will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment
+ * types in the CodeBuild User Guide.
+ * BUILD_GENERAL1_SMALL
: Use up to 4 GiB memory and 2 vCPUs for
* builds.
- * BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for
+ * BUILD_GENERAL1_MEDIUM
: Use up to 8 GiB memory and 4 vCPUs for
* builds.
- * BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for
+ * BUILD_GENERAL1_LARGE
: Use up to 16 GiB memory and 8 vCPUs for
* builds, depending on your environment type.
- * BUILD_GENERAL1_XLARGE
: Use up to 70 GB memory and 36 vCPUs for
+ * BUILD_GENERAL1_XLARGE
: Use up to 72 GiB memory and 36 vCPUs for
* builds, depending on your environment type.
- * BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and
+ * BUILD_GENERAL1_2XLARGE
: Use up to 144 GiB memory, 72 vCPUs, and
* 824 GB of SSD storage for builds. This compute type supports Docker images up to
* 100 GB uncompressed.
+ * BUILD_LAMBDA_1GB
: Use up to 1 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_2GB
: Use up to 2 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_4GB
: Use up to 4 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_8GB
: Use up to 8 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_10GB
: Use up to 10 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 3 GB
+ *
For environment type LINUX_CONTAINER
, you can use up to 4 GiB
* memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16
- * GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GB
+ *
For environment type ARM_CONTAINER
, you can use up to 4 GiB
* memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB
+ *
For environment type LINUX_CONTAINER
, you can use up to 16 GiB
* memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255
- * GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB
+ *
For environment type ARM_CONTAINER
, you can use up to 16 GiB
* memory and 8 vCPUs on ARM-based processors for builds.
For more information, see Build environment - * compute types in the CodeBuild User Guide. + *
For more information, see On-demand environment types + * in the CodeBuild User Guide. *
* @public */ computeType?: ComputeType; + /** + *The compute configuration of the compute fleet. This is only required if computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
The scaling configuration of the compute fleet.
* @public @@ -4792,68 +4889,108 @@ export interface CreateFleetInput { *
- * BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for
+ * ATTRIBUTE_BASED_COMPUTE
: Specify the amount of vCPUs, memory, disk space, and the type of machine.
If you use ATTRIBUTE_BASED_COMPUTE
, you must define your attributes by using computeConfiguration
. CodeBuild
+ * will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment
+ * types in the CodeBuild User Guide.
+ * BUILD_GENERAL1_SMALL
: Use up to 4 GiB memory and 2 vCPUs for
* builds.
- * BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for
+ * BUILD_GENERAL1_MEDIUM
: Use up to 8 GiB memory and 4 vCPUs for
* builds.
- * BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for
+ * BUILD_GENERAL1_LARGE
: Use up to 16 GiB memory and 8 vCPUs for
* builds, depending on your environment type.
- * BUILD_GENERAL1_XLARGE
: Use up to 70 GB memory and 36 vCPUs for
+ * BUILD_GENERAL1_XLARGE
: Use up to 72 GiB memory and 36 vCPUs for
* builds, depending on your environment type.
- * BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and
+ * BUILD_GENERAL1_2XLARGE
: Use up to 144 GiB memory, 72 vCPUs, and
* 824 GB of SSD storage for builds. This compute type supports Docker images up to
* 100 GB uncompressed.
+ * BUILD_LAMBDA_1GB
: Use up to 1 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_2GB
: Use up to 2 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_4GB
: Use up to 4 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_8GB
: Use up to 8 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_10GB
: Use up to 10 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 3 GB
+ *
For environment type LINUX_CONTAINER
, you can use up to 4 GiB
* memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16
- * GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GB
+ *
For environment type ARM_CONTAINER
, you can use up to 4 GiB
* memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB
+ *
For environment type LINUX_CONTAINER
, you can use up to 16 GiB
* memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255
- * GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB
+ *
For environment type ARM_CONTAINER
, you can use up to 16 GiB
* memory and 8 vCPUs on ARM-based processors for builds.
For more information, see Build environment - * compute types in the CodeBuild User Guide. + *
For more information, see On-demand environment types + * in the CodeBuild User Guide. *
* @public */ computeType: ComputeType | undefined; + /** + *The compute configuration of the compute fleet. This is only required if computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
The scaling configuration of the compute fleet.
* @public @@ -8020,68 +8157,108 @@ export interface UpdateFleetInput { *
- * BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for
+ * ATTRIBUTE_BASED_COMPUTE
: Specify the amount of vCPUs, memory, disk space, and the type of machine.
If you use ATTRIBUTE_BASED_COMPUTE
, you must define your attributes by using computeConfiguration
. CodeBuild
+ * will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment
+ * types in the CodeBuild User Guide.
+ * BUILD_GENERAL1_SMALL
: Use up to 4 GiB memory and 2 vCPUs for
* builds.
- * BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for
+ * BUILD_GENERAL1_MEDIUM
: Use up to 8 GiB memory and 4 vCPUs for
* builds.
- * BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for
+ * BUILD_GENERAL1_LARGE
: Use up to 16 GiB memory and 8 vCPUs for
* builds, depending on your environment type.
- * BUILD_GENERAL1_XLARGE
: Use up to 70 GB memory and 36 vCPUs for
+ * BUILD_GENERAL1_XLARGE
: Use up to 72 GiB memory and 36 vCPUs for
* builds, depending on your environment type.
- * BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and
+ * BUILD_GENERAL1_2XLARGE
: Use up to 144 GiB memory, 72 vCPUs, and
* 824 GB of SSD storage for builds. This compute type supports Docker images up to
* 100 GB uncompressed.
+ * BUILD_LAMBDA_1GB
: Use up to 1 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_2GB
: Use up to 2 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_4GB
: Use up to 4 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_8GB
: Use up to 8 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
+ * BUILD_LAMBDA_10GB
: Use up to 10 GiB memory for
+ * builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 3 GB
+ *
For environment type LINUX_CONTAINER
, you can use up to 4 GiB
* memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16
- * GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GB
+ *
For environment type ARM_CONTAINER
, you can use up to 4 GiB
* memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB
+ *
For environment type LINUX_CONTAINER
, you can use up to 16 GiB
* memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255
- * GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB
+ *
For environment type ARM_CONTAINER
, you can use up to 16 GiB
* memory and 8 vCPUs on ARM-based processors for builds.
For more information, see Build environment - * compute types in the CodeBuild User Guide. + *
For more information, see On-demand environment types + * in the CodeBuild User Guide. *
* @public */ computeType?: ComputeType; + /** + *The compute configuration of the compute fleet. This is only required if computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
The scaling configuration of the compute fleet.
* @public diff --git a/clients/client-codebuild/src/protocols/Aws_json1_1.ts b/clients/client-codebuild/src/protocols/Aws_json1_1.ts index 4015fe19c5dbd..7e044a5cdc213 100644 --- a/clients/client-codebuild/src/protocols/Aws_json1_1.ts +++ b/clients/client-codebuild/src/protocols/Aws_json1_1.ts @@ -148,6 +148,7 @@ import { CloudWatchLogsConfig, CodeCoverage, CodeCoverageReportSummary, + ComputeConfiguration, CreateFleetInput, CreateFleetOutput, CreateProjectInput, @@ -2037,6 +2038,8 @@ const de_ResourceNotFoundExceptionRes = async ( // se_CloudWatchLogsConfig omitted. +// se_ComputeConfiguration omitted. + // se_ComputeTypesAllowed omitted. /** @@ -2045,6 +2048,7 @@ const de_ResourceNotFoundExceptionRes = async ( const se_CreateFleetInput = (input: CreateFleetInput, context: __SerdeContext): any => { return take(input, { baseCapacity: [], + computeConfiguration: _json, computeType: [], environmentType: [], fleetServiceRole: [], @@ -2268,6 +2272,7 @@ const se_UpdateFleetInput = (input: UpdateFleetInput, context: __SerdeContext): return take(input, { arn: [], baseCapacity: [], + computeConfiguration: _json, computeType: [], environmentType: [], fleetServiceRole: [], @@ -2627,6 +2632,8 @@ const de_CodeCoverages = (output: any, context: __SerdeContext): CodeCoverage[] return retVal; }; +// de_ComputeConfiguration omitted. + // de_ComputeTypesAllowed omitted. /** @@ -2734,6 +2741,7 @@ const de_Fleet = (output: any, context: __SerdeContext): Fleet => { return take(output, { arn: __expectString, baseCapacity: __expectInt32, + computeConfiguration: _json, computeType: __expectString, created: (_: any) => __expectNonNull(__parseEpochTimestamp(__expectNumber(_))), environmentType: __expectString, diff --git a/codegen/sdk-codegen/aws-models/codebuild.json b/codegen/sdk-codegen/aws-models/codebuild.json index 67a84239c6331..7eef6224b5403 100644 --- a/codegen/sdk-codegen/aws-models/codebuild.json +++ b/codegen/sdk-codegen/aws-models/codebuild.json @@ -2800,6 +2800,38 @@ "target": "com.amazonaws.codebuild#CodeCoverage" } }, + "com.amazonaws.codebuild#ComputeConfiguration": { + "type": "structure", + "members": { + "vCpu": { + "target": "com.amazonaws.codebuild#WrapperLong", + "traits": { + "smithy.api#documentation": "The number of vCPUs of the instance type included in your fleet.
" + } + }, + "memory": { + "target": "com.amazonaws.codebuild#WrapperLong", + "traits": { + "smithy.api#documentation": "The amount of memory of the instance type included in your fleet.
" + } + }, + "disk": { + "target": "com.amazonaws.codebuild#WrapperLong", + "traits": { + "smithy.api#documentation": "The amount of disk space of the instance type included in your fleet.
" + } + }, + "machineType": { + "target": "com.amazonaws.codebuild#MachineType", + "traits": { + "smithy.api#documentation": "The machine type of the instance type included in your fleet.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains compute attributes. These attributes only need be specified when your project's or fleet's computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
Information about the compute resources the compute fleet uses. Available values\n include:
\n\n BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for\n builds.
\n BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for\n builds.
\n BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for\n builds, depending on your environment type.
\n BUILD_GENERAL1_XLARGE
: Use up to 70 GB memory and 36 vCPUs for\n builds, depending on your environment type.
\n BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and\n 824 GB of SSD storage for builds. This compute type supports Docker images up to\n 100 GB uncompressed.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 3 GB\n memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16\n GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GB\n memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB\n memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255\n GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB\n memory and 8 vCPUs on ARM-based processors for builds.
For more information, see Build environment\n compute types in the CodeBuild User Guide.\n
", + "smithy.api#documentation": "Information about the compute resources the compute fleet uses. Available values\n include:
\n\n ATTRIBUTE_BASED_COMPUTE
: Specify the amount of vCPUs, memory, disk space, and the type of machine.
If you use ATTRIBUTE_BASED_COMPUTE
, you must define your attributes by using computeConfiguration
. CodeBuild \n will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment \n types in the CodeBuild User Guide.
\n BUILD_GENERAL1_SMALL
: Use up to 4 GiB memory and 2 vCPUs for\n builds.
\n BUILD_GENERAL1_MEDIUM
: Use up to 8 GiB memory and 4 vCPUs for\n builds.
\n BUILD_GENERAL1_LARGE
: Use up to 16 GiB memory and 8 vCPUs for\n builds, depending on your environment type.
\n BUILD_GENERAL1_XLARGE
: Use up to 72 GiB memory and 36 vCPUs for\n builds, depending on your environment type.
\n BUILD_GENERAL1_2XLARGE
: Use up to 144 GiB memory, 72 vCPUs, and\n 824 GB of SSD storage for builds. This compute type supports Docker images up to\n 100 GB uncompressed.
\n BUILD_LAMBDA_1GB
: Use up to 1 GiB memory for\n builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
\n BUILD_LAMBDA_2GB
: Use up to 2 GiB memory for\n builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
\n BUILD_LAMBDA_4GB
: Use up to 4 GiB memory for\n builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
\n BUILD_LAMBDA_8GB
: Use up to 8 GiB memory for\n builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
\n BUILD_LAMBDA_10GB
: Use up to 10 GiB memory for\n builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 4 GiB\n memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16\n GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GiB\n memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 16 GiB\n memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255\n GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GiB\n memory and 8 vCPUs on ARM-based processors for builds.
For more information, see On-demand environment types \n in the CodeBuild User Guide.\n
", "smithy.api#required": {} } }, + "computeConfiguration": { + "target": "com.amazonaws.codebuild#ComputeConfiguration", + "traits": { + "smithy.api#documentation": "The compute configuration of the compute fleet. This is only required if computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
Information about the compute resources the compute fleet uses. Available values\n include:
\n\n BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for\n builds.
\n BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for\n builds.
\n BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for\n builds, depending on your environment type.
\n BUILD_GENERAL1_XLARGE
: Use up to 70 GB memory and 36 vCPUs for\n builds, depending on your environment type.
\n BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and\n 824 GB of SSD storage for builds. This compute type supports Docker images up to\n 100 GB uncompressed.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 3 GB\n memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16\n GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GB\n memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB\n memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255\n GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB\n memory and 8 vCPUs on ARM-based processors for builds.
For more information, see Build environment\n compute types in the CodeBuild User Guide.\n
" + "smithy.api#documentation": "Information about the compute resources the compute fleet uses. Available values\n include:
\n\n ATTRIBUTE_BASED_COMPUTE
: Specify the amount of vCPUs, memory, disk space, and the type of machine.
If you use ATTRIBUTE_BASED_COMPUTE
, you must define your attributes by using computeConfiguration
. CodeBuild \n will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment \n types in the CodeBuild User Guide.
\n BUILD_GENERAL1_SMALL
: Use up to 4 GiB memory and 2 vCPUs for\n builds.
\n BUILD_GENERAL1_MEDIUM
: Use up to 8 GiB memory and 4 vCPUs for\n builds.
\n BUILD_GENERAL1_LARGE
: Use up to 16 GiB memory and 8 vCPUs for\n builds, depending on your environment type.
\n BUILD_GENERAL1_XLARGE
: Use up to 72 GiB memory and 36 vCPUs for\n builds, depending on your environment type.
\n BUILD_GENERAL1_2XLARGE
: Use up to 144 GiB memory, 72 vCPUs, and\n 824 GB of SSD storage for builds. This compute type supports Docker images up to\n 100 GB uncompressed.
\n BUILD_LAMBDA_1GB
: Use up to 1 GiB memory for\n builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
\n BUILD_LAMBDA_2GB
: Use up to 2 GiB memory for\n builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
\n BUILD_LAMBDA_4GB
: Use up to 4 GiB memory for\n builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
\n BUILD_LAMBDA_8GB
: Use up to 8 GiB memory for\n builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
\n BUILD_LAMBDA_10GB
: Use up to 10 GiB memory for\n builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 4 GiB\n memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16\n GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GiB\n memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 16 GiB\n memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255\n GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GiB\n memory and 8 vCPUs on ARM-based processors for builds.
For more information, see On-demand environment types \n in the CodeBuild User Guide.\n
" + } + }, + "computeConfiguration": { + "target": "com.amazonaws.codebuild#ComputeConfiguration", + "traits": { + "smithy.api#documentation": "The compute configuration of the compute fleet. This is only required if computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
           "smithy.api#documentation": "Information about build logs in CloudWatch Logs."
         }
       },
+    "com.amazonaws.codebuild#MachineType": {
+      "type": "enum",
+      "members": {
+        "GENERAL": {
+          "target": "smithy.api#Unit",
+          "traits": {
+            "smithy.api#enumValue": "GENERAL"
+          }
+        },
+        "NVME": {
+          "target": "smithy.api#Unit",
+          "traits": {
+            "smithy.api#enumValue": "NVME"
+          }
+        }
+      }
+    },
     "com.amazonaws.codebuild#NetworkInterface": {
       "type": "structure",
       "members": {
@@ -6442,10 +6509,16 @@
         "computeType": {
           "target": "com.amazonaws.codebuild#ComputeType",
           "traits": {
-          "smithy.api#documentation": "Information about the compute resources the build project uses. Available values include: BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds. BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds. BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_XLARGE: Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed. BUILD_LAMBDA_1GB: Use up to 1 GB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_2GB: Use up to 2 GB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_4GB: Use up to 4 GB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_8GB: Use up to 8 GB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_10GB: Use up to 10 GB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. If you use BUILD_GENERAL1_SMALL: For environment type LINUX_CONTAINER, you can use up to 3 GB memory and 2 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds. For environment type ARM_CONTAINER, you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds. If you use BUILD_GENERAL1_LARGE: For environment type LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. For environment type ARM_CONTAINER, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds. If you're using compute fleets during project creation, computeType will be ignored. For more information, see Build Environment Compute Types in the CodeBuild User Guide.",
+          "smithy.api#documentation": "Information about the compute resources the build project uses. Available values include: ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and the type of machine. If you use ATTRIBUTE_BASED_COMPUTE, you must define your attributes by using computeConfiguration. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide. BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds. BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds. BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed. BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. If you use BUILD_GENERAL1_SMALL: For environment type LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds. For environment type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds. If you use BUILD_GENERAL1_LARGE: For environment type LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. For environment type ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds. For more information, see On-demand environment types in the CodeBuild User Guide.",
           "smithy.api#required": {}
         }
       },
+      "computeConfiguration": {
+        "target": "com.amazonaws.codebuild#ComputeConfiguration",
+        "traits": {
+          "smithy.api#documentation": "The compute configuration of the build project. This is only required if computeType is set to ATTRIBUTE_BASED_COMPUTE."
+        }
+      },
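A hedged sketch of the same feature on the project side: the environment block of `CreateProjectCommand` is the ProjectEnvironment structure changed above, so ATTRIBUTE_BASED_COMPUTE must be paired with a computeConfiguration there as well. The project name, service role ARN, buildspec, image, and attribute values are placeholders for illustration only.

```ts
import { CodeBuildClient, CreateProjectCommand } from "@aws-sdk/client-codebuild";

const client = new CodeBuildClient({});

// Placeholder project; the relevant part is environment.computeType plus
// environment.computeConfiguration.
await client.send(
  new CreateProjectCommand({
    name: "attribute-based-project", // placeholder
    serviceRole: "arn:aws:iam::111122223333:role/codebuild-service-role", // placeholder
    source: {
      type: "NO_SOURCE",
      buildspec: "version: 0.2\nphases:\n  build:\n    commands:\n      - echo hello",
    },
    artifacts: { type: "NO_ARTIFACTS" },
    environment: {
      type: "LINUX_CONTAINER",
      image: "aws/codebuild/amazonlinux2-x86_64-standard:5.0", // illustrative image
      computeType: "ATTRIBUTE_BASED_COMPUTE",
      computeConfiguration: {
        vCpu: 8,
        memory: 16, // assumed to be GiB
        machineType: "GENERAL",
      },
    },
  })
);
```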
-          "smithy.api#documentation": "Information about the compute resources the compute fleet uses. Available values include: BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds. BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds. BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_XLARGE: Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed. If you use BUILD_GENERAL1_SMALL: For environment type LINUX_CONTAINER, you can use up to 3 GB memory and 2 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds. For environment type ARM_CONTAINER, you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds. If you use BUILD_GENERAL1_LARGE: For environment type LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. For environment type ARM_CONTAINER, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds. For more information, see Build environment compute types in the CodeBuild User Guide."
+          "smithy.api#documentation": "Information about the compute resources the compute fleet uses. Available values include: ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and the type of machine. If you use ATTRIBUTE_BASED_COMPUTE, you must define your attributes by using computeConfiguration. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide. BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds. BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds. BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed. BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. If you use BUILD_GENERAL1_SMALL: For environment type LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds. For environment type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds. If you use BUILD_GENERAL1_LARGE: For environment type LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. For environment type ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds. For more information, see On-demand environment types in the CodeBuild User Guide."
         }
       },
+      "computeConfiguration": {
+        "target": "com.amazonaws.codebuild#ComputeConfiguration",
+        "traits": {
+          "smithy.api#documentation": "The compute configuration of the compute fleet. This is only required if computeType is set to ATTRIBUTE_BASED_COMPUTE.
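Finally, reading the new fields back works like any other fleet attribute. This sketch inspects a described fleet for attribute-based compute; the fleet name passed to `names` is a placeholder, and the `computeConfiguration` field on the response is the structure added in this diff (see the BatchGetFleetsCommand docs above).

```ts
import { CodeBuildClient, BatchGetFleetsCommand } from "@aws-sdk/client-codebuild";

const client = new CodeBuildClient({});

const { fleets } = await client.send(
  new BatchGetFleetsCommand({ names: ["attribute-based-fleet"] }) // placeholder name or ARN
);

for (const fleet of fleets ?? []) {
  if (fleet.computeType === "ATTRIBUTE_BASED_COMPUTE") {
    // computeConfiguration carries the vCpu, memory, disk, and machineType attributes.
    console.log(fleet.name, fleet.computeConfiguration);
  }
}
```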