feat(spanner): support defining autoscaling limit as nodes #9606

Merged
Changes from 5 commits
36 changes: 35 additions & 1 deletion mmv1/products/spanner/Instance.yaml
@@ -180,19 +180,53 @@ properties:
name: 'autoscalingLimits'
description: |
Defines scale in controls to reduce the risk of response latency
and outages due to abrupt scale-in events
and outages due to abrupt scale-in events. Users can define the minimum and
maximum compute capacity allocated to the instance, and the autoscaler will
only scale within that range. Users can use either nodes or processing
units to specify the limits, but should use the same unit to set both the
min_limit and max_limit.
properties:
- !ruby/object:Api::Type::Integer
name: 'minProcessingUnits'
description: |
Specifies the minimum number of processing units allocated to the instance.
If set, this number should be a multiple of 1000.
exactly_one_of:
- min_processing_units
- min_nodes
required_with:
- max_processing_units
- !ruby/object:Api::Type::Integer
name: 'maxProcessingUnits'
description: |
Specifies the maximum number of processing units allocated to the instance.
If set, this number should be a multiple of 1000 and greater than or equal to
min_processing_units.
exactly_one_of:
- max_processing_units
- max_nodes
required_with:
Member

Introducing new requirements between existing fields is a breaking change and can only be done in a major release. Can we remove the two required_with blocks for the existing fields?

Contributor Author

@c2thorn Thanks for the suggestion, please take another look.

- min_processing_units
- !ruby/object:Api::Type::Integer
name: 'minNodes'
description: |
Specifies the minimum number of nodes allocated to the instance. If set, this number
should be greater than or equal to 1.
exactly_one_of:
- min_processing_units
- min_nodes
required_with:
- max_nodes
- !ruby/object:Api::Type::Integer
name: 'maxNodes'
description: |
Specifies the maximum number of nodes allocated to the instance. If set, this number
should be greater than or equal to min_nodes.
exactly_one_of:
- max_processing_units
- max_nodes
required_with:
- min_nodes
- !ruby/object:Api::Type::NestedObject
name: 'autoscalingTargets'
description: |
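As the description and field docs above note, the autoscaling limits can be set either in nodes or in processing units, and both bounds must use the same unit. A minimal node-based configuration sketch (an illustration only; the resource name, config, and target values are placeholders mirroring the test config added later in this PR):

resource "google_spanner_instance" "autoscaled" {
  name         = "autoscaled-instance"
  config       = "regional-us-central1"
  display_name = "Autoscaled Instance"

  autoscaling_config {
    autoscaling_limits {
      # Node-based limits: min_nodes/max_nodes are mutually exclusive with
      # min_processing_units/max_processing_units (exactly_one_of), and each
      # node field requires its counterpart (required_with).
      min_nodes = 1
      max_nodes = 2
    }
    autoscaling_targets {
      high_priority_cpu_utilization_percent = 65
      storage_utilization_percent           = 95
    }
  }
}

Swapping the two node fields for min_processing_units/max_processing_units (in multiples of 1000) is the other valid form; mixing units across the two bounds should be rejected by the schema.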
@@ -24,6 +24,12 @@ if d.HasChange("autoscaling_config.0.autoscaling_limits.0.max_processing_units")
if d.HasChange("autoscaling_config.0.autoscaling_limits.0.min_processing_units") {
updateMask = append(updateMask, "autoscalingConfig.autoscalingLimits.minProcessingUnits")
}
if d.HasChange("autoscaling_config.0.autoscaling_limits.0.max_nodes") {
updateMask = append(updateMask, "autoscalingConfig.autoscalingLimits.maxNodes")
}
if d.HasChange("autoscaling_config.0.autoscaling_limits.0.min_nodes") {
updateMask = append(updateMask, "autoscalingConfig.autoscalingLimits.minNodes")
}
if d.HasChange("autoscaling_config.0.autoscaling_targets.0.high_priority_cpu_utilization_percent") {
updateMask = append(updateMask, "autoscalingConfig.autoscalingTargets.highPriorityCpuUtilizationPercent")
}
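For context, the two new branches above append the node limits to the update mask built for an in-place update, mirroring the existing processing-unit branches. A rough sketch of a change that should exercise them (assuming an instance created with the node-based limits sketched earlier; only the changed block is shown):

autoscaling_config {
  autoscaling_limits {
    # Raising only the node limits should add
    # autoscalingConfig.autoscalingLimits.minNodes and
    # autoscalingConfig.autoscalingLimits.maxNodes to the update mask,
    # leaving the other paths untouched.
    min_nodes = 2 # was 1
    max_nodes = 3 # was 2
  }
}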
@@ -3,8 +3,11 @@ resource "google_spanner_instance" "example" {
display_name = "Test Spanner Instance"
autoscaling_config {
autoscaling_limits {
max_processing_units = 3000
min_processing_units = 2000
// Define the minimum and maximum compute capacity allocated to the instance.
// Use either nodes or processing units to specify the limits,
// but use the same unit to set both the minimum and the maximum.
max_processing_units = 3000 // OR max_nodes = 3
min_processing_units = 2000 // OR min_nodes = 2
}
autoscaling_targets {
high_priority_cpu_utilization_percent = 75
@@ -203,6 +203,67 @@ func TestAccSpannerInstance_basicWithAutoscalingUsingProcessingUnitConfigUpdate(
})
}

func TestAccSpannerInstance_basicWithAutoscalingUsingNodeConfig(t *testing.T) {
t.Parallel()

displayName := fmt.Sprintf("spanner-test-%s-dname", acctest.RandString(t, 10))
acctest.VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
CheckDestroy: testAccCheckSpannerInstanceDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccSpannerInstance_basicWithAutoscalerConfigUsingNodesAsConfigs(displayName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttrSet("google_spanner_instance.basic", "state"),
),
},
{
ResourceName: "google_spanner_instance.basic",
ImportState: true,
ImportStateVerify: true,
},
},
})
}

func TestAccSpannerInstance_basicWithAutoscalingUsingNodeConfigUpdate(t *testing.T) {
t.Parallel()

displayName := fmt.Sprintf("spanner-test-%s-dname", acctest.RandString(t, 10))
acctest.VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
CheckDestroy: testAccCheckSpannerInstanceDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccSpannerInstance_basicWithAutoscalerConfigUsingNodesAsConfigsUpdate(displayName, 1, 2, 65, 95),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttrSet("google_spanner_instance.basic", "state"),
),
},
{
ResourceName: "google_spanner_instance.basic",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"labels", "terraform_labels"},
},
{
Config: testAccSpannerInstance_basicWithAutoscalerConfigUsingNodesAsConfigsUpdate(displayName, 2, 3, 75, 90),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttrSet("google_spanner_instance.basic", "state"),
),
},
{
ResourceName: "google_spanner_instance.basic",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"labels", "terraform_labels"},
},
},
})
}

func testAccSpannerInstance_basic(name string) string {
return fmt.Sprintf(`
resource "google_spanner_instance" "basic" {
@@ -304,3 +365,43 @@ resource "google_spanner_instance" "basic" {
}
`, name, name, maxProcessingUnits, minProcessingUnits, cupUtilizationPercent, storageUtilizationPercent)
}

func testAccSpannerInstance_basicWithAutoscalerConfigUsingNodesAsConfigs(name string) string {
return fmt.Sprintf(`
resource "google_spanner_instance" "basic" {
name = "%s"
config = "regional-us-central1"
display_name = "%s"
autoscaling_config {
autoscaling_limits {
max_nodes = 2
min_nodes = 1
}
autoscaling_targets {
high_priority_cpu_utilization_percent = 65
storage_utilization_percent = 95
}
}
}
`, name, name)
}

func testAccSpannerInstance_basicWithAutoscalerConfigUsingNodesAsConfigsUpdate(name string, minNodes, maxNodes, cpuUtilizationPercent, storageUtilizationPercent int) string {
return fmt.Sprintf(`
resource "google_spanner_instance" "basic" {
name = "%s"
config = "regional-us-central1"
display_name = "%s"
autoscaling_config {
autoscaling_limits {
max_nodes = %v
min_nodes = %v
}
autoscaling_targets {
high_priority_cpu_utilization_percent = %v
storage_utilization_percent = %v
}
}
}
`, name, name, maxNodes, minNodes, cpuUtilizationPercent, storageUtilizationPercent)
}