From cc7fe55a455bda2a3fc9d3d8bd1a5b131920a03d Mon Sep 17 00:00:00 2001
From: The Magician
Date: Fri, 22 Sep 2023 16:00:48 -0400
Subject: [PATCH] Avoid recreate instance when cluster not ready (#9023)
 (#15961)

Signed-off-by: Modular Magician
---
 .changelog/9023.txt                           |   6 +
 .../bigtable/resource_bigtable_instance.go    |  27 +-
 ...esource_bigtable_instance_internal_test.go | 243 +++++++++++++++++-
 .../docs/r/bigtable_instance.html.markdown    |   1 +
 4 files changed, 269 insertions(+), 8 deletions(-)
 create mode 100644 .changelog/9023.txt

diff --git a/.changelog/9023.txt b/.changelog/9023.txt
new file mode 100644
index 00000000000..9e6c35cdd76
--- /dev/null
+++ b/.changelog/9023.txt
@@ -0,0 +1,6 @@
+```release-note:bug
+bigtable: avoided re-creation of instance when cluster is still updating and storage type changed
+```
+```release-note:enhancement
+bigtable: added `state` output attribute to `google_bigtable_instance` clusters
+```
diff --git a/google/services/bigtable/resource_bigtable_instance.go b/google/services/bigtable/resource_bigtable_instance.go
index 82ce2d28c77..6d1415c62ce 100644
--- a/google/services/bigtable/resource_bigtable_instance.go
+++ b/google/services/bigtable/resource_bigtable_instance.go
@@ -129,6 +129,11 @@ func ResourceBigtableInstance() *schema.Resource {
 						},
 					},
 				},
+				"state": {
+					Type:        schema.TypeString,
+					Computed:    true,
+					Description: `The state of the cluster`,
+				},
 			},
 		},
 	},
@@ -420,6 +425,7 @@ func flattenBigtableCluster(c *bigtable.ClusterInfo) map[string]interface{} {
 		"cluster_id":   c.Name,
 		"storage_type": storageType,
 		"kms_key_name": c.KMSKeyName,
+		"state":        c.State,
 	}
 	if c.AutoscalingConfig != nil {
 		cluster["autoscaling_config"] = make([]map[string]interface{}, 1)
@@ -565,7 +571,14 @@ func resourceBigtableInstanceUniqueClusterID(_ context.Context, diff *schema.Res
 // This doesn't use the standard unordered list utility (https://github.com/GoogleCloudPlatform/magic-modules/blob/main/templates/terraform/unordered_list_customize_diff.erb)
 // because some fields can't be modified using the API and we recreate the instance
 // when they're changed.
-func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error {
+func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *schema.ResourceDiff, _ interface{}) error {
+	// separate func to allow unit testing
+	return resourceBigtableInstanceClusterReorderTypeListFunc(diff, func(orderedClusters []interface{}) error {
+		return diff.SetNew("cluster", orderedClusters)
+	})
+
+}
+func resourceBigtableInstanceClusterReorderTypeListFunc(diff tpgresource.TerraformResourceDiff, setNew func([]interface{}) error) error {
 	oldCount, newCount := diff.GetChange("cluster.#")
 
 	// Simulate Required:true, MinItems:1 for "cluster". This doesn't work
@@ -594,7 +607,9 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch
 	for i := 0; i < newCount.(int); i++ {
 		_, newId := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i))
 		_, c := diff.GetChange(fmt.Sprintf("cluster.%d", i))
-		clusters[newId.(string)] = c
+		typedCluster := c.(map[string]interface{})
+		typedCluster["state"] = "READY"
+		clusters[newId.(string)] = typedCluster
 	}
 
 	// create a list of clusters using the old order when possible to minimise
@@ -630,9 +645,8 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch
 		}
 	}
 
-	err := diff.SetNew("cluster", orderedClusters)
-	if err != nil {
-		return fmt.Errorf("Error setting cluster diff: %s", err)
+	if err := setNew(orderedClusters); err != nil {
+		return err
 	}
 
 	// Clusters can't have their zone, storage_type or kms_key_name updated,
@@ -658,8 +672,9 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch
 			}
 		}
 
+		currentState, _ := diff.GetChange(fmt.Sprintf("cluster.%d.state", i))
 		oST, nST := diff.GetChange(fmt.Sprintf("cluster.%d.storage_type", i))
-		if oST != nST {
+		if oST != nST && currentState.(string) != "CREATING" {
 			err := diff.ForceNew(fmt.Sprintf("cluster.%d.storage_type", i))
 			if err != nil {
 				return fmt.Errorf("Error setting cluster diff: %s", err)
diff --git a/google/services/bigtable/resource_bigtable_instance_internal_test.go b/google/services/bigtable/resource_bigtable_instance_internal_test.go
index 51a2fb29307..dc0731f253a 100644
--- a/google/services/bigtable/resource_bigtable_instance_internal_test.go
+++ b/google/services/bigtable/resource_bigtable_instance_internal_test.go
@@ -10,9 +10,10 @@ import (
 
 	"cloud.google.com/go/bigtable"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
 )
 
-func TestGetUnavailableClusterZones(t *testing.T) {
+func TestUnitBigtable_getUnavailableClusterZones(t *testing.T) {
 	cases := map[string]struct {
 		clusterZones     []string
 		unavailableZones []string
@@ -56,7 +57,7 @@
 	}
 }
 
-func TestGetInstanceFromResponse(t *testing.T) {
+func TestUnitBigtable_getInstanceFromResponse(t *testing.T) {
 	instanceName := "test-instance"
 	originalId := "original_value"
 	cases := map[string]struct {
@@ -132,3 +133,241 @@
 		}
 	}
 }
+
+func TestUnitBigtable_flattenBigtableCluster(t *testing.T) {
+	cases := map[string]struct {
+		clusterInfo *bigtable.ClusterInfo
+		want        map[string]interface{}
+	}{
+		"SSD auto scaling": {
+			clusterInfo: &bigtable.ClusterInfo{
+				StorageType: bigtable.SSD,
+				Zone:        "zone1",
+				ServeNodes:  5,
+				Name:        "ssd-cluster",
+				KMSKeyName:  "KMS",
+				State:       "CREATING",
+				AutoscalingConfig: &bigtable.AutoscalingConfig{
+					MinNodes:                  3,
+					MaxNodes:                  7,
+					CPUTargetPercent:          50,
+					StorageUtilizationPerNode: 60,
+				},
+			},
+			want: map[string]interface{}{
+				"zone":         "zone1",
+				"num_nodes":    5,
+				"cluster_id":   "ssd-cluster",
+				"storage_type": "SSD",
+				"kms_key_name": "KMS",
+				"state":        "CREATING",
+				"autoscaling_config": []map[string]interface{}{
+					map[string]interface{}{
+						"min_nodes":      3,
+						"max_nodes":      7,
+						"cpu_target":     50,
+						"storage_target": 60,
+					},
+				},
+			},
+		},
+		"HDD manual scaling": {
+			clusterInfo: &bigtable.ClusterInfo{
+				StorageType: bigtable.HDD,
+				Zone:        "zone2",
+				ServeNodes:  7,
+				Name:        "hdd-cluster",
+				KMSKeyName:  "KMS",
+				State:       "READY",
+			},
+			want: map[string]interface{}{
+				"zone":         "zone2",
+				"num_nodes":    7,
+				"cluster_id":   "hdd-cluster",
+				"storage_type": "HDD",
+				"kms_key_name": "KMS",
+				"state":        "READY",
+			},
+		},
+	}
+
+	for tn, tc := range cases {
+		if got := flattenBigtableCluster(tc.clusterInfo); !reflect.DeepEqual(got, tc.want) {
+			t.Errorf("bad: %s, got %q, want %q", tn, got, tc.want)
+		}
+	}
+}
+
+func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc_error(t *testing.T) {
+	d := &tpgresource.ResourceDiffMock{
+		After: map[string]interface{}{
+			"cluster.#": 0,
+		},
+	}
+	if err := resourceBigtableInstanceClusterReorderTypeListFunc(d, nil); err == nil {
+		t.Errorf("expected error, got success")
+	}
+}
+
+func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc(t *testing.T) {
+	cases := map[string]struct {
+		before           map[string]interface{}
+		after            map[string]interface{}
+		wantClusterOrder []string
+		wantForceNew     bool
+	}{
+		"create": {
+			before: map[string]interface{}{
+				"cluster.#":            1,
+				"cluster.0.cluster_id": "some-id-a",
+			},
+			after: map[string]interface{}{
+				"name":                 "some-name",
+				"cluster.#":            1,
+				"cluster.0.cluster_id": "some-id-a",
+				"cluster.0": map[string]interface{}{
+					"cluster_id": "some-id-a",
+				},
+			},
+			wantClusterOrder: []string{},
+			wantForceNew:     false,
+		},
+		"no force new change": {
+			before: map[string]interface{}{
+				"name":                 "some-name",
+				"cluster.#":            4,
+				"cluster.0.cluster_id": "some-id-a",
+				"cluster.1.cluster_id": "some-id-b",
+				"cluster.2.cluster_id": "some-id-c",
+				"cluster.3.cluster_id": "some-id-e",
+			},
+			after: map[string]interface{}{
+				"name":                 "some-name",
+				"cluster.#":            3,
+				"cluster.0.cluster_id": "some-id-c",
+				"cluster.1.cluster_id": "some-id-a",
+				"cluster.2.cluster_id": "some-id-d",
+				"cluster.0": map[string]interface{}{
+					"cluster_id": "some-id-c",
+				},
+				"cluster.1": map[string]interface{}{
+					"cluster_id": "some-id-a",
+				},
+				"cluster.2": map[string]interface{}{
+					"cluster_id": "some-id-d",
+				},
+			},
+			wantClusterOrder: []string{"some-id-a", "some-id-d", "some-id-c"},
+			wantForceNew:     false,
+		},
+		"force new - zone change": {
+			before: map[string]interface{}{
+				"name":                 "some-name",
+				"cluster.#":            1,
+				"cluster.0.cluster_id": "some-id-a",
+				"cluster.0.zone":       "zone-a",
+			},
+			after: map[string]interface{}{
+				"name":                 "some-name",
+				"cluster.#":            1,
+				"cluster.0.cluster_id": "some-id-a",
+				"cluster.0.zone":       "zone-b",
+				"cluster.0": map[string]interface{}{
+					"cluster_id": "some-id-a",
+					"zone":       "zone-b",
+				},
+			},
+			wantClusterOrder: []string{"some-id-a"},
+			wantForceNew:     true,
+		},
+		"force new - kms_key_name change": {
+			before: map[string]interface{}{
+				"name":                   "some-name",
+				"cluster.#":              1,
+				"cluster.0.cluster_id":   "some-id-a",
+				"cluster.0.kms_key_name": "key-a",
+			},
+			after: map[string]interface{}{
+				"name":                   "some-name",
+				"cluster.#":              1,
+				"cluster.0.cluster_id":   "some-id-a",
+				"cluster.0.kms_key_name": "key-b",
+				"cluster.0": map[string]interface{}{
+					"cluster_id":   "some-id-a",
+					"kms_key_name": "key-b",
+				},
+			},
+			wantClusterOrder: []string{"some-id-a"},
+			wantForceNew:     true,
+		},
+		"force new - storage_type change": {
+			before: map[string]interface{}{
+				"name":                   "some-name",
+				"cluster.#":              1,
+				"cluster.0.cluster_id":   "some-id-a",
+				"cluster.0.storage_type": "HDD",
+				"cluster.0.state":        "READY",
+			},
+			after: map[string]interface{}{
+				"name":                   "some-name",
+				"cluster.#":              1,
+				"cluster.0.cluster_id":   "some-id-a",
+				"cluster.0.storage_type": "SSD",
+				"cluster.0": map[string]interface{}{
+					"cluster_id":   "some-id-a",
+					"storage_type": "SSD",
+				},
+			},
+			wantClusterOrder: []string{"some-id-a"},
+			wantForceNew:     true,
+		},
+		"skip force new - storage_type change for CREATING cluster": {
+			before: map[string]interface{}{
+				"name":                   "some-name",
+				"cluster.#":              1,
+				"cluster.0.cluster_id":   "some-id-a",
+				"cluster.0.storage_type": "SSD",
+				"cluster.0.state":        "CREATING",
+			},
+			after: map[string]interface{}{
+				"name":                   "some-name",
+				"cluster.#":              1,
+				"cluster.0.cluster_id":   "some-id-a",
+				"cluster.0.storage_type": "HDD",
+				"cluster.0": map[string]interface{}{
+					"cluster_id":   "some-id-a",
+					"storage_type": "HDD",
+				},
+			},
+			wantClusterOrder: []string{"some-id-a"},
+			wantForceNew:     false,
+		},
+	}
+	for tn, tc := range cases {
+		t.Run(tn, func(t *testing.T) {
+			d := &tpgresource.ResourceDiffMock{
+				Before: tc.before,
+				After:  tc.after,
+			}
+			var clusters []interface{}
+			err := resourceBigtableInstanceClusterReorderTypeListFunc(d, func(gotClusters []interface{}) error {
+				clusters = gotClusters
+				return nil
+			})
+			if err != nil {
+				t.Fatalf("bad: %s, error: %v", tn, err)
+			}
+			if d.IsForceNew != tc.wantForceNew {
+				t.Errorf("bad: %s, got %v, want %v", tn, d.IsForceNew, tc.wantForceNew)
+			}
+			gotClusterOrder := []string{}
+			for _, cluster := range clusters {
+				clusterResource := cluster.(map[string]interface{})
+				gotClusterOrder = append(gotClusterOrder, clusterResource["cluster_id"].(string))
+			}
+			if !reflect.DeepEqual(gotClusterOrder, tc.wantClusterOrder) {
+				t.Errorf("bad: %s, got %q, want %q", tn, gotClusterOrder, tc.wantClusterOrder)
+			}
+		})
+	}
+}
diff --git a/website/docs/r/bigtable_instance.html.markdown b/website/docs/r/bigtable_instance.html.markdown
index 1dbe10bfed8..a6f1d14cb34 100644
--- a/website/docs/r/bigtable_instance.html.markdown
+++ b/website/docs/r/bigtable_instance.html.markdown
@@ -142,6 +142,7 @@ If no value is set, Cloud Bigtable automatically allocates nodes based on your d
 
 In addition to the arguments listed above, the following computed attributes are exported:
 
 * `id` - an identifier for the resource with format `projects/{{project}}/instances/{{name}}`
+* `cluster.0.state` - describes the current state of the cluster.
 
 ## Timeouts
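
For illustration, here is a minimal Terraform configuration sketch (not part of the patch itself) showing how the new computed `state` attribute could be consumed once this change ships. The instance name, cluster ID, and zone below are hypothetical placeholders.

```hcl
# Hypothetical example; resource names and zone are placeholders.
resource "google_bigtable_instance" "example" {
  name = "example-instance"

  cluster {
    cluster_id   = "example-cluster"
    zone         = "us-central1-b"
    num_nodes    = 1
    storage_type = "SSD"
  }
}

# Each cluster block now exports a computed `state` (for example "CREATING"
# or "READY"); the provider also uses it internally to skip the forced
# recreate when a still-creating cluster reports a changed storage type.
output "cluster_state" {
  value = google_bigtable_instance.example.cluster[0].state
}
```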