Skip to content

Commit

Permalink
Avoid recreating instance when cluster is not ready (#9023) (#15961)
Browse files Browse the repository at this point in the history
Signed-off-by: Modular Magician <[email protected]>
  • Loading branch information
modular-magician authored Sep 22, 2023
1 parent 8eea792 commit cc7fe55
Show file tree
Hide file tree
Showing 4 changed files with 269 additions and 8 deletions.
6 changes: 6 additions & 0 deletions .changelog/9023.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
```release-note:bug
bigtable: avoided re-creation of instance when cluster is still updating and storage type changed
```
```release-note:enhancement
bigtable: added `state` output attribute to `google_bigtable_instance` clusters
```
27 changes: 21 additions & 6 deletions google/services/bigtable/resource_bigtable_instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,11 @@ func ResourceBigtableInstance() *schema.Resource {
},
},
},
"state": {
Type: schema.TypeString,
Computed: true,
Description: `The state of the cluster`,
},
},
},
},
Expand Down Expand Up @@ -420,6 +425,7 @@ func flattenBigtableCluster(c *bigtable.ClusterInfo) map[string]interface{} {
"cluster_id": c.Name,
"storage_type": storageType,
"kms_key_name": c.KMSKeyName,
"state": c.State,
}
if c.AutoscalingConfig != nil {
cluster["autoscaling_config"] = make([]map[string]interface{}, 1)
Expand Down Expand Up @@ -565,7 +571,14 @@ func resourceBigtableInstanceUniqueClusterID(_ context.Context, diff *schema.Res
// This doesn't use the standard unordered list utility (https://github.com/GoogleCloudPlatform/magic-modules/blob/main/templates/terraform/unordered_list_customize_diff.erb)
// because some fields can't be modified using the API and we recreate the instance
// when they're changed.
func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error {
func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *schema.ResourceDiff, _ interface{}) error {
// separate func to allow unit testing
return resourceBigtableInstanceClusterReorderTypeListFunc(diff, func(orderedClusters []interface{}) error {
return diff.SetNew("cluster", orderedClusters)
})

}
func resourceBigtableInstanceClusterReorderTypeListFunc(diff tpgresource.TerraformResourceDiff, setNew func([]interface{}) error) error {
oldCount, newCount := diff.GetChange("cluster.#")

// Simulate Required:true, MinItems:1 for "cluster". This doesn't work
Expand Down Expand Up @@ -594,7 +607,9 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch
for i := 0; i < newCount.(int); i++ {
_, newId := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i))
_, c := diff.GetChange(fmt.Sprintf("cluster.%d", i))
clusters[newId.(string)] = c
typedCluster := c.(map[string]interface{})
typedCluster["state"] = "READY"
clusters[newId.(string)] = typedCluster
}

// create a list of clusters using the old order when possible to minimise
Expand Down Expand Up @@ -630,9 +645,8 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch
}
}

err := diff.SetNew("cluster", orderedClusters)
if err != nil {
return fmt.Errorf("Error setting cluster diff: %s", err)
if err := setNew(orderedClusters); err != nil {
return err
}

// Clusters can't have their zone, storage_type or kms_key_name updated,
Expand All @@ -658,8 +672,9 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch
}
}

currentState, _ := diff.GetChange(fmt.Sprintf("cluster.%d.state", i))
oST, nST := diff.GetChange(fmt.Sprintf("cluster.%d.storage_type", i))
if oST != nST {
if oST != nST && currentState.(string) != "CREATING" {
err := diff.ForceNew(fmt.Sprintf("cluster.%d.storage_type", i))
if err != nil {
return fmt.Errorf("Error setting cluster diff: %s", err)
Expand Down
243 changes: 241 additions & 2 deletions google/services/bigtable/resource_bigtable_instance_internal_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,10 @@ import (

"cloud.google.com/go/bigtable"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-provider-google/google/tpgresource"
)

func TestGetUnavailableClusterZones(t *testing.T) {
func TestUnitBigtable_getUnavailableClusterZones(t *testing.T) {
cases := map[string]struct {
clusterZones []string
unavailableZones []string
Expand Down Expand Up @@ -56,7 +57,7 @@ func TestGetUnavailableClusterZones(t *testing.T) {
}
}

func TestGetInstanceFromResponse(t *testing.T) {
func TestUnitBigtable_getInstanceFromResponse(t *testing.T) {
instanceName := "test-instance"
originalId := "original_value"
cases := map[string]struct {
Expand Down Expand Up @@ -132,3 +133,241 @@ func TestGetInstanceFromResponse(t *testing.T) {
}
}
}

// TestUnitBigtable_flattenBigtableCluster checks that flattenBigtableCluster
// maps every field of a bigtable.ClusterInfo (zone, node count, cluster id,
// storage type, KMS key, state, and the optional autoscaling settings) into
// the expected map[string]interface{} representation.
func TestUnitBigtable_flattenBigtableCluster(t *testing.T) {
	cases := map[string]struct {
		clusterInfo *bigtable.ClusterInfo
		want        map[string]interface{}
	}{
		"SSD auto scaling": {
			clusterInfo: &bigtable.ClusterInfo{
				StorageType: bigtable.SSD,
				Zone:        "zone1",
				ServeNodes:  5,
				Name:        "ssd-cluster",
				KMSKeyName:  "KMS",
				State:       "CREATING",
				AutoscalingConfig: &bigtable.AutoscalingConfig{
					MinNodes:                  3,
					MaxNodes:                  7,
					CPUTargetPercent:          50,
					StorageUtilizationPerNode: 60,
				},
			},
			want: map[string]interface{}{
				"zone":         "zone1",
				"num_nodes":    5,
				"cluster_id":   "ssd-cluster",
				"storage_type": "SSD",
				"kms_key_name": "KMS",
				"state":        "CREATING",
				// Inner element type is implied by the slice type (gofmt -s form).
				"autoscaling_config": []map[string]interface{}{{
					"min_nodes":      3,
					"max_nodes":      7,
					"cpu_target":     50,
					"storage_target": 60,
				}},
			},
		},
		"HDD manual scaling": {
			clusterInfo: &bigtable.ClusterInfo{
				StorageType: bigtable.HDD,
				Zone:        "zone2",
				ServeNodes:  7,
				Name:        "hdd-cluster",
				KMSKeyName:  "KMS",
				State:       "READY",
			},
			want: map[string]interface{}{
				"zone":         "zone2",
				"num_nodes":    7,
				"cluster_id":   "hdd-cluster",
				"storage_type": "HDD",
				"kms_key_name": "KMS",
				"state":        "READY",
			},
		},
	}

	for name, tc := range cases {
		got := flattenBigtableCluster(tc.clusterInfo)
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("bad: %s, got %q, want %q", name, got, tc.want)
		}
	}
}

// TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc_error
// verifies that a diff reporting zero clusters makes the reorder helper fail
// instead of invoking the setNew callback (hence nil is passed for it).
func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc_error(t *testing.T) {
	diff := &tpgresource.ResourceDiffMock{
		After: map[string]interface{}{"cluster.#": 0},
	}
	err := resourceBigtableInstanceClusterReorderTypeListFunc(diff, nil)
	if err == nil {
		t.Errorf("expected error, got success")
	}
}

// TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc drives
// the cluster reorder helper with a mocked ResourceDiff and checks two
// outcomes per case: the cluster_id order handed to the setNew callback, and
// whether ForceNew was recorded on the mock.
func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc(t *testing.T) {
	cases := map[string]struct {
		// before/after mimic the old/new sides of GetChange for the keys the
		// helper reads: "cluster.#", "cluster.N", and "cluster.N.<field>".
		before map[string]interface{}
		after  map[string]interface{}
		// wantClusterOrder is the sequence of cluster_id values expected in
		// the list passed to setNew.
		wantClusterOrder []string
		// wantForceNew is the expected IsForceNew flag on the mock diff.
		wantForceNew bool
	}{
		// Fresh create: expects an empty order from setNew and no ForceNew.
		"create": {
			before: map[string]interface{}{
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
			},
			after: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0": map[string]interface{}{
					"cluster_id": "some-id-a",
				},
			},
			wantClusterOrder: []string{},
			wantForceNew:     false,
		},
		// Clusters added/removed/reordered, but no zone/kms/storage change:
		// order is normalized without forcing recreation.
		"no force new change": {
			before: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            4,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.1.cluster_id": "some-id-b",
				"cluster.2.cluster_id": "some-id-c",
				"cluster.3.cluster_id": "some-id-e",
			},
			after: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            3,
				"cluster.0.cluster_id": "some-id-c",
				"cluster.1.cluster_id": "some-id-a",
				"cluster.2.cluster_id": "some-id-d",
				"cluster.0": map[string]interface{}{
					"cluster_id": "some-id-c",
				},
				"cluster.1": map[string]interface{}{
					"cluster_id": "some-id-a",
				},
				"cluster.2": map[string]interface{}{
					"cluster_id": "some-id-d",
				},
			},
			wantClusterOrder: []string{"some-id-a", "some-id-d", "some-id-c"},
			wantForceNew:     false,
		},
		// Changing a cluster's zone must flag ForceNew.
		"force new - zone change": {
			before: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0.zone":       "zone-a",
			},
			after: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0.zone":       "zone-b",
				"cluster.0": map[string]interface{}{
					"cluster_id": "some-id-a",
					"zone":       "zone-b",
				},
			},
			wantClusterOrder: []string{"some-id-a"},
			wantForceNew:     true,
		},
		// Changing a cluster's kms_key_name must flag ForceNew.
		"force new - kms_key_name change": {
			before: map[string]interface{}{
				"name":                   "some-name",
				"cluster.#":              1,
				"cluster.0.cluster_id":   "some-id-a",
				"cluster.0.kms_key_name": "key-a",
			},
			after: map[string]interface{}{
				"name":                   "some-name",
				"cluster.#":              1,
				"cluster.0.cluster_id":   "some-id-a",
				"cluster.0.kms_key_name": "key-b",
				"cluster.0": map[string]interface{}{
					"cluster_id":   "some-id-a",
					"kms_key_name": "key-b",
				},
			},
			wantClusterOrder: []string{"some-id-a"},
			wantForceNew:     true,
		},
		// storage_type change on a READY cluster must flag ForceNew.
		"force new - storage_type change": {
			before: map[string]interface{}{
				"name":                   "some-name",
				"cluster.#":              1,
				"cluster.0.cluster_id":   "some-id-a",
				"cluster.0.storage_type": "HDD",
				"cluster.0.state":        "READY",
			},
			after: map[string]interface{}{
				"name":                   "some-name",
				"cluster.#":              1,
				"cluster.0.cluster_id":   "some-id-a",
				"cluster.0.storage_type": "SSD",
				"cluster.0": map[string]interface{}{
					"cluster_id":   "some-id-a",
					"storage_type": "SSD",
				},
			},
			wantClusterOrder: []string{"some-id-a"},
			wantForceNew:     true,
		},
		// The fix under test: a storage_type diff while the cluster state is
		// CREATING must NOT force recreation of the instance.
		"skip force new - storage_type change for CREATING cluster": {
			before: map[string]interface{}{
				"name":                   "some-name",
				"cluster.#":              1,
				"cluster.0.cluster_id":   "some-id-a",
				"cluster.0.storage_type": "SSD",
				"cluster.0.state":        "CREATING",
			},
			after: map[string]interface{}{
				"name":                   "some-name",
				"cluster.#":              1,
				"cluster.0.cluster_id":   "some-id-a",
				"cluster.0.storage_type": "HDD",
				"cluster.0": map[string]interface{}{
					"cluster_id":   "some-id-a",
					"storage_type": "HDD",
				},
			},
			wantClusterOrder: []string{"some-id-a"},
			wantForceNew:     false,
		},
	}
	for tn, tc := range cases {
		t.Run(tn, func(t *testing.T) {
			d := &tpgresource.ResourceDiffMock{
				Before: tc.before,
				After:  tc.after,
			}
			// Capture the reordered list via the callback instead of writing
			// it back into a real ResourceDiff.
			var clusters []interface{}
			err := resourceBigtableInstanceClusterReorderTypeListFunc(d, func(gotClusters []interface{}) error {
				clusters = gotClusters
				return nil
			})
			if err != nil {
				t.Fatalf("bad: %s, error: %v", tn, err)
			}
			if d.IsForceNew != tc.wantForceNew {
				t.Errorf("bad: %s, got %v, want %v", tn, d.IsForceNew, tc.wantForceNew)
			}
			// Extract just the cluster_id sequence for the order comparison.
			gotClusterOrder := []string{}
			for _, cluster := range clusters {
				clusterResource := cluster.(map[string]interface{})
				gotClusterOrder = append(gotClusterOrder, clusterResource["cluster_id"].(string))
			}
			if !reflect.DeepEqual(gotClusterOrder, tc.wantClusterOrder) {
				t.Errorf("bad: %s, got %q, want %q", tn, gotClusterOrder, tc.wantClusterOrder)
			}
		})
	}
}
1 change: 1 addition & 0 deletions website/docs/r/bigtable_instance.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,7 @@ If no value is set, Cloud Bigtable automatically allocates nodes based on your d
In addition to the arguments listed above, the following computed attributes are exported:

* `id` - an identifier for the resource with format `projects/{{project}}/instances/{{name}}`
* `cluster.0.state` - describes the current state of the cluster.

## Timeouts

Expand Down

0 comments on commit cc7fe55

Please sign in to comment.