Adding in additional field for provisioned_throughput for Hyper disk Throughput SKUs (#8153) (#5814)

Signed-off-by: Modular Magician <[email protected]>
modular-magician authored Jun 26, 2023
1 parent 67e7b95 commit 808153b
Showing 4 changed files with 171 additions and 11 deletions.
3 changes: 3 additions & 0 deletions .changelog/8153.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
compute: added `provisioned_throughput` field to `google_compute_disk` used by `hyperdisk-throughput` pd type
```
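
For orientation, here is a minimal sketch of how the new field is used. The resource label and disk name are illustrative; the type, zone, size, and throughput values mirror the acceptance-test fixture added in this commit.

```hcl
# Sketch only: names are illustrative; type, zone, size, and
# provisioned_throughput mirror the acceptance-test config below.
resource "google_compute_disk" "example" {
  name                   = "example-hyperdisk"
  type                   = "hyperdisk-throughput"
  zone                   = "us-east4-c"
  size                   = 2048
  provisioned_throughput = 180
}
```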
80 changes: 72 additions & 8 deletions google-beta/resource_compute_disk_test.go
@@ -357,19 +357,16 @@ func TestAccComputeDisk_pdHyperDiskProvisionedIopsLifeCycle(t *testing.T) {
context_1 := map[string]interface{}{
"random_suffix": RandString(t, 10),
"provisioned_iops": 10000,
"disk_size": 64,
"lifecycle_bool": true,
}
context_2 := map[string]interface{}{
"random_suffix": context_1["random_suffix"],
"provisioned_iops": 11000,
"disk_size": 64,
"lifecycle_bool": true,
}
context_3 := map[string]interface{}{
"random_suffix": context_1["random_suffix"],
"provisioned_iops": 11000,
"disk_size": 64,
"lifecycle_bool": false,
}

@@ -406,6 +403,58 @@ func TestAccComputeDisk_pdHyperDiskProvisionedIopsLifeCycle(t *testing.T) {
})
}

func TestAccComputeDisk_pdHyperDiskProvisionedThroughputLifeCycle(t *testing.T) {
t.Parallel()

context_1 := map[string]interface{}{
"random_suffix": RandString(t, 10),
"provisioned_throughput": 180,
"lifecycle_bool": true,
}
context_2 := map[string]interface{}{
"random_suffix": context_1["random_suffix"],
"provisioned_throughput": 20,
"lifecycle_bool": true,
}
context_3 := map[string]interface{}{
"random_suffix": context_1["random_suffix"],
"provisioned_throughput": 20,
"lifecycle_bool": false,
}

VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: ProtoV5ProviderFactories(t),
CheckDestroy: testAccCheckComputeDiskDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccComputeDisk_pdHyperDiskProvisionedThroughputLifeCycle(context_1),
},
{
ResourceName: "google_compute_disk.foobar",
ImportState: true,
ImportStateVerify: true,
},
{
Config: testAccComputeDisk_pdHyperDiskProvisionedThroughputLifeCycle(context_2),
},
{
ResourceName: "google_compute_disk.foobar",
ImportState: true,
ImportStateVerify: true,
},
{
Config: testAccComputeDisk_pdHyperDiskProvisionedThroughputLifeCycle(context_3),
},
{
ResourceName: "google_compute_disk.foobar",
ImportState: true,
ImportStateVerify: true,
},
},
})
}

func TestAccComputeDisk_fromSnapshot(t *testing.T) {
t.Parallel()

@@ -969,12 +1018,27 @@ resource "google_compute_instance_group_manager" "manager" {
func testAccComputeDisk_pdHyperDiskProvisionedIopsLifeCycle(context map[string]interface{}) string {
return Nprintf(`
resource "google_compute_disk" "foobar" {
name = "tf-test-hyperdisk-%{random_suffix}"
type = "hyperdisk-extreme"
provisioned_iops = %{provisioned_iops}
size = %{disk_size}
name = "tf-test-hyperdisk-%{random_suffix}"
type = "hyperdisk-extreme"
provisioned_iops = %{provisioned_iops}
size = 64
lifecycle {
prevent_destroy = %{lifecycle_bool}
}
}
`, context)
}

func testAccComputeDisk_pdHyperDiskProvisionedThroughputLifeCycle(context map[string]interface{}) string {
return Nprintf(`
resource "google_compute_disk" "foobar" {
name = "tf-test-hyperdisk-%{random_suffix}"
type = "hyperdisk-throughput"
zone = "us-east4-c"
provisioned_throughput = %{provisioned_throughput}
size = 2048
lifecycle {
prevent_destroy = %{lifecycle_bool}
}
}
`, context)
90 changes: 88 additions & 2 deletions google-beta/services/compute/resource_compute_disk.go
@@ -487,7 +487,16 @@ the supported values for the caller's project.`,
Computed: true,
Optional: true,
Description: `Indicates how many IOPS must be provisioned for the disk.
Note: Update currently only supported by hyperdisk skus, allowing for an update of IOPS every 4 hours`,
Note: Updating provisioned IOPS is currently only supported for hyperdisk SKUs, which allow an in-place update (without deleting and recreating the disk)
at most once every 4 hours. To update your hyperdisk more frequently, you'll need to manually delete and recreate it`,
},
"provisioned_throughput": {
Type: schema.TypeInt,
Computed: true,
Optional: true,
Description: `Indicates how much throughput must be provisioned for the disk.
Note: Updating provisioned throughput is currently only supported for hyperdisk SKUs, which allow an in-place update (without deleting and recreating the disk)
at most once every 4 hours. To update your hyperdisk more frequently, you'll need to manually delete and recreate it`,
},
"resource_policies": {
Type: schema.TypeList,
@@ -824,6 +833,12 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
} else if v, ok := d.GetOkExists("provisioned_iops"); !tpgresource.IsEmptyValue(reflect.ValueOf(provisionedIopsProp)) && (ok || !reflect.DeepEqual(v, provisionedIopsProp)) {
obj["provisionedIops"] = provisionedIopsProp
}
provisionedThroughputProp, err := expandComputeDiskProvisionedThroughput(d.Get("provisioned_throughput"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("provisioned_throughput"); !tpgresource.IsEmptyValue(reflect.ValueOf(provisionedThroughputProp)) && (ok || !reflect.DeepEqual(v, provisionedThroughputProp)) {
obj["provisionedThroughput"] = provisionedThroughputProp
}
asyncPrimaryDiskProp, err := expandComputeDiskAsyncPrimaryDisk(d.Get("async_primary_disk"), d, config)
if err != nil {
return err
@@ -1035,6 +1050,9 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
if err := d.Set("provisioned_iops", flattenComputeDiskProvisionedIops(res["provisionedIops"], d, config)); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("provisioned_throughput", flattenComputeDiskProvisionedThroughput(res["provisionedThroughput"], d, config)); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("async_primary_disk", flattenComputeDiskAsyncPrimaryDisk(res["asyncPrimaryDisk"], d, config)); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
@@ -1236,6 +1254,53 @@ func resourceComputeDiskUpdate(d *schema.ResourceData, meta interface{}) error {
return err
}
}
if d.HasChange("provisioned_throughput") {
obj := make(map[string]interface{})

provisionedThroughputProp, err := expandComputeDiskProvisionedThroughput(d.Get("provisioned_throughput"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("provisioned_throughput"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, provisionedThroughputProp)) {
obj["provisionedThroughput"] = provisionedThroughputProp
}

obj, err = resourceComputeDiskUpdateEncoder(d, meta, obj)
if err != nil {
return err
}

url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}?paths=provisionedThroughput")
if err != nil {
return err
}

// err == nil indicates that the billing_project value was found
if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
billingProject = bp
}

res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
Config: config,
Method: "PATCH",
Project: billingProject,
RawURL: url,
UserAgent: userAgent,
Body: obj,
Timeout: d.Timeout(schema.TimeoutUpdate),
})
if err != nil {
return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err)
} else {
log.Printf("[DEBUG] Finished updating Disk %q: %#v", d.Id(), res)
}

err = ComputeOperationWaitTime(
config, res, project, "Updating Disk", userAgent,
d.Timeout(schema.TimeoutUpdate))
if err != nil {
return err
}
}

d.Partial(false)

@@ -1492,6 +1557,23 @@ func flattenComputeDiskProvisionedIops(v interface{}, d *schema.ResourceData, co
return v // let terraform core handle it otherwise
}

func flattenComputeDiskProvisionedThroughput(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
// Handles the string fixed64 format
if strVal, ok := v.(string); ok {
if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
return intVal
}
}

// number values are represented as float64
if floatVal, ok := v.(float64); ok {
intVal := int(floatVal)
return intVal
}

return v // let terraform core handle it otherwise
}

func flattenComputeDiskAsyncPrimaryDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
if v == nil {
return nil
@@ -1742,6 +1824,10 @@ func expandComputeDiskProvisionedIops(v interface{}, d tpgresource.TerraformReso
return v, nil
}

func expandComputeDiskProvisionedThroughput(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
return v, nil
}

func expandComputeDiskAsyncPrimaryDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
l := v.([]interface{})
if len(l) == 0 || l[0] == nil {
@@ -2046,7 +2132,7 @@ func resourceComputeDiskEncoder(d *schema.ResourceData, meta interface{}, obj ma

func resourceComputeDiskUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {

if d.HasChange("provisioned_iops") && strings.Contains(d.Get("type").(string), "hyperdisk") {
if (d.HasChange("provisioned_iops") && strings.Contains(d.Get("type").(string), "hyperdisk")) || (d.HasChange("provisioned_throughput") && strings.Contains(d.Get("type").(string), "hyperdisk")) {
nameProp := d.Get("name")
if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) {
obj["name"] = nameProp
9 changes: 8 additions & 1 deletion website/docs/r/compute_disk.html.markdown
@@ -233,7 +233,14 @@ The following arguments are supported:
* `provisioned_iops` -
(Optional)
Indicates how many IOPS must be provisioned for the disk.
Note: Update currently only supported by hyperdisk skus, allowing for an update of IOPS every 4 hours
Note: Updating provisioned IOPS is currently only supported for hyperdisk SKUs, which allow an in-place update (without deleting and recreating the disk)
at most once every 4 hours. To update your hyperdisk more frequently, you'll need to manually delete and recreate it.

* `provisioned_throughput` -
(Optional)
Indicates how much throughput must be provisioned for the disk.
Note: Updating provisioned throughput is currently only supported for hyperdisk SKUs, which allow an in-place update (without deleting and recreating the disk)
at most once every 4 hours. To update your hyperdisk more frequently, you'll need to manually delete and recreate it.

* `async_primary_disk` -
(Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html))
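
To illustrate the documented update behaviour, here is a hedged sketch of an in-place throughput change, assuming the illustrative disk from the earlier sketch has already been applied. The new value mirrors the second step of the acceptance test, and the expectation of a PATCH-based in-place update follows from the provider update code above.

```hcl
# Sketch only: same illustrative disk as before, with provisioned_throughput
# lowered from 180 to 20 (the values the acceptance test steps through).
# Re-running `terraform apply` is expected to update the disk in place via a
# PATCH on provisionedThroughput rather than recreating it; hyperdisk accepts
# such an update at most once every 4 hours, per the note above.
resource "google_compute_disk" "example" {
  name                   = "example-hyperdisk"
  type                   = "hyperdisk-throughput"
  zone                   = "us-east4-c"
  size                   = 2048
  provisioned_throughput = 20
}
```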
