diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.tmpl b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.tmpl
index 3c036f3a1e86..0afcf93719a0 100644
--- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.tmpl
+++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.tmpl
@@ -14,7 +14,7 @@ import (
"github.com/hashicorp/terraform-provider-google/google/tpgresource"
transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
-{{ if eq $.TargetVersionName `ga` }}
+{{ if eq $.TargetVersionName "ga" }}
"google.golang.org/api/compute/v1"
{{- else }}
compute "google.golang.org/api/compute/v0.beta"
@@ -94,6 +94,48 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource {
},
},
+ {{- if ne $.TargetVersionName "ga" }}
+ "instance_flexibility_policy": {
+ Type: schema.TypeList,
+ Optional: true,
+ MaxItems: 1,
+ Description: `The flexibility policy for this managed instance group. Instance flexibility allows the managed instance group to create VMs from multiple machine types. The instance flexibility configuration on the managed instance group overrides the instance template configuration.`,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "instance_selections": {
+ Type: schema.TypeSet,
+ Optional: true,
+ Description: `Named instance selections configuring properties that the group will use when creating new VMs.`,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: `Instance selection name.`,
+ },
+
+ "rank": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Description: `Preference of this instance selection. A lower number means a higher preference. The MIG will first try to create a VM based on the machine type with the lowest rank and fall back to the next rank based on availability. Machine types and instance selections with the same rank have the same preference.`,
+ },
+
+ "machine_types": {
+ Type: schema.TypeSet,
+ Required: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ Description: `Full machine-type names, e.g. "n1-standard-16"`,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {{- end }}
+
"name": {
Type: schema.TypeString,
Required: true,
@@ -280,7 +322,7 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource {
},
},
- {{ if ne $.TargetVersionName `ga` -}}
+ {{- if ne $.TargetVersionName "ga" }}
"standby_policy": {
Type: schema.TypeList,
Computed: true,
@@ -384,7 +426,7 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource {
Description: `Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%.`,
},
- {{ if ne $.TargetVersionName `ga` -}}
+ {{- if ne $.TargetVersionName "ga" }}
"min_ready_sec": {
Type: schema.TypeInt,
Optional: true,
@@ -630,6 +672,9 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met
TargetSuspendedSize: int64(d.Get("target_suspended_size").(int)),
TargetStoppedSize: int64(d.Get("target_stopped_size").(int)),
{{- end }}
+ {{- if ne $.TargetVersionName "ga" }}
+ InstanceFlexibilityPolicy: expandInstanceFlexibilityPolicy(d),
+ {{- end }}
UpdatePolicy: expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})),
InstanceLifecyclePolicy: expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})),
AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})),
@@ -833,6 +878,11 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta
return fmt.Errorf("Error setting target_stopped_size: %s", err)
}
{{- end }}
+ {{- if ne $.TargetVersionName "ga" }}
+ if err := d.Set("instance_flexibility_policy", flattenInstanceFlexibilityPolicy(manager.InstanceFlexibilityPolicy)); err != nil {
+ return fmt.Errorf("Error setting instance_flexibility_policy: %s", err)
+ }
+ {{- end }}
if err := d.Set("update_policy", flattenRegionUpdatePolicy(manager.UpdatePolicy)); err != nil {
return fmt.Errorf("Error setting update_policy in state: %s", err.Error())
}
@@ -906,12 +956,27 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met
change = true
}
+ {{- if ne $.TargetVersionName "ga" }}
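+ // targetSizePatchUpdate records that target_size has already been included in
+ // the Patch request, so the later Resize-based update can be skipped.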
+ var targetSizePatchUpdate bool
+ if d.HasChange("instance_flexibility_policy") {
+ updatedManager.InstanceFlexibilityPolicy = expandInstanceFlexibilityPolicy(d)
+ change = true
+
+ // When the flexibility policy changes, send the new target size in the same Patch request instead of a separate Resize call.
+ if d.HasChange("target_size") {
+ updatedManager.TargetSize = int64(d.Get("target_size").(int))
+ updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetSize")
+ targetSizePatchUpdate = true
+ }
+ }
+ {{- end }}
+
if d.HasChange("distribution_policy_target_shape") {
updatedManager.DistributionPolicy = expandDistributionPolicyForUpdate(d)
change = true
}
- {{ if ne $.TargetVersionName `ga` -}}
+ {{- if ne $.TargetVersionName "ga" }}
if d.HasChange("standby_policy") {
updatedManager.StandbyPolicy = expandStandbyPolicy(d)
change = true
@@ -995,7 +1060,7 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met
}
// target size should use resize
- if d.HasChange("target_size") {
+ if d.HasChange("target_size") {{- if ne $.TargetVersionName "ga" }} && !targetSizePatchUpdate {{- end}} {
d.Partial(true)
targetSize := int64(d.Get("target_size").(int))
op, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Resize(
@@ -1145,6 +1210,42 @@ func flattenRegionUpdatePolicy(updatePolicy *compute.InstanceGroupManagerUpdateP
return results
}
+{{- if ne $.TargetVersionName "ga" }}
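+// expandInstanceFlexibilityPolicy builds the API representation of the
+// instance_flexibility_policy block, reading both the old and new values so
+// that selections removed from the configuration can be nulled out server-side.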
+func expandInstanceFlexibilityPolicy(d *schema.ResourceData) *compute.InstanceGroupManagerInstanceFlexibilityPolicy {
+ instanceFlexibilityPolicy := &compute.InstanceGroupManagerInstanceFlexibilityPolicy{}
+ oldFlexibilityPolicy, newFlexibilityPolicy := d.GetChange("instance_flexibility_policy")
+ for _, flexibilityPolicy := range newFlexibilityPolicy.([]any) {
+ flexibilityPolicyData := flexibilityPolicy.(map[string]any)
+ instanceFlexibilityPolicy.InstanceSelections = expandInstanceSelections(flexibilityPolicyData["instance_selections"].(*schema.Set).List())
+ }
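+ // Any selection present in the old state but absent from the new
+ // configuration is added to NullFields so the API deletes it.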
+ for _, flexibilityPolicy := range oldFlexibilityPolicy.([]any) {
+ flexibilityPolicyData := flexibilityPolicy.(map[string]any)
+ for _, instanceSelection := range flexibilityPolicyData["instance_selections"].(*schema.Set).List() {
+ instanceSelectionData := instanceSelection.(map[string]any)
+ name := instanceSelectionData["name"].(string)
+ if _, exist := instanceFlexibilityPolicy.InstanceSelections[name]; !exist {
+ instanceFlexibilityPolicy.NullFields = append(instanceFlexibilityPolicy.NullFields, "InstanceSelections." + name)
+ }
+ }
+ instanceFlexibilityPolicy.ForceSendFields = append(instanceFlexibilityPolicy.ForceSendFields, "InstanceSelections")
+ }
+ return instanceFlexibilityPolicy
+}
+
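+// expandInstanceSelections converts the instance_selections set into the map,
+// keyed by selection name, that the compute API expects.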
+func expandInstanceSelections(instanceSelections []any) map[string]compute.InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection {
+ instanceSelectionsMap := make(map[string]compute.InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection)
+ for _, instanceSelectionRaw := range instanceSelections {
+ instanceSelectionData := instanceSelectionRaw.(map[string]any)
+ instanceSelection := compute.InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection{
+ Rank: int64(instanceSelectionData["rank"].(int)),
+ MachineTypes: tpgresource.ConvertStringSet(instanceSelectionData["machine_types"].(*schema.Set)),
+ }
+ instanceSelectionsMap[instanceSelectionData["name"].(string)] = instanceSelection
+ }
+ return instanceSelectionsMap
+}
+{{- end }}
+
func expandDistributionPolicyForUpdate(d *schema.ResourceData) *compute.DistributionPolicy {
dpts := d.Get("distribution_policy_target_shape").(string)
if dpts == "" {
@@ -1180,6 +1281,30 @@ func expandDistributionPolicyForCreate(d *schema.ResourceData) *compute.Distribu
return distributionPolicy
}
+{{- if ne $.TargetVersionName "ga" }}
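+// flattenInstanceFlexibilityPolicy converts the API policy back into the
+// single-element list form used by the schema.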
+func flattenInstanceFlexibilityPolicy(instanceFlexibilityPolicy *compute.InstanceGroupManagerInstanceFlexibilityPolicy) []map[string]any {
+ flattenedInstanceFlexibilityPolicy := []map[string]any{}
+ if instanceFlexibilityPolicy != nil {
+ instanceSelectionsMap := map[string]any{}
+ instanceSelectionsMap["instance_selections"] = flattenInstanceSelections(instanceFlexibilityPolicy.InstanceSelections)
+ flattenedInstanceFlexibilityPolicy = append(flattenedInstanceFlexibilityPolicy, instanceSelectionsMap)
+ }
+ return flattenedInstanceFlexibilityPolicy
+}
+
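+// flattenInstanceSelections converts the API map of instance selections back
+// into a list of blocks. Map iteration order is not deterministic, which is
+// fine because instance_selections is declared as a TypeSet.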
+func flattenInstanceSelections(instanceSelections map[string]compute.InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection) []map[string]any {
+ instanceSelectionsMap := make([]map[string]any, 0, len(instanceSelections))
+ for instanceSelectionName, instanceSelection := range instanceSelections {
+ instanceSelectionData := make(map[string]any)
+ instanceSelectionData["name"] = instanceSelectionName
+ instanceSelectionData["rank"] = instanceSelection.Rank
+ instanceSelectionData["machine_types"] = instanceSelection.MachineTypes
+ instanceSelectionsMap = append(instanceSelectionsMap, instanceSelectionData)
+ }
+ return instanceSelectionsMap
+}
+{{- end }}
+
func flattenDistributionPolicy(distributionPolicy *compute.DistributionPolicy) []string {
zones := make([]string, 0)
diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.tmpl b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.tmpl
index 2b7571e2c915..bdf636e5a69e 100644
--- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.tmpl
+++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.tmpl
@@ -395,7 +395,7 @@ func TestAccRegionInstanceGroupManager_stateful(t *testing.T) {
}
-{{ if ne $.TargetVersionName `ga` -}}
+{{- if ne $.TargetVersionName "ga" }}
func TestAccRegionInstanceGroupManager_stoppedSuspendedTargetSize(t *testing.T) {
t.Parallel()
@@ -431,6 +431,50 @@ func TestAccRegionInstanceGroupManager_stoppedSuspendedTargetSize(t *testing.T)
}
{{- end }}
+{{- if ne $.TargetVersionName "ga" }}
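+// TestAccRegionInstanceGroupManager_instanceFlexibilityPolicy creates a MIG with an
+// instance flexibility policy, updates it, removes it, and verifies import after each step.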
+func TestAccRegionInstanceGroupManager_instanceFlexibilityPolicy(t *testing.T) {
+ t.Parallel()
+
+ template := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10))
+ igm := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10))
+ network := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10))
+
+ acctest.VcrTest(t, resource.TestCase{
+ PreCheck: func() { acctest.AccTestPreCheck(t) },
+ ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+ CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccRegionInstanceGroupManager_instanceFlexibilityPolicy(network, template, igm),
+ },
+ {
+ ResourceName: "google_compute_region_instance_group_manager.igm-basic",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{"status"},
+ },
+ {
+ Config: testAccRegionInstanceGroupManager_instanceFlexibilityPolicyUpdate(network, template, igm),
+ },
+ {
+ ResourceName: "google_compute_region_instance_group_manager.igm-basic",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{"status"},
+ },
+ {
+ Config: testAccRegionInstanceGroupManager_instanceFlexibilityPolicyRemove(network, template, igm),
+ },
+ {
+ ResourceName: "google_compute_region_instance_group_manager.igm-basic",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{"status"},
+ },
+ },
+ })
+}
+{{- end }}
func TestAccRegionInstanceGroupManager_APISideListRecordering(t *testing.T) {
t.Parallel()
@@ -450,7 +494,7 @@ func TestAccRegionInstanceGroupManager_APISideListRecordering(t *testing.T) {
})
}
-{{ if ne $.TargetVersionName `ga` -}}
+{{- if ne $.TargetVersionName "ga" }}
func TestAccRegionInstanceGroupManager_resourceManagerTags(t *testing.T) {
t.Parallel()
@@ -1814,7 +1858,7 @@ resource "google_compute_region_instance_group_manager" "igm-basic" {
`, context)
}
-{{ if ne $.TargetVersionName `ga` -}}
+{{- if ne $.TargetVersionName "ga" }}
func testAccRegionInstanceGroupManager_stoppedSuspendedTargetSize(network, template, igm string) string {
return fmt.Sprintf(`
data "google_compute_image" "my_image" {
@@ -1925,7 +1969,178 @@ resource "google_compute_region_instance_group_manager" "sr-igm" {
}
{{- end }}
-{{ if ne $.TargetVersionName `ga` -}}
+{{- if ne $.TargetVersionName "ga" }}
+func testAccRegionInstanceGroupManager_instanceFlexibilityPolicy(network, template, igm string) string {
+ return fmt.Sprintf(`
+data "google_compute_image" "my_image" {
+ family = "debian-11"
+ project = "debian-cloud"
+}
+
+resource "google_compute_network" "igm-basic" {
+ name = "%s"
+}
+
+resource "google_compute_instance_template" "igm-basic" {
+ name = "%s"
+ machine_type = "e2-medium"
+ disk {
+ source_image = data.google_compute_image.my_image.self_link
+ auto_delete = true
+ boot = true
+ device_name = "stateful-disk"
+ }
+ network_interface {
+ network = "default"
+ }
+}
+
+resource "google_compute_region_instance_group_manager" "igm-basic" {
+ description = "Terraform test instance group manager"
+ name = "%s"
+
+ version {
+ instance_template = google_compute_instance_template.igm-basic.self_link
+ name = "primary"
+ }
+
+ base_instance_name = "tf-test-igm-basic"
+ region = "us-central1"
+ target_size = 2
+ distribution_policy_target_shape = "ANY_SINGLE_ZONE"
+ update_policy {
+ instance_redistribution_type = "NONE"
+ type = "OPPORTUNISTIC"
+ minimal_action = "REPLACE"
+ max_surge_fixed = 0
+ max_unavailable_fixed = 6
+ }
+ instance_flexibility_policy {
+ instance_selections {
+ name = "instance_selection_name_1"
+ rank = 2
+ machine_types = ["n1-standard-16"]
+ }
+ instance_selections {
+ name = "instance_selection_name_2"
+ rank = 1
+ machine_types = ["n1-standard-1", "n4-standard-2"]
+ }
+ }
+}
+`, network, template, igm)
+}
+
+func testAccRegionInstanceGroupManager_instanceFlexibilityPolicyUpdate(network, template, igm string) string {
+ return fmt.Sprintf(`
+data "google_compute_image" "my_image" {
+ family = "debian-11"
+ project = "debian-cloud"
+}
+
+resource "google_compute_network" "igm-basic" {
+ name = "%s"
+}
+
+resource "google_compute_instance_template" "igm-basic" {
+ name = "%s"
+ machine_type = "e2-medium"
+ disk {
+ source_image = data.google_compute_image.my_image.self_link
+ auto_delete = true
+ boot = true
+ device_name = "stateful-disk"
+ }
+ network_interface {
+ network = "default"
+ }
+}
+
+resource "google_compute_region_instance_group_manager" "igm-basic" {
+ description = "Terraform test instance group manager"
+ name = "%s"
+
+ version {
+ instance_template = google_compute_instance_template.igm-basic.self_link
+ name = "primary"
+ }
+
+ base_instance_name = "tf-test-igm-basic"
+ region = "us-central1"
+ target_size = 0
+ distribution_policy_target_shape = "ANY_SINGLE_ZONE"
+ update_policy {
+ instance_redistribution_type = "NONE"
+ type = "OPPORTUNISTIC"
+ minimal_action = "REPLACE"
+ max_surge_fixed = 0
+ max_unavailable_fixed = 6
+ }
+ instance_flexibility_policy {
+ instance_selections {
+ name = "instance_selection_name_1"
+ machine_types = ["n1-standard-1"]
+ }
+ instance_selections {
+ name = "instance_selection_name_2_version_2"
+ machine_types = ["n1-standard-2", "n1-standard-4"]
+ }
+ }
+}
+`, network, template, igm)
+}
+
+func testAccRegionInstanceGroupManager_instanceFlexibilityPolicyRemove(network, template, igm string) string {
+ return fmt.Sprintf(`
+data "google_compute_image" "my_image" {
+ family = "debian-11"
+ project = "debian-cloud"
+}
+
+resource "google_compute_network" "igm-basic" {
+ name = "%s"
+}
+
+resource "google_compute_instance_template" "igm-basic" {
+ name = "%s"
+ machine_type = "e2-medium"
+ disk {
+ source_image = data.google_compute_image.my_image.self_link
+ auto_delete = true
+ boot = true
+ device_name = "stateful-disk"
+ }
+ network_interface {
+ network = "default"
+ }
+}
+
+resource "google_compute_region_instance_group_manager" "igm-basic" {
+ description = "Terraform test instance group manager"
+ name = "%s"
+
+ version {
+ instance_template = google_compute_instance_template.igm-basic.self_link
+ name = "primary"
+ }
+
+ base_instance_name = "tf-test-igm-basic"
+ region = "us-central1"
+ target_size = 0
+ distribution_policy_target_shape = "ANY_SINGLE_ZONE"
+ update_policy {
+ instance_redistribution_type = "NONE"
+ type = "OPPORTUNISTIC"
+ minimal_action = "REPLACE"
+ max_surge_fixed = 0
+ max_unavailable_fixed = 6
+ }
+}
+`, network, template, igm)
+}
+{{- end }}
+
+{{- if ne $.TargetVersionName "ga" }}
func testAccRegionInstanceGroupManager_resourceManagerTags(template_name, tag_name, igm_name, project_id string) string {
return fmt.Sprintf(`
data "google_compute_image" "my_image" {
diff --git a/mmv1/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown
index cdc4e9963a1d..465bfb373898 100644
--- a/mmv1/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown
+++ b/mmv1/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown
@@ -205,6 +205,7 @@ group. You can specify one or more values. For more information, see the [offici
* `params` - (Optional [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Input only additional params for instance group manager creation. Structure is [documented below](#nested_params). For more information, see [API](https://cloud.google.com/compute/docs/reference/rest/beta/instanceGroupManagers/insert).
+* `instance_flexibility_policy` - (Optional [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The flexibility policy for this managed instance group. Instance flexibility allows the managed instance group to create VMs from multiple machine types. The instance flexibility configuration on the managed instance group overrides the instance template configuration. Structure is [documented below](#nested_instance_flexibility_policy).
- - -
The `standby_policy` block supports:
@@ -262,7 +263,35 @@ instance_lifecycle_policy {
* `default_action_on_failure` - (Optional), Default behavior for all instance or health check failures. Valid options are: `REPAIR`, `DO_NOTHING`. If `DO_NOTHING` then instances will not be repaired. If `REPAIR` (default), then failed instances will be repaired.
- - -
+<a name="nested_instance_flexibility_policy"></a>The `instance_flexibility_policy` block supports:
+```hcl
+instance_flexibility_policy {
+ instance_selections {
+ name = "instance_selection_name"
+ rank = 1
+ machine_types = ["n1-standard-16"]
+ }
+}
+```
+
+* `instance_selections` - (Optional), Named instance selections configuring properties that the group will use when creating new VMs. You can specify multiple instance selections to allow the managed instance group to create VMs from multiple machine types, based on preference and availability. Structure is [documented below](#nested_instance_selections).
+- - -
+<a name="nested_instance_selections"></a>The `instance_selections` block supports:
+
+```hcl
+instance_selections {
+ name = "instance_selection_name"
+ rank = 1
+ machine_types = ["n1-standard-1", "n1-standard-16"]
+}
+```
+
+* `name` - (Required), Name of the instance selection, e.g. `instance_selection_with_n1_machine_types`. Instance selection names must be unique within the flexibility policy.
+* `rank` - (Optional), Preference of this instance selection. A lower number means a higher preference. The managed instance group will first try to create a VM based on the machine type with the lowest rank and fall back to the next rank based on availability. Machine types and instance selections with the same rank have the same preference.
+* `machine_types` - (Required), A list of full machine-type names, e.g. "n1-standard-16".
+
+- - -
The `all_instances_config` block supports:
```hcl