diff --git a/CHANGELOG.md b/CHANGELOG.md
index e37b13ad3a..8088cd7923 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,10 @@
 ## 1.20.1 (Unreleased)
+
+ENHANCEMENTS:
+
+* Data Source: `tencentcloud_as_scaling_groups` add optional argument `tags` and attribute `tags` for `scaling_group_list`.
+* Resource: `tencentcloud_as_scaling_group` add optional argument `tags`.
+
 ## 1.20.0 (September 24, 2019)
 
 FEATURES:
diff --git a/examples/tencentcloud-as/main.tf b/examples/tencentcloud-as/main.tf
index 988fa568a4..e2e39a54d5 100644
--- a/examples/tencentcloud-as/main.tf
+++ b/examples/tencentcloud-as/main.tf
@@ -48,6 +48,10 @@ resource "tencentcloud_as_scaling_group" "scaling_group" {
   desired_capacity     = "${var.desired_capacity}"
   termination_policies = ["NEWEST_INSTANCE"]
   retry_policy         = "INCREMENTAL_INTERVALS"
+
+  tags = {
+    "test" = "test"
+  }
 }
 
 resource "tencentcloud_as_scaling_policy" "scaling_policy" {
@@ -91,3 +95,7 @@ resource "tencentcloud_as_notification" "notification" {
   notification_types          = ["SCALE_OUT_FAILED"]
   notification_user_group_ids = ["76955"]
 }
+
+data "tencentcloud_as_scaling_groups" "scaling_groups_tags" {
+  tags = "${tencentcloud_as_scaling_group.scaling_group.tags}"
+}
diff --git a/tencentcloud/data_source_tc_as_scaling_groups.go b/tencentcloud/data_source_tc_as_scaling_groups.go
index 8adfedad70..caf38059e8 100644
--- a/tencentcloud/data_source_tc_as_scaling_groups.go
+++ b/tencentcloud/data_source_tc_as_scaling_groups.go
@@ -40,11 +40,18 @@ func dataSourceTencentCloudAsScalingGroups() *schema.Resource {
                 Optional:    true,
                 Description: "A scaling group name used to query.",
             },
+            "tags": {
+                Type:        schema.TypeMap,
+                Optional:    true,
+                Description: "Tags used to query.",
+            },
             "result_output_file": {
                 Type:        schema.TypeString,
                 Optional:    true,
                 Description: "Used to save results.",
             },
+
+            // computed
             "scaling_group_list": {
                 Type:        schema.TypeList,
                 Computed:    true,
@@ -183,6 +190,11 @@
                             Computed:    true,
                             Description: "The time when the AS group was created.",
                         },
+                        "tags": {
+                            Type:        schema.TypeMap,
+                            Computed:    true,
+                            Description: "Tags of the scaling group.",
+                        },
                     },
                 },
             },
@@ -213,30 +225,38 @@ func dataSourceTencentCloudAsScalingGroupRead(d *schema.ResourceData, meta inter
         scalingGroupName = v.(string)
     }
 
-    scalingGroups, err := asService.DescribeAutoScalingGroupByFilter(ctx, scalingGroupId, configurationId, scalingGroupName)
+    tags := getTags(d, "tags")
+
+    scalingGroups, err := asService.DescribeAutoScalingGroupByFilter(ctx, scalingGroupId, configurationId, scalingGroupName, tags)
     if err != nil {
         return err
     }
 
     scalingGroupList := make([]map[string]interface{}, 0, len(scalingGroups))
     for _, scalingGroup := range scalingGroups {
+        tags := make(map[string]string, len(scalingGroup.Tags))
+        for _, tag := range scalingGroup.Tags {
+            tags[*tag.Key] = *tag.Value
+        }
+
         mapping := map[string]interface{}{
-            "scaling_group_id":     *scalingGroup.AutoScalingGroupId,
-            "scaling_group_name":   *scalingGroup.AutoScalingGroupName,
-            "configuration_id":     *scalingGroup.LaunchConfigurationId,
-            "status":               *scalingGroup.AutoScalingGroupStatus,
-            "instance_count":       *scalingGroup.InstanceCount,
-            "max_size":             *scalingGroup.MaxSize,
-            "min_size":             *scalingGroup.MinSize,
-            "vpc_id":               *scalingGroup.VpcId,
+            "scaling_group_id":     scalingGroup.AutoScalingGroupId,
+            "scaling_group_name":   scalingGroup.AutoScalingGroupName,
+            "configuration_id":     scalingGroup.LaunchConfigurationId,
+            "status":               scalingGroup.AutoScalingGroupStatus,
+            "instance_count":       scalingGroup.InstanceCount,
+            "max_size":             scalingGroup.MaxSize,
+            "min_size":             scalingGroup.MinSize,
+            "vpc_id":               scalingGroup.VpcId,
             "subnet_ids":           flattenStringList(scalingGroup.SubnetIdSet),
             "zones":                flattenStringList(scalingGroup.ZoneSet),
-            "default_cooldown":     *scalingGroup.DefaultCooldown,
-            "desired_capacity":     *scalingGroup.DesiredCapacity,
+            "default_cooldown":     scalingGroup.DefaultCooldown,
+            "desired_capacity":     scalingGroup.DesiredCapacity,
             "load_balancer_ids":    flattenStringList(scalingGroup.LoadBalancerIdSet),
             "termination_policies": flattenStringList(scalingGroup.TerminationPolicySet),
-            "retry_policy":         *scalingGroup.RetryPolicy,
-            "create_time":          *scalingGroup.CreatedTime,
+            "retry_policy":         scalingGroup.RetryPolicy,
+            "create_time":          scalingGroup.CreatedTime,
+            "tags":                 tags,
         }
         if scalingGroup.ForwardLoadBalancerSet != nil && len(scalingGroup.ForwardLoadBalancerSet) > 0 {
             forwardLoadBalancers := make([]map[string]interface{}, 0, len(scalingGroup.ForwardLoadBalancerSet))
@@ -244,16 +264,16 @@ func dataSourceTencentCloudAsScalingGroupRead(d *schema.ResourceData, meta inter
             targetAttributes := make([]map[string]interface{}, 0, len(v.TargetAttributes))
             for _, vv := range v.TargetAttributes {
                 targetAttribute := map[string]interface{}{
-                    "port":   *vv.Port,
-                    "weight": *vv.Weight,
+                    "port":   vv.Port,
+                    "weight": vv.Weight,
                 }
                 targetAttributes = append(targetAttributes, targetAttribute)
             }
             forwardLoadBalancer := map[string]interface{}{
-                "load_balancer_id":  *v.LoadBalancerId,
-                "listener_id":       *v.ListenerId,
+                "load_balancer_id":  v.LoadBalancerId,
+                "listener_id":       v.ListenerId,
                 "target_attributes": targetAttributes,
-                "location_id":       *v.LocationId,
+                "location_id":       v.LocationId,
             }
             forwardLoadBalancers = append(forwardLoadBalancers, forwardLoadBalancer)
         }
diff --git a/tencentcloud/data_source_tc_as_scaling_groups_test.go b/tencentcloud/data_source_tc_as_scaling_groups_test.go
index 16d7563b62..a67752ff85 100644
--- a/tencentcloud/data_source_tc_as_scaling_groups_test.go
+++ b/tencentcloud/data_source_tc_as_scaling_groups_test.go
@@ -1,6 +1,7 @@
 package tencentcloud
 
 import (
+    "regexp"
     "testing"
 
     "github.com/hashicorp/terraform/helper/resource"
@@ -35,6 +36,17 @@ func TestAccTencentCloudAsScalingGroupsDataSource_basic(t *testing.T) {
                     resource.TestCheckResourceAttr("data.tencentcloud_as_scaling_groups.scaling_groups_name", "scaling_group_list.0.subnet_ids.#", "1"),
                     resource.TestCheckResourceAttrSet("data.tencentcloud_as_scaling_groups.scaling_groups_name", "scaling_group_list.0.status"),
                     resource.TestCheckResourceAttrSet("data.tencentcloud_as_scaling_groups.scaling_groups_name", "scaling_group_list.0.create_time"),
+
+                    resource.TestMatchResourceAttr("data.tencentcloud_as_scaling_groups.scaling_groups_tags", "scaling_group_list.#", regexp.MustCompile(`^[1-9]\d*$`)),
+                    resource.TestCheckResourceAttrSet("data.tencentcloud_as_scaling_groups.scaling_groups_tags", "scaling_group_list.0.scaling_group_name"),
+                    resource.TestCheckResourceAttrSet("data.tencentcloud_as_scaling_groups.scaling_groups_tags", "scaling_group_list.0.configuration_id"),
+                    resource.TestCheckResourceAttrSet("data.tencentcloud_as_scaling_groups.scaling_groups_tags", "scaling_group_list.0.max_size"),
+                    resource.TestCheckResourceAttrSet("data.tencentcloud_as_scaling_groups.scaling_groups_tags", "scaling_group_list.0.min_size"),
+                    resource.TestCheckResourceAttrSet("data.tencentcloud_as_scaling_groups.scaling_groups_tags", "scaling_group_list.0.vpc_id"),
+                    resource.TestCheckResourceAttrSet("data.tencentcloud_as_scaling_groups.scaling_groups_tags", "scaling_group_list.0.subnet_ids.#"),
+                    resource.TestCheckResourceAttrSet("data.tencentcloud_as_scaling_groups.scaling_groups_tags", "scaling_group_list.0.status"),
+                    resource.TestCheckResourceAttrSet("data.tencentcloud_as_scaling_groups.scaling_groups_tags", "scaling_group_list.0.create_time"),
+                    resource.TestCheckResourceAttr("data.tencentcloud_as_scaling_groups.scaling_groups_tags", "scaling_group_list.0.tags.test", "test"),
                 ),
             },
         },
@@ -72,7 +84,7 @@ func TestAccTencentCloudAsScalingGroupsDataSource_full(t *testing.T) {
     })
 }
 
-//todo
+// todo
 func testAccAsScalingGroupsDataSource_basic() string {
     return `
 resource "tencentcloud_vpc" "vpc" {
@@ -100,6 +112,10 @@ resource "tencentcloud_as_scaling_group" "scaling_group" {
   min_size = 0
   vpc_id = "${tencentcloud_vpc.vpc.id}"
   subnet_ids = ["${tencentcloud_subnet.subnet.id}"]
+
+  tags = {
+    "test" = "test"
+  }
 }
 
 data "tencentcloud_as_scaling_groups" "scaling_groups" {
@@ -109,6 +125,10 @@ data "tencentcloud_as_scaling_groups" "scaling_groups" {
 data "tencentcloud_as_scaling_groups" "scaling_groups_name" {
   scaling_group_name = "${tencentcloud_as_scaling_group.scaling_group.scaling_group_name}"
 }
+
+data "tencentcloud_as_scaling_groups" "scaling_groups_tags" {
+  tags = "${tencentcloud_as_scaling_group.scaling_group.tags}"
+}
 `
 }
 
diff --git a/tencentcloud/resource_tc_as_scaling_group.go b/tencentcloud/resource_tc_as_scaling_group.go
index 92487b5d0b..5a88f13602 100644
--- a/tencentcloud/resource_tc_as_scaling_group.go
+++ b/tencentcloud/resource_tc_as_scaling_group.go
@@ -34,7 +34,7 @@ Import
 
 AutoScaling Groups can be imported using the id, e.g.
 
-```hcl
+```
 $ terraform import tencentcloud_as_scaling_group.scaling_group asg-n32ymck2
 ```
 */
@@ -46,10 +46,10 @@ import (
     "log"
 
     "github.com/hashicorp/terraform/helper/resource"
     "github.com/hashicorp/terraform/helper/schema"
     as "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/as/v20180419"
-    "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
+    sdkErrors "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
+    "github.com/terraform-providers/terraform-provider-tencentcloud/tencentcloud/ratelimit"
 )
 
 func resourceTencentCloudAsScalingGroup() *schema.Resource {
@@ -193,6 +193,11 @@
             ValidateFunc: validateAllowedStringValue([]string{SCALING_GROUP_RETRY_POLICY_IMMEDIATE_RETRY, SCALING_GROUP_RETRY_POLICY_INCREMENTAL_INTERVALS}),
         },
+        "tags": {
+            Type:        schema.TypeMap,
+            Optional:    true,
+            Description: "Tags of a scaling group.",
+        },
 
         // computed value
         "status": {
@@ -295,19 +300,42 @@ func resourceTencentCloudAsScalingGroupCreate(d *schema.ResourceData, meta inter
         }
     }
 
-    response, err := meta.(*TencentCloudClient).apiV3Conn.UseAsClient().CreateAutoScalingGroup(request)
-    if err != nil {
-        log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n",
-            logId, request.GetAction(), request.ToJsonString(), err.Error())
-        return err
-    } else {
+    if tags := getTags(d, "tags"); len(tags) > 0 {
+        for k, v := range tags {
+            request.Tags = append(request.Tags, &as.Tag{
+                Key:   stringToPointer(k),
+                Value: stringToPointer(v),
+            })
+        }
+    }
+
+    var id string
+    if err := resource.Retry(writeRetryTimeout, func() *resource.RetryError {
+        ratelimit.Check(request.GetAction())
+
+        response, err := meta.(*TencentCloudClient).apiV3Conn.UseAsClient().CreateAutoScalingGroup(request)
+        if err != nil {
+            log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n",
+                logId, request.GetAction(), request.ToJsonString(), err.Error())
+            return retryError(err)
+        }
         log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n",
             logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
+
+        if response.Response.AutoScalingGroupId == nil {
+            err = fmt.Errorf("Auto scaling group id is nil")
+            return resource.NonRetryableError(err)
+        }
+
+        id = *response.Response.AutoScalingGroupId
+
+        return nil
+    }); err != nil {
+        return err
     }
-    if response.Response.AutoScalingGroupId == nil {
-        return fmt.Errorf("Auto scaling group id is nil")
-    }
-    d.SetId(*response.Response.AutoScalingGroupId)
+
+    d.SetId(id)
 
     return resourceTencentCloudAsScalingGroupRead(d, meta)
 }
@@ -322,27 +350,37 @@ func resourceTencentCloudAsScalingGroupRead(d *schema.ResourceData, meta interfa
     asService := AsService{
         client: meta.(*TencentCloudClient).apiV3Conn,
     }
-    scalingGroup, err := asService.DescribeAutoScalingGroupById(ctx, scalingGroupId)
-    if err != nil {
+
+    var (
+        scalingGroup *as.AutoScalingGroup
+        err          error
+    )
+    if err := resource.Retry(readRetryTimeout, func() *resource.RetryError {
+        scalingGroup, err = asService.DescribeAutoScalingGroupById(ctx, scalingGroupId)
+        if err != nil {
+            return retryError(err)
+        }
+        return nil
+    }); err != nil {
         return err
     }
 
-    d.Set("scaling_group_name", *scalingGroup.AutoScalingGroupName)
-    d.Set("configuration_id", *scalingGroup.LaunchConfigurationId)
-    d.Set("status", *scalingGroup.AutoScalingGroupStatus)
-    d.Set("instance_count", *scalingGroup.InstanceCount)
-    d.Set("max_size", *scalingGroup.MaxSize)
-    d.Set("min_size", *scalingGroup.MinSize)
-    d.Set("vpc_id", *scalingGroup.VpcId)
-    d.Set("project_id", *scalingGroup.ProjectId)
+    d.Set("scaling_group_name", scalingGroup.AutoScalingGroupName)
+    d.Set("configuration_id", scalingGroup.LaunchConfigurationId)
+    d.Set("status", scalingGroup.AutoScalingGroupStatus)
+    d.Set("instance_count", scalingGroup.InstanceCount)
+    d.Set("max_size", scalingGroup.MaxSize)
+    d.Set("min_size", scalingGroup.MinSize)
+    d.Set("vpc_id", scalingGroup.VpcId)
+    d.Set("project_id", scalingGroup.ProjectId)
     d.Set("subnet_ids", flattenStringList(scalingGroup.SubnetIdSet))
     d.Set("zones", flattenStringList(scalingGroup.ZoneSet))
-    d.Set("default_cooldown", *scalingGroup.DefaultCooldown)
-    d.Set("desired_capacity", *scalingGroup.DesiredCapacity)
+    d.Set("default_cooldown", scalingGroup.DefaultCooldown)
+    d.Set("desired_capacity", scalingGroup.DesiredCapacity)
     d.Set("load_balancer_ids", flattenStringList(scalingGroup.LoadBalancerIdSet))
     d.Set("termination_policies", flattenStringList(scalingGroup.TerminationPolicySet))
-    d.Set("retry_policy", *scalingGroup.RetryPolicy)
-    d.Set("create_time", *scalingGroup.CreatedTime)
+    d.Set("retry_policy", scalingGroup.RetryPolicy)
+    d.Set("create_time", scalingGroup.CreatedTime)
 
     if scalingGroup.ForwardLoadBalancerSet != nil && len(scalingGroup.ForwardLoadBalancerSet) > 0 {
         forwardLoadBalancers := make([]map[string]interface{}, 0, len(scalingGroup.ForwardLoadBalancerSet))
@@ -366,6 +404,12 @@ func resourceTencentCloudAsScalingGroupRead(d *schema.ResourceData, meta interfa
         d.Set("forward_balancer_ids", forwardLoadBalancers)
     }
 
+    tags := make(map[string]string, len(scalingGroup.Tags))
+    for _, tag := range scalingGroup.Tags {
+        tags[*tag.Key] = *tag.Value
+    }
+    d.Set("tags", tags)
+
     return nil
 }
 
@@ -373,35 +417,54 @@ func resourceTencentCloudAsScalingGroupUpdate(d *schema.ResourceData, meta inter
     defer logElapsed("resource.tencentcloud_as_scaling_group.update")()
 
     logId := getLogId(contextNil)
+    ctx := context.WithValue(context.TODO(), "logId", logId)
+
+    client := meta.(*TencentCloudClient).apiV3Conn
+    tagService := TagService{client: client}
+    region := client.Region
 
     request := as.NewModifyAutoScalingGroupRequest()
     scalingGroupId := d.Id()
+
+    d.Partial(true)
+
+    var updateAttrs []string
+
     request.AutoScalingGroupId = &scalingGroupId
     if d.HasChange("scaling_group_name") {
+        updateAttrs = append(updateAttrs, "scaling_group_name")
         request.AutoScalingGroupName = stringToPointer(d.Get("scaling_group_name").(string))
     }
     if d.HasChange("max_size") {
+        updateAttrs = append(updateAttrs, "max_size")
         request.MaxSize = intToPointer(d.Get("max_size").(int))
     }
     if d.HasChange("min_size") {
+        updateAttrs = append(updateAttrs, "min_size")
         request.MinSize = intToPointer(d.Get("min_size").(int))
     }
     if d.HasChange("vpc_id") {
+        updateAttrs = append(updateAttrs, "vpc_id")
         request.VpcId = stringToPointer(d.Get("vpc_id").(string))
     }
     if d.HasChange("project_id") {
+        updateAttrs = append(updateAttrs, "project_id")
         request.ProjectId = intToPointer(d.Get("project_id").(int))
     }
     if d.HasChange("default_cooldown") {
+        updateAttrs = append(updateAttrs, "default_cooldown")
         request.DefaultCooldown = intToPointer(d.Get("default_cooldown").(int))
     }
     if d.HasChange("desired_capacity") {
+        updateAttrs = append(updateAttrs, "desired_capacity")
         request.DesiredCapacity = intToPointer(d.Get("desired_capacity").(int))
     }
     if d.HasChange("retry_policy") {
+        updateAttrs = append(updateAttrs, "retry_policy")
         request.RetryPolicy = stringToPointer(d.Get("retry_policy").(string))
     }
     if d.HasChange("subnet_ids") {
+        updateAttrs = append(updateAttrs, "subnet_ids")
         subnetIds := d.Get("subnet_ids").([]interface{})
         request.SubnetIds = make([]*string, 0, len(subnetIds))
         for i := range subnetIds {
@@ -410,6 +473,7 @@ func resourceTencentCloudAsScalingGroupUpdate(d *schema.ResourceData, meta inter
         }
     }
     if d.HasChange("zones") {
+        updateAttrs = append(updateAttrs, "zones")
         zones := d.Get("zones").([]interface{})
         request.Zones = make([]*string, 0, len(zones))
         for i := range zones {
@@ -418,6 +482,7 @@ func resourceTencentCloudAsScalingGroupUpdate(d *schema.ResourceData, meta inter
         }
     }
     if d.HasChange("termination_policies") {
+        updateAttrs = append(updateAttrs, "termination_policies")
         terminationPolicies := d.Get("termination_policies").([]interface{})
         request.TerminationPolicies = make([]*string, 0, len(terminationPolicies))
         for i := range terminationPolicies {
@@ -426,20 +491,34 @@ func resourceTencentCloudAsScalingGroupUpdate(d *schema.ResourceData, meta inter
         }
     }
 
-    response, err := meta.(*TencentCloudClient).apiV3Conn.UseAsClient().ModifyAutoScalingGroup(request)
-    if err != nil {
-        log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n",
-            logId, request.GetAction(), request.ToJsonString(), err.Error())
+    if err := resource.Retry(writeRetryTimeout, func() *resource.RetryError {
+        ratelimit.Check(request.GetAction())
+
+        response, err := client.UseAsClient().ModifyAutoScalingGroup(request)
+        if err != nil {
+            log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n",
+                logId, request.GetAction(), request.ToJsonString(), err.Error())
+            return retryError(err)
+        }
+
+        log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n",
+            logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
+
+        return nil
+    }); err != nil {
         return err
     }
-    log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n",
-        logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
 
-    balancerChanged := false
+    for _, attr := range updateAttrs {
+        d.SetPartial(attr)
+    }
+    updateAttrs = updateAttrs[:0]
+
     balancerRequest := as.NewModifyLoadBalancersRequest()
     balancerRequest.AutoScalingGroupId = &scalingGroupId
     if d.HasChange("load_balancer_ids") {
-        balancerChanged = true
+        updateAttrs = append(updateAttrs, "load_balancer_ids")
+
         loadBalancerIds := d.Get("load_balancer_ids").([]interface{})
         balancerRequest.LoadBalancerIds = make([]*string, 0, len(loadBalancerIds))
         for i := range loadBalancerIds {
@@ -449,7 +528,8 @@ func resourceTencentCloudAsScalingGroupUpdate(d *schema.ResourceData, meta inter
     }
 
     if d.HasChange("forward_balancer_ids") {
-        balancerChanged = true
+        updateAttrs = append(updateAttrs, "forward_balancer_ids")
+
         forwardBalancers := d.Get("forward_balancer_ids").([]interface{})
         balancerRequest.ForwardLoadBalancers = make([]*as.ForwardLoadBalancer, 0, len(forwardBalancers))
         for _, v := range forwardBalancers {
@@ -474,18 +554,45 @@ func resourceTencentCloudAsScalingGroupUpdate(d *schema.ResourceData, meta inter
         }
     }
 
-    if balancerChanged {
-        balancerResponse, err := meta.(*TencentCloudClient).apiV3Conn.UseAsClient().ModifyLoadBalancers(balancerRequest)
-        if err != nil {
-            log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n",
-                logId, balancerRequest.GetAction(), balancerRequest.ToJsonString(), err.Error())
+    if len(updateAttrs) > 0 {
+        if err := resource.Retry(writeRetryTimeout, func() *resource.RetryError {
+            ratelimit.Check(balancerRequest.GetAction())
+
+            balancerResponse, err := client.UseAsClient().ModifyLoadBalancers(balancerRequest)
+            if err != nil {
+                log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n",
+                    logId, balancerRequest.GetAction(), balancerRequest.ToJsonString(), err.Error())
+                return retryError(err)
+            }
+
+            log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n",
+                logId, balancerRequest.GetAction(), balancerRequest.ToJsonString(), balancerResponse.ToJsonString())
+
+            return nil
+        }); err != nil {
             return err
         }
-        log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n",
-            logId, balancerRequest.GetAction(), balancerRequest.ToJsonString(), balancerResponse.ToJsonString())
+
+        for _, attr := range updateAttrs {
+            d.SetPartial(attr)
+        }
     }
 
-    return nil
+    if d.HasChange("tags") {
+        oldTags, newTags := d.GetChange("tags")
+        replaceTags, deleteTags := diffTags(oldTags.(map[string]interface{}), newTags.(map[string]interface{}))
+
+        resourceName := BuildTagResourceName("as", "auto-scaling-group", region, scalingGroupId)
+        if err := tagService.ModifyTags(ctx, resourceName, replaceTags, deleteTags); err != nil {
+            return err
+        }
+
+        d.SetPartial("tags")
+    }
+
+    d.Partial(false)
+
+    return resourceTencentCloudAsScalingGroupRead(d, meta)
 }
 
 func resourceTencentCloudAsScalingGroupDelete(d *schema.ResourceData, meta interface{}) error {
@@ -506,15 +613,16 @@ func resourceTencentCloudAsScalingGroupDelete(d *schema.ResourceData, meta inter
         return err
     }
     if *scalingGroup.InstanceCount > 0 || *scalingGroup.DesiredCapacity > 0 {
-        err := asService.ClearScalingGroupInstance(ctx, scalingGroupId)
-        if err != nil {
+        if err := resource.Retry(writeRetryTimeout, func() *resource.RetryError {
+            return retryError(asService.ClearScalingGroupInstance(ctx, scalingGroupId))
+        }); err != nil {
             return err
         }
     }
 
     err = resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
         if errRet := asService.DeleteScalingGroup(ctx, scalingGroupId); errRet != nil {
-            if sdkErr, ok := errRet.(*errors.TencentCloudSDKError); ok {
+            if sdkErr, ok := errRet.(*sdkErrors.TencentCloudSDKError); ok {
                 if sdkErr.Code == AsScalingGroupNotFound {
                     return nil
                 } else if sdkErr.Code == AsScalingGroupInProgress || sdkErr.Code == AsScalingGroupInstanceInGroup {
diff --git a/tencentcloud/resource_tc_as_scaling_group_test.go b/tencentcloud/resource_tc_as_scaling_group_test.go
index 5b004a99c4..e0001ebdca 100644
--- a/tencentcloud/resource_tc_as_scaling_group_test.go
+++ b/tencentcloud/resource_tc_as_scaling_group_test.go
@@ -31,7 +31,7 @@ func testSweepAsScalingGroups(region string) error {
     asService := AsService{
         client: client.apiV3Conn,
     }
-    scalingGroups, err := asService.DescribeAutoScalingGroupByFilter(ctx, "", "", "")
+    scalingGroups, err := asService.DescribeAutoScalingGroupByFilter(ctx, "", "", "", nil)
     if err != nil {
         return fmt.Errorf("list scaling group error: %s", err.Error())
     }
@@ -104,6 +104,7 @@ func TestAccTencentCloudAsScalingGroup_full(t *testing.T) {
                     resource.TestCheckResourceAttrSet("tencentcloud_as_scaling_group.scaling_group", "status"),
                     resource.TestCheckResourceAttrSet("tencentcloud_as_scaling_group.scaling_group", "instance_count"),
                     resource.TestCheckResourceAttrSet("tencentcloud_as_scaling_group.scaling_group", "create_time"),
+                    resource.TestCheckResourceAttr("tencentcloud_as_scaling_group.scaling_group", "tags.test", "test"),
                 ),
             },
             {
@@ -117,6 +118,8 @@ func TestAccTencentCloudAsScalingGroup_full(t *testing.T) {
                     resource.TestCheckResourceAttr("tencentcloud_as_scaling_group.scaling_group", "termination_policies.#", "1"),
                     resource.TestCheckResourceAttr("tencentcloud_as_scaling_group.scaling_group", "termination_policies.0", "OLDEST_INSTANCE"),
                     resource.TestCheckResourceAttr("tencentcloud_as_scaling_group.scaling_group", "retry_policy", "IMMEDIATE_RETRY"),
+                    resource.TestCheckNoResourceAttr("tencentcloud_as_scaling_group.scaling_group", "tags.test"),
+                    resource.TestCheckResourceAttr("tencentcloud_as_scaling_group.scaling_group", "tags.abc", "abc"),
                 ),
             },
         },
@@ -227,6 +230,10 @@ resource "tencentcloud_as_scaling_group" "scaling_group" {
   desired_capacity = 1
   termination_policies = ["NEWEST_INSTANCE"]
   retry_policy = "INCREMENTAL_INTERVALS"
+
+  tags = {
+    "test" = "test"
+  }
 }
 `
 }
@@ -263,6 +270,10 @@ resource "tencentcloud_as_scaling_group" "scaling_group" {
   desired_capacity = 0
   termination_policies = ["OLDEST_INSTANCE"]
   retry_policy = "IMMEDIATE_RETRY"
+
+  tags = {
+    "abc" = "abc"
+  }
 }
 `
 }
diff --git a/tencentcloud/service_tencentcloud_as.go b/tencentcloud/service_tencentcloud_as.go
index 0958a6ddf7..67443c5050 100644
--- a/tencentcloud/service_tencentcloud_as.go
+++ b/tencentcloud/service_tencentcloud_as.go
@@ -3,13 +3,13 @@ package tencentcloud
 import (
     "context"
     "fmt"
-    "github.com/terraform-providers/terraform-provider-tencentcloud/tencentcloud/ratelimit"
     "log"
     "time"
 
     "github.com/hashicorp/terraform/helper/resource"
     as "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/as/v20180419"
     "github.com/terraform-providers/terraform-provider-tencentcloud/tencentcloud/connectivity"
+    "github.com/terraform-providers/terraform-provider-tencentcloud/tencentcloud/ratelimit"
 )
 
 type AsService struct {
@@ -129,7 +129,11 @@ func (me *AsService) DescribeAutoScalingGroupById(ctx context.Context, scalingGr
     return
 }
 
-func (me *AsService) DescribeAutoScalingGroupByFilter(ctx context.Context, scalingGroupId, configurationId, scalingGroupName string) (scalingGroups []*as.AutoScalingGroup, errRet error) {
+func (me *AsService) DescribeAutoScalingGroupByFilter(
+    ctx context.Context,
+    scalingGroupId, configurationId, scalingGroupName string,
+    tags map[string]string,
+) (scalingGroups []*as.AutoScalingGroup, errRet error) {
     logId := getLogId(ctx)
     request := as.NewDescribeAutoScalingGroupsRequest()
     request.Filters = make([]*as.Filter, 0)
@@ -154,6 +158,12 @@ func (me *AsService) DescribeAutoScalingGroupByFilter(ctx context.Context, scali
         }
         request.Filters = append(request.Filters, filter)
     }
+    for k, v := range tags {
+        request.Filters = append(request.Filters, &as.Filter{
+            Name:   stringToPointer("tag:" + k),
+            Values: []*string{stringToPointer(v)},
+        })
+    }
 
     offset := 0
     pageSize := 100
diff --git a/website/docs/d/as_scaling_groups.html.markdown b/website/docs/d/as_scaling_groups.html.markdown
index a7db92bfc3..786c88560a 100644
--- a/website/docs/d/as_scaling_groups.html.markdown
+++ b/website/docs/d/as_scaling_groups.html.markdown
@@ -28,6 +28,7 @@ The following arguments are supported:
 * `result_output_file` - (Optional) Used to save results.
 * `scaling_group_id` - (Optional) A specified scaling group ID used to query.
 * `scaling_group_name` - (Optional) A scaling group name used to query.
+* `tags` - (Optional) Tags used to query.
 
 ## Attributes Reference
 
@@ -55,6 +56,7 @@ In addition to all arguments above, the following attributes are exported:
   * `scaling_group_name` - Auto scaling group name.
   * `status` - Current status of a scaling group.
   * `subnet_ids` - A list of subnet IDs.
+  * `tags` - Tags of the scaling group.
   * `termination_policies` - A policy used to select a CVM instance to be terminated from the scaling group.
   * `vpc_id` - ID of the vpc with which the instance is associated.
   * `zones` - A list of available zones.
diff --git a/website/docs/r/as_scaling_group.html.markdown b/website/docs/r/as_scaling_group.html.markdown
index d7a1a8c2b6..76e53ab5fa 100644
--- a/website/docs/r/as_scaling_group.html.markdown
+++ b/website/docs/r/as_scaling_group.html.markdown
@@ -55,6 +55,7 @@ The following arguments are supported:
 * `project_id` - (Optional) Specifys to which project the scaling group belongs.
 * `retry_policy` - (Optional) Available values for retry policies include IMMEDIATE_RETRY and INCREMENTAL_INTERVALS.
 * `subnet_ids` - (Optional) ID list of subnet, and for VPC it is required.
+* `tags` - (Optional) Tags of a scaling group.
 * `termination_policies` - (Optional) Available values for termination policies include OLDEST_INSTANCE and NEWEST_INSTANCE.
 * `zones` - (Optional) List of available zones, for Basic network it is required.
 
@@ -82,7 +83,7 @@ In addition to all arguments above, the following attributes are exported:
 
 AutoScaling Groups can be imported using the id, e.g.
 
-```hcl
+```
 $ terraform import tencentcloud_as_scaling_group.scaling_group asg-n32ymck2
 ```
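For anyone exercising the change locally, a minimal configuration showing how the new `tags` argument and the tag-based lookup fit together might look like the sketch below. It is adapted from the `examples/tencentcloud-as/main.tf` and test configurations in this diff; the `tencentcloud_as_scaling_config`, `tencentcloud_vpc`, and `tencentcloud_subnet` resources referenced here are assumed to exist elsewhere in the configuration and are not part of the change.

```hcl
resource "tencentcloud_as_scaling_group" "scaling_group" {
  scaling_group_name = "tf-as-scaling-group"
  configuration_id   = "${tencentcloud_as_scaling_config.launch_configuration.id}" # assumed to exist
  max_size           = 1
  min_size           = 0
  vpc_id             = "${tencentcloud_vpc.vpc.id}"                                # assumed to exist
  subnet_ids         = ["${tencentcloud_subnet.subnet.id}"]                        # assumed to exist

  # New in this change: tags are attached on create and reconciled on update.
  tags = {
    "test" = "test"
  }
}

# New in this change: scaling groups can be queried by tag; the service layer
# translates each entry into a "tag:<key>" filter on DescribeAutoScalingGroups.
data "tencentcloud_as_scaling_groups" "scaling_groups_tags" {
  tags = "${tencentcloud_as_scaling_group.scaling_group.tags}"
}
```

Each entry of `scaling_group_list` returned by the data source now also exposes a computed `tags` map, mirroring the resource's new argument.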