Skip to content

feat(tke): [120076624] tencentcloud_kubernetes_scale_worker Fix import function #2891

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Oct 18, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .changelog/2891.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
```release-note:enhancement
resource/tencentcloud_kubernetes_scale_worker: Fix import function
```
Original file line number Diff line number Diff line change
Expand Up @@ -108,5 +108,5 @@ Import
tke scale worker can be imported, e.g.

```
$ terraform import tencentcloud_kubernetes_scale_worker.example cls-mij6c2pq#ins-n6esjkdi,ins-9h3rdxt8,ins-qretqeas
$ terraform import tencentcloud_kubernetes_scale_worker.example cls-mij6c2pq#ins-n6esjkdi
```
Original file line number Diff line number Diff line change
Expand Up @@ -22,20 +22,6 @@ import (

var importFlag1 = false

var GlobalClusterId string
var CreateClusterInstancesVpcId string
var CreateClusterInstancesProjectId int64
var WorkersInstanceIds []*string
var WorkersNewWorkerInstancesList []map[string]interface{}
var WorkersLabelsMap map[string]string

func init() {
// need to support append by multiple calls when the paging occurred
WorkersNewWorkerInstancesList = make([]map[string]interface{}, 0)
WorkersLabelsMap = make(map[string]string)
WorkersInstanceIds = make([]*string, 0)
}

func customScaleWorkerResourceImporter(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) {
importFlag1 = true
err := resourceTencentCloudKubernetesScaleWorkerRead(d, m)
Expand All @@ -48,6 +34,7 @@ func customScaleWorkerResourceImporter(ctx context.Context, d *schema.ResourceDa

func resourceTencentCloudKubernetesScaleWorkerReadPostRequest1(ctx context.Context, req *cvm.DescribeInstancesRequest, resp *cvm.DescribeInstancesResponse) error {
d := tccommon.ResourceDataFromContext(ctx)
ctxData := tccommon.DataFromContext(ctx)

instances := make([]*cvm.Instance, 0)
instances = append(instances, resp.Response.InstanceSet...)
Expand Down Expand Up @@ -117,85 +104,46 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostRequest1(ctx context.Conte
_ = d.Set("worker_config", instanceList)
}

clusterId := ctxData.Get("clusterId").(string)
newWorkerInstancesList := ctxData.Get("newWorkerInstancesList").([]map[string]interface{})
labelsMap := ctxData.Get("labelsMap").(map[string]string)
// The machines I generated was deleted by others.
if len(WorkersNewWorkerInstancesList) == 0 {
if len(newWorkerInstancesList) == 0 {
d.SetId("")
return nil
}

_ = d.Set("cluster_id", GlobalClusterId)
_ = d.Set("labels", WorkersLabelsMap)
_ = d.Set("worker_instances_list", WorkersNewWorkerInstancesList)
_ = d.Set("cluster_id", clusterId)
_ = d.Set("labels", labelsMap)
_ = d.Set("worker_instances_list", newWorkerInstancesList)

return nil
}
func clusterInstanceParamHandle(ctx context.Context, req *tke.DescribeClusterInstancesRequest, resp *tke.DescribeClusterInstancesResponse) error {
func clusterInstanceParamHandle(ctx context.Context, workers []InstanceInfo) error {
d := tccommon.ResourceDataFromContext(ctx)
var has = map[string]bool{}

workerInstancesList := d.Get("worker_instances_list").([]interface{})
instanceMap := make(map[string]bool)
for _, v := range workerInstancesList {
infoMap, ok := v.(map[string]interface{})
if !ok || infoMap["instance_id"] == nil {
return fmt.Errorf("worker_instances_list is broken.")
}

instanceId, ok := infoMap["instance_id"].(string)
if !ok || instanceId == "" {
return fmt.Errorf("worker_instances_list.instance_id is broken.")
}

if instanceMap[instanceId] {
log.Printf("[WARN]The same instance id exists in the list")
}

instanceMap[instanceId] = true
}
workers := make([]InstanceInfo, 0, 100)
for _, item := range resp.Response.InstanceSet {
if has[*item.InstanceId] {
return fmt.Errorf("get repeated instance_id[%s] when doing DescribeClusterInstances", *item.InstanceId)
}
has[*item.InstanceId] = true
instanceInfo := InstanceInfo{
InstanceId: *item.InstanceId,
InstanceRole: *item.InstanceRole,
InstanceState: *item.InstanceState,
FailedReason: *item.FailedReason,
InstanceAdvancedSettings: item.InstanceAdvancedSettings,
}
if item.CreatedTime != nil {
instanceInfo.CreatedTime = *item.CreatedTime
}
if item.NodePoolId != nil {
instanceInfo.NodePoolId = *item.NodePoolId
}
if item.LanIP != nil {
instanceInfo.LanIp = *item.LanIP
}
if instanceInfo.InstanceRole == TKE_ROLE_WORKER {
workers = append(workers, instanceInfo)
}
}
ctxData := tccommon.DataFromContext(ctx)

newWorkerInstancesList := make([]map[string]interface{}, 0, len(workers))
labelsMap := make(map[string]string)
instanceIds := make([]*string, 0)
instanceMap := ctxData.Get("instanceMap").(map[string]bool)
for sub, cvmInfo := range workers {
if _, ok := instanceMap[cvmInfo.InstanceId]; !ok {
continue
}
WorkersInstanceIds = append(WorkersInstanceIds, &workers[sub].InstanceId)
instanceIds = append(instanceIds, &workers[sub].InstanceId)
tempMap := make(map[string]interface{})
tempMap["instance_id"] = cvmInfo.InstanceId
tempMap["instance_role"] = cvmInfo.InstanceRole
tempMap["instance_state"] = cvmInfo.InstanceState
tempMap["failed_reason"] = cvmInfo.FailedReason
tempMap["lan_ip"] = cvmInfo.LanIp

WorkersNewWorkerInstancesList = append(WorkersNewWorkerInstancesList, tempMap)
newWorkerInstancesList = append(newWorkerInstancesList, tempMap)
if cvmInfo.InstanceAdvancedSettings != nil {
if cvmInfo.InstanceAdvancedSettings.Labels != nil {
for _, v := range cvmInfo.InstanceAdvancedSettings.Labels {
WorkersLabelsMap[helper.PString(v.Name)] = helper.PString(v.Value)
labelsMap[helper.PString(v.Name)] = helper.PString(v.Value)
}
}

Expand Down Expand Up @@ -278,6 +226,10 @@ func clusterInstanceParamHandle(ctx context.Context, req *tke.DescribeClusterIns
}
}
}

ctxData.Set("newWorkerInstancesList", newWorkerInstancesList)
ctxData.Set("labelsMap", labelsMap)
ctxData.Set("instanceIds", instanceIds)
return nil
}

Expand All @@ -290,12 +242,14 @@ func resourceTencentCloudKubernetesScaleWorkerDeletePostRequest0(ctx context.Con

func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest0(ctx context.Context, req *tke.DescribeClustersRequest) error {
d := tccommon.ResourceDataFromContext(ctx)
ctxData := tccommon.DataFromContext(ctx)
items := strings.Split(d.Id(), tccommon.FILED_SP)

instanceMap := make(map[string]bool)
oldWorkerInstancesList := d.Get("worker_instances_list").([]interface{})
clusterId := ""
if importFlag1 {
GlobalClusterId = items[0]
clusterId = items[0]
if len(items[1:]) >= 2 {
return fmt.Errorf("only one additional configuration of virtual machines is now supported now, " +
"so should be 1")
Expand All @@ -305,13 +259,15 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest0(ctx context.C
}
oldWorkerInstancesList = append(oldWorkerInstancesList, infoMap)
} else {
GlobalClusterId = d.Get("cluster_id").(string)
clusterId = d.Get("cluster_id").(string)
}

if GlobalClusterId == "" {
if clusterId == "" {
return fmt.Errorf("tke.`cluster_id` is empty.")
}

ctxData.Set("clusterId", clusterId)

for _, v := range oldWorkerInstancesList {
infoMap, ok := v.(map[string]interface{})
if !ok || infoMap["instance_id"] == nil {
Expand All @@ -327,6 +283,8 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest0(ctx context.C
instanceMap[instanceId] = true
}

ctxData.Set("instanceMap", instanceMap)

return nil
}

Expand All @@ -338,7 +296,9 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostRequest0(ctx context.Conte
}

// resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest2 fills the CVM
// DescribeInstances request with the worker instance IDs that were collected
// into the shared context data (under key "instanceIds") by
// clusterInstanceParamHandle earlier in the read flow.
//
// NOTE(review): assumes "instanceIds" has already been stored as []*string in
// ctxData by a prior step — the type assertion panics otherwise; confirm the
// call ordering guarantees this.
func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest2(ctx context.Context, req *cvm.DescribeInstancesRequest) error {
	// Stale assignment from the removed package-level global
	// WorkersInstanceIds is dropped; the context-scoped value replaces it so
	// concurrent reads of different resources no longer share state.
	ctxData := tccommon.DataFromContext(ctx)
	instanceIds := ctxData.Get("instanceIds").([]*string)
	req.InstanceIds = instanceIds
	return nil
}

Expand Down Expand Up @@ -654,11 +614,6 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest1(ctx context.C
return err
}
log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())

if err := clusterInstanceParamHandle(ctx, request, response); err != nil {
return err
}

if response == nil || len(response.Response.InstanceSet) < 1 {
break
}
Expand All @@ -675,11 +630,45 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest1(ctx context.C
log.Printf("[WARN]%s resource `kubernetes_scale_worker` [%s] not found, please check if it has been deleted.\n", logId, d.Id())
return nil
}

has := map[string]bool{}
workers := make([]InstanceInfo, 0, len(instanceSet))
for _, item := range instanceSet {
if has[*item.InstanceId] {
return fmt.Errorf("get repeated instance_id[%s] when doing DescribeClusterInstances", *item.InstanceId)
}
has[*item.InstanceId] = true
instanceInfo := InstanceInfo{
InstanceId: *item.InstanceId,
InstanceRole: *item.InstanceRole,
InstanceState: *item.InstanceState,
FailedReason: *item.FailedReason,
InstanceAdvancedSettings: item.InstanceAdvancedSettings,
}
if item.CreatedTime != nil {
instanceInfo.CreatedTime = *item.CreatedTime
}
if item.NodePoolId != nil {
instanceInfo.NodePoolId = *item.NodePoolId
}
if item.LanIP != nil {
instanceInfo.LanIp = *item.LanIP
}
if instanceInfo.InstanceRole == TKE_ROLE_WORKER {
workers = append(workers, instanceInfo)
}
}

if err := clusterInstanceParamHandle(ctx, workers); err != nil {
return err
}

return nil
}

// resourceTencentCloudKubernetesScaleWorkerReadPreRequest1 sets the instance
// ID filter on the CVM DescribeInstances request before it is sent, reading
// the IDs from the shared context data (key "instanceIds") populated by
// clusterInstanceParamHandle.
//
// NOTE(review): the type assertion assumes "instanceIds" was stored as
// []*string earlier in the same read; it panics if that step was skipped —
// confirm the hook ordering.
func resourceTencentCloudKubernetesScaleWorkerReadPreRequest1(ctx context.Context, req *cvm.DescribeInstancesRequest) error {
	// The leftover reference to the deleted global WorkersInstanceIds is
	// removed; only the context-scoped slice is used, keeping this function
	// free of mutable package-level state.
	ctxData := tccommon.DataFromContext(ctx)
	instanceIds := ctxData.Get("instanceIds").([]*string)
	req.InstanceIds = instanceIds
	return nil
}
2 changes: 1 addition & 1 deletion website/docs/r/kubernetes_scale_worker.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -215,6 +215,6 @@ In addition to all arguments above, the following attributes are exported:
tke scale worker can be imported, e.g.

```
$ terraform import tencentcloud_kubernetes_scale_worker.example cls-mij6c2pq#ins-n6esjkdi,ins-9h3rdxt8,ins-qretqeas
$ terraform import tencentcloud_kubernetes_scale_worker.example cls-mij6c2pq#ins-n6esjkdi
```

Loading