
Commit 03a9643

feat(tke): [120076624] tencentcloud_kubernetes_scale_worker Fix import function (#2891)
* add
* add
* add
1 parent 6a7d059 commit 03a9643

4 files changed: +74 −82 lines


.changelog/2891.txt

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/tencentcloud_kubernetes_scale_worker: Fix import function
+```

tencentcloud/services/tke/resource_tc_kubernetes_scale_worker.md

Lines changed: 1 addition & 1 deletion
@@ -112,5 +112,5 @@ Import
 tke scale worker can be imported, e.g.

 ```
-$ terraform import tencentcloud_kubernetes_scale_worker.example cls-mij6c2pq#ins-n6esjkdi,ins-9h3rdxt8,ins-qretqeas
+$ terraform import tencentcloud_kubernetes_scale_worker.example cls-mij6c2pq#ins-n6esjkdi
 ```
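The updated import ID carries the cluster ID and a single worker instance ID joined by `#`, matching the check added in the resource's import path that rejects more than one instance section. Below is a minimal, standalone sketch of that validation; the helper name and the literal `#` separator are illustrative assumptions standing in for the provider's own split on `tccommon.FILED_SP`.

```go
package main

import (
	"fmt"
	"strings"
)

// parseScaleWorkerImportID splits an import ID of the form
// "<cluster_id>#<instance_id>" and rejects IDs carrying more than one
// instance section, mirroring the constraint introduced in this commit.
// The "#" separator is an assumption; the provider uses tccommon.FILED_SP.
func parseScaleWorkerImportID(id string) (string, string, error) {
	items := strings.Split(id, "#")
	if len(items) != 2 || items[0] == "" || items[1] == "" {
		return "", "", fmt.Errorf("invalid import ID %q, expected <cluster_id>#<instance_id>", id)
	}
	return items[0], items[1], nil
}

func main() {
	clusterId, instanceId, err := parseScaleWorkerImportID("cls-mij6c2pq#ins-n6esjkdi")
	if err != nil {
		panic(err)
	}
	fmt.Println(clusterId, instanceId) // cls-mij6c2pq ins-n6esjkdi
}
```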

tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_extension.go

Lines changed: 69 additions & 80 deletions
@@ -22,20 +22,6 @@ import (

 var importFlag1 = false

-var GlobalClusterId string
-var CreateClusterInstancesVpcId string
-var CreateClusterInstancesProjectId int64
-var WorkersInstanceIds []*string
-var WorkersNewWorkerInstancesList []map[string]interface{}
-var WorkersLabelsMap map[string]string
-
-func init() {
-	// need to support append by multiple calls when the paging occurred
-	WorkersNewWorkerInstancesList = make([]map[string]interface{}, 0)
-	WorkersLabelsMap = make(map[string]string)
-	WorkersInstanceIds = make([]*string, 0)
-}
-
 func customScaleWorkerResourceImporter(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) {
 	importFlag1 = true
 	err := resourceTencentCloudKubernetesScaleWorkerRead(d, m)
@@ -48,6 +34,7 @@ func customScaleWorkerResourceImporter(ctx context.Context, d *schema.ResourceDa

 func resourceTencentCloudKubernetesScaleWorkerReadPostRequest1(ctx context.Context, req *cvm.DescribeInstancesRequest, resp *cvm.DescribeInstancesResponse) error {
 	d := tccommon.ResourceDataFromContext(ctx)
+	ctxData := tccommon.DataFromContext(ctx)

 	instances := make([]*cvm.Instance, 0)
 	instances = append(instances, resp.Response.InstanceSet...)
@@ -117,85 +104,46 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostRequest1(ctx context.Conte
 		_ = d.Set("worker_config", instanceList)
 	}

+	clusterId := ctxData.Get("clusterId").(string)
+	newWorkerInstancesList := ctxData.Get("newWorkerInstancesList").([]map[string]interface{})
+	labelsMap := ctxData.Get("labelsMap").(map[string]string)
 	// The machines I generated was deleted by others.
-	if len(WorkersNewWorkerInstancesList) == 0 {
+	if len(newWorkerInstancesList) == 0 {
 		d.SetId("")
 		return nil
 	}

-	_ = d.Set("cluster_id", GlobalClusterId)
-	_ = d.Set("labels", WorkersLabelsMap)
-	_ = d.Set("worker_instances_list", WorkersNewWorkerInstancesList)
+	_ = d.Set("cluster_id", clusterId)
+	_ = d.Set("labels", labelsMap)
+	_ = d.Set("worker_instances_list", newWorkerInstancesList)

 	return nil
 }
-func clusterInstanceParamHandle(ctx context.Context, req *tke.DescribeClusterInstancesRequest, resp *tke.DescribeClusterInstancesResponse) error {
+func clusterInstanceParamHandle(ctx context.Context, workers []InstanceInfo) error {
 	d := tccommon.ResourceDataFromContext(ctx)
-	var has = map[string]bool{}
-
-	workerInstancesList := d.Get("worker_instances_list").([]interface{})
-	instanceMap := make(map[string]bool)
-	for _, v := range workerInstancesList {
-		infoMap, ok := v.(map[string]interface{})
-		if !ok || infoMap["instance_id"] == nil {
-			return fmt.Errorf("worker_instances_list is broken.")
-		}
-
-		instanceId, ok := infoMap["instance_id"].(string)
-		if !ok || instanceId == "" {
-			return fmt.Errorf("worker_instances_list.instance_id is broken.")
-		}
-
-		if instanceMap[instanceId] {
-			log.Printf("[WARN]The same instance id exists in the list")
-		}
-
-		instanceMap[instanceId] = true
-	}
-	workers := make([]InstanceInfo, 0, 100)
-	for _, item := range resp.Response.InstanceSet {
-		if has[*item.InstanceId] {
-			return fmt.Errorf("get repeated instance_id[%s] when doing DescribeClusterInstances", *item.InstanceId)
-		}
-		has[*item.InstanceId] = true
-		instanceInfo := InstanceInfo{
-			InstanceId:               *item.InstanceId,
-			InstanceRole:             *item.InstanceRole,
-			InstanceState:            *item.InstanceState,
-			FailedReason:             *item.FailedReason,
-			InstanceAdvancedSettings: item.InstanceAdvancedSettings,
-		}
-		if item.CreatedTime != nil {
-			instanceInfo.CreatedTime = *item.CreatedTime
-		}
-		if item.NodePoolId != nil {
-			instanceInfo.NodePoolId = *item.NodePoolId
-		}
-		if item.LanIP != nil {
-			instanceInfo.LanIp = *item.LanIP
-		}
-		if instanceInfo.InstanceRole == TKE_ROLE_WORKER {
-			workers = append(workers, instanceInfo)
-		}
-	}
+	ctxData := tccommon.DataFromContext(ctx)

+	newWorkerInstancesList := make([]map[string]interface{}, 0, len(workers))
+	labelsMap := make(map[string]string)
+	instanceIds := make([]*string, 0)
+	instanceMap := ctxData.Get("instanceMap").(map[string]bool)
 	for sub, cvmInfo := range workers {
 		if _, ok := instanceMap[cvmInfo.InstanceId]; !ok {
 			continue
 		}
-		WorkersInstanceIds = append(WorkersInstanceIds, &workers[sub].InstanceId)
+		instanceIds = append(instanceIds, &workers[sub].InstanceId)
 		tempMap := make(map[string]interface{})
 		tempMap["instance_id"] = cvmInfo.InstanceId
 		tempMap["instance_role"] = cvmInfo.InstanceRole
 		tempMap["instance_state"] = cvmInfo.InstanceState
 		tempMap["failed_reason"] = cvmInfo.FailedReason
 		tempMap["lan_ip"] = cvmInfo.LanIp

-		WorkersNewWorkerInstancesList = append(WorkersNewWorkerInstancesList, tempMap)
+		newWorkerInstancesList = append(newWorkerInstancesList, tempMap)
 		if cvmInfo.InstanceAdvancedSettings != nil {
 			if cvmInfo.InstanceAdvancedSettings.Labels != nil {
 				for _, v := range cvmInfo.InstanceAdvancedSettings.Labels {
-					WorkersLabelsMap[helper.PString(v.Name)] = helper.PString(v.Value)
+					labelsMap[helper.PString(v.Name)] = helper.PString(v.Value)
 				}
 			}

@@ -278,6 +226,10 @@ func clusterInstanceParamHandle(ctx context.Context, req *tke.DescribeClusterIns
 			}
 		}
 	}
+
+	ctxData.Set("newWorkerInstancesList", newWorkerInstancesList)
+	ctxData.Set("labelsMap", labelsMap)
+	ctxData.Set("instanceIds", instanceIds)
 	return nil
 }


@@ -290,12 +242,14 @@ func resourceTencentCloudKubernetesScaleWorkerDeletePostRequest0(ctx context.Con

 func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest0(ctx context.Context, req *tke.DescribeClustersRequest) error {
 	d := tccommon.ResourceDataFromContext(ctx)
+	ctxData := tccommon.DataFromContext(ctx)
 	items := strings.Split(d.Id(), tccommon.FILED_SP)

 	instanceMap := make(map[string]bool)
 	oldWorkerInstancesList := d.Get("worker_instances_list").([]interface{})
+	clusterId := ""
 	if importFlag1 {
-		GlobalClusterId = items[0]
+		clusterId = items[0]
 		if len(items[1:]) >= 2 {
 			return fmt.Errorf("only one additional configuration of virtual machines is now supported now, " +
 				"so should be 1")
@@ -305,13 +259,15 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest0(ctx context.C
 		}
 		oldWorkerInstancesList = append(oldWorkerInstancesList, infoMap)
 	} else {
-		GlobalClusterId = d.Get("cluster_id").(string)
+		clusterId = d.Get("cluster_id").(string)
 	}

-	if GlobalClusterId == "" {
+	if clusterId == "" {
 		return fmt.Errorf("tke.`cluster_id` is empty.")
 	}

+	ctxData.Set("clusterId", clusterId)
+
 	for _, v := range oldWorkerInstancesList {
 		infoMap, ok := v.(map[string]interface{})
 		if !ok || infoMap["instance_id"] == nil {
@@ -327,6 +283,8 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest0(ctx context.C
 		instanceMap[instanceId] = true
 	}

+	ctxData.Set("instanceMap", instanceMap)
+
 	return nil
 }


@@ -338,7 +296,9 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostRequest0(ctx context.Conte
 }

 func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest2(ctx context.Context, req *cvm.DescribeInstancesRequest) error {
-	req.InstanceIds = WorkersInstanceIds
+	ctxData := tccommon.DataFromContext(ctx)
+	instanceIds := ctxData.Get("instanceIds").([]*string)
+	req.InstanceIds = instanceIds
 	return nil
 }

@@ -666,11 +626,6 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest1(ctx context.C
 			return err
 		}
 		log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
-
-		if err := clusterInstanceParamHandle(ctx, request, response); err != nil {
-			return err
-		}
-
 		if response == nil || len(response.Response.InstanceSet) < 1 {
 			break
 		}
@@ -687,11 +642,45 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest1(ctx context.C
 		log.Printf("[WARN]%s resource `kubernetes_scale_worker` [%s] not found, please check if it has been deleted.\n", logId, d.Id())
 		return nil
 	}
+
+	has := map[string]bool{}
+	workers := make([]InstanceInfo, 0, len(instanceSet))
+	for _, item := range instanceSet {
+		if has[*item.InstanceId] {
+			return fmt.Errorf("get repeated instance_id[%s] when doing DescribeClusterInstances", *item.InstanceId)
+		}
+		has[*item.InstanceId] = true
+		instanceInfo := InstanceInfo{
+			InstanceId:               *item.InstanceId,
+			InstanceRole:             *item.InstanceRole,
+			InstanceState:            *item.InstanceState,
+			FailedReason:             *item.FailedReason,
+			InstanceAdvancedSettings: item.InstanceAdvancedSettings,
+		}
+		if item.CreatedTime != nil {
+			instanceInfo.CreatedTime = *item.CreatedTime
+		}
+		if item.NodePoolId != nil {
+			instanceInfo.NodePoolId = *item.NodePoolId
+		}
+		if item.LanIP != nil {
+			instanceInfo.LanIp = *item.LanIP
+		}
+		if instanceInfo.InstanceRole == TKE_ROLE_WORKER {
+			workers = append(workers, instanceInfo)
+		}
+	}
+
+	if err := clusterInstanceParamHandle(ctx, workers); err != nil {
+		return err
+	}
+
 	return nil
 }

 func resourceTencentCloudKubernetesScaleWorkerReadPreRequest1(ctx context.Context, req *cvm.DescribeInstancesRequest) error {
-	req.InstanceIds = WorkersInstanceIds
-
+	ctxData := tccommon.DataFromContext(ctx)
+	instanceIds := ctxData.Get("instanceIds").([]*string)
+	req.InstanceIds = instanceIds
 	return nil
 }
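The core of the fix is visible above: package-level variables (GlobalClusterId, WorkersInstanceIds, WorkersNewWorkerInstancesList, WorkersLabelsMap) that leaked state between calls are replaced by values stored on the request context via ctxData.Set/Get, so each read or import gets its own scratch space. Below is a minimal standalone sketch of that pattern, assuming a simple map-backed carrier; it does not reproduce the actual tccommon.DataFromContext implementation, and all names in it are illustrative.

```go
package main

import (
	"context"
	"fmt"
)

// ctxData is an illustrative stand-in for the provider's per-call data
// carrier: a mutable bag attached to the context so helpers invoked during
// one Read/import share state without package-level globals.
type ctxData struct{ values map[string]interface{} }

func (c *ctxData) Set(key string, v interface{}) { c.values[key] = v }
func (c *ctxData) Get(key string) interface{}    { return c.values[key] }

type ctxKey struct{}

// withData attaches a fresh data bag to the context for one logical call.
func withData(ctx context.Context) context.Context {
	return context.WithValue(ctx, ctxKey{}, &ctxData{values: map[string]interface{}{}})
}

// dataFromContext mirrors how the diff uses tccommon.DataFromContext.
func dataFromContext(ctx context.Context) *ctxData {
	return ctx.Value(ctxKey{}).(*ctxData)
}

// fillRequest plays the role of ReadPostFillRequest0: it records the
// cluster ID for later steps instead of writing a global variable.
func fillRequest(ctx context.Context, clusterId string) {
	dataFromContext(ctx).Set("clusterId", clusterId)
}

// postRequest plays the role of ReadPostRequest1: it reads back the value
// stashed earlier within the same call.
func postRequest(ctx context.Context) string {
	return dataFromContext(ctx).Get("clusterId").(string)
}

func main() {
	// Two independent "reads" no longer interfere with each other.
	ctxA, ctxB := withData(context.Background()), withData(context.Background())
	fillRequest(ctxA, "cls-mij6c2pq")
	fillRequest(ctxB, "cls-other")
	fmt.Println(postRequest(ctxA), postRequest(ctxB)) // cls-mij6c2pq cls-other
}
```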

website/docs/r/kubernetes_scale_worker.html.markdown

Lines changed: 1 addition & 1 deletion
@@ -220,6 +220,6 @@ In addition to all arguments above, the following attributes are exported:
 tke scale worker can be imported, e.g.

 ```
-$ terraform import tencentcloud_kubernetes_scale_worker.example cls-mij6c2pq#ins-n6esjkdi,ins-9h3rdxt8,ins-qretqeas
+$ terraform import tencentcloud_kubernetes_scale_worker.example cls-mij6c2pq#ins-n6esjkdi
 ```
