|
6 | 6 | "log"
|
7 | 7 | "strings"
|
8 | 8 |
|
| 9 | + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" |
9 | 10 | tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http"
|
10 | 11 |
|
11 | 12 | as "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/as/v20180419"
|
@@ -54,6 +55,11 @@ func resourceTencentCloudKubernetesNodePoolCreatePostFillRequest0(ctx context.Co
|
54 | 55 |         return fmt.Errorf("need only one auto_scaling_config")

55 | 56 |     }

56 | 57 |

| 58 | +    // validate parameter combinations |
| 59 | +    if err := checkParams(ctx); err != nil { |
| 60 | +        return err |
| 61 | +    } |
| 62 | + |
57 | 63 |     groupParaStr, err := composeParameterToAsScalingGroupParaSerial(d)

58 | 64 |     if err != nil {

59 | 65 |         return err
|
@@ -153,6 +159,11 @@ func resourceTencentCloudKubernetesNodePoolCreatePostHandleResponse0(ctx context
|
153 | 159 |         return err

154 | 160 |     }

155 | 161 |

| 162 | +    // wait for node pool scaling to complete |
| 163 | +    if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId); err != nil { |
| 164 | +        return err |
| 165 | +    } |
| 166 | + |
156 | 167 |     return nil

157 | 168 | }
|
158 | 169 |
|
@@ -556,6 +567,11 @@ func resourceTencentCloudKubernetesNodePoolUpdateOnStart(ctx context.Context) er
|
556 | 567 |     clusterId := items[0]

557 | 568 |     nodePoolId := items[1]

558 | 569 |

| 570 | +    // validate parameter combinations |
| 571 | +    if err := checkParams(ctx); err != nil { |
| 572 | +        return err |
| 573 | +    } |
| 574 | + |
559 | 575 |     d.Partial(true)

560 | 576 |

561 | 577 |     nodePool, _, err := service.DescribeNodePool(ctx, clusterId, nodePoolId)
|
@@ -614,6 +630,11 @@ func resourceTencentCloudKubernetesNodePoolUpdateOnStart(ctx context.Context) er
|
614 | 630 |             return err

615 | 631 |         }

616 | 632 |         capacityHasChanged = true

| 633 | + |
| 634 | +        // wait for node pool scaling to complete |
| 635 | +        if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId); err != nil { |
| 636 | +            return err |
| 637 | +        } |
617 | 638 |     }
|
618 | 639 |
|
619 | 640 | // ModifyClusterNodePool
|
@@ -697,6 +718,11 @@ func resourceTencentCloudKubernetesNodePoolUpdateOnStart(ctx context.Context) er
|
697 | 718 |         if err != nil {

698 | 719 |             return err

699 | 720 |         }

| 721 | + |
| 722 | +        // wait for node pool scaling to complete |
| 723 | +        if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId); err != nil { |
| 724 | +            return err |
| 725 | +        } |
700 | 726 |     }
|
701 | 727 |
|
702 | 728 |     return nil
|
@@ -1351,3 +1377,137 @@ func resourceTencentCloudKubernetesNodePoolUpdateTaints(ctx context.Context, clu
|
1351 | 1377 |     }

1352 | 1378 |     return nil

1353 | 1379 | }
|
| 1380 | + |
| 1381 | +func checkParams(ctx context.Context) error { |
| 1382 | +    d := tccommon.ResourceDataFromContext(ctx) |
| 1383 | +    var ( |
| 1384 | +        enableAutoscale bool |
| 1385 | +        waitNodeReady   bool |
| 1386 | +    ) |
| 1387 | + |
| 1388 | +    if v, ok := d.GetOkExists("enable_auto_scale"); ok { |
| 1389 | +        enableAutoscale = v.(bool) |
| 1390 | +    } |
| 1391 | + |
| 1392 | +    if v, ok := d.GetOkExists("wait_node_ready"); ok { |
| 1393 | +        waitNodeReady = v.(bool) |
| 1394 | +    } |
| 1395 | + |
| 1396 | +    if enableAutoscale && waitNodeReady { |
| 1397 | +        return fmt.Errorf("`wait_node_ready` can only be set if `enable_auto_scale` is `false`") |
| 1398 | +    } |
| 1399 | + |
| 1400 | +    if _, ok := d.GetOkExists("scale_tolerance"); ok { |
| 1401 | +        if !waitNodeReady { |
| 1402 | +            return fmt.Errorf("`scale_tolerance` can only be set if `wait_node_ready` is `true`") |
| 1403 | +        } |
| 1404 | +    } |
| 1405 | + |
| 1406 | +    return nil |
| 1407 | +} |
| 1408 | + |
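To make the validation matrix concrete, here is a standalone sketch (not part of this diff; validateFlags and its cases are hypothetical stand-ins for the schema lookups in checkParams):

package main

import "fmt"

// validateFlags mirrors the rules checkParams enforces through
// schema.ResourceData, on plain values: wait_node_ready requires
// enable_auto_scale to be false, and scale_tolerance requires
// wait_node_ready to be true.
func validateFlags(enableAutoscale, waitNodeReady, toleranceSet bool) error {
    if enableAutoscale && waitNodeReady {
        return fmt.Errorf("`wait_node_ready` can only be set if `enable_auto_scale` is `false`")
    }
    if toleranceSet && !waitNodeReady {
        return fmt.Errorf("`scale_tolerance` can only be set if `wait_node_ready` is `true`")
    }
    return nil
}

func main() {
    cases := []struct{ autoscale, wait, tolerance bool }{
        {false, true, true},  // ok: wait for nodes, tolerate partial success
        {true, true, false},  // rejected: autoscaling and waiting conflict
        {false, false, true}, // rejected: tolerance without waiting
    }
    for _, c := range cases {
        fmt.Printf("%+v -> %v\n", c, validateFlags(c.autoscale, c.wait, c.tolerance))
    }
}

Run as-is, the first case passes and the other two reproduce the two error messages above.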
| 1409 | +func waitNodePoolInitializing(ctx context.Context, clusterId, nodePoolId string) (err error) { |
| 1410 | +    d := tccommon.ResourceDataFromContext(ctx) |
| 1411 | +    meta := tccommon.ProviderMetaFromContext(ctx) |
| 1412 | + |
| 1413 | +    var ( |
| 1414 | +        currentNormal      int64 |
| 1415 | +        desiredCapacity    int64 |
| 1416 | +        waitNodeReady      bool |
| 1417 | +        scaleTolerance     int64 = 100 |
| 1418 | +        autoscalingGroupId string |
| 1419 | +    ) |
| 1420 | + |
| 1421 | +    if v, ok := d.GetOkExists("wait_node_ready"); ok { |
| 1422 | +        waitNodeReady = v.(bool) |
| 1423 | +    } |
| 1424 | + |
| 1425 | +    if waitNodeReady { |
| 1426 | +        if v, ok := d.GetOkExists("desired_capacity"); ok { |
| 1427 | +            desiredCapacity = int64(v.(int)) |
| 1428 | +            if desiredCapacity == 0 { |
| 1429 | +                desiredCapacity = 1 |
| 1430 | +            } |
| 1431 | +        } |
| 1432 | + |
| 1433 | +        if v, ok := d.GetOkExists("scale_tolerance"); ok { |
| 1434 | +            scaleTolerance = int64(v.(int)) |
| 1435 | +        } |
| 1436 | + |
| 1437 | +        logId := tccommon.GetLogId(tccommon.ContextNil) |
| 1438 | +        nodePoolDetailRequest := tke.NewDescribeClusterNodePoolDetailRequest() |
| 1439 | +        nodePoolDetailRequest.ClusterId = common.StringPtr(clusterId) |
| 1440 | +        nodePoolDetailRequest.NodePoolId = common.StringPtr(nodePoolId) |
| 1441 | +        err = resource.Retry(1*tccommon.ReadRetryTimeout, func() *resource.RetryError { |
| 1442 | +            result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DescribeClusterNodePoolDetailWithContext(ctx, nodePoolDetailRequest) |
| 1443 | +            if e != nil { |
| 1444 | +                return tccommon.RetryError(e) |
| 1445 | +            } else { |
| 1446 | +                log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, nodePoolDetailRequest.GetAction(), nodePoolDetailRequest.ToJsonString(), result.ToJsonString()) |
| 1447 | +            } |
| 1448 | + |
| 1449 | +            if result == nil || result.Response == nil || result.Response.NodePool == nil || result.Response.NodePool.NodeCountSummary == nil || result.Response.NodePool.NodeCountSummary.AutoscalingAdded == nil { |
| 1450 | +                e = fmt.Errorf("cluster %s node pool %s does not exist", clusterId, nodePoolId) |
| 1451 | +                return resource.NonRetryableError(e) |
| 1452 | +            } |
| 1453 | + |
| 1454 | +            desiredNodesNum := result.Response.NodePool.DesiredNodesNum |
| 1455 | +            autoscalingAdded := result.Response.NodePool.NodeCountSummary.AutoscalingAdded |
| 1456 | +            total := autoscalingAdded.Total |
| 1457 | +            normal := autoscalingAdded.Normal |
| 1458 | +            if *total != 0 { |
| 1459 | +                if *normal > *desiredNodesNum { |
| 1460 | +                    return resource.RetryableError(fmt.Errorf("node pool is still scaling")) |
| 1461 | +                } |
| 1462 | + |
| 1463 | +                currentTolerance := int64((float64(*normal) / float64(*desiredNodesNum)) * 100) |
| 1464 | +                if currentTolerance >= scaleTolerance || *desiredNodesNum == *normal { |
| 1465 | +                    return nil |
| 1466 | +                } |
| 1467 | +            } |
| 1468 | + |
| 1469 | +            currentNormal = *normal |
| 1470 | +            autoscalingGroupId = *result.Response.NodePool.AutoscalingGroupId |
| 1471 | +            return resource.RetryableError(fmt.Errorf("node pool is still scaling")) |
| 1472 | +        }) |
| 1473 | + |
| 1474 | +        if err != nil { |
| 1475 | +            if currentNormal < 1 { |
| 1476 | +                var errFmt string |
| 1477 | +                asRequest := as.NewDescribeAutoScalingActivitiesRequest() |
| 1478 | +                asRequest.Filters = []*as.Filter{ |
| 1479 | +                    { |
| 1480 | +                        Name:   common.StringPtr("auto-scaling-group-id"), |
| 1481 | +                        Values: common.StringPtrs([]string{autoscalingGroupId}), |
| 1482 | +                    }, |
| 1483 | +                } |
| 1484 | + |
| 1485 | +                err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { |
| 1486 | +                    result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseAsClient().DescribeAutoScalingActivitiesWithContext(ctx, asRequest) |
| 1487 | +                    if e != nil { |
| 1488 | +                        return tccommon.RetryError(e) |
| 1489 | +                    } else { |
| 1490 | +                        log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, asRequest.GetAction(), asRequest.ToJsonString(), result.ToJsonString()) |
| 1491 | +                    } |
| 1492 | + |
| 1493 | +                    if result == nil || result.Response == nil || result.Response.ActivitySet == nil || len(result.Response.ActivitySet) < 1 { |
| 1494 | +                        e = fmt.Errorf("describe auto scaling activities failed") |
| 1495 | +                        return resource.NonRetryableError(e) |
| 1496 | +                    } |
| 1497 | + |
| 1498 | +                    res := result.Response.ActivitySet[0] |
| 1499 | +                    errFmt = fmt.Sprintf("%s\nDescription: %s\nStatusMessage: %s", *res.StatusMessageSimplified, *res.Description, *res.StatusMessage) |
| 1500 | +                    return nil |
| 1501 | +                }) |
| 1502 | +                if err != nil { |
| 1503 | +                    return err |
| 1504 | +                } |
| 1505 | +                return fmt.Errorf("node pool scaling failed, reason: %s\nPlease check your resource inventory, or adjust `desired_capacity`, `scale_tolerance` and `instance_type`, then try again.", errFmt) |
| 1506 | +            } else { |
| 1507 | +                return fmt.Errorf("node pool scaling failed, desired value: %d, actual value: %d, scale tolerance: %d%%\nPlease check your resource inventory, or adjust `desired_capacity`, `scale_tolerance` and `instance_type`, then try again.", desiredCapacity, currentNormal, scaleTolerance) |
| 1508 | +            } |
| 1509 | +        } |
| 1510 | +    } |
| 1511 | + |
| 1512 | +    return nil |
| 1513 | +} |
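A standalone sketch of the acceptance arithmetic in the retry loop above (toleranceReached is a hypothetical stand-in; the provider reads the same Normal and DesiredNodesNum counts from DescribeClusterNodePoolDetail):

package main

import "fmt"

// toleranceReached mirrors the acceptance test at lines 1463-1464: the pool
// counts as ready once the integer-truncated percentage of Normal nodes
// reaches scaleTolerance, or once normal equals desired exactly.
func toleranceReached(normal, desired, scaleTolerance int64) bool {
    currentTolerance := int64(float64(normal) / float64(desired) * 100)
    return currentTolerance >= scaleTolerance || normal == desired
}

func main() {
    // desired_capacity = 10, scale_tolerance = 80: the retry loop stops
    // waiting at 8 Normal nodes (8/10*100 = 80 >= 80); the default
    // tolerance of 100 would keep waiting until all 10 are Normal.
    for normal := int64(6); normal <= 10; normal++ {
        fmt.Printf("normal=%d/10 tolerance=80 ready=%v\n", normal, toleranceReached(normal, 10, 80))
    }
}

With the default scale_tolerance of 100, only a fully Normal pool passes; a lower tolerance lets the apply succeed on a partially scaled pool, and a shortfall that never reaches tolerance is surfaced through the scaling-activity message built above.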