
Commit b827da2

Commit message: add
1 parent a20782e, commit b827da2

4 files changed, +310 -0 lines changed

tencentcloud/services/tke/resource_tc_kubernetes_node_pool.go

Lines changed: 13 additions & 0 deletions
Some generated files are not rendered by default.
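The rendered diff for this file is hidden, but based on the documentation and extension code below, it plausibly adds the two new arguments to the resource schema. The following is a hypothetical sketch only, using the Terraform Plugin SDK: the field names and value types follow from how the extension code reads them (`bool` and `int`), while `Optional` and the descriptions are assumptions.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Hypothetical sketch of the new node pool arguments; not the actual diff,
// which is not rendered above.
var nodePoolScalingArgs = map[string]*schema.Schema{
	"wait_node_ready": {
		Type:        schema.TypeBool,
		Optional:    true,
		Description: "Whether to wait for all desired nodes to become ready; only usable when `enable_auto_scale` is `false`.",
	},
	"scale_tolerance": {
		Type:        schema.TypeInt,
		Optional:    true,
		Description: "Minimum percentage of ready nodes for a scale-out to count as successful; only usable when `wait_node_ready` is `true`.",
	},
}

func main() {
	fmt.Println(len(nodePoolScalingArgs), "arguments sketched")
}
```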

tencentcloud/services/tke/resource_tc_kubernetes_node_pool.md

Lines changed: 68 additions & 0 deletions
@@ -6,6 +6,8 @@ Provide a resource to create an auto scaling group for kubernetes cluster.
~> **NOTE:** In order to ensure the integrity of customer data, if a CVM instance is destroyed by a scale-in, the CBS disks associated with it are kept by default. If you want them to be destroyed together with the instance, set `delete_with_instance` to `true`.

~> **NOTE:** The parameters `wait_node_ready` and `scale_tolerance` give finer control over node pool scaling operations. If they are set and the scaling criteria are not met while the resource is being created, the resource will be marked as `tainted`.

Example Usage

```hcl
@@ -143,6 +145,72 @@ resource "tencentcloud_kubernetes_node_pool" "example" {
}
```

Set `wait_node_ready` and `scale_tolerance`
```hcl
resource "tencentcloud_kubernetes_node_pool" "example" {
  name                     = "tf-example"
  cluster_id               = tencentcloud_kubernetes_cluster.managed_cluster.id
  max_size                 = 100
  min_size                 = 1
  vpc_id                   = data.tencentcloud_vpc_subnets.vpc.instance_list.0.vpc_id
  subnet_ids               = [data.tencentcloud_vpc_subnets.vpc.instance_list.0.subnet_id]
  retry_policy             = "INCREMENTAL_INTERVALS"
  desired_capacity         = 50
  enable_auto_scale        = false
  wait_node_ready          = true
  scale_tolerance          = 90
  multi_zone_subnet_policy = "EQUALITY"
  node_os                  = "img-6n21msk1"
  delete_keep_instance     = false

  auto_scaling_config {
    instance_type              = var.default_instance_type
    system_disk_type           = "CLOUD_PREMIUM"
    system_disk_size           = "50"
    orderly_security_group_ids = ["sg-bw28gmso"]

    data_disk {
      disk_type            = "CLOUD_PREMIUM"
      disk_size            = 50
      delete_with_instance = true
    }

    internet_charge_type       = "TRAFFIC_POSTPAID_BY_HOUR"
    internet_max_bandwidth_out = 10
    public_ip_assigned         = true
    password                   = "test123#"
    enhanced_security_service  = false
    enhanced_monitor_service   = false
    host_name                  = "12.123.0.0"
    host_name_style            = "ORIGINAL"
  }

  labels = {
    "test1" = "test1",
    "test2" = "test2",
  }

  taints {
    key    = "test_taint"
    value  = "taint_value"
    effect = "PreferNoSchedule"
  }

  taints {
    key    = "test_taint2"
    value  = "taint_value2"
    effect = "PreferNoSchedule"
  }

  node_config {
    docker_graph_path = "/var/lib/docker"
    extra_args = [
      "root-dir=/var/lib/kubelet"
    ]
  }
}
```
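As an aside (not part of the provider documentation), the numbers in this example work out as follows: the tolerance check in `waitNodePoolInitializing` (shown further below) computes `normal / desired * 100` as an integer percentage, so with `desired_capacity = 50` and `scale_tolerance = 90` the apply succeeds once at least 45 nodes are `Normal`; otherwise the node pool is created but marked as tainted. A standalone sketch of that arithmetic:

```go
package main

import "fmt"

// Standalone illustration of the scale-tolerance arithmetic used by
// waitNodePoolInitializing; the values are taken from the example above.
func main() {
	desired := int64(50)        // desired_capacity
	scaleTolerance := int64(90) // scale_tolerance

	for _, normal := range []int64{40, 45, 50} {
		currentTolerance := int64((float64(normal) / float64(desired)) * 100)
		satisfied := currentTolerance >= scaleTolerance || normal == desired
		fmt.Printf("normal=%d -> reached %d%%, satisfied=%v\n", normal, currentTolerance, satisfied)
	}
}
```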
Import

tke node pool can be imported, e.g.

tencentcloud/services/tke/resource_tc_kubernetes_node_pool_extension.go

Lines changed: 158 additions & 0 deletions
@@ -6,6 +6,7 @@ import (
    "log"
    "strings"

    "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
    tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http"

    as "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/as/v20180419"
@@ -54,6 +55,11 @@ func resourceTencentCloudKubernetesNodePoolCreatePostFillRequest0(ctx context.Co
        return fmt.Errorf("need only one auto_scaling_config")
    }

    // check params
    if err := checkParams(ctx); err != nil {
        return err
    }

    groupParaStr, err := composeParameterToAsScalingGroupParaSerial(d)
    if err != nil {
        return err
@@ -153,6 +159,11 @@ func resourceTencentCloudKubernetesNodePoolCreatePostHandleResponse0(ctx context
        return err
    }

    // wait node scaling
    if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId); err != nil {
        return err
    }

    return nil
}

@@ -556,6 +567,11 @@ func resourceTencentCloudKubernetesNodePoolUpdateOnStart(ctx context.Context) er
    clusterId := items[0]
    nodePoolId := items[1]

    // check params
    if err := checkParams(ctx); err != nil {
        return err
    }

    d.Partial(true)

    nodePool, _, err := service.DescribeNodePool(ctx, clusterId, nodePoolId)
@@ -614,6 +630,11 @@ func resourceTencentCloudKubernetesNodePoolUpdateOnStart(ctx context.Context) er
            return err
        }
        capacityHasChanged = true

        // wait node scaling
        if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId); err != nil {
            return err
        }
    }

    // ModifyClusterNodePool
@@ -697,6 +718,11 @@ func resourceTencentCloudKubernetesNodePoolUpdateOnStart(ctx context.Context) er
        if err != nil {
            return err
        }

        // wait node scaling
        if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId); err != nil {
            return err
        }
    }

    return nil
@@ -1351,3 +1377,135 @@ func resourceTencentCloudKubernetesNodePoolUpdateTaints(ctx context.Context, clu
    }
    return nil
}

// checkParams validates that `wait_node_ready` is only used while auto scaling
// is disabled, and that `scale_tolerance` is only used together with `wait_node_ready`.
func checkParams(ctx context.Context) error {
    d := tccommon.ResourceDataFromContext(ctx)
    var (
        enableAutoscale bool
        waitNodeReady   bool
    )

    if v, ok := d.GetOkExists("enable_auto_scale"); ok {
        enableAutoscale = v.(bool)
    }

    if v, ok := d.GetOkExists("wait_node_ready"); ok {
        waitNodeReady = v.(bool)
    }

    if enableAutoscale && waitNodeReady {
        return fmt.Errorf("`wait_node_ready` can only be set if `enable_auto_scale` is `false`.")
    }

    if _, ok := d.GetOkExists("scale_tolerance"); ok {
        if !waitNodeReady {
            return fmt.Errorf("`scale_tolerance` can only be set if `wait_node_ready` is `true`.")
        }
    }

    return nil
}

// waitNodePoolInitializing polls the node pool after a scaling operation until the
// number of Normal nodes reaches the configured scale tolerance, and surfaces the
// latest auto scaling activity message if no node came up at all.
func waitNodePoolInitializing(ctx context.Context, clusterId, nodePoolId string) (err error) {
    d := tccommon.ResourceDataFromContext(ctx)
    meta := tccommon.ProviderMetaFromContext(ctx)

    var (
        currentNormal      int64
        desiredCapacity    int64
        waitNodeReady      bool
        scaleTolerance     int64 = 100
        autoscalingGroupId string
    )

    if v, ok := d.GetOkExists("desired_capacity"); ok {
        desiredCapacity = int64(v.(int))
        if desiredCapacity == 0 {
            desiredCapacity = 1
        }
    }

    if v, ok := d.GetOkExists("wait_node_ready"); ok {
        waitNodeReady = v.(bool)
    }

    if waitNodeReady {
        if v, ok := d.GetOkExists("scale_tolerance"); ok {
            scaleTolerance = int64(v.(int))
        }

        logId := tccommon.GetLogId(tccommon.ContextNil)
        nodePoolDetailrequest := tke.NewDescribeClusterNodePoolDetailRequest()
        nodePoolDetailrequest.ClusterId = common.StringPtr(clusterId)
        nodePoolDetailrequest.NodePoolId = common.StringPtr(nodePoolId)
        err = resource.Retry(1*tccommon.ReadRetryTimeout, func() *resource.RetryError {
            result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DescribeClusterNodePoolDetailWithContext(ctx, nodePoolDetailrequest)
            if e != nil {
                return tccommon.RetryError(e)
            } else {
                log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, nodePoolDetailrequest.GetAction(), nodePoolDetailrequest.ToJsonString(), result.ToJsonString())
            }

            if result == nil || result.Response == nil || result.Response.NodePool == nil || result.Response.NodePool.NodeCountSummary == nil || result.Response.NodePool.NodeCountSummary.AutoscalingAdded == nil {
                e = fmt.Errorf("Cluster %s node pool %s not exists", clusterId, nodePoolId)
                return resource.NonRetryableError(e)
            }

            desiredNodesNum := result.Response.NodePool.DesiredNodesNum
            autoscalingAdded := result.Response.NodePool.NodeCountSummary.AutoscalingAdded
            total := autoscalingAdded.Total
            normal := autoscalingAdded.Normal
            if *total != 0 {
                if *normal > *desiredNodesNum {
                    return resource.RetryableError(fmt.Errorf("Node pool is still scaling"))
                }

                currentTolerance := int64((float64(*normal) / float64(*desiredNodesNum)) * 100)
                if currentTolerance >= scaleTolerance || *desiredNodesNum == *normal {
                    return nil
                }
            }

            currentNormal = *normal
            autoscalingGroupId = *result.Response.NodePool.AutoscalingGroupId
            return resource.RetryableError(fmt.Errorf("Node pool is still scaling"))
        })

        if err != nil {
            if currentNormal < 1 {
                var errFmt string
                asRequest := as.NewDescribeAutoScalingActivitiesRequest()
                asRequest.Filters = []*as.Filter{
                    {
                        Name:   common.StringPtr("auto-scaling-group-id"),
                        Values: common.StringPtrs([]string{autoscalingGroupId}),
                    },
                }

                err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
                    result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseAsClient().DescribeAutoScalingActivitiesWithContext(ctx, asRequest)
                    if e != nil {
                        return tccommon.RetryError(e)
                    } else {
                        log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, asRequest.GetAction(), asRequest.ToJsonString(), result.ToJsonString())
                    }

                    if result == nil || result.Response == nil || result.Response.ActivitySet == nil || len(result.Response.ActivitySet) < 1 {
                        e = fmt.Errorf("Describe auto scaling activities failed")
                        return resource.NonRetryableError(e)
                    }

                    res := result.Response.ActivitySet[0]
                    errFmt = fmt.Sprintf("%s\nDescription: %s\nStatusMessage: %s", *res.StatusMessageSimplified, *res.Description, *res.StatusMessage)
                    return nil
                })

                return fmt.Errorf("Node pool scaling failed, Reason: %s\nPlease check your resource inventory, Or adjust `desired_capacity`, `scale_tolerance` and `instance_type`, Then try again.", errFmt)
            } else {
                return fmt.Errorf("Node pool scaling failed, Desired value: %d, Actual value: %d, Scale tolerance: %d%%\nPlease check your resource inventory, Or adjust `desired_capacity`, `scale_tolerance` and `instance_type`, Then try again.", desiredCapacity, currentNormal, scaleTolerance)
            }
        }
    }

    return nil
}
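To make the rules enforced by `checkParams` easy to see at a glance, here is a standalone sketch; the helper `validateScaleFlags` is invented for this illustration and is not part of the provider:

```go
package main

import (
	"errors"
	"fmt"
)

// validateScaleFlags restates the checks performed by checkParams above:
//   - wait_node_ready may only be true when enable_auto_scale is false
//   - scale_tolerance may only be set when wait_node_ready is true
func validateScaleFlags(enableAutoScale, waitNodeReady, scaleToleranceSet bool) error {
	if enableAutoScale && waitNodeReady {
		return errors.New("`wait_node_ready` can only be set if `enable_auto_scale` is `false`")
	}
	if scaleToleranceSet && !waitNodeReady {
		return errors.New("`scale_tolerance` can only be set if `wait_node_ready` is `true`")
	}
	return nil
}

func main() {
	fmt.Println(validateScaleFlags(false, true, true))  // <nil>: the documented example configuration
	fmt.Println(validateScaleFlags(true, true, false))  // error: wait_node_ready with auto scaling enabled
	fmt.Println(validateScaleFlags(false, false, true)) // error: scale_tolerance without wait_node_ready
}
```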

0 commit comments
