
Commit d3bcbdb

fix(tke): [121076626] tencentcloud_kubernetes_node_pool support timeouts for create and update (#2998)
* add * add * add * add
1 parent 801e12b commit d3bcbdb

File tree

5 files changed (+32 −8 lines):

.changelog/2998.txt
tencentcloud/services/tke/resource_tc_kubernetes_node_pool.go
tencentcloud/services/tke/resource_tc_kubernetes_node_pool.md
tencentcloud/services/tke/resource_tc_kubernetes_node_pool_extension.go
website/docs/r/kubernetes_node_pool.html.markdown


.changelog/2998.txt

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/tencentcloud_kubernetes_node_pool: support `timeouts` for create and update
+```

tencentcloud/services/tke/resource_tc_kubernetes_node_pool.go

Lines changed: 4 additions & 0 deletions
Some generated files are not rendered by default.
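Since the rendered diff for this generated file is hidden, the sketch below is only an assumption of what its four added lines do: wire the standard Plugin SDK v2 Timeouts block into the resource schema, with the 30-minute defaults the documentation below refers to. The helper name `nodePoolTimeoutsSketch` is illustrative, not part of the provider.

```go
package tke

import (
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// nodePoolTimeoutsSketch is a hypothetical helper, not provider code. It shows
// the usual SDK v2 way to declare per-operation timeouts; the generated node
// pool schema presumably gains an equivalent Timeouts block.
func nodePoolTimeoutsSketch() *schema.Resource {
	return &schema.Resource{
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(30 * time.Minute), // read back via d.Timeout(schema.TimeoutCreate)
			Update: schema.DefaultTimeout(30 * time.Minute), // read back via d.Timeout(schema.TimeoutUpdate)
		},
	}
}
```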

tencentcloud/services/tke/resource_tc_kubernetes_node_pool.md

Lines changed: 6 additions & 1 deletion
@@ -145,7 +145,7 @@ resource "tencentcloud_kubernetes_node_pool" "example" {
 }
 ```
 
-Wait for all scaling nodes to be ready with wait_node_ready and scale_tolerance parameters.
+Wait for all scaling nodes to be ready with wait_node_ready and scale_tolerance parameters. The default maximum scaling timeout is 30 minutes.
 
 ```hcl
 resource "tencentcloud_kubernetes_node_pool" "example" {
@@ -209,6 +209,11 @@ resource "tencentcloud_kubernetes_node_pool" "example" {
       "root-dir=/var/lib/kubelet"
     ]
   }
+
+  timeouts {
+    create = "30m"
+    update = "30m"
+  }
 }
 ```

tencentcloud/services/tke/resource_tc_kubernetes_node_pool_extension.go

Lines changed: 13 additions & 6 deletions
@@ -123,7 +123,7 @@ func resourceTencentCloudKubernetesNodePoolCreatePostHandleResponse0(ctx context
 	nodePoolId := *resp.Response.NodePoolId
 
 	// todo wait for status ok
-	err := resource.Retry(5*tccommon.ReadRetryTimeout, func() *resource.RetryError {
+	err := resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
 		nodePool, _, errRet := service.DescribeNodePool(ctx, clusterId, nodePoolId)
 		if errRet != nil {
 			return tccommon.RetryError(errRet, tccommon.InternalError)
@@ -160,7 +160,7 @@ func resourceTencentCloudKubernetesNodePoolCreatePostHandleResponse0(ctx context
 	}
 
 	// wait node scaling
-	if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId); err != nil {
+	if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId, schema.TimeoutCreate); err != nil {
 		return err
 	}
 
@@ -637,7 +637,7 @@ func resourceTencentCloudKubernetesNodePoolUpdateOnStart(ctx context.Context) er
 		capacityHasChanged = true
 
 		// wait node scaling
-		if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId); err != nil {
+		if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId, schema.TimeoutUpdate); err != nil {
 			return err
 		}
 	}
@@ -725,7 +725,7 @@ func resourceTencentCloudKubernetesNodePoolUpdateOnStart(ctx context.Context) er
 		}
 
 		// wait node scaling
-		if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId); err != nil {
+		if err = waitNodePoolInitializing(ctx, clusterId, nodePoolId, schema.TimeoutUpdate); err != nil {
 			return err
 		}
 	}
@@ -1418,7 +1418,7 @@ func checkParams(ctx context.Context) error {
 	return nil
 }
 
-func waitNodePoolInitializing(ctx context.Context, clusterId, nodePoolId string) (err error) {
+func waitNodePoolInitializing(ctx context.Context, clusterId, nodePoolId, step string) (err error) {
 	d := tccommon.ResourceDataFromContext(ctx)
 	meta := tccommon.ProviderMetaFromContext(ctx)
 
@@ -1435,6 +1435,13 @@ func waitNodePoolInitializing(ctx context.Context, clusterId, nodePoolId string)
 	}
 
 	if waitNodeReady {
+		var dTimeout string
+		if step == schema.TimeoutCreate {
+			dTimeout = schema.TimeoutCreate
+		} else {
+			dTimeout = schema.TimeoutUpdate
+		}
+
 		if v, ok := d.GetOkExists("desired_capacity"); ok {
 			desiredCapacity = int64(v.(int))
 			if desiredCapacity == 0 {
@@ -1450,7 +1457,7 @@ func waitNodePoolInitializing(ctx context.Context, clusterId, nodePoolId string)
 		nodePoolDetailrequest := tke.NewDescribeClusterNodePoolDetailRequest()
 		nodePoolDetailrequest.ClusterId = common.StringPtr(clusterId)
 		nodePoolDetailrequest.NodePoolId = common.StringPtr(nodePoolId)
-		err = resource.Retry(10*tccommon.ReadRetryTimeout, func() *resource.RetryError {
+		err = resource.Retry(d.Timeout(dTimeout), func() *resource.RetryError {
			result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DescribeClusterNodePoolDetailWithContext(ctx, nodePoolDetailrequest)
			if e != nil {
				return tccommon.RetryError(e)
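For reference, a minimal sketch of the mechanism the hunks above rely on: `d.Timeout(step)` returns the duration from the user's `timeouts` block when one is set, otherwise the schema default, and that duration bounds the `resource.Retry` polling loop. The helper and its `poll` callback are illustrative names, not code from this change.

```go
package tke

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// waitWithConfiguredTimeout is an illustrative helper, not part of this commit.
// step is expected to be schema.TimeoutCreate or schema.TimeoutUpdate.
func waitWithConfiguredTimeout(d *schema.ResourceData, step string, poll func() error) error {
	timeout := d.Timeout(step) // user-configured timeouts value if present, schema default otherwise
	return resource.Retry(timeout, func() *resource.RetryError {
		if err := poll(); err != nil {
			return resource.RetryableError(err) // keep polling until ready or the timeout elapses
		}
		return nil
	})
}
```

If the node pool is still not ready when the configured duration elapses, resource.Retry returns a timeout error and the create or update operation fails.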

website/docs/r/kubernetes_node_pool.html.markdown

Lines changed: 6 additions & 1 deletion
@@ -156,7 +156,7 @@ resource "tencentcloud_kubernetes_node_pool" "example" {
 }
 ```
 
-### Wait for all scaling nodes to be ready with wait_node_ready and scale_tolerance parameters.
+### Wait for all scaling nodes to be ready with wait_node_ready and scale_tolerance parameters. The default maximum scaling timeout is 30 minutes.
 
 ```hcl
 resource "tencentcloud_kubernetes_node_pool" "example" {
@@ -220,6 +220,11 @@ resource "tencentcloud_kubernetes_node_pool" "example" {
       "root-dir=/var/lib/kubelet"
     ]
   }
+
+  timeouts {
+    create = "30m"
+    update = "30m"
+  }
 }
 ```
