diff --git a/.changelog/2681.txt b/.changelog/2681.txt new file mode 100644 index 0000000000..8f6d504293 --- /dev/null +++ b/.changelog/2681.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/tencentcloud_kubernetes_scale_worker: Add `pre_start_user_script` and `user_script` fields +``` \ No newline at end of file diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_endpoint.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_endpoint.go index 54f92656b0..22cf4c0e96 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_endpoint.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_endpoint.go @@ -28,6 +28,7 @@ func ResourceTencentCloudTkeClusterEndpoint() *schema.Resource { "cluster_id": { Type: schema.TypeString, Required: true, + ForceNew: true, Description: "Specify cluster ID.", }, "cluster_internet": { diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker.go b/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker.go index a1cf89d418..21c1643136 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker.go @@ -1,3 +1,4 @@ +// Code generated by iacg; DO NOT EDIT. 
package tke import ( @@ -9,7 +10,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" ) func ResourceTencentCloudKubernetesScaleWorker() *schema.Resource { @@ -156,7 +159,7 @@ func ResourceTencentCloudKubernetesScaleWorker() *schema.Resource { Optional: true, ForceNew: true, Default: 0, - Description: "Sets whether the joining node participates in the schedule. Default is '0'. Participate in scheduling.", + Description: "Set whether the added node participates in scheduling. The default value is 0, which means participating in scheduling; non-0 means not participating in scheduling. After the node initialization is completed, you can execute kubectl uncordon nodename to join the node in scheduling.", }, "worker_config": { @@ -408,12 +411,26 @@ func ResourceTencentCloudKubernetesScaleWorker() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - Description: "ase64-encoded User Data text, the length limit is 16KB.", + Description: "User data provided to instances, needs to be encoded in base64, and the maximum supported data size is 16KB.", }, }, }, }, + "pre_start_user_script": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Base64-encoded user script, executed before initializing the node, currently only effective for adding existing nodes.", + }, + + "user_script": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Base64 encoded user script, this script will be executed after the k8s component is run. The user needs to ensure that the script is reentrant and retry logic. 
The script and its generated log files can be viewed in the /data/ccs_userscript/ path of the node, if required. The node needs to be initialized before it can be added to the schedule. It can be used with the unschedulable parameter. After the final initialization of userScript is completed, add the kubectl uncordon nodename --kubeconfig=/root/.kube/config command to add the node to the schedule.", + }, + "worker_instances_list": { Type: schema.TypeList, Computed: true, @@ -460,83 +477,11 @@ func resourceTencentCloudKubernetesScaleWorkerCreate(d *schema.ResourceData, met ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) - var ( - clusterId string - instanceIdSet []string - ) - var ( - request = tke.NewDescribeClustersRequest() - response = tke.NewDescribeClustersResponse() - ) - - if v, ok := d.GetOk("cluster_id"); ok { - clusterId = v.(string) - } - - request.ClusterIds = []*string{&clusterId} - - err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DescribeClustersWithContext(ctx, request) - if e != nil { - return tccommon.RetryError(e) - } else { - log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) - } - if err := resourceTencentCloudKubernetesScaleWorkerCreatePostRequest0(ctx, request, result); err != nil { - return resource.NonRetryableError(err) - } - - response = result - return nil - }) - if err != nil { - log.Printf("[CRITAL]%s create kubernetes scale worker failed, reason:%+v", logId, err) - return err - } - - _ = response - - var ( - request1 = tke.NewCreateClusterInstancesRequest() - response1 = tke.NewCreateClusterInstancesResponse() - ) - - if v, ok := d.GetOk("cluster_id"); ok { - clusterId = v.(string) - } - - request1.ClusterId = &clusterId - - if err = 
resourceTencentCloudKubernetesScaleWorkerCreatePostFillRequest1(ctx, request1); err != nil { - return err - } - - err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().CreateClusterInstancesWithContext(ctx, request1) - if e != nil { - return tccommon.RetryError(e) - } else { - log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request1.GetAction(), request1.ToJsonString(), result.ToJsonString()) - } - - if instanceIdSet, err = resourceTencentCloudKubernetesScaleWorkerCreatePostRequest1(ctx, request1, result); err != nil { - return resource.NonRetryableError(err) - } - - response1 = result - return nil - }) - - if err != nil { - log.Printf("[CRITAL]%s create kubernetes scale worker failed, reason:%+v", logId, err) + if err := resourceTencentCloudKubernetesScaleWorkerCreateOnStart(ctx); err != nil { return err } - _ = response1 - - id := clusterId + tccommon.FILED_SP + strings.Join(instanceIdSet, tccommon.FILED_SP) - d.SetId(id) - + _ = ctx return resourceTencentCloudKubernetesScaleWorkerRead(d, meta) } @@ -579,7 +524,6 @@ func resourceTencentCloudKubernetesScaleWorkerRead(d *schema.ResourceData, meta log.Printf("[WARN]%s resource `kubernetes_scale_worker` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) return nil } - respData2, err := service.DescribeKubernetesScaleWorkerById2(ctx) if err != nil { return err @@ -613,9 +557,7 @@ func resourceTencentCloudKubernetesScaleWorkerDelete(d *schema.ResourceData, met response = tke.NewDescribeClustersResponse() ) - if v, ok := d.GetOk("cluster_id"); ok { - clusterId = v.(string) - } + request.ClusterIds = []*string{helper.String(clusterId)} err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DescribeClustersWithContext(ctx, request) @@ -624,89 +566,23 @@ func 
resourceTencentCloudKubernetesScaleWorkerDelete(d *schema.ResourceData, met } else { log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) } - if err := resourceTencentCloudKubernetesScaleWorkerDeletePostRequest0(ctx, request, result); err != nil { - return resource.NonRetryableError(err) + return err } response = result return nil }) - if err != nil { - log.Printf("[CRITAL]%s create kubernetes scale worker failed, reason:%+v", logId, err) + log.Printf("[CRITAL]%s delete kubernetes scale worker failed, reason:%+v", logId, err) return err } _ = response - - var ( - request1 = tke.NewDescribeClusterInstancesRequest() - response1 = tke.NewDescribeClusterInstancesResponse() - workers []InstanceInfo - ) - - if v, ok := d.GetOk("cluster_id"); ok { - clusterId = v.(string) - } - - request1.ClusterId = &clusterId - - err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DescribeClusterInstancesWithContext(ctx, request1) - if e != nil { - return tccommon.RetryError(e) - } else { - log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request1.GetAction(), request1.ToJsonString(), result.ToJsonString()) - } - - if workers, err = resourceTencentCloudKubernetesScaleWorkerDeletePostRequest1(ctx, request1, result); err != nil { - return resource.NonRetryableError(err) - } - - response1 = result - return nil - }) - - if err != nil { - log.Printf("[CRITAL]%s create kubernetes scale worker failed, reason:%+v", logId, err) - return err - } - - _ = response1 - - var ( - request2 = tke.NewDeleteClusterInstancesRequest() - response2 = tke.NewDeleteClusterInstancesResponse() - ) - - if v, ok := d.GetOk("cluster_id"); ok { - clusterId = v.(string) - } - - request2.ClusterId = &clusterId - - if err = 
resourceTencentCloudKubernetesScaleWorkerDeletePostFillRequest2(ctx, request2, workers); err != nil { - return err - } - - err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DeleteClusterInstancesWithContext(ctx, request2) - if e != nil { - return tccommon.RetryError(e) - } else { - log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request2.GetAction(), request2.ToJsonString(), result.ToJsonString()) - } - response2 = result - return nil - }) - - if err != nil { - log.Printf("[CRITAL]%s create kubernetes scale worker failed, reason:%+v", logId, err) + if err := resourceTencentCloudKubernetesScaleWorkerDeleteOnExit(ctx); err != nil { return err } - _ = response2 _ = instanceIdSet return nil } diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_extension.go index f68ead3109..3b4125ec87 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_extension.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_extension.go @@ -7,9 +7,13 @@ import ( "strings" "time" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312" tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" ) @@ -37,31 +41,6 @@ func customScaleWorkerResourceImporter(ctx context.Context, d *schema.ResourceDa return []*schema.ResourceData{d}, nil 
} -func resourceTencentCloudKubernetesScaleWorkerCreatePostRequest1(ctx context.Context, req *tke.CreateClusterInstancesRequest, resp *tke.CreateClusterInstancesResponse) (instanceIdSet []string, err error) { - d := tccommon.ResourceDataFromContext(ctx) - - instanceIdSet = make([]string, 0) - workerInstancesList := make([]map[string]interface{}, 0, len(resp.Response.InstanceIdSet)) - for _, v := range resp.Response.InstanceIdSet { - if *v == "" { - return nil, fmt.Errorf("CreateClusterInstances return one instanceId is empty") - } - infoMap := make(map[string]interface{}) - infoMap["instance_id"] = v - infoMap["instance_role"] = TKE_ROLE_WORKER - workerInstancesList = append(workerInstancesList, infoMap) - instanceIdSet = append(instanceIdSet, *v) - } - - if err := d.Set("worker_instances_list", workerInstancesList); err != nil { - return nil, err - } - - //wait for LANIP - time.Sleep(tccommon.ReadRetryTimeout) - return instanceIdSet, nil -} - func resourceTencentCloudKubernetesScaleWorkerReadPostRequest1(ctx context.Context, req *tke.DescribeClusterInstancesRequest, resp *tke.DescribeClusterInstancesResponse) error { d := tccommon.ResourceDataFromContext(ctx) var has = map[string]bool{} @@ -301,57 +280,100 @@ func resourceTencentCloudKubernetesScaleWorkerReadPostRequest2(ctx context.Conte return nil } -func resourceTencentCloudKubernetesScaleWorkerDeletePostRequest0(ctx context.Context, req *tke.DescribeClustersRequest, resp *tke.DescribeClustersResponse) error { +func resourceTencentCloudKubernetesScaleWorkerDeletePostRequest0(ctx context.Context, req *tke.DescribeClustersRequest, resp *tke.DescribeClustersResponse) *resource.RetryError { if len(resp.Response.Clusters) == 0 { - return fmt.Errorf("The cluster has been deleted") + return resource.NonRetryableError(fmt.Errorf("The cluster has been deleted")) } - return nil } -func resourceTencentCloudKubernetesScaleWorkerDeletePostRequest1(ctx context.Context, req *tke.DescribeClusterInstancesRequest, resp 
*tke.DescribeClusterInstancesResponse) (workers []InstanceInfo, err error) { - var has = map[string]bool{} - workers = make([]InstanceInfo, 0, 100) - for _, item := range resp.Response.InstanceSet { - if has[*item.InstanceId] { - return nil, fmt.Errorf("get repeated instance_id[%s] when doing DescribeClusterInstances", *item.InstanceId) - } +func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest0(ctx context.Context, req *tke.DescribeClustersRequest) error { + d := tccommon.ResourceDataFromContext(ctx) + items := strings.Split(d.Id(), tccommon.FILED_SP) - has[*item.InstanceId] = true - instanceInfo := InstanceInfo{ - InstanceId: *item.InstanceId, - InstanceRole: *item.InstanceRole, - InstanceState: *item.InstanceState, - FailedReason: *item.FailedReason, - InstanceAdvancedSettings: item.InstanceAdvancedSettings, + instanceMap := make(map[string]bool) + oldWorkerInstancesList := d.Get("worker_instances_list").([]interface{}) + if importFlag1 { + GlobalClusterId = items[0] + if len(items[1:]) >= 2 { + return fmt.Errorf("only one additional configuration of virtual machines is now supported now, " + + "so should be 1") } - - if item.CreatedTime != nil { - instanceInfo.CreatedTime = *item.CreatedTime + infoMap := map[string]interface{}{ + "instance_id": items[1], } + oldWorkerInstancesList = append(oldWorkerInstancesList, infoMap) + } else { + GlobalClusterId = d.Get("cluster_id").(string) + } - if item.NodePoolId != nil { - instanceInfo.NodePoolId = *item.NodePoolId - } + if GlobalClusterId == "" { + return fmt.Errorf("tke.`cluster_id` is empty.") + } - if item.LanIP != nil { - instanceInfo.LanIp = *item.LanIP + for _, v := range oldWorkerInstancesList { + infoMap, ok := v.(map[string]interface{}) + if !ok || infoMap["instance_id"] == nil { + return fmt.Errorf("worker_instances_list is broken.") } - - if instanceInfo.InstanceRole == TKE_ROLE_WORKER { - workers = append(workers, instanceInfo) + instanceId, ok := infoMap["instance_id"].(string) + if !ok || 
instanceId == "" { + return fmt.Errorf("worker_instances_list.instance_id is broken.") + } + if instanceMap[instanceId] { + continue } + instanceMap[instanceId] = true + } + + return nil +} + +func resourceTencentCloudKubernetesScaleWorkerReadPostRequest0(ctx context.Context, req *tke.DescribeClustersRequest, resp *tke.DescribeClustersResponse) error { + if len(resp.Response.Clusters) == 0 { + return fmt.Errorf("The cluster has been deleted") } + return nil +} - return workers, nil +func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest2(ctx context.Context, req *cvm.DescribeInstancesRequest) error { + req.InstanceIds = WorkersInstanceIds + return nil } -func resourceTencentCloudKubernetesScaleWorkerCreatePostFillRequest1(ctx context.Context, req *tke.CreateClusterInstancesRequest) error { +func resourceTencentCloudKubernetesScaleWorkerCreateOnStart(ctx context.Context) error { d := tccommon.ResourceDataFromContext(ctx) meta := tccommon.ProviderMetaFromContext(ctx) var cvms RunInstancesForNode var iAdvanced tke.InstanceAdvancedSettings + cvms.Work = []string{} + + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + clusterId := d.Get("cluster_id").(string) + if clusterId == "" { + return fmt.Errorf("`cluster_id` is empty.") + } + + info, has, err := service.DescribeCluster(ctx, clusterId) + if err != nil { + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + info, has, err = service.DescribeCluster(ctx, clusterId) + if err != nil { + return tccommon.RetryError(err) + } + return nil + }) + } + + if err != nil { + return err + } + + if !has { + return fmt.Errorf("cluster [%s] is not exist.", clusterId) + } dMap := make(map[string]interface{}, 5) //mount_target, docker_graph_path, data_disk, extra_args, desired_pod_num @@ -361,7 +383,6 @@ func resourceTencentCloudKubernetesScaleWorkerCreatePostFillRequest1(ctx context dMap[k] = v } } - iAdvanced = tkeGetInstanceAdvancedPara(dMap, meta) iAdvanced.Labels 
= GetTkeLabels(d, "labels") @@ -369,11 +390,19 @@ func resourceTencentCloudKubernetesScaleWorkerCreatePostFillRequest1(ctx context iAdvanced.Unschedulable = helper.Int64(int64(temp.(int))) } + if v, ok := d.GetOk("pre_start_user_script"); ok { + iAdvanced.PreStartUserScript = helper.String(v.(string)) + } + + if v, ok := d.GetOk("user_script"); ok { + iAdvanced.UserScript = helper.String(v.(string)) + } + if workers, ok := d.GetOk("worker_config"); ok { workerList := workers.([]interface{}) for index := range workerList { worker := workerList[index].(map[string]interface{}) - paraJson, _, err := tkeGetCvmRunInstancesPara(worker, meta, CreateClusterInstancesVpcId, CreateClusterInstancesProjectId) + paraJson, _, err := tkeGetCvmRunInstancesPara(worker, meta, info.VpcId, info.ProjectId) if err != nil { return err } @@ -385,23 +414,52 @@ func resourceTencentCloudKubernetesScaleWorkerCreatePostFillRequest1(ctx context "so len(cvms.Work) should be 1") } - req.RunInstancePara = &cvms.Work[0] - req.InstanceAdvancedSettings = &iAdvanced + instanceIds, err := service.CreateClusterInstances(ctx, clusterId, cvms.Work[0], iAdvanced) + if err != nil { + return err + } + workerInstancesList := make([]map[string]interface{}, 0, len(instanceIds)) + for _, v := range instanceIds { + if v == "" { + return fmt.Errorf("CreateClusterInstances return one instanceId is empty") + } + infoMap := make(map[string]interface{}) + infoMap["instance_id"] = v + infoMap["instance_role"] = TKE_ROLE_WORKER + workerInstancesList = append(workerInstancesList, infoMap) + } + + if err = d.Set("worker_instances_list", workerInstancesList); err != nil { + return err + } + + //修改id设置,不符合id规则 + id := clusterId + tccommon.FILED_SP + strings.Join(instanceIds, tccommon.FILED_SP) + d.SetId(id) + + //wait for LANIP + time.Sleep(tccommon.ReadRetryTimeout) return nil } -func resourceTencentCloudKubernetesScaleWorkerDeletePostFillRequest2(ctx context.Context, req *tke.DeleteClusterInstancesRequest, workers 
[]InstanceInfo) error { +func resourceTencentCloudKubernetesScaleWorkerDeleteOnExit(ctx context.Context) error { d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + clusterId := idSplit[0] workerInstancesList := d.Get("worker_instances_list").([]interface{}) + instanceMap := make(map[string]bool) for _, v := range workerInstancesList { + infoMap, ok := v.(map[string]interface{}) + if !ok || infoMap["instance_id"] == nil { return fmt.Errorf("worker_instances_list is broken.") } - instanceId, ok := infoMap["instance_id"].(string) if !ok || instanceId == "" { return fmt.Errorf("worker_instances_list.instance_id is broken.") @@ -412,94 +470,63 @@ func resourceTencentCloudKubernetesScaleWorkerDeletePostFillRequest2(ctx context } instanceMap[instanceId] = true - } - - needDeletes := []string{} - for _, cvmInfo := range workers { - if _, ok := instanceMap[cvmInfo.InstanceId]; ok { - needDeletes = append(needDeletes, cvmInfo.InstanceId) - } - } - - // The machines I generated was deleted by others. 
- if len(needDeletes) == 0 { - return fmt.Errorf("The machines I generated was deleted by others.") - } - - req.InstanceIds = make([]*string, 0, len(needDeletes)) - for index := range needDeletes { - req.InstanceIds = append(req.InstanceIds, &needDeletes[index]) } - req.InstanceDeleteMode = helper.String("terminate") - - return nil -} + _, workers, err := service.DescribeClusterInstances(ctx, clusterId) + if err != nil { + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + _, workers, err = service.DescribeClusterInstances(ctx, clusterId) -func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest0(ctx context.Context, req *tke.DescribeClustersRequest) error { - d := tccommon.ResourceDataFromContext(ctx) - items := strings.Split(d.Id(), tccommon.FILED_SP) + if e, ok := err.(*errors.TencentCloudSDKError); ok { + if e.GetCode() == "InternalError.ClusterNotFound" { + return nil + } + } - instanceMap := make(map[string]bool) - oldWorkerInstancesList := d.Get("worker_instances_list").([]interface{}) - if importFlag1 { - GlobalClusterId = items[0] - if len(items[1:]) >= 2 { - return fmt.Errorf("only one additional configuration of virtual machines is now supported now, " + - "so should be 1") - } - infoMap := map[string]interface{}{ - "instance_id": items[1], - } - oldWorkerInstancesList = append(oldWorkerInstancesList, infoMap) - } else { - GlobalClusterId = d.Get("cluster_id").(string) + if err != nil { + return resource.RetryableError(err) + } + return nil + }) } - if GlobalClusterId == "" { - return fmt.Errorf("tke.`cluster_id` is empty.") + if err != nil { + return err } - for _, v := range oldWorkerInstancesList { - infoMap, ok := v.(map[string]interface{}) - if !ok || infoMap["instance_id"] == nil { - return fmt.Errorf("worker_instances_list is broken.") - } - instanceId, ok := infoMap["instance_id"].(string) - if !ok || instanceId == "" { - return fmt.Errorf("worker_instances_list.instance_id is broken.") - } - if 
instanceMap[instanceId] { - continue + needDeletes := []string{} + for _, cvm := range workers { + if _, ok := instanceMap[cvm.InstanceId]; ok { + needDeletes = append(needDeletes, cvm.InstanceId) } - instanceMap[instanceId] = true } - - return nil -} - -func resourceTencentCloudKubernetesScaleWorkerReadPostRequest0(ctx context.Context, req *tke.DescribeClustersRequest, resp *tke.DescribeClustersResponse) error { - if len(resp.Response.Clusters) == 0 { - return fmt.Errorf("The cluster has been deleted") + // The machines I generated was deleted by others. + if len(needDeletes) == 0 { + return nil } - return nil -} - -func resourceTencentCloudKubernetesScaleWorkerCreatePostRequest0(ctx context.Context, req *tke.DescribeClustersRequest, resp *tke.DescribeClustersResponse) error { - if len(resp.Response.Clusters) == 0 { - return fmt.Errorf("The cluster has been deleted") - } + err = service.DeleteClusterInstances(ctx, clusterId, needDeletes) + if err != nil { + err = resource.Retry(3*tccommon.WriteRetryTimeout, func() *resource.RetryError { + err = service.DeleteClusterInstances(ctx, clusterId, needDeletes) - CreateClusterInstancesVpcId = *resp.Response.Clusters[0].ClusterNetworkSettings.VpcId - projectIdUint64 := *resp.Response.Clusters[0].ProjectId - CreateClusterInstancesProjectId = int64(projectIdUint64) + if e, ok := err.(*errors.TencentCloudSDKError); ok { + if e.GetCode() == "InternalError.ClusterNotFound" { + return nil + } - return nil -} + if e.GetCode() == "InternalError.Param" && + strings.Contains(e.GetMessage(), `PARAM_ERROR[some instances []is not in right state`) { + return nil + } + } -func resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest2(ctx context.Context, req *cvm.DescribeInstancesRequest) error { - req.InstanceIds = WorkersInstanceIds + if err != nil { + return tccommon.RetryError(err, tccommon.InternalError) + } + return nil + }) + } return nil } diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_test.go 
b/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_test.go index 17e63d73b3..3c49a96def 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_test.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_test.go @@ -119,17 +119,19 @@ func TestAccTencentCloudKubernetesScaleWorkerResource(t *testing.T) { resource.TestCheckResourceAttrSet(testTkeScaleWorkerResourceKey, "worker_instances_list.0.instance_id"), resource.TestCheckResourceAttrSet(testTkeScaleWorkerResourceKey, "worker_instances_list.0.instance_role"), resource.TestCheckResourceAttrSet(testTkeScaleWorkerResourceKey, "unschedulable"), + resource.TestCheckResourceAttr(testTkeScaleWorkerResourceKey, "pre_start_user_script", "IyEvYmluL3NoIGVjaG8gImhlbGxvIHdvcmxkIg=="), resource.TestCheckResourceAttr(testTkeScaleWorkerResourceKey, "user_script", "IyEvYmluL3NoIGVjaG8gImhlbGxvIHdvcmxkIg=="), ), }, - { - Config: testAccTkeScaleWorkerInstanceGpuInsTypeUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckTkeScaleWorkerExists(testTkeScaleWorkerResourceKey), - resource.TestCheckResourceAttrSet(testTkeScaleWorkerResourceKey, "cluster_id"), - resource.TestCheckResourceAttrSet(testTkeScaleWorkerResourceKey, "gpu_args.#"), - ), - }, + // gpu_args依赖于指定镜像ID,但账号没有镜像ID权限,暂时注释 + //{ + // Config: testAccTkeScaleWorkerInstanceGpuInsTypeUpdate, + // Check: resource.ComposeTestCheckFunc( + // testAccCheckTkeScaleWorkerExists(testTkeScaleWorkerResourceKey), + // resource.TestCheckResourceAttrSet(testTkeScaleWorkerResourceKey, "cluster_id"), + // resource.TestCheckResourceAttrSet(testTkeScaleWorkerResourceKey, "gpu_args.#"), + // ), + //}, }, }) } @@ -231,9 +233,9 @@ func testAccCheckTkeScaleWorkerExists(n string) resource.TestCheckFunc { } } -const testAccTkeScaleWorkerInstance = ` +const testAccTkeScaleWorkerInstance = testAccTkeCluster + ` resource "tencentcloud_kubernetes_scale_worker" "test_scale" { - cluster_id = "cls-r8gqwjw6" + cluster_id = 
tencentcloud_kubernetes_cluster.managed_cluster.id extra_args = [ "root-dir=/var/lib/kubelet" ] @@ -243,17 +245,18 @@ resource "tencentcloud_kubernetes_scale_worker" "test_scale" { "test2" = "test2", } unschedulable = 0 + pre_start_user_script = "IyEvYmluL3NoIGVjaG8gImhlbGxvIHdvcmxkIg==" user_script = "IyEvYmluL3NoIGVjaG8gImhlbGxvIHdvcmxkIg==" worker_config { count = 1 availability_zone = "ap-guangzhou-3" instance_type = "S2.LARGE16" - subnet_id = "subnet-gaazc9di" + subnet_id = local.subnet_id1 system_disk_type = "CLOUD_SSD" system_disk_size = 50 internet_charge_type = "TRAFFIC_POSTPAID_BY_HOUR" - security_group_ids = ["sg-cm7fbbf3"] + security_group_ids = [local.sg_id] data_disk { disk_type = "CLOUD_PREMIUM" @@ -268,9 +271,9 @@ resource "tencentcloud_kubernetes_scale_worker" "test_scale" { } ` -const testAccTkeScaleWorkerInstanceGpuInsTypeUpdate = ` +const testAccTkeScaleWorkerInstanceGpuInsTypeUpdate = testAccTkeCluster + ` resource "tencentcloud_kubernetes_scale_worker" "test_scale" { - cluster_id = "cls-r8gqwjw6" + cluster_id = tencentcloud_kubernetes_cluster.managed_cluster.id extra_args = [ "root-dir=/var/lib/kubelet" @@ -281,17 +284,18 @@ resource "tencentcloud_kubernetes_scale_worker" "test_scale" { "test2" = "test2", } unschedulable = 0 + pre_start_user_script = "IyEvYmluL3NoIGVjaG8gImhlbGxvIHdvcmxkIg==" user_script = "IyEvYmluL3NoIGVjaG8gImhlbGxvIHdvcmxkIg==" worker_config { count = 1 availability_zone = "ap-guangzhou-3" instance_type = "GN6S.LARGE20" - subnet_id = "subnet-gaazc9di" + subnet_id = local.subnet_id1 system_disk_type = "CLOUD_SSD" system_disk_size = 50 internet_charge_type = "TRAFFIC_POSTPAID_BY_HOUR" - security_group_ids = ["sg-cm7fbbf3"] + security_group_ids = [local.sg_id] img_id = "img-oyd1zdra" data_disk { diff --git a/tencentcloud/services/tke/service_tencentcloud_tke.go b/tencentcloud/services/tke/service_tencentcloud_tke.go index 4b2389ad01..d0134397e4 100644 --- a/tencentcloud/services/tke/service_tencentcloud_tke.go +++ 
b/tencentcloud/services/tke/service_tencentcloud_tke.go @@ -3389,7 +3389,7 @@ func (me *TkeService) DescribeKubernetesScaleWorkerById(ctx context.Context, clu logId := tccommon.GetLogId(ctx) request := tke.NewDescribeClustersRequest() - request.ClusterIds = []*string{&clusterId} + request.ClusterIds = []*string{helper.String(clusterId)} if err := resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest0(ctx, request); err != nil { return nil, err @@ -3410,7 +3410,7 @@ func (me *TkeService) DescribeKubernetesScaleWorkerById(ctx context.Context, clu } log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) - if err = resourceTencentCloudKubernetesScaleWorkerReadPostRequest0(ctx, request, response); err != nil { + if err := resourceTencentCloudKubernetesScaleWorkerReadPostRequest0(ctx, request, response); err != nil { return nil, err } @@ -3422,7 +3422,7 @@ func (me *TkeService) DescribeKubernetesScaleWorkerById1(ctx context.Context, cl logId := tccommon.GetLogId(ctx) request := tke.NewDescribeClusterInstancesRequest() - request.ClusterId = &clusterId + request.ClusterId = helper.String(clusterId) defer func() { if errRet != nil { @@ -3439,7 +3439,7 @@ func (me *TkeService) DescribeKubernetesScaleWorkerById1(ctx context.Context, cl } log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) - if err = resourceTencentCloudKubernetesScaleWorkerReadPostRequest1(ctx, request, response); err != nil { + if err := resourceTencentCloudKubernetesScaleWorkerReadPostRequest1(ctx, request, response); err != nil { return nil, err } diff --git a/website/docs/r/kubernetes_cluster_endpoint.html.markdown b/website/docs/r/kubernetes_cluster_endpoint.html.markdown index dc2a24168b..b0132168cb 100644 --- a/website/docs/r/kubernetes_cluster_endpoint.html.markdown +++ 
b/website/docs/r/kubernetes_cluster_endpoint.html.markdown @@ -36,7 +36,7 @@ depends_on = [ The following arguments are supported: -* `cluster_id` - (Required, String) Specify cluster ID. +* `cluster_id` - (Required, String, ForceNew) Specify cluster ID. * `cluster_internet_domain` - (Optional, String) Domain name for cluster Kube-apiserver internet access. Be careful if you modify value of this parameter, the cluster_external_endpoint value may be changed automatically too. * `cluster_internet_security_group` - (Optional, String) Specify security group, NOTE: This argument must not be empty if cluster internet enabled. * `cluster_internet` - (Optional, Bool) Open internet access or not. diff --git a/website/docs/r/kubernetes_scale_worker.html.markdown b/website/docs/r/kubernetes_scale_worker.html.markdown index 1603021d64..f554d222b8 100644 --- a/website/docs/r/kubernetes_scale_worker.html.markdown +++ b/website/docs/r/kubernetes_scale_worker.html.markdown @@ -125,7 +125,9 @@ The following arguments are supported: * `gpu_args` - (Optional, List, ForceNew) GPU driver parameters. * `labels` - (Optional, Map, ForceNew) Labels of kubernetes scale worker created nodes. * `mount_target` - (Optional, String, ForceNew) Mount target. Default is not mounting. -* `unschedulable` - (Optional, Int, ForceNew) Sets whether the joining node participates in the schedule. Default is '0'. Participate in scheduling. +* `pre_start_user_script` - (Optional, String, ForceNew) Base64-encoded user script, executed before initializing the node, currently only effective for adding existing nodes. +* `unschedulable` - (Optional, Int, ForceNew) Set whether the added node participates in scheduling. The default value is 0, which means participating in scheduling; non-0 means not participating in scheduling. After the node initialization is completed, you can execute kubectl uncordon nodename to join the node in scheduling. 
+* `user_script` - (Optional, String, ForceNew) Base64 encoded user script, this script will be executed after the k8s component is run. The user needs to ensure that the script is reentrant and retry logic. The script and its generated log files can be viewed in the /data/ccs_userscript/ path of the node, if required. The node needs to be initialized before it can be added to the schedule. It can be used with the unschedulable parameter. After the final initialization of userScript is completed, add the kubectl uncordon nodename --kubeconfig=/root/.kube/config command to add the node to the schedule. The `data_disk` object of `worker_config` supports the following: @@ -183,7 +185,7 @@ The `worker_config` object supports the following: * `security_group_ids` - (Optional, List, ForceNew) Security groups to which a CVM instance belongs. * `system_disk_size` - (Optional, Int, ForceNew) Volume of system disk in GB. Default is `50`. * `system_disk_type` - (Optional, String, ForceNew) System disk type. For more information on limits of system disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: `LOCAL_BASIC`: local disk, `LOCAL_SSD`: local SSD disk, `CLOUD_SSD`: SSD, `CLOUD_PREMIUM`: Premium Cloud Storage. NOTE: `CLOUD_BASIC`, `LOCAL_BASIC` and `LOCAL_SSD` are deprecated. -* `user_data` - (Optional, String, ForceNew) ase64-encoded User Data text, the length limit is 16KB. +* `user_data` - (Optional, String, ForceNew) User data provided to instances, needs to be encoded in base64, and the maximum supported data size is 16KB. ## Attributes Reference