From d1891a0987569cd5fd59b291ec15288a22726f41 Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Fri, 1 Nov 2024 15:34:43 +0800 Subject: [PATCH 1/7] add --- tencentcloud/provider.go | 1 + tencentcloud/provider.md | 1 + ...tc_kubernetes_cluster_master_attachment.go | 547 ++++++++++++++++++ ...tc_kubernetes_cluster_master_attachment.md | 30 + ...tes_cluster_master_attachment_extension.go | 385 ++++++++++++ ...bernetes_cluster_master_attachment_test.go | 47 ++ .../services/tke/service_tencentcloud_tke.go | 85 +++ ...es_cluster_master_attachment.html.markdown | 120 ++++ website/tencentcloud.erb | 3 + 9 files changed, 1219 insertions(+) create mode 100644 tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go create mode 100644 tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md create mode 100644 tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go create mode 100644 tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go create mode 100644 website/docs/r/kubernetes_cluster_master_attachment.html.markdown diff --git a/tencentcloud/provider.go b/tencentcloud/provider.go index 8071216eb0..d3648a6233 100644 --- a/tencentcloud/provider.go +++ b/tencentcloud/provider.go @@ -1275,6 +1275,7 @@ func Provider() *schema.Provider { "tencentcloud_kubernetes_backup_storage_location": tke.ResourceTencentCloudKubernetesBackupStorageLocation(), "tencentcloud_kubernetes_serverless_node_pool": tke.ResourceTencentCloudKubernetesServerlessNodePool(), "tencentcloud_kubernetes_encryption_protection": tke.ResourceTencentCloudKubernetesEncryptionProtection(), + "tencentcloud_kubernetes_cluster_master_attachment": tke.ResourceTencentCloudKubernetesClusterMasterAttachment(), "tencentcloud_mysql_backup_policy": cdb.ResourceTencentCloudMysqlBackupPolicy(), "tencentcloud_mysql_account": cdb.ResourceTencentCloudMysqlAccount(), "tencentcloud_mysql_account_privilege": 
cdb.ResourceTencentCloudMysqlAccountPrivilege(), diff --git a/tencentcloud/provider.md b/tencentcloud/provider.md index 0adf55bdaa..c99c557196 100644 --- a/tencentcloud/provider.md +++ b/tencentcloud/provider.md @@ -686,6 +686,7 @@ Tencent Kubernetes Engine(TKE) tencentcloud_kubernetes_native_node_pool tencentcloud_kubernetes_health_check_policy tencentcloud_kubernetes_log_config + tencentcloud_kubernetes_cluster_master_attachment TDMQ for Pulsar(tpulsar) Data Source diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go new file mode 100644 index 0000000000..8e9a388c8f --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go @@ -0,0 +1,547 @@ +// Code generated by iacg; DO NOT EDIT. +package tke + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func ResourceTencentCloudKubernetesClusterMasterAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudKubernetesClusterMasterAttachmentCreate, + Read: resourceTencentCloudKubernetesClusterMasterAttachmentRead, + Delete: resourceTencentCloudKubernetesClusterMasterAttachmentDelete, + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(900000 * time.Millisecond), + }, + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "ID of the cluster.", + }, + + "instance_id": { + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + Description: "ID of the CVM instance, this cvm will reinstall the system.", + }, + + "node_role": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Node role, values: MASTER_ETCD, WORKER. MASTER_ETCD needs to be specified only when creating an INDEPENDENT_CLUSTER independent cluster. The number of MASTER_ETCD nodes is 3-7, and it is recommended to have an odd number. The minimum configuration for MASTER_ETCD is 4C8G.", + }, + + "enhanced_security_service": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "To specify whether to enable cloud security service. Default is TRUE.", + }, + + "enhanced_monitor_service": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "To specify whether to enable cloud monitor service. Default is TRUE.", + }, + + "enhanced_automation_service": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Activate TencentCloud Automation Tools (TAT) service. If this parameter is not specified, the public image will default to enabling the Cloud Automation Assistant service, while other images will default to not enabling the Cloud Automation Assistant service.", + }, + + "password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + Description: "Password to access, should be set if `key_ids` not set.", + ValidateFunc: tccommon.ValidateAsConfigPassword, + }, + + "key_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "The key pair to use for the instance, it looks like skey-16jig7tx, it should be set if `password` not set.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "The security group to which the instance belongs. 
This parameter can be obtained by calling the sgId field in the return value of DescribeSecureGroups. If this parameter is not specified, the default security group will be bound.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "host_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "When reinstalling the system, you can specify the HostName of the instance to be modified (this parameter must be passed when the cluster is in HostName mode, and the rule name should be consistent with the HostName of the CVM instance creation interface except that uppercase characters are not supported).", + }, + + "desired_pod_numbers": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "When the node belongs to the podCIDR size customization mode, the maximum number of pods running on the node can be specified.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + + "extra_args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Custom parameters for cluster master component.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kube_api_server": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "Kube apiserver custom parameters. The parameter format is [\"k1=v1\", \"k1=v2\"].", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "kube_controller_manager": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "Kube controller manager custom parameters.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "kube_scheduler": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "kube scheduler custom parameters.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "etcd": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "etcd custom parameters. 
Only supports independent clusters.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + + "master_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Advanced Node Settings. commonly used to attach existing instances.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mount_target": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Mount target. Default is not mounting.", + }, + "docker_graph_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Docker graph path. Default is `/var/lib/docker`.", + }, + "user_script": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "User script encoded in base64, which will be executed after the k8s component runs. The user needs to ensure the script's reentrant and retry logic. The script and its generated log files can be viewed in the node path /data/ccs_userscript/. If the node needs to be initialized before joining the schedule, it can be used in conjunction with the `unschedulable` parameter. 
After the final initialization of the userScript is completed, add the command \"kubectl uncordon nodename --kubeconfig=/root/.kube/config\" to add the node to the schedule.", + }, + "unschedulable": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Set whether the joined nodes participate in scheduling, with a default value of 0, indicating participation in scheduling; Non 0 means not participating in scheduling.", + }, + "labels": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Node label list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of map.", + }, + "value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Value of map.", + }, + }, + }, + }, + "data_disk": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Configurations of data disk.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Types of disk. Valid value: `LOCAL_BASIC`, `LOCAL_SSD`, `CLOUD_BASIC`, `CLOUD_PREMIUM`, `CLOUD_SSD`, `CLOUD_HSSD`, `CLOUD_TSSD` and `CLOUD_BSSD`.", + }, + "file_system": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "File system, e.g. `ext3/ext4/xfs`.", + }, + "disk_size": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Volume of disk in GB. Default is `0`.", + }, + "auto_format_and_mount": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Indicate whether to auto format and mount or not. 
Default is `false`.", + }, + "mount_target": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Mount target.", + }, + "disk_partition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the device or partition to mount. NOTE: this argument doesn't support setting in node pool, or will leads to mount error.", + }, + }, + }, + }, + "extra_args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Custom parameter information related to the node. This is a white-list parameter.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kubelet": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Kubelet custom parameter. The parameter format is [\"k1=v1\", \"k1=v2\"].", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "desired_pod_number": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Indicate to set desired pod number in node. valid when the cluster is podCIDR.", + }, + "gpu_args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "GPU driver parameters.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mig_enable": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Whether to enable MIG.", + }, + "driver": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "GPU driver version. Format like: `{ version: String, name: String }`. `version`: Version of GPU driver or CUDA; `name`: Name of GPU driver or CUDA.", + ValidateFunc: tccommon.ValidateTkeGpuDriverVersion, + }, + "cuda": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "CUDA version. Format like: `{ version: String, name: String }`. 
`version`: Version of GPU driver or CUDA; `name`: Name of GPU driver or CUDA.", + ValidateFunc: tccommon.ValidateTkeGpuDriverVersion, + }, + "cudnn": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "cuDNN version. Format like: `{ version: String, name: String, doc_name: String, dev_name: String }`. `version`: cuDNN version; `name`: cuDNN name; `doc_name`: Doc name of cuDNN; `dev_name`: Dev name of cuDNN.", + ValidateFunc: tccommon.ValidateTkeGpuDriverVersion, + }, + "custom_driver": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Custom GPU driver. Format like: `{address: String}`. `address`: URL of custom GPU driver address.", + }, + }, + }, + }, + "taints": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Node taint.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Key of the taint.", + }, + "value": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Value of the taint.", + }, + "effect": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Effect of the taint.", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster_master_attachment.create")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + var ( + clusterId string + instanceId string + nodeRole string + ) + var ( + request = tkev20180525.NewScaleOutClusterMasterRequest() + response = tkev20180525.NewScaleOutClusterMasterResponse() + ) + + if v, ok := d.GetOk("cluster_id"); ok { + clusterId = v.(string) + } + if v, ok := 
d.GetOk("instance_id"); ok { + instanceId = v.(string) + } + if v, ok := d.GetOk("node_role"); ok { + nodeRole = v.(string) + } + + request.ClusterId = helper.String(clusterId) + + if err := resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostFillRequest0(ctx, request); err != nil { + return err + } + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ScaleOutClusterMasterWithContext(ctx, request) + if e != nil { + return resourceTencentCloudKubernetesClusterMasterAttachmentCreateRequestOnError0(ctx, request, e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s create kubernetes cluster master attachment failed, reason:%+v", logId, err) + return err + } + + _ = response + + if err := resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostHandleResponse0(ctx, response); err != nil { + return err + } + + d.SetId(strings.Join([]string{clusterId, instanceId, nodeRole}, tccommon.FILED_SP)) + + return resourceTencentCloudKubernetesClusterMasterAttachmentRead(d, meta) +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster_master_attachment.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 3 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + clusterId := idSplit[0] + instanceId := idSplit[1] + nodeRole 
:= idSplit[2] + + _ = d.Set("cluster_id", clusterId) + + _ = d.Set("instance_id", instanceId) + + _ = d.Set("node_role", nodeRole) + + respData, err := service.DescribeKubernetesClusterMasterAttachmentById(ctx, clusterId) + if err != nil { + return err + } + + if respData == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_cluster_master_attachment` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil + } + + respData1, err := service.DescribeKubernetesClusterMasterAttachmentById1(ctx, instanceId) + if err != nil { + return err + } + + if respData1 == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_cluster_master_attachment` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil + } + + var respData2 *tkev20180525.DescribeClusterInstancesResponseParams + reqErr2 := resource.Retry(900*time.Second, func() *resource.RetryError { + result, e := service.DescribeKubernetesClusterMasterAttachmentById2(ctx, clusterId, instanceId, nodeRole) + if e != nil { + return resourceTencentCloudKubernetesClusterMasterAttachmentReadRequestOnError2(ctx, result, e) + } + if err := resourceTencentCloudKubernetesClusterMasterAttachmentReadRequestOnSuccess2(ctx, result); err != nil { + return err + } + respData2 = result + return nil + }) + if reqErr2 != nil { + log.Printf("[CRITAL]%s read kubernetes cluster master attachment failed, reason:%+v", logId, reqErr2) + return reqErr2 + } + + if respData2 == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_cluster_master_attachment` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil + } + return nil +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster_master_attachment.delete")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := 
tccommon.GetLogId(tccommon.ContextNil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 3 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + clusterId := idSplit[0] + instanceId := idSplit[1] + nodeRole := idSplit[2] + + var ( + request = tkev20180525.NewScaleInClusterMasterRequest() + response = tkev20180525.NewScaleInClusterMasterResponse() + ) + + request.ClusterId = helper.String(clusterId) + + if err := resourceTencentCloudKubernetesClusterMasterAttachmentDeletePostFillRequest0(ctx, request); err != nil { + return err + } + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ScaleInClusterMasterWithContext(ctx, request) + if e != nil { + return resourceTencentCloudKubernetesClusterMasterAttachmentDeleteRequestOnError0(ctx, e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s delete kubernetes cluster master attachment failed, reason:%+v", logId, err) + return err + } + + _ = response + _ = instanceId + _ = nodeRole + return nil +} diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md new file mode 100644 index 0000000000..f8d09cda24 --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md @@ -0,0 +1,30 @@ +Provides a resource to create a tke kubernetes_cluster_master_attachment + +Example Usage + +```hcl +resource "tencentcloud_kubernetes_cluster_master_attachment" "kubernetes_cluster_master_attachment" { + extra_args = { + } + master_config = { + labels = { + } + 
data_disk = { + } + extra_args = { + } + gpu_args = { + } + taints = { + } + } +} +``` + +Import + +tke kubernetes_cluster_master_attachment can be imported using the id (`clusterId#instanceId#nodeRole`), e.g. + +``` +terraform import tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment cls-xxxxxxxx#ins-xxxxxxxx#MASTER_ETCD +``` diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go new file mode 100644 index 0000000000..03e09149e1 --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go @@ -0,0 +1,385 @@ +package tke + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors" + + tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" + svccvm "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/cvm" +) + +func resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostFillRequest0(ctx context.Context, req *tkev20180525.ScaleOutClusterMasterRequest) error { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return fmt.Errorf("resource data can not be nil") + } + + existedInstancesForNode := tkev20180525.ExistedInstancesForNode{} + existedInstancesPara := tkev20180525.ExistedInstancesPara{} + enhancedService := tkev20180525.EnhancedService{} + loginSettings := tkev20180525.LoginSettings{} + if v, ok := d.GetOk("instance_id"); ok { + existedInstancesPara.InstanceIds = helper.Strings([]string{v.(string)}) + } + + if v, ok := 
d.GetOk("node_role"); ok { + existedInstancesForNode.NodeRole = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("enhanced_security_service"); ok { + enhancedService.SecurityService = &tkev20180525.RunSecurityServiceEnabled{ + Enabled: helper.Bool(v.(bool)), + } + } + + if v, ok := d.GetOkExists("enhanced_monitor_service"); ok { + enhancedService.MonitorService = &tkev20180525.RunMonitorServiceEnabled{ + Enabled: helper.Bool(v.(bool)), + } + } + + if v, ok := d.GetOkExists("enhanced_automation_service"); ok { + enhancedService.AutomationService = &tkev20180525.RunAutomationServiceEnabled{ + Enabled: helper.Bool(v.(bool)), + } + } + + if v, ok := d.GetOk("password"); ok { + loginSettings.Password = helper.String(v.(string)) + } + + if v, ok := d.GetOk("key_ids"); ok && len(v.([]interface{})) > 0 { + keyIds := v.([]interface{}) + loginSettings.KeyIds = make([]*string, 0, len(keyIds)) + for i := range keyIds { + keyId := keyIds[i].(string) + loginSettings.KeyIds = append(loginSettings.KeyIds, &keyId) + } + } + + if v, ok := d.GetOk("security_group_ids"); ok && len(v.([]interface{})) > 0 { + sgIds := v.([]interface{}) + existedInstancesPara.SecurityGroupIds = make([]*string, 0, len(sgIds)) + for i := range sgIds { + sgId := sgIds[i].(string) + existedInstancesPara.SecurityGroupIds = append(existedInstancesPara.SecurityGroupIds, &sgId) + } + } + + if v, ok := d.GetOk("host_name"); ok { + existedInstancesPara.HostName = helper.String(v.(string)) + } + + if v, ok := d.GetOk("desired_pod_numbers"); ok && len(v.([]interface{})) > 0 { + desiredPodNumbers := v.([]interface{}) + existedInstancesForNode.DesiredPodNumbers = make([]*int64, 0, len(desiredPodNumbers)) + for i := range desiredPodNumbers { + // schema.TypeInt elements are stored as int; a direct .(int64) assertion panics at runtime + desiredPodNumber := int64(desiredPodNumbers[i].(int)) + existedInstancesForNode.DesiredPodNumbers = append(existedInstancesForNode.DesiredPodNumbers, &desiredPodNumber) + } + } + + if v, ok := d.GetOk("master_config"); ok && len(v.([]interface{})) > 0 { + for _, item := 
range v.([]interface{}) { + instanceAdvancedSettingsOverridesMap := item.(map[string]interface{}) + instanceAdvancedSettings := tkev20180525.InstanceAdvancedSettings{} + if v, ok := instanceAdvancedSettingsOverridesMap["mount_target"]; ok { + instanceAdvancedSettings.MountTarget = helper.String(v.(string)) + } + + if v, ok := instanceAdvancedSettingsOverridesMap["docker_graph_path"]; ok { + instanceAdvancedSettings.DockerGraphPath = helper.String(v.(string)) + } + + if v, ok := instanceAdvancedSettingsOverridesMap["user_script"]; ok { + instanceAdvancedSettings.UserScript = helper.String(v.(string)) + } + + if v, ok := instanceAdvancedSettingsOverridesMap["unschedulable"]; ok { + instanceAdvancedSettings.Unschedulable = helper.IntInt64(v.(int)) + } + + if v, ok := instanceAdvancedSettingsOverridesMap["labels"]; ok && len(v.([]interface{})) > 0 { + for _, item := range v.([]interface{}) { + labelsMap := item.(map[string]interface{}) + labels := tkev20180525.Label{} + if v, ok := labelsMap["name"]; ok { + labels.Name = helper.String(v.(string)) + } + + if v, ok := labelsMap["value"]; ok { + labels.Value = helper.String(v.(string)) + } + + instanceAdvancedSettings.Labels = append(instanceAdvancedSettings.Labels, &labels) + } + } + + if v, ok := instanceAdvancedSettingsOverridesMap["data_disk"]; ok && len(v.([]interface{})) > 0 { + for _, item := range v.([]interface{}) { + dataDisksMap := item.(map[string]interface{}) + dataDisk := tkev20180525.DataDisk{} + if v, ok := dataDisksMap["disk_type"]; ok { + dataDisk.DiskType = helper.String(v.(string)) + } + + if v, ok := dataDisksMap["file_system"]; ok { + dataDisk.FileSystem = helper.String(v.(string)) + } + + if v, ok := dataDisksMap["disk_size"]; ok { + dataDisk.DiskSize = helper.IntInt64(v.(int)) + } + + if v, ok := dataDisksMap["auto_format_and_mount"]; ok { + dataDisk.AutoFormatAndMount = helper.Bool(v.(bool)) + } + + if v, ok := dataDisksMap["mount_target"]; ok { + dataDisk.MountTarget = helper.String(v.(string)) + 
} + + if v, ok := dataDisksMap["disk_partition"]; ok { + dataDisk.DiskPartition = helper.String(v.(string)) + } + + instanceAdvancedSettings.DataDisks = append(instanceAdvancedSettings.DataDisks, &dataDisk) + } + } + + if v, ok := instanceAdvancedSettingsOverridesMap["extra_args"]; ok && len(v.([]interface{})) > 0 { + for _, item := range v.([]interface{}) { + extraArgsMap := item.(map[string]interface{}) + args := tkev20180525.InstanceExtraArgs{} + if v, ok := extraArgsMap["kubelet"]; ok { + args.Kubelet = helper.InterfacesStringsPoint(v.([]interface{})) + } + + instanceAdvancedSettings.ExtraArgs = &args + } + } + + // desired_pod_number is declared inside master_config, not at the top level; + // reading it via d.GetOk silently dropped the value + if v, ok := instanceAdvancedSettingsOverridesMap["desired_pod_number"]; ok { + instanceAdvancedSettings.DesiredPodNumber = helper.IntInt64(v.(int)) + } + + if v, ok := instanceAdvancedSettingsOverridesMap["gpu_args"]; ok && len(v.([]interface{})) > 0 { + gpuArgs := v.([]interface{})[0].(map[string]interface{}) + + var ( + migEnable = gpuArgs["mig_enable"].(bool) + driver = gpuArgs["driver"].(map[string]interface{}) + cuda = gpuArgs["cuda"].(map[string]interface{}) + cudnn = gpuArgs["cudnn"].(map[string]interface{}) + customDriver = gpuArgs["custom_driver"].(map[string]interface{}) + ) + + tkeGpuArgs := tkev20180525.GPUArgs{} + tkeGpuArgs.MIGEnable = &migEnable + if len(driver) > 0 { + tkeGpuArgs.Driver = &tkev20180525.DriverVersion{ + Version: helper.String(driver["version"].(string)), + Name: helper.String(driver["name"].(string)), + } + } + + if len(cuda) > 0 { + tkeGpuArgs.CUDA = &tkev20180525.DriverVersion{ + Version: helper.String(cuda["version"].(string)), + Name: helper.String(cuda["name"].(string)), + } + } + + if len(cudnn) > 0 { + tkeGpuArgs.CUDNN = &tkev20180525.CUDNN{ + Version: helper.String(cudnn["version"].(string)), + Name: helper.String(cudnn["name"].(string)), + } + + if cudnn["doc_name"] != nil { + tkeGpuArgs.CUDNN.DocName = helper.String(cudnn["doc_name"].(string)) + } + + if cudnn["dev_name"] != nil { + tkeGpuArgs.CUDNN.DevName = 
helper.String(cudnn["dev_name"].(string)) + } + } + + if len(customDriver) > 0 { + tkeGpuArgs.CustomDriver = &tkev20180525.CustomDriver{ + Address: helper.String(customDriver["address"].(string)), + } + } + + instanceAdvancedSettings.GPUArgs = &tkeGpuArgs + } + + if v, ok := instanceAdvancedSettingsOverridesMap["taints"]; ok && len(v.([]interface{})) > 0 { + for _, item := range v.([]interface{}) { + taintsMap := item.(map[string]interface{}) + taint := tkev20180525.Taint{} + if v, ok := taintsMap["key"]; ok { + taint.Key = helper.String(v.(string)) + } + + if v, ok := taintsMap["value"]; ok { + taint.Value = helper.String(v.(string)) + } + + if v, ok := taintsMap["effect"]; ok { + taint.Effect = helper.String(v.(string)) + } + + instanceAdvancedSettings.Taints = append(instanceAdvancedSettings.Taints, &taint) + } + } + + existedInstancesForNode.InstanceAdvancedSettingsOverride = &instanceAdvancedSettings + } + + existedInstancesForNode.ExistedInstancesPara = &existedInstancesPara + req.ExistedInstancesForNode = []*tkev20180525.ExistedInstancesForNode{&existedInstancesForNode} + } + + return nil +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentCreateRequestOnError0(ctx context.Context, req *tkev20180525.ScaleOutClusterMasterRequest, e error) *resource.RetryError { + return tccommon.RetryError(e, tccommon.InternalError) +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostHandleResponse0(ctx context.Context, resp *tkev20180525.ScaleOutClusterMasterResponse) error { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return fmt.Errorf("resource data can not be nil") + } + + var ( + meta = tccommon.ProviderMetaFromContext(ctx) + tkeService = TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + cvmService = svccvm.NewCvmService(meta.(tccommon.ProviderMeta).GetAPIV3Conn()) + clusterId string + instanceId string + nodeRole string + ) + + if v, ok := d.GetOk("cluster_id"); ok { + clusterId = v.(string) + } + + if 
v, ok := d.GetOk("instance_id"); ok { + instanceId = v.(string) + } + + if v, ok := d.GetOk("node_role"); ok { + nodeRole = v.(string) + } + + // wait for cvm status + if err := resource.Retry(7*tccommon.ReadRetryTimeout, func() *resource.RetryError { + instance, errRet := cvmService.DescribeInstanceById(ctx, instanceId) + if errRet != nil { + return tccommon.RetryError(errRet, tccommon.InternalError) + } + + if instance != nil && *instance.InstanceState == svccvm.CVM_STATUS_RUNNING { + return nil + } + + return resource.RetryableError(fmt.Errorf("cvm instance %s status is %s, retry...", instanceId, *instance.InstanceState)) + }); err != nil { + return err + } + + // wait for tke init + return resource.Retry(7*tccommon.ReadRetryTimeout, func() *resource.RetryError { + resp, err := tkeService.DescribeKubernetesClusterMasterAttachmentById2(ctx, clusterId, instanceId, nodeRole) + if err != nil { + return tccommon.RetryError(err, tccommon.InternalError) + } + + has := false + if len(resp.InstanceSet) == 1 { + has = true + } + + if !has { + return resource.NonRetryableError(fmt.Errorf("cvm instance %s not exist in tke instance list", instanceId)) + } + + return nil + }) +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentReadRequestOnError2(ctx context.Context, resp *tkev20180525.DescribeClusterInstancesResponseParams, e error) *resource.RetryError { + return tccommon.RetryError(e, tccommon.InternalError) +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentReadRequestOnSuccess2(ctx context.Context, resp *tkev20180525.DescribeClusterInstancesResponseParams) *resource.RetryError { + if resp == nil || len(resp.InstanceSet) != 1 { + return resource.NonRetryableError(fmt.Errorf("query cvm instance error.")) + } + + instanceDetial := resp.InstanceSet[0] + insId := *instanceDetial.InstanceId + insState := *instanceDetial.InstanceState + + if insState == "failed" { + return resource.NonRetryableError(fmt.Errorf("cvm instance %s attach to cluster fail, 
reason: %s", insId, insState)) + } + + if insState != "running" { + return resource.RetryableError(fmt.Errorf("cvm instance %s in tke status is %s, retry...", insId, insState)) + } + + return nil +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentDeletePostFillRequest0(ctx context.Context, req *tkev20180525.ScaleInClusterMasterRequest) error { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return fmt.Errorf("resource data can not be nil") + } + + var ( + instanceId string + nodeRole string + ) + + if v, ok := d.GetOk("instance_id"); ok { + instanceId = v.(string) + } + + if v, ok := d.GetOk("node_role"); ok { + nodeRole = v.(string) + } + + req.ScaleInMasters = []*tkev20180525.ScaleInMaster{ + &tkev20180525.ScaleInMaster{ + InstanceId: helper.String(instanceId), + NodeRole: helper.String(nodeRole), + }, + } + + return nil +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentDeleteRequestOnError0(ctx context.Context, e error) *resource.RetryError { + if sdkErr, ok := e.(*errors.TencentCloudSDKError); ok { + if sdkErr.GetCode() == "ResourceNotFound" { + return nil + } + + if sdkErr.GetCode() == "InvalidParameter" && strings.Contains(sdkErr.GetMessage(), `is not exist`) { + return nil + } + } + + return tccommon.RetryError(e, tccommon.InternalError) +} diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go new file mode 100644 index 0000000000..778f6c1e59 --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go @@ -0,0 +1,47 @@ +package tke_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudKubernetesClusterMasterAttachmentResource_basic(t *testing.T) { + t.Parallel() + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{{ + Config: testAccKubernetesClusterMasterAttachment, + Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment", "id")), + }, { + ResourceName: "tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment", + ImportState: true, + ImportStateVerify: true, + }}, + }) +} + +const testAccKubernetesClusterMasterAttachment = ` + +resource "tencentcloud_kubernetes_cluster_master_attachment" "kubernetes_cluster_master_attachment" { + extra_args = { + } + master_config = { + labels = { + } + data_disks = { + } + extra_args = { + } + gpu_args = { + } + taints = { + } + } +} +` diff --git a/tencentcloud/services/tke/service_tencentcloud_tke.go b/tencentcloud/services/tke/service_tencentcloud_tke.go index 5482477979..1574bf84a3 100644 --- a/tencentcloud/services/tke/service_tencentcloud_tke.go +++ b/tencentcloud/services/tke/service_tencentcloud_tke.go @@ -3651,3 +3651,88 @@ func (me *TkeService) DescribeKubernetesLogConfigById(ctx context.Context, clust ret = response.Response return } + +func (me *TkeService) DescribeKubernetesClusterMasterAttachmentById(ctx context.Context, clusterId string) (ret *tke.Cluster, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := tke.NewDescribeClustersRequest() + request.ClusterIds = []*string{helper.String(clusterId)} + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseTkeV20180525Client().DescribeClusters(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body 
[%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if len(response.Response.Clusters) < 1 { + return + } + + ret = response.Response.Clusters[0] + return +} + +func (me *TkeService) DescribeKubernetesClusterMasterAttachmentById1(ctx context.Context, instanceId string) (ret *cvm.Instance, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := cvm.NewDescribeInstancesRequest() + request.InstanceIds = []*string{helper.String(instanceId)} + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseCvmV20170312Client().DescribeInstances(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if len(response.Response.InstanceSet) < 1 { + return + } + + ret = response.Response.InstanceSet[0] + return +} + +func (me *TkeService) DescribeKubernetesClusterMasterAttachmentById2(ctx context.Context, clusterId string, instanceId string, nodeRole string) (ret *tke.DescribeClusterInstancesResponseParams, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := tke.NewDescribeClusterInstancesRequest() + request.ClusterId = helper.String(clusterId) + request.InstanceIds = []*string{helper.String(instanceId)} + request.InstanceRole = helper.String(nodeRole) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseTkeV20180525Client().DescribeClusterInstances(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request 
body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + ret = response.Response + return +} diff --git a/website/docs/r/kubernetes_cluster_master_attachment.html.markdown b/website/docs/r/kubernetes_cluster_master_attachment.html.markdown new file mode 100644 index 0000000000..4220d6d2da --- /dev/null +++ b/website/docs/r/kubernetes_cluster_master_attachment.html.markdown @@ -0,0 +1,120 @@ +--- +subcategory: "Tencent Kubernetes Engine(TKE)" +layout: "tencentcloud" +page_title: "TencentCloud: tencentcloud_kubernetes_cluster_master_attachment" +sidebar_current: "docs-tencentcloud-resource-kubernetes_cluster_master_attachment" +description: |- + Provides a resource to create a tke kubernetes_cluster_master_attachment +--- + +# tencentcloud_kubernetes_cluster_master_attachment + +Provides a resource to create a tke kubernetes_cluster_master_attachment + +## Example Usage + +```hcl +resource "tencentcloud_kubernetes_cluster_master_attachment" "kubernetes_cluster_master_attachment" { + extra_args = { + } + master_config = { + labels = { + } + data_disks = { + } + extra_args = { + } + gpu_args = { + } + taints = { + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `cluster_id` - (Required, String, ForceNew) ID of the cluster. +* `instance_id` - (Required, String, ForceNew) ID of the CVM instance, this cvm will reinstall the system. +* `node_role` - (Required, String, ForceNew) Node role, values: MASTER_ETCD, WORKER. MASTER_ETCD needs to be specified only when creating an INDEPENDENT_CLUSTER independent cluster. The number of MASTER_ETCD nodes is 3-7, and it is recommended to have an odd number. The minimum configuration for MASTER_ETCD is 4C8G. +* `desired_pod_numbers` - (Optional, List: [`Int`], ForceNew) When the node belongs to the podCIDR size customization mode, the maximum number of pods running on the node can be specified. 
+* `enhanced_automation_service` - (Optional, Bool, ForceNew) Activate TencentCloud Automation Tools (TAT) service. If this parameter is not specified, the public image will default to enabling the Cloud Automation Assistant service, while other images will default to not enabling the Cloud Automation Assistant service. +* `enhanced_monitor_service` - (Optional, Bool, ForceNew) To specify whether to enable cloud monitor service. Default is TRUE. +* `enhanced_security_service` - (Optional, Bool, ForceNew) To specify whether to enable cloud security service. Default is TRUE. +* `extra_args` - (Optional, List, ForceNew) Custom parameters for cluster master component. +* `host_name` - (Optional, String, ForceNew) When reinstalling the system, you can specify the HostName of the instance to be modified (this parameter must be passed when the cluster is in HostName mode, and the rule name should be consistent with the HostName of the CVM instance creation interface except that uppercase characters are not supported). +* `key_ids` - (Optional, List: [`String`], ForceNew) The key pair to use for the instance, it looks like skey-16jig7tx, it should be set if `password` not set. +* `master_config` - (Optional, List, ForceNew) Advanced Node Settings. Commonly used to attach existing instances. +* `password` - (Optional, String, ForceNew) Password to access, should be set if `key_ids` not set. +* `security_group_ids` - (Optional, List: [`String`], ForceNew) The security group to which the instance belongs. This parameter can be obtained by calling the sgId field in the return value of DescribeSecureGroups. If this parameter is not specified, the default security group will be bound. + +The `data_disk` object of `master_config` supports the following: + +* `auto_format_and_mount` - (Optional, Bool, ForceNew) Indicate whether to auto format and mount or not. Default is `false`. +* `disk_partition` - (Optional, String, ForceNew) The name of the device or partition to mount. 
NOTE: this argument doesn't support setting in node pool, or it will lead to mount errors. +* `disk_size` - (Optional, Int, ForceNew) Volume of disk in GB. Default is `0`. +* `disk_type` - (Optional, String, ForceNew) Types of disk. Valid value: `LOCAL_BASIC`, `LOCAL_SSD`, `CLOUD_BASIC`, `CLOUD_PREMIUM`, `CLOUD_SSD`, `CLOUD_HSSD`, `CLOUD_TSSD` and `CLOUD_BSSD`. +* `file_system` - (Optional, String, ForceNew) File system, e.g. `ext3/ext4/xfs`. +* `mount_target` - (Optional, String, ForceNew) Mount target. + +The `extra_args` object of `master_config` supports the following: + +* `kubelet` - (Optional, List, ForceNew) Kubelet custom parameter. The parameter format is ["k1=v1", "k1=v2"]. + +The `extra_args` object supports the following: + +* `etcd` - (Optional, Set, ForceNew) etcd custom parameters. Only supports independent clusters. +* `kube_api_server` - (Optional, Set, ForceNew) Kube apiserver custom parameters. The parameter format is ["k1=v1", "k1=v2"]. +* `kube_controller_manager` - (Optional, Set, ForceNew) Kube controller manager custom parameters. +* `kube_scheduler` - (Optional, Set, ForceNew) kube scheduler custom parameters. + +The `gpu_args` object of `master_config` supports the following: + +* `cuda` - (Optional, Map, ForceNew) CUDA version. Format like: `{ version: String, name: String }`. `version`: Version of GPU driver or CUDA; `name`: Name of GPU driver or CUDA. +* `cudnn` - (Optional, Map, ForceNew) cuDNN version. Format like: `{ version: String, name: String, doc_name: String, dev_name: String }`. `version`: cuDNN version; `name`: cuDNN name; `doc_name`: Doc name of cuDNN; `dev_name`: Dev name of cuDNN. +* `custom_driver` - (Optional, Map, ForceNew) Custom GPU driver. Format like: `{address: String}`. `address`: URL of custom GPU driver address. +* `driver` - (Optional, Map, ForceNew) GPU driver version. Format like: `{ version: String, name: String }`. `version`: Version of GPU driver or CUDA; `name`: Name of GPU driver or CUDA. 
+* `mig_enable` - (Optional, Bool, ForceNew) Whether to enable MIG. + +The `labels` object of `master_config` supports the following: + +* `name` - (Required, String, ForceNew) Name of map. +* `value` - (Required, String, ForceNew) Value of map. + +The `master_config` object supports the following: + +* `data_disk` - (Optional, List, ForceNew) Configurations of data disk. +* `desired_pod_number` - (Optional, Int, ForceNew) Indicate to set desired pod number in node. valid when the cluster is podCIDR. +* `docker_graph_path` - (Optional, String, ForceNew) Docker graph path. Default is `/var/lib/docker`. +* `extra_args` - (Optional, List, ForceNew) Custom parameter information related to the node. This is a white-list parameter. +* `gpu_args` - (Optional, List, ForceNew) GPU driver parameters. +* `labels` - (Optional, List, ForceNew) Node label list. +* `mount_target` - (Optional, String, ForceNew) Mount target. Default is not mounting. +* `taints` - (Optional, List, ForceNew) Node taint. +* `unschedulable` - (Optional, Int, ForceNew) Set whether the joined nodes participate in scheduling, with a default value of 0, indicating participation in scheduling; Non 0 means not participating in scheduling. +* `user_script` - (Optional, String, ForceNew) User script encoded in base64, which will be executed after the k8s component runs. The user needs to ensure the script's reentrant and retry logic. The script and its generated log files can be viewed in the node path /data/ccs_userscript/. If the node needs to be initialized before joining the schedule, it can be used in conjunction with the `unschedulable` parameter. After the final initialization of the userScript is completed, add the command "kubectl uncordon nodename --kubeconfig=/root/.kube/config" to add the node to the schedule. + +The `taints` object of `master_config` supports the following: + +* `effect` - (Optional, String, ForceNew) Effect of the taint. +* `key` - (Optional, String, ForceNew) Key of the taint. 
+* `value` - (Optional, String, ForceNew) Value of the taint. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - ID of the resource. + + + +## Import + +tke kubernetes_cluster_master_attachment can be imported using the id, e.g. + +``` +terraform import tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment kubernetes_cluster_master_attachment_id +``` + diff --git a/website/tencentcloud.erb b/website/tencentcloud.erb index 2341463db9..75dae23400 100644 --- a/website/tencentcloud.erb +++ b/website/tencentcloud.erb @@ -4788,6 +4788,9 @@