From d1891a0987569cd5fd59b291ec15288a22726f41 Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Fri, 1 Nov 2024 15:34:43 +0800 Subject: [PATCH 1/7] add --- tencentcloud/provider.go | 1 + tencentcloud/provider.md | 1 + ...tc_kubernetes_cluster_master_attachment.go | 547 ++++++++++++++++++ ...tc_kubernetes_cluster_master_attachment.md | 30 + ...tes_cluster_master_attachment_extension.go | 385 ++++++++++++ ...bernetes_cluster_master_attachment_test.go | 47 ++ .../services/tke/service_tencentcloud_tke.go | 85 +++ ...es_cluster_master_attachment.html.markdown | 120 ++++ website/tencentcloud.erb | 3 + 9 files changed, 1219 insertions(+) create mode 100644 tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go create mode 100644 tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md create mode 100644 tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go create mode 100644 tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go create mode 100644 website/docs/r/kubernetes_cluster_master_attachment.html.markdown diff --git a/tencentcloud/provider.go b/tencentcloud/provider.go index 8071216eb0..d3648a6233 100644 --- a/tencentcloud/provider.go +++ b/tencentcloud/provider.go @@ -1275,6 +1275,7 @@ func Provider() *schema.Provider { "tencentcloud_kubernetes_backup_storage_location": tke.ResourceTencentCloudKubernetesBackupStorageLocation(), "tencentcloud_kubernetes_serverless_node_pool": tke.ResourceTencentCloudKubernetesServerlessNodePool(), "tencentcloud_kubernetes_encryption_protection": tke.ResourceTencentCloudKubernetesEncryptionProtection(), + "tencentcloud_kubernetes_cluster_master_attachment": tke.ResourceTencentCloudKubernetesClusterMasterAttachment(), "tencentcloud_mysql_backup_policy": cdb.ResourceTencentCloudMysqlBackupPolicy(), "tencentcloud_mysql_account": cdb.ResourceTencentCloudMysqlAccount(), "tencentcloud_mysql_account_privilege": 
cdb.ResourceTencentCloudMysqlAccountPrivilege(), diff --git a/tencentcloud/provider.md b/tencentcloud/provider.md index 0adf55bdaa..c99c557196 100644 --- a/tencentcloud/provider.md +++ b/tencentcloud/provider.md @@ -686,6 +686,7 @@ Tencent Kubernetes Engine(TKE) tencentcloud_kubernetes_native_node_pool tencentcloud_kubernetes_health_check_policy tencentcloud_kubernetes_log_config + tencentcloud_kubernetes_cluster_master_attachment TDMQ for Pulsar(tpulsar) Data Source diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go new file mode 100644 index 0000000000..8e9a388c8f --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go @@ -0,0 +1,547 @@ +// Code generated by iacg; DO NOT EDIT. +package tke + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func ResourceTencentCloudKubernetesClusterMasterAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudKubernetesClusterMasterAttachmentCreate, + Read: resourceTencentCloudKubernetesClusterMasterAttachmentRead, + Delete: resourceTencentCloudKubernetesClusterMasterAttachmentDelete, + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(900000 * time.Millisecond), + }, + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "ID of the cluster.", + }, + + "instance_id": { + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + Description: "ID of the CVM instance, this cvm will reinstall the system.", + }, + + "node_role": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Node role, values: MASTER_ETCD, WORKER. MASTER_ETCD needs to be specified only when creating an INDEPENDENT_CLUSTER independent cluster. The number of MASTER_ETCD nodes is 3-7, and it is recommended to have an odd number. The minimum configuration for MASTER_ETCD is 4C8G.", + }, + + "enhanced_security_service": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "To specify whether to enable cloud security service. Default is TRUE.", + }, + + "enhanced_monitor_service": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "To specify whether to enable cloud monitor service. Default is TRUE.", + }, + + "enhanced_automation_service": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Activate TencentCloud Automation Tools (TAT) service. If this parameter is not specified, the public image will default to enabling the Cloud Automation Assistant service, while other images will default to not enabling the Cloud Automation Assistant service.", + }, + + "password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + Description: "Password to access, should be set if `key_ids` not set.", + ValidateFunc: tccommon.ValidateAsConfigPassword, + }, + + "key_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "The key pair to use for the instance, it looks like skey-16jig7tx, it should be set if `password` not set.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "The security group to which the instance belongs. 
This parameter can be obtained by calling the sgId field in the return value of DescribeSecureGroups. If this parameter is not specified, the default security group will be bound.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "host_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "When reinstalling the system, you can specify the HostName of the instance to be modified (this parameter must be passed when the cluster is in HostName mode, and the rule name should be consistent with the HostName of the CVM instance creation interface except that uppercase characters are not supported).", + }, + + "desired_pod_numbers": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "When the node belongs to the podCIDR size customization mode, the maximum number of pods running on the node can be specified.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + + "extra_args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Custom parameters for cluster master component.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kube_api_server": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "Kube apiserver custom parameters. The parameter format is [\"k1=v1\", \"k1=v2\"].", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "kube_controller_manager": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "Kube controller manager custom parameters.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "kube_scheduler": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "kube scheduler custom parameters.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "etcd": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "etcd custom parameters. 
Only supports independent clusters.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + + "master_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Advanced Node Settings. commonly used to attach existing instances.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mount_target": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Mount target. Default is not mounting.", + }, + "docker_graph_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Docker graph path. Default is `/var/lib/docker`.", + }, + "user_script": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "User script encoded in base64, which will be executed after the k8s component runs. The user needs to ensure the script's reentrant and retry logic. The script and its generated log files can be viewed in the node path /data/ccs_userscript/. If the node needs to be initialized before joining the schedule, it can be used in conjunction with the `unschedulable` parameter. 
After the final initialization of the userScript is completed, add the command \"kubectl uncordon nodename --kubeconfig=/root/.kube/config\" to add the node to the schedule.", + }, + "unschedulable": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Set whether the joined nodes participate in scheduling, with a default value of 0, indicating participation in scheduling; Non 0 means not participating in scheduling.", + }, + "labels": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Node label list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of map.", + }, + "value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Value of map.", + }, + }, + }, + }, + "data_disk": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Configurations of data disk.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Types of disk. Valid value: `LOCAL_BASIC`, `LOCAL_SSD`, `CLOUD_BASIC`, `CLOUD_PREMIUM`, `CLOUD_SSD`, `CLOUD_HSSD`, `CLOUD_TSSD` and `CLOUD_BSSD`.", + }, + "file_system": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "File system, e.g. `ext3/ext4/xfs`.", + }, + "disk_size": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Volume of disk in GB. Default is `0`.", + }, + "auto_format_and_mount": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Indicate whether to auto format and mount or not. 
Default is `false`.", + }, + "mount_target": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Mount target.", + }, + "disk_partition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the device or partition to mount. NOTE: this argument doesn't support setting in node pool, or will leads to mount error.", + }, + }, + }, + }, + "extra_args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Custom parameter information related to the node. This is a white-list parameter.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kubelet": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Kubelet custom parameter. The parameter format is [\"k1=v1\", \"k1=v2\"].", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "desired_pod_number": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Indicate to set desired pod number in node. valid when the cluster is podCIDR.", + }, + "gpu_args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "GPU driver parameters.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mig_enable": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Whether to enable MIG.", + }, + "driver": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "GPU driver version. Format like: `{ version: String, name: String }`. `version`: Version of GPU driver or CUDA; `name`: Name of GPU driver or CUDA.", + ValidateFunc: tccommon.ValidateTkeGpuDriverVersion, + }, + "cuda": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "CUDA version. Format like: `{ version: String, name: String }`. 
`version`: Version of GPU driver or CUDA; `name`: Name of GPU driver or CUDA.", + ValidateFunc: tccommon.ValidateTkeGpuDriverVersion, + }, + "cudnn": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "cuDNN version. Format like: `{ version: String, name: String, doc_name: String, dev_name: String }`. `version`: cuDNN version; `name`: cuDNN name; `doc_name`: Doc name of cuDNN; `dev_name`: Dev name of cuDNN.", + ValidateFunc: tccommon.ValidateTkeGpuDriverVersion, + }, + "custom_driver": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Custom GPU driver. Format like: `{address: String}`. `address`: URL of custom GPU driver address.", + }, + }, + }, + }, + "taints": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Node taint.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Key of the taint.", + }, + "value": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Value of the taint.", + }, + "effect": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Effect of the taint.", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster_master_attachment.create")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + var ( + clusterId string + instanceId string + nodeRole string + ) + var ( + request = tkev20180525.NewScaleOutClusterMasterRequest() + response = tkev20180525.NewScaleOutClusterMasterResponse() + ) + + if v, ok := d.GetOk("cluster_id"); ok { + clusterId = v.(string) + } + if v, ok := 
d.GetOk("instance_id"); ok { + instanceId = v.(string) + } + if v, ok := d.GetOk("node_role"); ok { + nodeRole = v.(string) + } + + request.ClusterId = helper.String(clusterId) + + if err := resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostFillRequest0(ctx, request); err != nil { + return err + } + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ScaleOutClusterMasterWithContext(ctx, request) + if e != nil { + return resourceTencentCloudKubernetesClusterMasterAttachmentCreateRequestOnError0(ctx, request, e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s create kubernetes cluster master attachment failed, reason:%+v", logId, err) + return err + } + + _ = response + + if err := resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostHandleResponse0(ctx, response); err != nil { + return err + } + + d.SetId(strings.Join([]string{clusterId, instanceId, nodeRole}, tccommon.FILED_SP)) + + return resourceTencentCloudKubernetesClusterMasterAttachmentRead(d, meta) +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster_master_attachment.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 3 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + clusterId := idSplit[0] + instanceId := idSplit[1] + nodeRole 
:= idSplit[2] + + _ = d.Set("cluster_id", clusterId) + + _ = d.Set("instance_id", instanceId) + + _ = d.Set("node_role", nodeRole) + + respData, err := service.DescribeKubernetesClusterMasterAttachmentById(ctx, clusterId) + if err != nil { + return err + } + + if respData == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_cluster_master_attachment` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil + } + + respData1, err := service.DescribeKubernetesClusterMasterAttachmentById1(ctx, instanceId) + if err != nil { + return err + } + + if respData1 == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_cluster_master_attachment` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil + } + + var respData2 *tkev20180525.DescribeClusterInstancesResponseParams + reqErr2 := resource.Retry(900*time.Second, func() *resource.RetryError { + result, e := service.DescribeKubernetesClusterMasterAttachmentById2(ctx, clusterId, instanceId, nodeRole) + if e != nil { + return resourceTencentCloudKubernetesClusterMasterAttachmentReadRequestOnError2(ctx, result, e) + } + if err := resourceTencentCloudKubernetesClusterMasterAttachmentReadRequestOnSuccess2(ctx, result); err != nil { + return err + } + respData2 = result + return nil + }) + if reqErr2 != nil { + log.Printf("[CRITAL]%s read kubernetes cluster master attachment failed, reason:%+v", logId, reqErr2) + return reqErr2 + } + + if respData2 == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_cluster_master_attachment` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil + } + return nil +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster_master_attachment.delete")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := 
tccommon.GetLogId(tccommon.ContextNil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 3 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + clusterId := idSplit[0] + instanceId := idSplit[1] + nodeRole := idSplit[2] + + var ( + request = tkev20180525.NewScaleInClusterMasterRequest() + response = tkev20180525.NewScaleInClusterMasterResponse() + ) + + request.ClusterId = helper.String(clusterId) + + if err := resourceTencentCloudKubernetesClusterMasterAttachmentDeletePostFillRequest0(ctx, request); err != nil { + return err + } + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ScaleInClusterMasterWithContext(ctx, request) + if e != nil { + return resourceTencentCloudKubernetesClusterMasterAttachmentDeleteRequestOnError0(ctx, e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s delete kubernetes cluster master attachment failed, reason:%+v", logId, err) + return err + } + + _ = response + _ = instanceId + _ = nodeRole + return nil +} diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md new file mode 100644 index 0000000000..f8d09cda24 --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md @@ -0,0 +1,30 @@ +Provides a resource to create a tke kubernetes_cluster_master_attachment + +Example Usage + +```hcl +resource "tencentcloud_kubernetes_cluster_master_attachment" "kubernetes_cluster_master_attachment" { + extra_args = { + } + master_config = { + labels = { + } + 
data_disk = { + } + extra_args = { + } + gpu_args = { + } + taints = { + } + } +} +``` + +Import + +tke kubernetes_cluster_master_attachment can be imported using the id, e.g. + +``` +terraform import tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment kubernetes_cluster_master_attachment_id +``` diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go new file mode 100644 index 0000000000..03e09149e1 --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go @@ -0,0 +1,385 @@ +package tke + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors" + + tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" + svccvm "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/cvm" +) + +func resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostFillRequest0(ctx context.Context, req *tkev20180525.ScaleOutClusterMasterRequest) error { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return fmt.Errorf("resource data can not be nil") + } + + existedInstancesForNode := tkev20180525.ExistedInstancesForNode{} + existedInstancesPara := tkev20180525.ExistedInstancesPara{} + enhancedService := tkev20180525.EnhancedService{} + loginSettings := tkev20180525.LoginSettings{} + if v, ok := d.GetOk("instance_id"); ok { + existedInstancesPara.InstanceIds = helper.Strings([]string{v.(string)}) + } + + if v, ok := 
d.GetOk("node_role"); ok { + existedInstancesForNode.NodeRole = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("enhanced_security_service"); ok { + enhancedService.SecurityService = &tkev20180525.RunSecurityServiceEnabled{ + Enabled: helper.Bool(v.(bool)), + } + } + + if v, ok := d.GetOkExists("enhanced_monitor_service"); ok { + enhancedService.MonitorService = &tkev20180525.RunMonitorServiceEnabled{ + Enabled: helper.Bool(v.(bool)), + } + } + + if v, ok := d.GetOkExists("enhanced_automation_service"); ok { + enhancedService.AutomationService = &tkev20180525.RunAutomationServiceEnabled{ + Enabled: helper.Bool(v.(bool)), + } + } + + if v, ok := d.GetOk("password"); ok { + loginSettings.Password = helper.String(v.(string)) + } + + if v, ok := d.GetOk("key_ids"); ok && len(v.([]interface{})) > 0 { + keyIds := v.([]interface{}) + loginSettings.KeyIds = make([]*string, 0, len(keyIds)) + for i := range keyIds { + keyId := keyIds[i].(string) + loginSettings.KeyIds = append(loginSettings.KeyIds, &keyId) + } + } + + if v, ok := d.GetOk("security_group_ids"); ok && len(v.([]interface{})) > 0 { + sgIds := v.([]interface{}) + existedInstancesPara.SecurityGroupIds = make([]*string, 0, len(sgIds)) + for i := range sgIds { + sgId := sgIds[i].(string) + existedInstancesPara.SecurityGroupIds = append(existedInstancesPara.SecurityGroupIds, &sgId) + } + } + + if v, ok := d.GetOk("host_name"); ok { + existedInstancesPara.HostName = helper.String(v.(string)) + } + + if v, ok := d.GetOk("desired_pod_numbers"); ok && len(v.([]interface{})) > 0 { + desiredPodNumbers := v.([]interface{}) + existedInstancesForNode.DesiredPodNumbers = make([]*int64, 0, len(desiredPodNumbers)) + for i := range desiredPodNumbers { + desiredPodNumber := desiredPodNumbers[i].(int64) + existedInstancesForNode.DesiredPodNumbers = append(existedInstancesForNode.DesiredPodNumbers, &desiredPodNumber) + } + } + + if v, ok := d.GetOk("master_config"); ok && len(v.([]interface{})) > 0 { + for _, item := 
range v.([]interface{}) { + instanceAdvancedSettingsOverridesMap := item.(map[string]interface{}) + instanceAdvancedSettings := tkev20180525.InstanceAdvancedSettings{} + if v, ok := instanceAdvancedSettingsOverridesMap["mount_target"]; ok { + instanceAdvancedSettings.MountTarget = helper.String(v.(string)) + } + + if v, ok := instanceAdvancedSettingsOverridesMap["docker_graph_path"]; ok { + instanceAdvancedSettings.DockerGraphPath = helper.String(v.(string)) + } + + if v, ok := instanceAdvancedSettingsOverridesMap["user_script"]; ok { + instanceAdvancedSettings.UserScript = helper.String(v.(string)) + } + + if v, ok := instanceAdvancedSettingsOverridesMap["unschedulable"]; ok { + instanceAdvancedSettings.Unschedulable = helper.IntInt64(v.(int)) + } + + if v, ok := instanceAdvancedSettingsOverridesMap["labels"]; ok && len(v.([]interface{})) > 0 { + for _, item := range v.([]interface{}) { + labelsMap := item.(map[string]interface{}) + labels := tkev20180525.Label{} + if v, ok := labelsMap["name"]; ok { + labels.Name = helper.String(v.(string)) + } + + if v, ok := labelsMap["value"]; ok { + labels.Value = helper.String(v.(string)) + } + + instanceAdvancedSettings.Labels = append(instanceAdvancedSettings.Labels, &labels) + } + } + + if v, ok := instanceAdvancedSettingsOverridesMap["data_disk"]; ok && len(v.([]interface{})) > 0 { + for _, item := range v.([]interface{}) { + dataDisksMap := item.(map[string]interface{}) + dataDisk := tkev20180525.DataDisk{} + if v, ok := dataDisksMap["disk_type"]; ok { + dataDisk.DiskType = helper.String(v.(string)) + } + + if v, ok := dataDisksMap["file_system"]; ok { + dataDisk.FileSystem = helper.String(v.(string)) + } + + if v, ok := dataDisksMap["disk_size"]; ok { + dataDisk.DiskSize = helper.IntInt64(v.(int)) + } + + if v, ok := dataDisksMap["auto_format_and_mount"]; ok { + dataDisk.AutoFormatAndMount = helper.Bool(v.(bool)) + } + + if v, ok := dataDisksMap["mount_target"]; ok { + dataDisk.MountTarget = helper.String(v.(string)) + 
} + + if v, ok := dataDisksMap["disk_partition"]; ok { + dataDisk.DiskPartition = helper.String(v.(string)) + } + + instanceAdvancedSettings.DataDisks = append(instanceAdvancedSettings.DataDisks, &dataDisk) + } + } + + if v, ok := instanceAdvancedSettingsOverridesMap["extra_args"]; ok && len(v.([]interface{})) > 0 { + for _, item := range v.([]interface{}) { + extraArgsMap := item.(map[string]interface{}) + args := tkev20180525.InstanceExtraArgs{} + if v, ok := extraArgsMap["kubelet"]; ok { + args.Kubelet = helper.InterfacesStringsPoint(v.([]interface{})) + } + + instanceAdvancedSettings.ExtraArgs = &args + } + } + + if v, ok := d.GetOk("desired_pod_number"); ok { + instanceAdvancedSettings.DesiredPodNumber = helper.IntInt64(v.(int)) + } + + if v, ok := instanceAdvancedSettingsOverridesMap["gpu_args"]; ok && len(v.([]interface{})) > 0 { + gpuArgs := v.([]interface{})[0].(map[string]interface{}) + + var ( + migEnable = gpuArgs["mig_enable"].(bool) + driver = gpuArgs["driver"].(map[string]interface{}) + cuda = gpuArgs["cuda"].(map[string]interface{}) + cudnn = gpuArgs["cudnn"].(map[string]interface{}) + customDriver = gpuArgs["custom_driver"].(map[string]interface{}) + ) + + tkeGpuArgs := tkev20180525.GPUArgs{} + tkeGpuArgs.MIGEnable = &migEnable + if len(driver) > 0 { + tkeGpuArgs.Driver = &tkev20180525.DriverVersion{ + Version: helper.String(driver["version"].(string)), + Name: helper.String(driver["name"].(string)), + } + } + + if len(cuda) > 0 { + tkeGpuArgs.CUDA = &tkev20180525.DriverVersion{ + Version: helper.String(cuda["version"].(string)), + Name: helper.String(cuda["name"].(string)), + } + } + + if len(cudnn) > 0 { + tkeGpuArgs.CUDNN = &tkev20180525.CUDNN{ + Version: helper.String(cudnn["version"].(string)), + Name: helper.String(cudnn["name"].(string)), + } + + if cudnn["doc_name"] != nil { + tkeGpuArgs.CUDNN.DocName = helper.String(cudnn["doc_name"].(string)) + } + + if cudnn["dev_name"] != nil { + tkeGpuArgs.CUDNN.DevName = 
helper.String(cudnn["dev_name"].(string)) + } + } + + if len(customDriver) > 0 { + tkeGpuArgs.CustomDriver = &tkev20180525.CustomDriver{ + Address: helper.String(customDriver["address"].(string)), + } + } + + instanceAdvancedSettings.GPUArgs = &tkeGpuArgs + } + + if v, ok := instanceAdvancedSettingsOverridesMap["taints"]; ok && len(v.([]interface{})) > 0 { + for _, item := range v.([]interface{}) { + taintsMap := item.(map[string]interface{}) + taint := tkev20180525.Taint{} + if v, ok := taintsMap["key"]; ok { + taint.Key = helper.String(v.(string)) + } + + if v, ok := taintsMap["value"]; ok { + taint.Value = helper.String(v.(string)) + } + + if v, ok := taintsMap["effect"]; ok { + taint.Effect = helper.String(v.(string)) + } + + instanceAdvancedSettings.Taints = append(instanceAdvancedSettings.Taints, &taint) + } + } + + existedInstancesForNode.InstanceAdvancedSettingsOverride = &instanceAdvancedSettings + } + + existedInstancesForNode.ExistedInstancesPara = &existedInstancesPara + req.ExistedInstancesForNode = []*tkev20180525.ExistedInstancesForNode{&existedInstancesForNode} + } + + return nil +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentCreateRequestOnError0(ctx context.Context, req *tkev20180525.ScaleOutClusterMasterRequest, e error) *resource.RetryError { + return tccommon.RetryError(e, tccommon.InternalError) +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostHandleResponse0(ctx context.Context, resp *tkev20180525.ScaleOutClusterMasterResponse) error { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return fmt.Errorf("resource data can not be nil") + } + + var ( + meta = tccommon.ProviderMetaFromContext(ctx) + tkeService = TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + cvmService = svccvm.NewCvmService(meta.(tccommon.ProviderMeta).GetAPIV3Conn()) + clusterId string + instanceId string + nodeRole string + ) + + if v, ok := d.GetOk("cluster_id"); ok { + clusterId = v.(string) + } + + if 
v, ok := d.GetOk("instance_id"); ok { + instanceId = v.(string) + } + + if v, ok := d.GetOk("node_role"); ok { + nodeRole = v.(string) + } + + // wait for cvm status + if err := resource.Retry(7*tccommon.ReadRetryTimeout, func() *resource.RetryError { + instance, errRet := cvmService.DescribeInstanceById(ctx, instanceId) + if errRet != nil { + return tccommon.RetryError(errRet, tccommon.InternalError) + } + + if instance != nil && *instance.InstanceState == svccvm.CVM_STATUS_RUNNING { + return nil + } + + return resource.RetryableError(fmt.Errorf("cvm instance %s status is %s, retry...", instanceId, *instance.InstanceState)) + }); err != nil { + return err + } + + // wait for tke init + return resource.Retry(7*tccommon.ReadRetryTimeout, func() *resource.RetryError { + resp, err := tkeService.DescribeKubernetesClusterMasterAttachmentById2(ctx, clusterId, instanceId, nodeRole) + if err != nil { + return tccommon.RetryError(err, tccommon.InternalError) + } + + has := false + if len(resp.InstanceSet) == 1 { + has = true + } + + if !has { + return resource.NonRetryableError(fmt.Errorf("cvm instance %s not exist in tke instance list", instanceId)) + } + + return nil + }) +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentReadRequestOnError2(ctx context.Context, resp *tkev20180525.DescribeClusterInstancesResponseParams, e error) *resource.RetryError { + return tccommon.RetryError(e, tccommon.InternalError) +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentReadRequestOnSuccess2(ctx context.Context, resp *tkev20180525.DescribeClusterInstancesResponseParams) *resource.RetryError { + if resp == nil || len(resp.InstanceSet) != 1 { + return resource.NonRetryableError(fmt.Errorf("query cvm instance error.")) + } + + instanceDetial := resp.InstanceSet[0] + insId := *instanceDetial.InstanceId + insState := *instanceDetial.InstanceState + + if insState == "failed" { + return resource.NonRetryableError(fmt.Errorf("cvm instance %s attach to cluster fail, 
reason: %s", insId, insState)) + } + + if insState != "running" { + return resource.RetryableError(fmt.Errorf("cvm instance %s in tke status is %s, retry...", insId, insState)) + } + + return nil +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentDeletePostFillRequest0(ctx context.Context, req *tkev20180525.ScaleInClusterMasterRequest) error { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return fmt.Errorf("resource data can not be nil") + } + + var ( + instanceId string + nodeRole string + ) + + if v, ok := d.GetOk("instance_id"); ok { + instanceId = v.(string) + } + + if v, ok := d.GetOk("node_role"); ok { + nodeRole = v.(string) + } + + req.ScaleInMasters = []*tkev20180525.ScaleInMaster{ + &tkev20180525.ScaleInMaster{ + InstanceId: helper.String(instanceId), + NodeRole: helper.String(nodeRole), + }, + } + + return nil +} + +func resourceTencentCloudKubernetesClusterMasterAttachmentDeleteRequestOnError0(ctx context.Context, e error) *resource.RetryError { + if sdkErr, ok := e.(*errors.TencentCloudSDKError); ok { + if sdkErr.GetCode() == "ResourceNotFound" { + return nil + } + + if sdkErr.GetCode() == "InvalidParameter" && strings.Contains(sdkErr.GetMessage(), `is not exist`) { + return nil + } + } + + return tccommon.RetryError(e, tccommon.InternalError) +} diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go new file mode 100644 index 0000000000..778f6c1e59 --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go @@ -0,0 +1,47 @@ +package tke_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudKubernetesClusterMasterAttachmentResource_basic(t *testing.T) { + t.Parallel() + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{{ + Config: testAccKubernetesClusterMasterAttachment, + Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment", "id")), + }, { + ResourceName: "tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment", + ImportState: true, + ImportStateVerify: true, + }}, + }) +} + +const testAccKubernetesClusterMasterAttachment = ` + +resource "tencentcloud_kubernetes_cluster_master_attachment" "kubernetes_cluster_master_attachment" { + extra_args = { + } + master_config = { + labels = { + } + data_disks = { + } + extra_args = { + } + gpu_args = { + } + taints = { + } + } +} +` diff --git a/tencentcloud/services/tke/service_tencentcloud_tke.go b/tencentcloud/services/tke/service_tencentcloud_tke.go index 5482477979..1574bf84a3 100644 --- a/tencentcloud/services/tke/service_tencentcloud_tke.go +++ b/tencentcloud/services/tke/service_tencentcloud_tke.go @@ -3651,3 +3651,88 @@ func (me *TkeService) DescribeKubernetesLogConfigById(ctx context.Context, clust ret = response.Response return } + +func (me *TkeService) DescribeKubernetesClusterMasterAttachmentById(ctx context.Context, clusterId string) (ret *tke.Cluster, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := tke.NewDescribeClustersRequest() + request.ClusterIds = []*string{helper.String(clusterId)} + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseTkeV20180525Client().DescribeClusters(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body 
[%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if len(response.Response.Clusters) < 1 { + return + } + + ret = response.Response.Clusters[0] + return +} + +func (me *TkeService) DescribeKubernetesClusterMasterAttachmentById1(ctx context.Context, instanceId string) (ret *cvm.Instance, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := cvm.NewDescribeInstancesRequest() + request.InstanceIds = []*string{helper.String(instanceId)} + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseCvmV20170312Client().DescribeInstances(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if len(response.Response.InstanceSet) < 1 { + return + } + + ret = response.Response.InstanceSet[0] + return +} + +func (me *TkeService) DescribeKubernetesClusterMasterAttachmentById2(ctx context.Context, clusterId string, instanceId string, nodeRole string) (ret *tke.DescribeClusterInstancesResponseParams, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := tke.NewDescribeClusterInstancesRequest() + request.ClusterId = helper.String(clusterId) + request.InstanceIds = []*string{helper.String(instanceId)} + request.InstanceRole = helper.String(nodeRole) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseTkeV20180525Client().DescribeClusterInstances(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request 
body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + ret = response.Response + return +} diff --git a/website/docs/r/kubernetes_cluster_master_attachment.html.markdown b/website/docs/r/kubernetes_cluster_master_attachment.html.markdown new file mode 100644 index 0000000000..4220d6d2da --- /dev/null +++ b/website/docs/r/kubernetes_cluster_master_attachment.html.markdown @@ -0,0 +1,120 @@ +--- +subcategory: "Tencent Kubernetes Engine(TKE)" +layout: "tencentcloud" +page_title: "TencentCloud: tencentcloud_kubernetes_cluster_master_attachment" +sidebar_current: "docs-tencentcloud-resource-kubernetes_cluster_master_attachment" +description: |- + Provides a resource to create a tke kubernetes_cluster_master_attachment +--- + +# tencentcloud_kubernetes_cluster_master_attachment + +Provides a resource to create a tke kubernetes_cluster_master_attachment + +## Example Usage + +```hcl +resource "tencentcloud_kubernetes_cluster_master_attachment" "kubernetes_cluster_master_attachment" { + extra_args = { + } + master_config = { + labels = { + } + data_disks = { + } + extra_args = { + } + gpu_args = { + } + taints = { + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `cluster_id` - (Required, String, ForceNew) ID of the cluster. +* `instance_id` - (Required, String, ForceNew) ID of the CVM instance, this cvm will reinstall the system. +* `node_role` - (Required, String, ForceNew) Node role, values: MASTER_ETCD, WORKER. MASTER_ETCD needs to be specified only when creating an INDEPENDENT_CLUSTER independent cluster. The number of MASTER_ETCD nodes is 3-7, and it is recommended to have an odd number. The minimum configuration for MASTER_ETCD is 4C8G. +* `desired_pod_numbers` - (Optional, List: [`Int`], ForceNew) When the node belongs to the podCIDR size customization mode, the maximum number of pods running on the node can be specified. 
+* `enhanced_automation_service` - (Optional, Bool, ForceNew) Activate TencentCloud Automation Tools (TAT) service. If this parameter is not specified, the public image will default to enabling the Cloud Automation Assistant service, while other images will default to not enabling the Cloud Automation Assistant service. +* `enhanced_monitor_service` - (Optional, Bool, ForceNew) To specify whether to enable cloud monitor service. Default is TRUE. +* `enhanced_security_service` - (Optional, Bool, ForceNew) To specify whether to enable cloud security service. Default is TRUE. +* `extra_args` - (Optional, List, ForceNew) Custom parameters for cluster master component. +* `host_name` - (Optional, String, ForceNew) When reinstalling the system, you can specify the HostName of the instance to be modified (this parameter must be passed when the cluster is in HostName mode, and the rule name should be consistent with the HostName of the CVM instance creation interface except that uppercase characters are not supported). +* `key_ids` - (Optional, List: [`String`], ForceNew) The key pair to use for the instance, it looks like skey-16jig7tx, it should be set if `password` not set. +* `master_config` - (Optional, List, ForceNew) Advanced Node Settings. Commonly used to attach existing instances. +* `password` - (Optional, String, ForceNew) Password to access, should be set if `key_ids` not set. +* `security_group_ids` - (Optional, List: [`String`], ForceNew) The security group to which the instance belongs. This parameter can be obtained by calling the sgId field in the return value of DescribeSecureGroups. If this parameter is not specified, the default security group will be bound. + +The `data_disk` object of `master_config` supports the following: + +* `auto_format_and_mount` - (Optional, Bool, ForceNew) Indicate whether to auto format and mount or not. Default is `false`. +* `disk_partition` - (Optional, String, ForceNew) The name of the device or partition to mount. 
NOTE: this argument doesn't support setting in node pool, or it will lead to a mount error. +* `disk_size` - (Optional, Int, ForceNew) Volume of disk in GB. Default is `0`. +* `disk_type` - (Optional, String, ForceNew) Types of disk. Valid value: `LOCAL_BASIC`, `LOCAL_SSD`, `CLOUD_BASIC`, `CLOUD_PREMIUM`, `CLOUD_SSD`, `CLOUD_HSSD`, `CLOUD_TSSD` and `CLOUD_BSSD`. +* `file_system` - (Optional, String, ForceNew) File system, e.g. `ext3/ext4/xfs`. +* `mount_target` - (Optional, String, ForceNew) Mount target. + +The `extra_args` object of `master_config` supports the following: + +* `kubelet` - (Optional, List, ForceNew) Kubelet custom parameter. The parameter format is ["k1=v1", "k1=v2"]. + +The `extra_args` object supports the following: + +* `etcd` - (Optional, Set, ForceNew) etcd custom parameters. Only supports independent clusters. +* `kube_api_server` - (Optional, Set, ForceNew) Kube apiserver custom parameters. The parameter format is ["k1=v1", "k1=v2"]. +* `kube_controller_manager` - (Optional, Set, ForceNew) Kube controller manager custom parameters. +* `kube_scheduler` - (Optional, Set, ForceNew) Kube scheduler custom parameters. + +The `gpu_args` object of `master_config` supports the following: + +* `cuda` - (Optional, Map, ForceNew) CUDA version. Format like: `{ version: String, name: String }`. `version`: Version of GPU driver or CUDA; `name`: Name of GPU driver or CUDA. +* `cudnn` - (Optional, Map, ForceNew) cuDNN version. Format like: `{ version: String, name: String, doc_name: String, dev_name: String }`. `version`: cuDNN version; `name`: cuDNN name; `doc_name`: Doc name of cuDNN; `dev_name`: Dev name of cuDNN. +* `custom_driver` - (Optional, Map, ForceNew) Custom GPU driver. Format like: `{address: String}`. `address`: URL of custom GPU driver address. +* `driver` - (Optional, Map, ForceNew) GPU driver version. Format like: `{ version: String, name: String }`. `version`: Version of GPU driver or CUDA; `name`: Name of GPU driver or CUDA. 
+* `mig_enable` - (Optional, Bool, ForceNew) Whether to enable MIG. + +The `labels` object of `master_config` supports the following: + +* `name` - (Required, String, ForceNew) Name of map. +* `value` - (Required, String, ForceNew) Value of map. + +The `master_config` object supports the following: + +* `data_disk` - (Optional, List, ForceNew) Configurations of data disk. +* `desired_pod_number` - (Optional, Int, ForceNew) Indicate to set desired pod number in node. valid when the cluster is podCIDR. +* `docker_graph_path` - (Optional, String, ForceNew) Docker graph path. Default is `/var/lib/docker`. +* `extra_args` - (Optional, List, ForceNew) Custom parameter information related to the node. This is a white-list parameter. +* `gpu_args` - (Optional, List, ForceNew) GPU driver parameters. +* `labels` - (Optional, List, ForceNew) Node label list. +* `mount_target` - (Optional, String, ForceNew) Mount target. Default is not mounting. +* `taints` - (Optional, List, ForceNew) Node taint. +* `unschedulable` - (Optional, Int, ForceNew) Set whether the joined nodes participate in scheduling, with a default value of 0, indicating participation in scheduling; Non 0 means not participating in scheduling. +* `user_script` - (Optional, String, ForceNew) User script encoded in base64, which will be executed after the k8s component runs. The user needs to ensure the script's reentrant and retry logic. The script and its generated log files can be viewed in the node path /data/ccs_userscript/. If the node needs to be initialized before joining the schedule, it can be used in conjunction with the `unschedulable` parameter. After the final initialization of the userScript is completed, add the command "kubectl uncordon nodename --kubeconfig=/root/.kube/config" to add the node to the schedule. + +The `taints` object of `master_config` supports the following: + +* `effect` - (Optional, String, ForceNew) Effect of the taint. +* `key` - (Optional, String, ForceNew) Key of the taint. 
+* `value` - (Optional, String, ForceNew) Value of the taint. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - ID of the resource. + + + +## Import + +tke kubernetes_cluster_master_attachment can be imported using the id, e.g. + +``` +terraform import tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment kubernetes_cluster_master_attachment_id +``` + diff --git a/website/tencentcloud.erb b/website/tencentcloud.erb index 2341463db9..75dae23400 100644 --- a/website/tencentcloud.erb +++ b/website/tencentcloud.erb @@ -4788,6 +4788,9 @@
  • tencentcloud_kubernetes_cluster_endpoint
  • +
  • + tencentcloud_kubernetes_cluster_master_attachment +
  • tencentcloud_kubernetes_encryption_protection
  • From c63c20bd3d9eb81103cc2653c59554feeff4081b Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Fri, 1 Nov 2024 15:38:20 +0800 Subject: [PATCH 2/7] add --- .changelog/2926.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/2926.txt diff --git a/.changelog/2926.txt b/.changelog/2926.txt new file mode 100644 index 0000000000..161e6bb837 --- /dev/null +++ b/.changelog/2926.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +tencentcloud_kubernetes_cluster_master_attachment +``` \ No newline at end of file From 4d630ecc64c355e30c547ddf9a1980932c77685a Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Fri, 1 Nov 2024 16:21:26 +0800 Subject: [PATCH 3/7] add --- ...tc_kubernetes_cluster_master_attachment.md | 51 +++++++++++------- ...tes_cluster_master_attachment_extension.go | 2 +- ...bernetes_cluster_master_attachment_test.go | 50 +++++++++++------ ...es_cluster_master_attachment.html.markdown | 53 ++++++++++++------- 4 files changed, 100 insertions(+), 56 deletions(-) diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md index f8d09cda24..03d42268f7 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.md @@ -1,30 +1,43 @@ -Provides a resource to create a tke kubernetes_cluster_master_attachment +Provides a resource to create a tke kubernetes cluster master attachment Example Usage ```hcl -resource "tencentcloud_kubernetes_cluster_master_attachment" "kubernetes_cluster_master_attachment" { - extra_args = { - } - master_config = { - labels = { - } - data_disks = { +resource "tencentcloud_kubernetes_cluster_master_attachment" "example" { + cluster_id = "cls-fp5o961e" + instance_id = "ins-7d6tpbyg" + node_role = "MASTER_ETCD" + enhanced_security_service = true + 
enhanced_monitor_service = true + enhanced_automation_service = true + password = "Password@123" + security_group_ids = ["sg-hjs685q9"] + + master_config { + mount_target = "/var/data" + docker_graph_path = "/var/lib/containerd" + unschedulable = 0 + labels { + name = "key" + value = "value" } - extra_args = { + + data_disk { + file_system = "ext4" + auto_format_and_mount = true + mount_target = "/var/data" + disk_partition = "/dev/vdb" } - gpu_args = { + + extra_args { + kubelet = ["root-dir=/root"] } - taints = { + + taints { + key = "key" + value = "value" + effect = "NoSchedule" } } } ``` - -Import - -tke kubernetes_cluster_master_attachment can be imported using the id, e.g. - -``` -terraform import tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment kubernetes_cluster_master_attachment_id -``` diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go index 03e09149e1..4ece800779 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go @@ -361,7 +361,7 @@ func resourceTencentCloudKubernetesClusterMasterAttachmentDeletePostFillRequest0 } req.ScaleInMasters = []*tkev20180525.ScaleInMaster{ - &tkev20180525.ScaleInMaster{ + { InstanceId: helper.String(instanceId), NodeRole: helper.String(nodeRole), }, diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go index 778f6c1e59..90f4b42404 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_test.go @@ -8,7 +8,7 @@ import ( tcacctest 
"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" ) -func TestAccTencentCloudKubernetesClusterMasterAttachmentResource_basic(t *testing.T) { +func TestAccTencentCloudNeedFixKubernetesClusterMasterAttachmentResource_basic(t *testing.T) { t.Parallel() resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -17,30 +17,48 @@ func TestAccTencentCloudKubernetesClusterMasterAttachmentResource_basic(t *testi Providers: tcacctest.AccProviders, Steps: []resource.TestStep{{ Config: testAccKubernetesClusterMasterAttachment, - Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment", "id")), - }, { - ResourceName: "tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment", - ImportState: true, - ImportStateVerify: true, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("tencentcloud_kubernetes_cluster_master_attachment.example", "id"), + ), }}, }) } const testAccKubernetesClusterMasterAttachment = ` +resource "tencentcloud_kubernetes_cluster_master_attachment" "example" { + cluster_id = "cls-fp5o961e" + instance_id = "ins-7d6tpbyg" + node_role = "MASTER_ETCD" + enhanced_security_service = true + enhanced_monitor_service = true + enhanced_automation_service = true + password = "Password@123" + security_group_ids = ["sg-hjs685q9"] -resource "tencentcloud_kubernetes_cluster_master_attachment" "kubernetes_cluster_master_attachment" { - extra_args = { - } - master_config = { - labels = { - } - data_disks = { + master_config { + mount_target = "/var/data" + docker_graph_path = "/var/lib/containerd" + unschedulable = 0 + labels { + name = "key" + value = "value" } - extra_args = { + + data_disk { + file_system = "ext4" + auto_format_and_mount = true + mount_target = "/var/data" + disk_partition = "/dev/vdb" } - gpu_args = { + + extra_args { + kubelet = ["root-dir=/root"] } - taints = 
{ + + taints { + key = "key" + value = "value" + effect = "NoSchedule" } } } diff --git a/website/docs/r/kubernetes_cluster_master_attachment.html.markdown b/website/docs/r/kubernetes_cluster_master_attachment.html.markdown index 4220d6d2da..22c760b74c 100644 --- a/website/docs/r/kubernetes_cluster_master_attachment.html.markdown +++ b/website/docs/r/kubernetes_cluster_master_attachment.html.markdown @@ -4,29 +4,50 @@ layout: "tencentcloud" page_title: "TencentCloud: tencentcloud_kubernetes_cluster_master_attachment" sidebar_current: "docs-tencentcloud-resource-kubernetes_cluster_master_attachment" description: |- - Provides a resource to create a tke kubernetes_cluster_master_attachment + Provides a resource to create a tke kubernetes cluster master attachment --- # tencentcloud_kubernetes_cluster_master_attachment -Provides a resource to create a tke kubernetes_cluster_master_attachment +Provides a resource to create a tke kubernetes cluster master attachment ## Example Usage ```hcl -resource "tencentcloud_kubernetes_cluster_master_attachment" "kubernetes_cluster_master_attachment" { - extra_args = { - } - master_config = { - labels = { - } - data_disks = { +resource "tencentcloud_kubernetes_cluster_master_attachment" "example" { + cluster_id = "cls-fp5o961e" + instance_id = "ins-7d6tpbyg" + node_role = "MASTER_ETCD" + enhanced_security_service = true + enhanced_monitor_service = true + enhanced_automation_service = true + password = "Password@123" + security_group_ids = ["sg-hjs685q9"] + + master_config { + mount_target = "/var/data" + docker_graph_path = "/var/lib/containerd" + unschedulable = 0 + labels { + name = "key" + value = "value" } - extra_args = { + + data_disk { + file_system = "ext4" + auto_format_and_mount = true + mount_target = "/var/data" + disk_partition = "/dev/vdb" } - gpu_args = { + + extra_args { + kubelet = ["root-dir=/root"] } - taints = { + + taints { + key = "key" + value = "value" + effect = "NoSchedule" } } } @@ -110,11 +131,3 @@ In 
addition to all arguments above, the following attributes are exported: -## Import - -tke kubernetes_cluster_master_attachment can be imported using the id, e.g. - -``` -terraform import tencentcloud_kubernetes_cluster_master_attachment.kubernetes_cluster_master_attachment kubernetes_cluster_master_attachment_id -``` - From 62c7514b7472c4a18183eebad0323a78c3d4c7ed Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Fri, 1 Nov 2024 19:21:11 +0800 Subject: [PATCH 4/7] add --- ..._tc_kubernetes_cluster_master_attachment.go | 6 +----- ...etes_cluster_master_attachment_extension.go | 18 +++++++++--------- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go index 8e9a388c8f..3e98d24ae2 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment.go @@ -6,7 +6,6 @@ import ( "fmt" "log" "strings" - "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -21,9 +20,6 @@ func ResourceTencentCloudKubernetesClusterMasterAttachment() *schema.Resource { Create: resourceTencentCloudKubernetesClusterMasterAttachmentCreate, Read: resourceTencentCloudKubernetesClusterMasterAttachmentRead, Delete: resourceTencentCloudKubernetesClusterMasterAttachmentDelete, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(900000 * time.Millisecond), - }, Schema: map[string]*schema.Schema{ "cluster_id": { Type: schema.TypeString, @@ -475,7 +471,7 @@ func resourceTencentCloudKubernetesClusterMasterAttachmentRead(d *schema.Resourc } var respData2 *tkev20180525.DescribeClusterInstancesResponseParams - reqErr2 := resource.Retry(900*time.Second, func() *resource.RetryError { + reqErr2 := 
resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { result, e := service.DescribeKubernetesClusterMasterAttachmentById2(ctx, clusterId, instanceId, nodeRole) if e != nil { return resourceTencentCloudKubernetesClusterMasterAttachmentReadRequestOnError2(ctx, result, e) diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go index 4ece800779..cf36f1e9d8 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go @@ -282,7 +282,7 @@ func resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostHandleRespon } // wait for cvm status - if err := resource.Retry(7*tccommon.ReadRetryTimeout, func() *resource.RetryError { + if err := resource.Retry(10*tccommon.ReadRetryTimeout, func() *resource.RetryError { instance, errRet := cvmService.DescribeInstanceById(ctx, instanceId) if errRet != nil { return tccommon.RetryError(errRet, tccommon.InternalError) @@ -298,19 +298,18 @@ func resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostHandleRespon } // wait for tke init - return resource.Retry(7*tccommon.ReadRetryTimeout, func() *resource.RetryError { + return resource.Retry(10*tccommon.ReadRetryTimeout, func() *resource.RetryError { resp, err := tkeService.DescribeKubernetesClusterMasterAttachmentById2(ctx, clusterId, instanceId, nodeRole) if err != nil { return tccommon.RetryError(err, tccommon.InternalError) } - has := false - if len(resp.InstanceSet) == 1 { - has = true + if len(resp.InstanceSet) != 1 { + return resource.NonRetryableError(fmt.Errorf("tke master node cvm instance %s not exist in tke instance list", instanceId)) } - if !has { - return resource.NonRetryableError(fmt.Errorf("cvm instance %s not exist in tke instance list", instanceId)) + if 
*resp.InstanceSet[0].InstanceState != "running" { + return resource.RetryableError(fmt.Errorf("tke master node cvm instance %s in tke status is %s, retry...", instanceId, resp.InstanceSet[0].InstanceState)) } return nil @@ -362,8 +361,9 @@ func resourceTencentCloudKubernetesClusterMasterAttachmentDeletePostFillRequest0 req.ScaleInMasters = []*tkev20180525.ScaleInMaster{ { - InstanceId: helper.String(instanceId), - NodeRole: helper.String(nodeRole), + InstanceId: helper.String(instanceId), + NodeRole: helper.String(nodeRole), + InstanceDeleteMode: helper.String("retain"), }, } From a4374c2a154a00db0ea05541db66647634c4efca Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Fri, 1 Nov 2024 19:23:21 +0800 Subject: [PATCH 5/7] add --- tencentcloud/services/audit/resource_tc_audit_track.md | 2 +- website/docs/r/audit_track.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tencentcloud/services/audit/resource_tc_audit_track.md b/tencentcloud/services/audit/resource_tc_audit_track.md index bd8bd3e241..6609c26930 100644 --- a/tencentcloud/services/audit/resource_tc_audit_track.md +++ b/tencentcloud/services/audit/resource_tc_audit_track.md @@ -39,7 +39,7 @@ resource "tencentcloud_audit_track" "example" { storage_name = "db90b92c-91d2-46b0-94ac-debbbb21dc4e" storage_prefix = "cloudaudit" storage_region = "ap-guangzhou" - storage_type = "cls" + storage_type = "cos" storage_account_id = "100037717137" storage_app_id = "1309116520" } diff --git a/website/docs/r/audit_track.html.markdown b/website/docs/r/audit_track.html.markdown index 3c322ad229..eb5991f8c4 100644 --- a/website/docs/r/audit_track.html.markdown +++ b/website/docs/r/audit_track.html.markdown @@ -50,7 +50,7 @@ resource "tencentcloud_audit_track" "example" { storage_name = "db90b92c-91d2-46b0-94ac-debbbb21dc4e" storage_prefix = "cloudaudit" storage_region = "ap-guangzhou" - storage_type = "cls" + storage_type = "cos" storage_account_id = "100037717137" storage_app_id 
= "1309116520" } From e5bf478c9b5453f47429574a3d7d3d3cbd5df2d8 Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Mon, 4 Nov 2024 10:50:48 +0800 Subject: [PATCH 6/7] add --- ...tes_cluster_master_attachment_extension.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go index cf36f1e9d8..7ec7fe5684 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go @@ -33,25 +33,23 @@ func resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostFillRequest0 } if v, ok := d.GetOkExists("enhanced_security_service"); ok { - enhancedService.SecurityService = &tkev20180525.RunSecurityServiceEnabled{ - Enabled: helper.Bool(v.(bool)), - } + enhancedService.SecurityService = &tkev20180525.RunSecurityServiceEnabled{Enabled: helper.Bool(v.(bool))} + existedInstancesPara.EnhancedService = &enhancedService } if v, ok := d.GetOkExists("enhanced_monitor_service"); ok { - enhancedService.MonitorService = &tkev20180525.RunMonitorServiceEnabled{ - Enabled: helper.Bool(v.(bool)), - } + enhancedService.MonitorService = &tkev20180525.RunMonitorServiceEnabled{Enabled: helper.Bool(v.(bool))} + existedInstancesPara.EnhancedService = &enhancedService } if v, ok := d.GetOkExists("enhanced_automation_service"); ok { - enhancedService.AutomationService = &tkev20180525.RunAutomationServiceEnabled{ - Enabled: helper.Bool(v.(bool)), - } + enhancedService.AutomationService = &tkev20180525.RunAutomationServiceEnabled{Enabled: helper.Bool(v.(bool))} + existedInstancesPara.EnhancedService = &enhancedService } if v, ok := d.GetOk("password"); ok { loginSettings.Password = helper.String(v.(string)) + existedInstancesPara.LoginSettings = 
&loginSettings } if v, ok := d.GetOk("key_ids"); ok && len(v.([]interface{})) > 0 { @@ -61,6 +59,8 @@ func resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostFillRequest0 keyId := keyIds[i].(string) loginSettings.KeyIds = append(loginSettings.KeyIds, &keyId) } + + existedInstancesPara.LoginSettings = &loginSettings } if v, ok := d.GetOk("security_group_ids"); ok && len(v.([]interface{})) > 0 { @@ -309,7 +309,7 @@ func resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostHandleRespon } if *resp.InstanceSet[0].InstanceState != "running" { - return resource.RetryableError(fmt.Errorf("tke master node cvm instance %s in tke status is %s, retry...", instanceId, resp.InstanceSet[0].InstanceState)) + return resource.RetryableError(fmt.Errorf("tke master node cvm instance %s in tke status is %s, retry...", instanceId, *resp.InstanceSet[0].InstanceState)) } return nil From b9a939e2fc761014b88d9ca6dbfd4e593961c513 Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Mon, 4 Nov 2024 11:32:34 +0800 Subject: [PATCH 7/7] add --- ...esource_tc_kubernetes_cluster_master_attachment_extension.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go index 7ec7fe5684..3507107643 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_master_attachment_extension.go @@ -165,7 +165,7 @@ func resourceTencentCloudKubernetesClusterMasterAttachmentCreatePostFillRequest0 } } - if v, ok := d.GetOk("desired_pod_number"); ok { + if v, ok := d.GetOkExists("desired_pod_number"); ok { instanceAdvancedSettings.DesiredPodNumber = helper.IntInt64(v.(int)) }