diff --git a/tencentcloud/connectivity/client.go b/tencentcloud/connectivity/client.go
index 962651a1b7..d3b2a10562 100644
--- a/tencentcloud/connectivity/client.go
+++ b/tencentcloud/connectivity/client.go
@@ -9,24 +9,6 @@ import (
     "strconv"
     "time"
 
-    cdc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cdc/v20201214"
-
-    csip "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/csip/v20221121"
-    cos "github.com/tencentyun/cos-go-sdk-v5"
-
-    dasb "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dasb/v20191018"
-
-    oceanus "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/oceanus/v20190422"
-
-    cfw "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cfw/v20190904"
-
-    waf "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/waf/v20180125"
-
-    dlc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dlc/v20210125"
-    wedata "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20210820"
-
-    ciam "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/ciam/v20220331"
-
     "github.com/aws/aws-sdk-go/aws"
     "github.com/aws/aws-sdk-go/aws/credentials"
     "github.com/aws/aws-sdk-go/aws/endpoints"
@@ -44,24 +26,30 @@ import (
     cat "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cat/v20180409"
     cbs "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cbs/v20170312"
     cdb "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cdb/v20170320"
+    cdc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cdc/v20201214"
     cdn "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cdn/v20180606"
     cdwch "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cdwch/v20200915"
     cdwpg "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cdwpg/v20201230"
     cfs "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cfs/v20190719"
+    cfw "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cfw/v20190904"
     chdfs "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/chdfs/v20201112"
+    ciam "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/ciam/v20220331"
     ckafka "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/ckafka/v20190819"
     clb "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/clb/v20180317"
     audit "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cloudaudit/v20190319"
     cls "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cls/v20201016"
     "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
     "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
+    csip "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/csip/v20221121"
     cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312"
     cwp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cwp/v20180228"
     cynosdb "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cynosdb/v20190107"
+    dasb "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dasb/v20191018"
     dayu "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dayu/v20180709"
     dbbrain "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dbbrain/v20210527"
     dc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dc/v20180410"
     dcdb "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dcdb/v20180411"
+    dlc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dlc/v20210125"
     dnspod "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dnspod/v20210323"
     domain "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/domain/v20180808"
     dts "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dts/v20211206"
@@ -76,6 +64,7 @@ import (
     mongodb "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/mongodb/v20190725"
     monitor "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor/v20180724"
     mps "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/mps/v20190612"
+    oceanus "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/oceanus/v20190422"
     organization "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/organization/v20210331"
     postgre "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/postgres/v20170312"
     privatedns "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/privatedns/v20201028"
@@ -101,12 +90,15 @@ import (
     teo "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/teo/v20220901"
     tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
     tke2 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20220501"
     trocket "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/trocket/v20230308"
     tse "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tse/v20201207"
     tsf "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tsf/v20180326"
     vod "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/vod/v20180717"
     vpc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/vpc/v20170312"
+    waf "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/waf/v20180125"
+    wedata "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20210820"
     ssl "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wss/v20180426"
+    cos "github.com/tencentyun/cos-go-sdk-v5"
 )
 
 //internal version: replace import begin, please do not modify this annotation and refrain from inserting any code between the beginning and end lines of the annotation.
@@ -212,7 +205,9 @@ type TencentCloudClient struct {
     tke2Conn *tke2.Client
     cdcConn  *cdc.Client
     //omit nil client
-    omitNilConn *common.Client
+    omitNilConn      *common.Client
+    tkev20180525Conn *tke.Client
+    tkev20220501Conn *tke2.Client
 }
 
 // NewClientProfile returns a new ClientProfile
@@ -1633,3 +1629,29 @@ func (me *TencentCloudClient) UseCdcClient() *cdc.Client {
 
     return me.cdcConn
 }
+
+// UseTkeV20180525Client returns the TKE client for API version 2018-05-25
+func (me *TencentCloudClient) UseTkeV20180525Client() *tke.Client {
+    if me.tkev20180525Conn != nil {
+        return me.tkev20180525Conn
+    }
+    cpf := me.NewClientProfile(300)
+    cpf.Language = "zh-CN"
+    me.tkev20180525Conn, _ = tke.NewClient(me.Credential, me.Region, cpf)
+    me.tkev20180525Conn.WithHttpTransport(&LogRoundTripper{})
+
+    return me.tkev20180525Conn
+}
+
+// UseTkeV20220501Client returns the TKE client for API version 2022-05-01
+func (me *TencentCloudClient) UseTkeV20220501Client() *tke2.Client {
+    if me.tkev20220501Conn != nil {
+        return me.tkev20220501Conn
+    }
+    cpf := me.NewClientProfile(300)
+    cpf.Language = "zh-CN"
+    me.tkev20220501Conn, _ = tke2.NewClient(me.Credential, me.Region, cpf)
+    me.tkev20220501Conn.WithHttpTransport(&LogRoundTripper{})
+
+    return me.tkev20220501Conn
+}
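Both accessors memoize the constructed client on `TencentCloudClient`, so repeated calls reuse one connection. A hedged sketch of a call site (the service method shown is illustrative, not part of this change; `TkeService` and its `client` field already exist in the provider):

```go
// Illustrative only: how service-layer code is expected to obtain the
// versioned client. The request type comes from the tke/v20180525 package.
func (me *TkeService) describeAvailableClusterVersion(clusterId string) error {
    request := tke.NewDescribeAvailableClusterVersionRequest()
    request.ClusterId = helper.String(clusterId)

    // The accessor lazily builds the client (300s timeout, zh-CN language)
    // and caches it on the connection struct for subsequent calls.
    client := me.client.UseTkeV20180525Client()
    _, err := client.DescribeAvailableClusterVersion(request)
    return err
}
```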
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dcdb/v20180411" + dlc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dlc/v20210125" dnspod "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dnspod/v20210323" domain "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/domain/v20180808" dts "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dts/v20211206" @@ -76,6 +64,7 @@ import ( mongodb "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/mongodb/v20190725" monitor "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor/v20180724" mps "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/mps/v20190612" + oceanus "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/oceanus/v20190422" organization "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/organization/v20210331" postgre "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/postgres/v20170312" privatedns "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/privatedns/v20201028" @@ -101,12 +90,16 @@ import ( teo "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/teo/v20220901" tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" tke2 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20220501" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke2/v20220501" trocket "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/trocket/v20230308" tse "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tse/v20201207" tsf "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tsf/v20180326" vod "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/vod/v20180717" vpc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/vpc/v20170312" + waf "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/waf/v20180125" + wedata "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20210820" ssl "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wss/v20180426" + cos "github.com/tencentyun/cos-go-sdk-v5" ) //internal version: replace import begin, please do not modify this annotation and refrain from inserting any code between the beginning and end lines of the annotation. 
diff --git a/tencentcloud/services/tke/data_source_tc_kubernetes_available_cluster_versions.go b/tencentcloud/services/tke/data_source_tc_kubernetes_available_cluster_versions.go
index 77c676c451..7072dac618 100644
--- a/tencentcloud/services/tke/data_source_tc_kubernetes_available_cluster_versions.go
+++ b/tencentcloud/services/tke/data_source_tc_kubernetes_available_cluster_versions.go
@@ -1,14 +1,14 @@
+// Code generated by iacg; DO NOT EDIT.
 package tke
 
 import (
     "context"
 
-    tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
-
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
 )
 
@@ -17,32 +17,32 @@ func DataSourceTencentCloudKubernetesAvailableClusterVersions() *schema.Resource {
         Read: dataSourceTencentCloudKubernetesAvailableClusterVersionsRead,
         Schema: map[string]*schema.Schema{
             "cluster_id": {
-                Optional:    true,
                 Type:        schema.TypeString,
+                Optional:    true,
                 Description: "Cluster Id.",
             },
             "cluster_ids": {
-                Optional: true,
-                Type:     schema.TypeSet,
+                Type:        schema.TypeSet,
+                Optional:    true,
+                Description: "list of cluster IDs.",
                 Elem: &schema.Schema{
                     Type: schema.TypeString,
                 },
-                Description: "list of cluster IDs.",
             },
             "versions": {
-                Computed: true,
-                Type:     schema.TypeSet,
+                Type:        schema.TypeSet,
+                Computed:    true,
+                Description: "Upgradable cluster version number. Note: This field may return null, indicating that no valid value can be obtained.",
                 Elem: &schema.Schema{
                     Type: schema.TypeString,
                 },
-                Description: "Upgradable cluster version number. Note: This field may return null, indicating that no valid value can be obtained.",
             },
             "clusters": {
-                Computed:    true,
                 Type:        schema.TypeList,
+                Computed:    true,
                 Description: "cluster information. Note: This field may return null, indicating that no valid value can be obtained.",
                 Elem: &schema.Resource{
                     Schema: map[string]*schema.Schema{
@@ -52,12 +52,12 @@ func DataSourceTencentCloudKubernetesAvailableClusterVersions() *schema.Resource {
                         "cluster_id": {
                             Type:        schema.TypeString,
                             Computed:    true,
                             Description: "Cluster ID.",
                         },
                         "versions": {
-                            Type: schema.TypeSet,
+                            Type:        schema.TypeSet,
+                            Computed:    true,
+                            Description: "list of cluster major version numbers, for example 1.18.4.",
                             Elem: &schema.Schema{
                                 Type: schema.TypeString,
                             },
-                            Computed:    true,
-                            Description: "list of cluster major version numbers, for example 1.18.4.",
                         },
                     },
                 },
@@ -76,7 +76,7 @@ func dataSourceTencentCloudKubernetesAvailableClusterVersionsRead(d *schema.ResourceData, meta interface{}) error {
     defer tccommon.LogElapsed("data_source.tencentcloud_kubernetes_available_cluster_versions.read")()
     defer tccommon.InconsistentCheck(d, meta)()
 
-    logId := tccommon.GetLogId(tccommon.ContextNil)
+    logId := tccommon.GetLogId(nil)
 
     ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
 
     service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()}
@@ -96,7 +96,7 @@ func dataSourceTencentCloudKubernetesAvailableClusterVersionsRead(d *schema.ResourceData, meta interface{}) error {
         paramMap["ClusterIds"] = clusterIdsList
     }
 
-    var respData *tke.DescribeAvailableClusterVersionResponseParams
+    var respData *tkev20180525.DescribeAvailableClusterVersionResponseParams
     err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
         result, e := service.DescribeKubernetesAvailableClusterVersionsByFilter(ctx, paramMap)
         if e != nil {
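The read path relies on the provider's standard retry wrapper. A minimal sketch of the idiom, using only names that appear in this file (`service`, `paramMap`, and `ctx` are assumed to be in scope as above):

```go
// Sketch of the retry idiom used throughout these generated files: the
// closure returns a *resource.RetryError to retry, and captures the
// successful response into the outer variable.
var respData *tkev20180525.DescribeAvailableClusterVersionResponseParams
err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
    result, e := service.DescribeKubernetesAvailableClusterVersionsByFilter(ctx, paramMap)
    if e != nil {
        // RetryError decides whether the underlying error is retryable.
        return tccommon.RetryError(e)
    }
    respData = result
    return nil
})
```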
diff --git a/tencentcloud/services/tke/data_source_tc_kubernetes_available_cluster_versions_extension.go b/tencentcloud/services/tke/data_source_tc_kubernetes_available_cluster_versions_extension.go
index e910629e70..1b851ef482 100644
--- a/tencentcloud/services/tke/data_source_tc_kubernetes_available_cluster_versions_extension.go
+++ b/tencentcloud/services/tke/data_source_tc_kubernetes_available_cluster_versions_extension.go
@@ -5,6 +5,7 @@ import (
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
     tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
 )
diff --git a/tencentcloud/services/tke/data_source_tc_kubernetes_charts.go b/tencentcloud/services/tke/data_source_tc_kubernetes_charts.go
index d32d919126..ff1db5c2e7 100644
--- a/tencentcloud/services/tke/data_source_tc_kubernetes_charts.go
+++ b/tencentcloud/services/tke/data_source_tc_kubernetes_charts.go
@@ -8,7 +8,7 @@ import (
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
@@ -106,7 +106,7 @@ func dataSourceTencentCloudKubernetesChartsRead(d *schema.ResourceData, meta interface{}) error {
         paramMap["ClusterType"] = helper.String(v.(string))
     }
 
-    var respData []*tke.AppChart
+    var respData []*tkev20180525.AppChart
     err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
         result, e := service.DescribeKubernetesChartsByFilter(ctx, paramMap)
         if e != nil {
paramMap["ClusterId"] = helper.String(v.(string)) } - var respData *tke.DescribeClusterAuthenticationOptionsResponseParams + var respData *tkev20180525.DescribeClusterAuthenticationOptionsResponseParams err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { result, e := service.DescribeKubernetesClusterAuthenticationOptionsByFilter(ctx, paramMap) if e != nil { diff --git a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_common_names.go b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_common_names.go index c394cd3fb8..4f2f81b5e2 100644 --- a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_common_names.go +++ b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_common_names.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" @@ -93,7 +93,7 @@ func dataSourceTencentCloudKubernetesClusterCommonNamesRead(d *schema.ResourceDa paramMap["ClusterId"] = helper.String(v.(string)) } - var respData []*tke.CommonName + var respData []*tkev20180525.CommonName err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { result, e := service.DescribeKubernetesClusterCommonNamesByFilter(ctx, paramMap) if e != nil { @@ -106,17 +106,17 @@ func dataSourceTencentCloudKubernetesClusterCommonNamesRead(d *schema.ResourceDa return err } - cns := make([]string, 0, len(respData)) + var cns []string commonNamesList := make([]map[string]interface{}, 0, len(respData)) if respData != nil { for _, commonNames := range respData { commonNamesMap := map[string]interface{}{} + var cN string if commonNames.SubaccountUin != nil { commonNamesMap["subaccount_uin"] = commonNames.SubaccountUin } - var cN string if commonNames.CN != nil { commonNamesMap["common_names"] = commonNames.CN cN = *commonNames.CN diff --git a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_instances.go b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_instances.go index d8fdc317f2..879eafba4d 100644 --- a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_instances.go +++ b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_instances.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" @@ -373,7 +373,7 @@ func dataSourceTencentCloudKubernetesClusterInstancesRead(d *schema.ResourceData if v, ok := d.GetOk("instance_ids"); ok { instanceIdsList := []*string{} - instanceIdsSet := v.(*schema.Set).List() + instanceIdsSet := v.([]interface{}) for i := range instanceIdsSet { instanceIds := instanceIdsSet[i].(string) instanceIdsList = 
diff --git a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_common_names.go b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_common_names.go
index c394cd3fb8..4f2f81b5e2 100644
--- a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_common_names.go
+++ b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_common_names.go
@@ -7,7 +7,7 @@ import (
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
@@ -93,7 +93,7 @@ func dataSourceTencentCloudKubernetesClusterCommonNamesRead(d *schema.ResourceData, meta interface{}) error {
         paramMap["ClusterId"] = helper.String(v.(string))
     }
 
-    var respData []*tke.CommonName
+    var respData []*tkev20180525.CommonName
     err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
         result, e := service.DescribeKubernetesClusterCommonNamesByFilter(ctx, paramMap)
         if e != nil {
@@ -106,17 +106,17 @@ func dataSourceTencentCloudKubernetesClusterCommonNamesRead(d *schema.ResourceData, meta interface{}) error {
         return err
     }
 
-    cns := make([]string, 0, len(respData))
+    var cns []string
     commonNamesList := make([]map[string]interface{}, 0, len(respData))
     if respData != nil {
         for _, commonNames := range respData {
             commonNamesMap := map[string]interface{}{}
 
+            var cN string
             if commonNames.SubaccountUin != nil {
                 commonNamesMap["subaccount_uin"] = commonNames.SubaccountUin
             }
 
-            var cN string
             if commonNames.CN != nil {
                 commonNamesMap["common_names"] = commonNames.CN
                 cN = *commonNames.CN
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - tke2 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20220501" + tkev20220501 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20220501" tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" @@ -213,79 +213,6 @@ func DataSourceTencentCloudKubernetesClusterNativeNodePools() *schema.Resource { Type: schema.TypeString, }, }, - //"upgrade_settings": { - // Type: schema.TypeList, - // Computed: true, - // Description: "Automatically upgrade configuration.", - // Elem: &schema.Resource{ - // Schema: map[string]*schema.Schema{ - // "auto_upgrade": { - // Type: schema.TypeBool, - // Computed: true, - // Description: "Whether to enable automatic upgrade.", - // }, - // "upgrade_options": { - // Type: schema.TypeList, - // Computed: true, - // Description: "Operation and maintenance window.", - // Elem: &schema.Resource{ - // Schema: map[string]*schema.Schema{ - // "auto_upgrade_start_time": { - // Type: schema.TypeString, - // Computed: true, - // Description: "Automatic upgrade start time.", - // }, - // "duration": { - // Type: schema.TypeString, - // Computed: true, - // Description: "Automatic upgrade duration.", - // }, - // "weekly_period": { - // Type: schema.TypeList, - // Computed: true, - // Description: "Operation and maintenance date.", - // Elem: &schema.Schema{ - // Type: schema.TypeString, - // }, - // }, - // }, - // }, - // }, - // "components": { - // Type: schema.TypeList, - // Computed: true, - // Description: "Upgrade items.", - // Elem: &schema.Schema{ - // Type: schema.TypeString, - // }, - // }, - // "max_unavailable": { - // Type: schema.TypeList, - // Computed: true, - // Description: "When upgrading, the maximum number of nodes that cannot be upgraded.", - // Elem: &schema.Resource{ - // Schema: map[string]*schema.Schema{ - // "type": { - // Type: schema.TypeInt, - // Computed: true, - // Description: "Numeric type, 0 is int, 1 is string.", - // }, - // "int_val": { - // Type: schema.TypeInt, - // Computed: true, - // Description: "Integer.", - // }, - // "str_val": { - // Type: schema.TypeString, - // Computed: true, - // Description: "String.", - // }, - // }, - // }, - // }, - // }, - // }, - //}, "auto_repair": { Type: schema.TypeBool, Computed: true, @@ -324,28 +251,13 @@ func DataSourceTencentCloudKubernetesClusterNativeNodePools() *schema.Resource { "disk_type": { Type: schema.TypeString, Computed: true, - Description: "Cloud disk type.", + Description: "Cloud disk type. 
diff --git a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_levels.go b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_levels.go
index e196368bee..dae41c5d9e 100644
--- a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_levels.go
+++ b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_levels.go
@@ -7,7 +7,7 @@ import (
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
@@ -102,7 +102,7 @@ func dataSourceTencentCloudKubernetesClusterLevelsRead(d *schema.ResourceData, meta interface{}) error {
         paramMap["ClusterID"] = helper.String(v.(string))
     }
 
-    var respData []*tke.ClusterLevelAttribute
+    var respData []*tkev20180525.ClusterLevelAttribute
     err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
         result, e := service.DescribeKubernetesClusterLevelsByFilter(ctx, paramMap)
         if e != nil {
@@ -115,7 +115,7 @@ func dataSourceTencentCloudKubernetesClusterLevelsRead(d *schema.ResourceData, meta interface{}) error {
         return err
     }
 
-    levels := make([]string, 0, len(respData))
+    var levels []string
     itemsList := make([]map[string]interface{}, 0, len(respData))
     if respData != nil {
         for _, items := range respData {
diff --git a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_native_node_pools.go b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_native_node_pools.go
index 22c98f5b63..1241c6ba43 100644
--- a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_native_node_pools.go
+++ b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_native_node_pools.go
@@ -6,7 +6,7 @@ import (
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke2 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20220501"
+    tkev20220501 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20220501"
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
@@ -213,79 +213,6 @@ func DataSourceTencentCloudKubernetesClusterNativeNodePools() *schema.Resource {
                                 Type: schema.TypeString,
                             },
                         },
-                        //"upgrade_settings": {
-                        //	Type:        schema.TypeList,
-                        //	Computed:    true,
-                        //	Description: "Automatically upgrade configuration.",
-                        //	Elem: &schema.Resource{
-                        //		Schema: map[string]*schema.Schema{
-                        //			"auto_upgrade": {
-                        //				Type:        schema.TypeBool,
-                        //				Computed:    true,
-                        //				Description: "Whether to enable automatic upgrade.",
-                        //			},
-                        //			"upgrade_options": {
-                        //				Type:        schema.TypeList,
-                        //				Computed:    true,
-                        //				Description: "Operation and maintenance window.",
-                        //				Elem: &schema.Resource{
-                        //					Schema: map[string]*schema.Schema{
-                        //						"auto_upgrade_start_time": {
-                        //							Type:        schema.TypeString,
-                        //							Computed:    true,
-                        //							Description: "Automatic upgrade start time.",
-                        //						},
-                        //						"duration": {
-                        //							Type:        schema.TypeString,
-                        //							Computed:    true,
-                        //							Description: "Automatic upgrade duration.",
-                        //						},
-                        //						"weekly_period": {
-                        //							Type:        schema.TypeList,
-                        //							Computed:    true,
-                        //							Description: "Operation and maintenance date.",
-                        //							Elem: &schema.Schema{
-                        //								Type: schema.TypeString,
-                        //							},
-                        //						},
-                        //					},
-                        //				},
-                        //			},
-                        //			"components": {
-                        //				Type:        schema.TypeList,
-                        //				Computed:    true,
-                        //				Description: "Upgrade items.",
-                        //				Elem: &schema.Schema{
-                        //					Type: schema.TypeString,
-                        //				},
-                        //			},
-                        //			"max_unavailable": {
-                        //				Type:        schema.TypeList,
-                        //				Computed:    true,
-                        //				Description: "When upgrading, the maximum number of nodes that cannot be upgraded.",
-                        //				Elem: &schema.Resource{
-                        //					Schema: map[string]*schema.Schema{
-                        //						"type": {
-                        //							Type:        schema.TypeInt,
-                        //							Computed:    true,
-                        //							Description: "Numeric type, 0 is int, 1 is string.",
-                        //						},
-                        //						"int_val": {
-                        //							Type:        schema.TypeInt,
-                        //							Computed:    true,
-                        //							Description: "Integer.",
-                        //						},
-                        //						"str_val": {
-                        //							Type:        schema.TypeString,
-                        //							Computed:    true,
-                        //							Description: "String.",
-                        //						},
-                        //					},
-                        //				},
-                        //			},
-                        //		},
-                        //	},
-                        //},
                         "auto_repair": {
                             Type:        schema.TypeBool,
                             Computed:    true,
@@ -324,28 +251,13 @@ func DataSourceTencentCloudKubernetesClusterNativeNodePools() *schema.Resource {
                                     "disk_type": {
                                         Type:        schema.TypeString,
                                         Computed:    true,
-                                        Description: "Cloud disk type.",
+                                        Description: "Cloud disk type. Valid values: `CLOUD_PREMIUM`: Premium Cloud Storage, `CLOUD_SSD`: cloud SSD disk, `CLOUD_BSSD`: Basic SSD, `CLOUD_HSSD`: Enhanced SSD.",
                                     },
                                     "disk_size": {
                                         Type:        schema.TypeInt,
                                         Computed:    true,
                                         Description: "Cloud disk size (G).",
                                     },
-                                    //"auto_format_and_mount": {
-                                    //	Type:        schema.TypeBool,
-                                    //	Computed:    true,
-                                    //	Description: "Whether to automatically format the disk and mount it.",
-                                    //},
-                                    //"file_system": {
-                                    //	Type:        schema.TypeString,
-                                    //	Computed:    true,
-                                    //	Description: "File system.",
-                                    //},
-                                    //"mount_target": {
-                                    //	Type:        schema.TypeString,
-                                    //	Computed:    true,
-                                    //	Description: "Mount directory.",
-                                    //},
                                 },
                             },
                         },
@@ -584,15 +496,15 @@ func dataSourceTencentCloudKubernetesClusterNativeNodePoolsRead(d *schema.ResourceData, meta interface{}) error {
 
     if v, ok := d.GetOk("filters"); ok {
         filtersSet := v.([]interface{})
-        tmpSet := make([]*tke2.Filter, 0, len(filtersSet))
+        tmpSet := make([]*tkev20220501.Filter, 0, len(filtersSet))
         for _, item := range filtersSet {
             filtersMap := item.(map[string]interface{})
-            filter := tke2.Filter{}
+            filter := tkev20220501.Filter{}
             if v, ok := filtersMap["name"]; ok {
                 filter.Name = helper.String(v.(string))
             }
             if v, ok := filtersMap["values"]; ok {
                 valuesSet := v.(*schema.Set).List()
                 for i := range valuesSet {
                     values := valuesSet[i].(string)
                     filter.Values = append(filter.Values, helper.String(values))
@@ -603,7 +515,7 @@ func dataSourceTencentCloudKubernetesClusterNativeNodePoolsRead(d *schema.ResourceData, meta interface{}) error {
         paramMap["Filters"] = tmpSet
     }
 
-    var respData []*tke2.NodePool
+    var respData []*tkev20220501.NodePool
     err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
         result, e := service.DescribeKubernetesClusterNativeNodePoolsByFilter(ctx, paramMap)
         if e != nil {
@@ -616,22 +528,13 @@ func dataSourceTencentCloudKubernetesClusterNativeNodePoolsRead(d *schema.ResourceData, meta interface{}) error {
         return err
     }
 
-    ids := make([]string, 0, len(respData))
+    var ids []string
     nodePoolsList := make([]map[string]interface{}, 0, len(respData))
     if respData != nil {
         for _, nodePools := range respData {
             nodePoolsMap := map[string]interface{}{}
 
-            if nodePools.ClusterId != nil {
-                nodePoolsMap["cluster_id"] = nodePools.ClusterId
-            }
-
-            var nodePoolId string
-            if nodePools.NodePoolId != nil {
-                nodePoolsMap["node_pool_id"] = nodePools.NodePoolId
-                nodePoolId = *nodePools.NodePoolId
-            }
-
             tagsList := make([]map[string]interface{}, 0, len(nodePools.Tags))
             if nodePools.Tags != nil {
                 for _, tags := range nodePools.Tags {
@@ -757,56 +660,6 @@ func dataSourceTencentCloudKubernetesClusterNativeNodePoolsRead(d *schema.ResourceData, meta interface{}) error {
                     nativeMap["security_group_ids"] = nodePools.Native.SecurityGroupIds
                 }
 
-                //upgradeSettingsMap := map[string]interface{}{}
-                //
-                //if nodePools.Native.UpgradeSettings != nil {
-                //	if nodePools.Native.UpgradeSettings.AutoUpgrade != nil {
-                //		upgradeSettingsMap["auto_upgrade"] = nodePools.Native.UpgradeSettings.AutoUpgrade
-                //	}
-                //
-                //	upgradeOptionsMap := map[string]interface{}{}
-                //
-                //	if nodePools.Native.UpgradeSettings.UpgradeOptions != nil {
-                //		if nodePools.Native.UpgradeSettings.UpgradeOptions.AutoUpgradeStartTime != nil {
-                //			upgradeOptionsMap["auto_upgrade_start_time"] = nodePools.Native.UpgradeSettings.UpgradeOptions.AutoUpgradeStartTime
-                //		}
-                //
-                //		if nodePools.Native.UpgradeSettings.UpgradeOptions.Duration != nil {
-                //			upgradeOptionsMap["duration"] = nodePools.Native.UpgradeSettings.UpgradeOptions.Duration
-                //		}
-                //
-                //		if nodePools.Native.UpgradeSettings.UpgradeOptions.WeeklyPeriod != nil {
-                //			upgradeOptionsMap["weekly_period"] = nodePools.Native.UpgradeSettings.UpgradeOptions.WeeklyPeriod
-                //		}
-                //
-                //		upgradeSettingsMap["upgrade_options"] = []interface{}{upgradeOptionsMap}
-                //	}
-                //
-                //	if nodePools.Native.UpgradeSettings.Components != nil {
-                //		upgradeSettingsMap["components"] = nodePools.Native.UpgradeSettings.Components
-                //	}
-                //
-                //	maxUnavailableMap := map[string]interface{}{}
-                //
-                //	if nodePools.Native.UpgradeSettings.MaxUnavailable != nil {
-                //		if nodePools.Native.UpgradeSettings.MaxUnavailable.Type != nil {
-                //			maxUnavailableMap["type"] = nodePools.Native.UpgradeSettings.MaxUnavailable.Type
-                //		}
-                //
-                //		if nodePools.Native.UpgradeSettings.MaxUnavailable.IntVal != nil {
-                //			maxUnavailableMap["int_val"] = nodePools.Native.UpgradeSettings.MaxUnavailable.IntVal
-                //		}
-                //
-                //		if nodePools.Native.UpgradeSettings.MaxUnavailable.StrVal != nil {
-                //			maxUnavailableMap["str_val"] = nodePools.Native.UpgradeSettings.MaxUnavailable.StrVal
-                //		}
-                //
-                //		upgradeSettingsMap["max_unavailable"] = []interface{}{maxUnavailableMap}
-                //	}
-                //
-                //	nativeMap["upgrade_settings"] = []interface{}{upgradeSettingsMap}
-                //}
-
                 if nodePools.Native.AutoRepair != nil {
                     nativeMap["auto_repair"] = nodePools.Native.AutoRepair
                 }
@@ -840,18 +693,6 @@ func dataSourceTencentCloudKubernetesClusterNativeNodePoolsRead(d *schema.ResourceData, meta interface{}) error {
                     systemDiskMap["disk_size"] = nodePools.Native.SystemDisk.DiskSize
                 }
 
-                //if nodePools.Native.SystemDisk.AutoFormatAndMount != nil {
-                //	systemDiskMap["auto_format_and_mount"] = nodePools.Native.SystemDisk.AutoFormatAndMount
-                //}
-                //
-                //if nodePools.Native.SystemDisk.FileSystem != nil {
-                //	systemDiskMap["file_system"] = nodePools.Native.SystemDisk.FileSystem
-                //}
-                //
-                //if nodePools.Native.SystemDisk.MountTarget != nil {
-                //	systemDiskMap["mount_target"] = nodePools.Native.SystemDisk.MountTarget
-                //}
-
                 nativeMap["system_disk"] = []interface{}{systemDiskMap}
             }
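The read function above flattens the SDK's nested `NodePool` struct into schema maps, nil-checking every pointer field before copying it. A condensed, self-contained sketch of that idiom (the `SystemDisk` struct here is a stand-in for illustration, not the SDK type):

```go
// Sketch of the nil-guarded flatten pattern used in the generated readers.
type SystemDisk struct {
    DiskType *string
    DiskSize *int64
}

func flattenSystemDisk(disk *SystemDisk) []interface{} {
    if disk == nil {
        return nil
    }
    m := map[string]interface{}{}
    if disk.DiskType != nil {
        m["disk_type"] = *disk.DiskType
    }
    if disk.DiskSize != nil {
        m["disk_size"] = *disk.DiskSize
    }
    // Nested blocks are always stored as a single-element list.
    return []interface{}{m}
}
```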
diff --git a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_node_pools.go b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_node_pools.go
index 054286b3f8..cec41a392b 100644
--- a/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_node_pools.go
+++ b/tencentcloud/services/tke/data_source_tc_kubernetes_cluster_node_pools.go
@@ -6,7 +6,7 @@ import (
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
@@ -455,15 +455,15 @@ func dataSourceTencentCloudKubernetesClusterNodePoolsRead(d *schema.ResourceData, meta interface{}) error {
 
     if v, ok := d.GetOk("filters"); ok {
         filtersSet := v.([]interface{})
-        tmpSet := make([]*tke.Filter, 0, len(filtersSet))
+        tmpSet := make([]*tkev20180525.Filter, 0, len(filtersSet))
         for _, item := range filtersSet {
             filtersMap := item.(map[string]interface{})
-            filter := tke.Filter{}
+            filter := tkev20180525.Filter{}
             if v, ok := filtersMap["name"]; ok {
                 filter.Name = helper.String(v.(string))
             }
             if v, ok := filtersMap["values"]; ok {
                 valuesSet := v.(*schema.Set).List()
                 for i := range valuesSet {
                     values := valuesSet[i].(string)
                     filter.Values = append(filter.Values, helper.String(values))
@@ -474,7 +474,7 @@ func dataSourceTencentCloudKubernetesClusterNodePoolsRead(d *schema.ResourceData, meta interface{}) error {
         paramMap["Filters"] = tmpSet
     }
 
-    var respData []*tke.NodePool
+    var respData []*tkev20180525.NodePool
     err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
         result, e := service.DescribeKubernetesClusterNodePoolsByFilter(ctx, paramMap)
         if e != nil {
@@ -487,7 +487,7 @@ func dataSourceTencentCloudKubernetesClusterNodePoolsRead(d *schema.ResourceData, meta interface{}) error {
         return err
     }
 
-    ids := make([]string, 0, len(respData))
+    var ids []string
     nodePoolSetList := make([]map[string]interface{}, 0, len(respData))
     if respData != nil {
         for _, nodePoolSet := range respData {
diff --git a/tencentcloud/services/tke/data_source_tc_kubernetes_clusters.go b/tencentcloud/services/tke/data_source_tc_kubernetes_clusters.go
index b59ef2281a..c40135fd8b 100644
--- a/tencentcloud/services/tke/data_source_tc_kubernetes_clusters.go
+++ b/tencentcloud/services/tke/data_source_tc_kubernetes_clusters.go
@@ -6,7 +6,7 @@ import (
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
@@ -320,7 +320,7 @@ func dataSourceTencentCloudKubernetesClustersRead(d *schema.ResourceData, meta interface{}) error {
     service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()}
 
     paramMap := make(map[string]interface{})
-    var respData []*tke.Cluster
+    var respData []*tkev20180525.Cluster
     err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
         result, e := service.DescribeKubernetesClustersByFilter(ctx, paramMap)
         if e != nil {
@@ -337,7 +337,7 @@ func dataSourceTencentCloudKubernetesClustersRead(d *schema.ResourceData, meta interface{}) error {
         return err
     }
 
-    ids := make([]string, 0, len(respData))
+    var ids []string
     clustersList := make([]map[string]interface{}, 0, len(respData))
     if respData != nil {
         for _, clusters := range respData {
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_auth_attachment.go b/tencentcloud/services/tke/resource_tc_kubernetes_auth_attachment.go index c0b5dc69bd..319a785422 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_auth_attachment.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_auth_attachment.go @@ -8,7 +8,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" ) @@ -108,8 +109,8 @@ func resourceTencentCloudKubernetesAuthAttachmentCreate(d *schema.ResourceData, clusterId string ) var ( - request = tke.NewModifyClusterAuthenticationOptionsRequest() - response = tke.NewModifyClusterAuthenticationOptionsResponse() + request = tkev20180525.NewModifyClusterAuthenticationOptionsRequest() + response = tkev20180525.NewModifyClusterAuthenticationOptionsResponse() ) if v, ok := d.GetOk("cluster_id"); ok { @@ -120,7 +121,7 @@ func resourceTencentCloudKubernetesAuthAttachmentCreate(d *schema.ResourceData, request.ClusterId = helper.String(v.(string)) } - serviceAccountAuthenticationOptions := tke.ServiceAccountAuthenticationOptions{} + serviceAccountAuthenticationOptions := tkev20180525.ServiceAccountAuthenticationOptions{} if v, ok := d.GetOkExists("use_tke_default"); ok { serviceAccountAuthenticationOptions.UseTKEDefault = helper.Bool(v.(bool)) } @@ -135,12 +136,12 @@ func resourceTencentCloudKubernetesAuthAttachmentCreate(d *schema.ResourceData, } request.ServiceAccounts = &serviceAccountAuthenticationOptions - oIDCConfigAuthenticationOptions := tke.OIDCConfigAuthenticationOptions{} + oIDCConfigAuthenticationOptions := tkev20180525.OIDCConfigAuthenticationOptions{} if v, ok := d.GetOkExists("auto_create_oidc_config"); ok { oIDCConfigAuthenticationOptions.AutoCreateOIDCConfig = helper.Bool(v.(bool)) } if v, ok := d.GetOk("auto_create_client_id"); ok { - autoCreateClientIdSet := v.(*schema.Set).List() + autoCreateClientIdSet := v.([]interface{}) for i := range autoCreateClientIdSet { autoCreateClientId := autoCreateClientIdSet[i].(string) oIDCConfigAuthenticationOptions.AutoCreateClientId = append(oIDCConfigAuthenticationOptions.AutoCreateClientId, helper.String(autoCreateClientId)) @@ -156,7 +157,7 @@ func resourceTencentCloudKubernetesAuthAttachmentCreate(d *schema.ResourceData, return err } - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterAuthenticationOptionsWithContext(ctx, request) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ModifyClusterAuthenticationOptionsWithContext(ctx, request) if e != nil { if err := resourceTencentCloudKubernetesAuthAttachmentCreateRequestOnError0(ctx, request, e); err != nil { return err @@ -194,12 +195,8 @@ func resourceTencentCloudKubernetesAuthAttachmentRead(d *schema.ResourceData, me 
diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_addon_attachment_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_addon_attachment_extension.go
index fb7f14a819..9539bfe61d 100644
--- a/tencentcloud/services/tke/resource_tc_kubernetes_addon_attachment_extension.go
+++ b/tencentcloud/services/tke/resource_tc_kubernetes_addon_attachment_extension.go
@@ -7,6 +7,7 @@ import (
     "strings"
 
     tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_auth_attachment.go b/tencentcloud/services/tke/resource_tc_kubernetes_auth_attachment.go
index c0b5dc69bd..319a785422 100644
--- a/tencentcloud/services/tke/resource_tc_kubernetes_auth_attachment.go
+++ b/tencentcloud/services/tke/resource_tc_kubernetes_auth_attachment.go
@@ -8,7 +8,8 @@ import (
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
 )
@@ -108,8 +109,8 @@ func resourceTencentCloudKubernetesAuthAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
         clusterId string
     )
     var (
-        request  = tke.NewModifyClusterAuthenticationOptionsRequest()
-        response = tke.NewModifyClusterAuthenticationOptionsResponse()
+        request  = tkev20180525.NewModifyClusterAuthenticationOptionsRequest()
+        response = tkev20180525.NewModifyClusterAuthenticationOptionsResponse()
     )
 
     if v, ok := d.GetOk("cluster_id"); ok {
@@ -120,7 +121,7 @@ func resourceTencentCloudKubernetesAuthAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
         request.ClusterId = helper.String(v.(string))
     }
 
-    serviceAccountAuthenticationOptions := tke.ServiceAccountAuthenticationOptions{}
+    serviceAccountAuthenticationOptions := tkev20180525.ServiceAccountAuthenticationOptions{}
     if v, ok := d.GetOkExists("use_tke_default"); ok {
         serviceAccountAuthenticationOptions.UseTKEDefault = helper.Bool(v.(bool))
     }
@@ -135,12 +136,12 @@ func resourceTencentCloudKubernetesAuthAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
     }
     request.ServiceAccounts = &serviceAccountAuthenticationOptions
 
-    oIDCConfigAuthenticationOptions := tke.OIDCConfigAuthenticationOptions{}
+    oIDCConfigAuthenticationOptions := tkev20180525.OIDCConfigAuthenticationOptions{}
     if v, ok := d.GetOkExists("auto_create_oidc_config"); ok {
         oIDCConfigAuthenticationOptions.AutoCreateOIDCConfig = helper.Bool(v.(bool))
     }
     if v, ok := d.GetOk("auto_create_client_id"); ok {
         autoCreateClientIdSet := v.(*schema.Set).List()
         for i := range autoCreateClientIdSet {
             autoCreateClientId := autoCreateClientIdSet[i].(string)
             oIDCConfigAuthenticationOptions.AutoCreateClientId = append(oIDCConfigAuthenticationOptions.AutoCreateClientId, helper.String(autoCreateClientId))
@@ -156,7 +157,7 @@ func resourceTencentCloudKubernetesAuthAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
             return err
         }
 
-        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterAuthenticationOptionsWithContext(ctx, request)
+        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ModifyClusterAuthenticationOptionsWithContext(ctx, request)
         if e != nil {
             if err := resourceTencentCloudKubernetesAuthAttachmentCreateRequestOnError0(ctx, request, e); err != nil {
                 return err
@@ -194,12 +195,8 @@ func resourceTencentCloudKubernetesAuthAttachmentRead(d *schema.ResourceData, meta interface{}) error {
 
     _ = d.Set("cluster_id", clusterId)
 
-    respData, err := service.DescribeKubernetesAuthAttachmentById(ctx, clusterId)
-    if err != nil {
-        return err
-    }
-
-    err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
+    var respData *tkev20180525.DescribeClusterAuthenticationOptionsResponseParams
+    reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
         result, e := service.DescribeKubernetesAuthAttachmentById(ctx, clusterId)
         if e != nil {
             return tccommon.RetryError(e)
@@ -210,9 +207,9 @@ func resourceTencentCloudKubernetesAuthAttachmentRead(d *schema.ResourceData, meta interface{}) error {
         respData = result
         return nil
     })
-    if err != nil {
-        log.Printf("[CRITAL]%s read kubernetes auth attachment failed, reason:%+v", logId, err)
-        return err
+    if reqErr != nil {
+        log.Printf("[CRITAL]%s read kubernetes auth attachment failed, reason:%+v", logId, reqErr)
+        return reqErr
     }
 
     if respData == nil {
@@ -283,18 +280,18 @@ func resourceTencentCloudKubernetesAuthAttachmentUpdate(d *schema.ResourceData, meta interface{}) error {
     }
 
     if needChange {
-        request := tke.NewModifyClusterAuthenticationOptionsRequest()
+        request := tkev20180525.NewModifyClusterAuthenticationOptionsRequest()
 
-        request.ClusterId = &clusterId
+        request.ClusterId = helper.String(clusterId)
 
-        serviceAccountAuthenticationOptions := tke.ServiceAccountAuthenticationOptions{}
+        serviceAccountAuthenticationOptions := tkev20180525.ServiceAccountAuthenticationOptions{}
         if v, ok := d.GetOkExists("use_tke_default"); ok {
             serviceAccountAuthenticationOptions.UseTKEDefault = helper.Bool(v.(bool))
         }
         if v, ok := d.GetOk("issuer"); ok {
             serviceAccountAuthenticationOptions.Issuer = helper.String(v.(string))
         }
         if v, ok := d.GetOk("jwks_uri"); ok {
             serviceAccountAuthenticationOptions.JWKSURI = helper.String(v.(string))
         }
         if v, ok := d.GetOkExists("auto_create_discovery_anonymous_auth"); ok {
@@ -302,12 +299,12 @@ func resourceTencentCloudKubernetesAuthAttachmentUpdate(d *schema.ResourceData, meta interface{}) error {
         }
         request.ServiceAccounts = &serviceAccountAuthenticationOptions
 
-        oIDCConfigAuthenticationOptions := tke.OIDCConfigAuthenticationOptions{}
+        oIDCConfigAuthenticationOptions := tkev20180525.OIDCConfigAuthenticationOptions{}
         if v, ok := d.GetOkExists("auto_create_oidc_config"); ok {
             oIDCConfigAuthenticationOptions.AutoCreateOIDCConfig = helper.Bool(v.(bool))
         }
         if v, ok := d.GetOk("auto_create_client_id"); ok {
             autoCreateClientIdSet := v.(*schema.Set).List()
             for i := range autoCreateClientIdSet {
                 autoCreateClientId := autoCreateClientIdSet[i].(string)
                 oIDCConfigAuthenticationOptions.AutoCreateClientId = append(oIDCConfigAuthenticationOptions.AutoCreateClientId, helper.String(autoCreateClientId))
@@ -323,7 +320,7 @@ func resourceTencentCloudKubernetesAuthAttachmentUpdate(d *schema.ResourceData, meta interface{}) error {
             return err
         }
 
-        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterAuthenticationOptionsWithContext(ctx, request)
+        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ModifyClusterAuthenticationOptionsWithContext(ctx, request)
         if e != nil {
             if err := resourceTencentCloudKubernetesAuthAttachmentUpdateRequestOnError0(ctx, request, e); err != nil {
                 return err
@@ -353,22 +350,18 @@ func resourceTencentCloudKubernetesAuthAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
     clusterId := d.Id()
 
     var (
-        request  = tke.NewModifyClusterAuthenticationOptionsRequest()
-        response = tke.NewModifyClusterAuthenticationOptionsResponse()
+        request  = tkev20180525.NewModifyClusterAuthenticationOptionsRequest()
+        response = tkev20180525.NewModifyClusterAuthenticationOptionsResponse()
     )
 
-    if v, ok := d.GetOk("cluster_id"); ok {
-        clusterId = v.(string)
-    }
-
-    request.ClusterId = &clusterId
+    request.ClusterId = helper.String(clusterId)
 
     err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
         if err := resourceTencentCloudKubernetesAuthAttachmentDeletePreRequest0(ctx, request); err != nil {
             return err
         }
 
-        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterAuthenticationOptionsWithContext(ctx, request)
+        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ModifyClusterAuthenticationOptionsWithContext(ctx, request)
         if e != nil {
             return tccommon.RetryError(e)
         } else {
@@ -378,7 +371,7 @@ func resourceTencentCloudKubernetesAuthAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
         return nil
     })
     if err != nil {
-        log.Printf("[CRITAL]%s create kubernetes auth attachment failed, reason:%+v", logId, err)
+        log.Printf("[CRITAL]%s delete kubernetes auth attachment failed, reason:%+v", logId, err)
         return err
     }
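Note the split between `d.GetOk` and `d.GetOkExists` in the request builders above: `GetOk` treats a zero value (`false`, `""`, `0`) as unset, so boolean arguments such as `auto_create_oidc_config` must use `GetOkExists`. A minimal sketch of the distinction (`opts` stands in for the request struct):

```go
// Sketch: GetOk would report ok == false when the practitioner explicitly
// sets the attribute to false, silently dropping the field from the API
// request; GetOkExists reports whether the key is present at all.
if v, ok := d.GetOkExists("auto_create_oidc_config"); ok {
    opts.AutoCreateOIDCConfig = helper.Bool(v.(bool))
}
```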
"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" @@ -77,8 +77,8 @@ func resourceTencentCloudKubernetesBackupStorageLocationCreate(d *schema.Resourc name string ) var ( - request = tke.NewCreateBackupStorageLocationRequest() - response = tke.NewCreateBackupStorageLocationResponse() + request = tkev20180525.NewCreateBackupStorageLocationRequest() + response = tkev20180525.NewCreateBackupStorageLocationResponse() ) if v, ok := d.GetOk("name"); ok { @@ -102,7 +102,7 @@ func resourceTencentCloudKubernetesBackupStorageLocationCreate(d *schema.Resourc } err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().CreateBackupStorageLocationWithContext(ctx, request) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().CreateBackupStorageLocationWithContext(ctx, request) if e != nil { return tccommon.RetryError(e) } else { @@ -188,14 +188,14 @@ func resourceTencentCloudKubernetesBackupStorageLocationDelete(d *schema.Resourc name := d.Id() var ( - request = tke.NewDeleteBackupStorageLocationRequest() - response = tke.NewDeleteBackupStorageLocationResponse() + request = tkev20180525.NewDeleteBackupStorageLocationRequest() + response = tkev20180525.NewDeleteBackupStorageLocationResponse() ) request.Name = helper.String(name) err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DeleteBackupStorageLocationWithContext(ctx, request) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DeleteBackupStorageLocationWithContext(ctx, request) if e != nil { return tccommon.RetryError(e) } else { diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster.go index 4413252f3f..26f3ec1724 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" @@ -1303,11 +1303,11 @@ func resourceTencentCloudKubernetesClusterCreate(d *schema.ResourceData, meta in clusterId string ) var ( - request = tke.NewCreateClusterRequest() - response = tke.NewCreateClusterResponse() + request = tkev20180525.NewCreateClusterRequest() + response = tkev20180525.NewCreateClusterResponse() ) - clusterCIDRSettings := tke.ClusterCIDRSettings{} + clusterCIDRSettings := tkev20180525.ClusterCIDRSettings{} if v, ok := d.GetOk("cluster_cidr"); ok { clusterCIDRSettings.ClusterCIDR = helper.String(v.(string)) } @@ -1328,7 +1328,7 @@ func resourceTencentCloudKubernetesClusterCreate(d *schema.ResourceData, meta in } request.ClusterCIDRSettings = &clusterCIDRSettings - clusterBasicSettings := tke.ClusterBasicSettings{} + clusterBasicSettings := tkev20180525.ClusterBasicSettings{} if v, ok := d.GetOk("cluster_version"); ok { 
diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_backup_storage_location.go b/tencentcloud/services/tke/resource_tc_kubernetes_backup_storage_location.go
index 0a28fbf17c..fbae9ed729 100644
--- a/tencentcloud/services/tke/resource_tc_kubernetes_backup_storage_location.go
+++ b/tencentcloud/services/tke/resource_tc_kubernetes_backup_storage_location.go
@@ -7,7 +7,7 @@ import (
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
@@ -77,8 +77,8 @@ func resourceTencentCloudKubernetesBackupStorageLocationCreate(d *schema.ResourceData, meta interface{}) error {
         name string
     )
     var (
-        request  = tke.NewCreateBackupStorageLocationRequest()
-        response = tke.NewCreateBackupStorageLocationResponse()
+        request  = tkev20180525.NewCreateBackupStorageLocationRequest()
+        response = tkev20180525.NewCreateBackupStorageLocationResponse()
     )
 
     if v, ok := d.GetOk("name"); ok {
@@ -102,7 +102,7 @@ func resourceTencentCloudKubernetesBackupStorageLocationCreate(d *schema.ResourceData, meta interface{}) error {
     }
 
     err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
-        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().CreateBackupStorageLocationWithContext(ctx, request)
+        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().CreateBackupStorageLocationWithContext(ctx, request)
         if e != nil {
             return tccommon.RetryError(e)
         } else {
@@ -188,14 +188,14 @@ func resourceTencentCloudKubernetesBackupStorageLocationDelete(d *schema.ResourceData, meta interface{}) error {
     name := d.Id()
 
     var (
-        request  = tke.NewDeleteBackupStorageLocationRequest()
-        response = tke.NewDeleteBackupStorageLocationResponse()
+        request  = tkev20180525.NewDeleteBackupStorageLocationRequest()
+        response = tkev20180525.NewDeleteBackupStorageLocationResponse()
     )
 
     request.Name = helper.String(name)
 
     err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
-        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DeleteBackupStorageLocationWithContext(ctx, request)
+        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DeleteBackupStorageLocationWithContext(ctx, request)
         if e != nil {
             return tccommon.RetryError(e)
         } else {
return tccommon.RetryError(e) } else { @@ -1567,7 +1567,7 @@ func resourceTencentCloudKubernetesClusterRead(d *schema.ResourceData, meta inte return err } - var respData1 *tke.DescribeClusterInstancesResponseParams + var respData1 *tkev20180525.DescribeClusterInstancesResponseParams reqErr1 := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { result, e := service.DescribeKubernetesClusterById1(ctx, clusterId) if e != nil { @@ -1620,7 +1620,7 @@ func resourceTencentCloudKubernetesClusterRead(d *schema.ResourceData, meta inte _ = d.Set("worker_instances_list", instanceSetList) } - var respData2 *tke.DescribeClusterSecurityResponseParams + var respData2 *tkev20180525.DescribeClusterSecurityResponseParams reqErr2 := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { result, e := service.DescribeKubernetesClusterById2(ctx, clusterId) if e != nil { @@ -1697,7 +1697,7 @@ func resourceTencentCloudKubernetesClusterUpdate(d *schema.ResourceData, meta in } if needChange { - request := tke.NewModifyClusterAttributeRequest() + request := tkev20180525.NewModifyClusterAttributeRequest() request.ClusterId = helper.String(clusterId) @@ -1718,7 +1718,7 @@ func resourceTencentCloudKubernetesClusterUpdate(d *schema.ResourceData, meta in } err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterAttributeWithContext(ctx, request) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ModifyClusterAttributeWithContext(ctx, request) if e != nil { return tccommon.RetryError(e) } else { @@ -1742,9 +1742,9 @@ func resourceTencentCloudKubernetesClusterUpdate(d *schema.ResourceData, meta in } if needChange1 { - request1 := tke.NewUpdateClusterVersionRequest() + request1 := tkev20180525.NewUpdateClusterVersionRequest() - response1 := tke.NewUpdateClusterVersionResponse() + response1 := tkev20180525.NewUpdateClusterVersionResponse() request1.ClusterId = helper.String(clusterId) @@ -1757,7 +1757,7 @@ func resourceTencentCloudKubernetesClusterUpdate(d *schema.ResourceData, meta in } err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().UpdateClusterVersionWithContext(ctx, request1) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().UpdateClusterVersionWithContext(ctx, request1) if e != nil { return tccommon.RetryError(e) } else { @@ -1785,12 +1785,12 @@ func resourceTencentCloudKubernetesClusterUpdate(d *schema.ResourceData, meta in } if needChange2 { - request2 := tke.NewModifyClusterAsGroupOptionAttributeRequest() + request2 := tkev20180525.NewModifyClusterAsGroupOptionAttributeRequest() request2.ClusterId = helper.String(clusterId) if clusterAsGroupOptionMap, ok := helper.InterfacesHeadMap(d, "node_pool_global_config"); ok { - clusterAsGroupOption := tke.ClusterAsGroupOption{} + clusterAsGroupOption := tkev20180525.ClusterAsGroupOption{} if v, ok := clusterAsGroupOptionMap["is_scale_in_enabled"]; ok { clusterAsGroupOption.IsScaleDownEnabled = helper.Bool(v.(bool)) } @@ -1822,7 +1822,7 @@ func resourceTencentCloudKubernetesClusterUpdate(d *schema.ResourceData, meta in } err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterAsGroupOptionAttributeWithContext(ctx, request2) + result, e := 
meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ModifyClusterAsGroupOptionAttributeWithContext(ctx, request2) if e != nil { return tccommon.RetryError(e) } else { @@ -1853,8 +1853,8 @@ func resourceTencentCloudKubernetesClusterDelete(d *schema.ResourceData, meta in clusterId := d.Id() var ( - request = tke.NewDeleteClusterRequest() - response = tke.NewDeleteClusterResponse() + request = tkev20180525.NewDeleteClusterRequest() + response = tkev20180525.NewDeleteClusterResponse() ) request.ClusterId = helper.String(clusterId) @@ -1867,7 +1867,7 @@ func resourceTencentCloudKubernetesClusterDelete(d *schema.ResourceData, meta in } err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DeleteClusterWithContext(ctx, request) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DeleteClusterWithContext(ctx, request) if e != nil { if err := resourceTencentCloudKubernetesClusterDeleteRequestOnError0(ctx, e); err != nil { return err diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_attachment.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_attachment.go index d846527f59..51c360ba84 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_attachment.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_attachment.go @@ -1,3 +1,4 @@ +// Code generated by iacg; DO NOT EDIT. package tke import ( @@ -8,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" @@ -170,7 +171,6 @@ func ResourceTencentCloudKubernetesClusterAttachment() *schema.Resource { Optional: true, ForceNew: true, Default: true, - Deprecated: "This argument was deprecated, use `unschedulable` instead.", Description: "Indicate to schedule the adding node or not. Default is true.", }, "desired_pod_num": { @@ -235,7 +235,6 @@ func ResourceTencentCloudKubernetesClusterAttachment() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - Deprecated: "This argument was no longer supported by TencentCloud TKE.", Description: "Mount target. Default is not mounting.", }, "docker_graph_path": { @@ -243,7 +242,6 @@ func ResourceTencentCloudKubernetesClusterAttachment() *schema.Resource { Optional: true, ForceNew: true, Default: "/var/lib/docker", - Deprecated: "This argument was no longer supported by TencentCloud TKE.", Description: "Docker graph path. Default is `/var/lib/docker`.", }, "data_disk": { @@ -303,7 +301,6 @@ func ResourceTencentCloudKubernetesClusterAttachment() *schema.Resource { Type: schema.TypeList, Optional: true, ForceNew: true, - Deprecated: "This argument was no longer supported by TencentCloud TKE.", Description: "Custom parameter information related to the node. 
This is a white-list parameter.", Elem: &schema.Schema{ Type: schema.TypeString, @@ -313,14 +310,12 @@ func ResourceTencentCloudKubernetesClusterAttachment() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - Deprecated: "This argument was no longer supported by TencentCloud TKE.", Description: "Base64-encoded User Data text, the length limit is 16KB.", }, "pre_start_user_script": { Type: schema.TypeString, Optional: true, ForceNew: true, - Deprecated: "This argument was no longer supported by TencentCloud TKE.", Description: "Base64-encoded user script, executed before initializing the node, currently only effective for adding existing nodes.", }, "is_schedule": { @@ -328,7 +323,6 @@ func ResourceTencentCloudKubernetesClusterAttachment() *schema.Resource { Optional: true, ForceNew: true, Default: true, - Deprecated: "This argument was deprecated, use `unschedulable` instead.", Description: "Indicate to schedule the adding node or not. Default is true.", }, "desired_pod_num": { @@ -393,7 +387,7 @@ func ResourceTencentCloudKubernetesClusterAttachment() *schema.Resource { Optional: true, ForceNew: true, Default: 0, - Description: "Sets whether the joining node participates in the schedule. Default is `0`, which means it participates in scheduling. Non-zero(eg: `1`) number means it does not participate in scheduling.", + Description: "Sets whether the joining node participates in scheduling. Default is `0`, which means it participates in scheduling; a non-zero value (e.g. `1`) means it does not.", "security_groups": { @@ -427,8 +421,8 @@ func resourceTencentCloudKubernetesClusterAttachmentCreate(d *schema.ResourceDat clusterId string ) var ( - request = tke.NewAddExistedInstancesRequest() - response = tke.NewAddExistedInstancesResponse() + request = tkev20180525.NewAddExistedInstancesRequest() + response = tkev20180525.NewAddExistedInstancesResponse() ) if v, ok := d.GetOk("instance_id"); ok { @@ -448,21 +442,21 @@ func resourceTencentCloudKubernetesClusterAttachmentCreat(d *schema.ResourceDat request.ImageId = helper.String(v.(string)) } - loginSettings := tke.LoginSettings{} + loginSettings := tkev20180525.LoginSettings{} if v, ok := d.GetOk("password"); ok { loginSettings.Password = helper.String(v.(string)) } request.LoginSettings = &loginSettings if instanceAdvancedSettingsMap, ok := helper.InterfacesHeadMap(d, "worker_config"); ok { - instanceAdvancedSettings := tke.InstanceAdvancedSettings{} + instanceAdvancedSettings := tkev20180525.InstanceAdvancedSettings{} if v, ok := instanceAdvancedSettingsMap["mount_target"]; ok { instanceAdvancedSettings.MountTarget = helper.String(v.(string)) } if v, ok := instanceAdvancedSettingsMap["data_disk"]; ok { for _, item := range v.([]interface{}) { dataDisksMap := item.(map[string]interface{}) - dataDisk := tke.DataDisk{} + dataDisk := tkev20180525.DataDisk{} if v, ok := dataDisksMap["disk_type"]; ok { dataDisk.DiskType = helper.String(v.(string)) } @@ -497,12 +491,15 @@ func resourceTencentCloudKubernetesClusterAttachmentCreate(d *schema.ResourceDat instanceAdvancedSettings.DesiredPodNumber = helper.IntInt64(v.(int)) } if gPUArgsMap, ok := helper.ConvertInterfacesHeadToMap(instanceAdvancedSettingsMap["gpu_args"]); ok { - gPUArgs := tke.GPUArgs{} + gPUArgs := tkev20180525.GPUArgs{} if v, ok := gPUArgsMap["mig_enable"]; ok { gPUArgs.MIGEnable = helper.Bool(v.(bool)) } instanceAdvancedSettings.GPUArgs = &gPUArgs } + if v, ok := d.GetOkExists("unschedulable"); ok { + instanceAdvancedSettings.Unschedulable = helper.IntInt64(v.(int)) + } request.InstanceAdvancedSettings = 
&instanceAdvancedSettings } @@ -513,14 +510,14 @@ func resourceTencentCloudKubernetesClusterAttachmentCreate(d *schema.ResourceDat if v, ok := d.GetOk("worker_config_overrides"); ok { for _, item := range v.([]interface{}) { instanceAdvancedSettingsOverridesMap := item.(map[string]interface{}) - instanceAdvancedSettings := tke.InstanceAdvancedSettings{} + instanceAdvancedSettings := tkev20180525.InstanceAdvancedSettings{} if v, ok := instanceAdvancedSettingsOverridesMap["mount_target"]; ok { instanceAdvancedSettings.MountTarget = helper.String(v.(string)) } if v, ok := instanceAdvancedSettingsOverridesMap["data_disk"]; ok { for _, item := range v.([]interface{}) { dataDisksMap := item.(map[string]interface{}) - dataDisk := tke.DataDisk{} + dataDisk := tkev20180525.DataDisk{} if v, ok := dataDisksMap["disk_type"]; ok { dataDisk.DiskType = helper.String(v.(string)) } @@ -555,12 +552,15 @@ func resourceTencentCloudKubernetesClusterAttachmentCreate(d *schema.ResourceDat instanceAdvancedSettings.DesiredPodNumber = helper.IntInt64(v.(int)) } if gPUArgsMap, ok := helper.ConvertInterfacesHeadToMap(instanceAdvancedSettingsOverridesMap["gpu_args"]); ok { - gPUArgs2 := tke.GPUArgs{} + gPUArgs2 := tkev20180525.GPUArgs{} if v, ok := gPUArgsMap["mig_enable"]; ok { gPUArgs2.MIGEnable = helper.Bool(v.(bool)) } instanceAdvancedSettings.GPUArgs = &gPUArgs2 } + if v, ok := d.GetOkExists("unschedulable"); ok { + instanceAdvancedSettings.Unschedulable = helper.IntInt64(v.(int)) + } request.InstanceAdvancedSettingsOverrides = append(request.InstanceAdvancedSettingsOverrides, &instanceAdvancedSettings) } } @@ -570,7 +570,7 @@ func resourceTencentCloudKubernetesClusterAttachmentCreate(d *schema.ResourceDat } err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().AddExistedInstancesWithContext(ctx, request) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().AddExistedInstancesWithContext(ctx, request) if e != nil { return resourceTencentCloudKubernetesClusterAttachmentCreateRequestOnError0(ctx, request, e) } else { @@ -652,7 +652,7 @@ func resourceTencentCloudKubernetesClusterAttachmentRead(d *schema.ResourceData, _ = d.Set("image_id", respData1.ImageId) } - var respData2 *tke.Instance + var respData2 *tkev20180525.Instance reqErr2 := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { result, e := service.DescribeKubernetesClusterAttachmentById2(ctx, instanceId, clusterId) if e != nil { @@ -703,8 +703,8 @@ func resourceTencentCloudKubernetesClusterAttachmentDelete(d *schema.ResourceDat clusterId := idSplit[1] var ( - request = tke.NewDeleteClusterInstancesRequest() - response = tke.NewDeleteClusterInstancesResponse() + request = tkev20180525.NewDeleteClusterInstancesRequest() + response = tkev20180525.NewDeleteClusterInstancesResponse() ) request.ClusterId = helper.String(clusterId) @@ -715,7 +715,7 @@ func resourceTencentCloudKubernetesClusterAttachmentDelete(d *schema.ResourceDat request.InstanceDeleteMode = &instanceDeleteMode err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DeleteClusterInstancesWithContext(ctx, request) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DeleteClusterInstancesWithContext(ctx, request) if e != nil { return resourceTencentCloudKubernetesClusterAttachmentDeleteRequestOnError0(ctx, e) } else { 
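The attachment diff above wires the new `unschedulable` argument into `InstanceAdvancedSettings` for both `worker_config` and `worker_config_overrides`. A minimal configuration sketch of how the argument is meant to be used, assuming an existing cluster and CVM instance (the IDs and password below are placeholders, not values from this PR):

```hcl
# Hypothetical IDs for illustration only.
resource "tencentcloud_kubernetes_cluster_attachment" "example" {
  cluster_id  = "cls-xxxxxxxx" # placeholder TKE cluster ID
  instance_id = "ins-xxxxxxxx" # placeholder CVM instance ID
  password    = "Password@123" # placeholder node login password

  # 0 (default): the joining node participates in scheduling;
  # any non-zero value keeps it unschedulable, matching the
  # InstanceAdvancedSettings.Unschedulable wiring added above.
  unschedulable = 0
}
```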
diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_encryption_protection.go b/tencentcloud/services/tke/resource_tc_kubernetes_encryption_protection.go index 1526479a0f..482df0d762 100644 --- a/tencentcloud/services/tke/resource_tc_kubernetes_encryption_protection.go +++ b/tencentcloud/services/tke/resource_tc_kubernetes_encryption_protection.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" @@ -69,8 +69,8 @@ func resourceTencentCloudKubernetesEncryptionProtectionCreate(d *schema.Resource clusterId string ) var ( - request = tke.NewEnableEncryptionProtectionRequest() - response = tke.NewEnableEncryptionProtectionResponse() + request = tkev20180525.NewEnableEncryptionProtectionRequest() + response = tkev20180525.NewEnableEncryptionProtectionResponse() ) if v, ok := d.GetOk("cluster_id"); ok { @@ -82,7 +82,7 @@ func resourceTencentCloudKubernetesEncryptionProtectionCreate(d *schema.Resource } if kMSConfigurationMap, ok := helper.InterfacesHeadMap(d, "kms_configuration"); ok { - kMSConfiguration := tke.KMSConfiguration{} + kMSConfiguration := tkev20180525.KMSConfiguration{} if v, ok := kMSConfigurationMap["key_id"]; ok { kMSConfiguration.KeyId = helper.String(v.(string)) } @@ -93,7 +93,7 @@ func resourceTencentCloudKubernetesEncryptionProtectionCreate(d *schema.Resource } err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().EnableEncryptionProtectionWithContext(ctx, request) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().EnableEncryptionProtectionWithContext(ctx, request) if e != nil { return tccommon.RetryError(e) } else { @@ -159,14 +159,14 @@ func resourceTencentCloudKubernetesEncryptionProtectionDelete(d *schema.Resource clusterId := d.Id() var ( - request = tke.NewDisableEncryptionProtectionRequest() - response = tke.NewDisableEncryptionProtectionResponse() + request = tkev20180525.NewDisableEncryptionProtectionRequest() + response = tkev20180525.NewDisableEncryptionProtectionResponse() ) request.ClusterId = helper.String(clusterId) err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { - result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DisableEncryptionProtectionWithContext(ctx, request) + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DisableEncryptionProtectionWithContext(ctx, request) if e != nil { return tccommon.RetryError(e) } else { diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools.go b/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools.go new file mode 100644 index 0000000000..834b8f88d8 --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools.go @@ -0,0 +1,1490 @@ +// Code generated by iacg; DO NOT EDIT. 
+package tke + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tkev20220501 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20220501" + + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func ResourceTencentCloudKubernetesNativeNodePools() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudKubernetesNativeNodePoolsCreate, + Read: resourceTencentCloudKubernetesNativeNodePoolsRead, + Update: resourceTencentCloudKubernetesNativeNodePoolsUpdate, + Delete: resourceTencentCloudKubernetesNativeNodePoolsDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "ID of the cluster.", + }, + + "name": { + Type: schema.TypeString, + Required: true, + Description: "Node pool name.", + }, + + "type": { + Type: schema.TypeString, + Required: true, + Description: "Node pool type. Optional value is `Native`.", + }, + + "labels": { + Type: schema.TypeList, + Optional: true, + Description: "Node Labels.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name in the map table.", + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: "Value in map table.", + }, + }, + }, + }, + + "taints": { + Type: schema.TypeList, + Optional: true, + Description: "Node taint.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Optional: true, + Description: "Key of the taint.", + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: "Value of the taint.", + }, + "effect": { + Type: schema.TypeString, + Optional: true, + Description: "Effect of the taint.", + }, + }, + }, + }, + + "tags": { + Type: schema.TypeList, + Optional: true, + Description: "Node tags.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_type": { + Type: schema.TypeString, + Optional: true, + Description: "The resource type bound to the label.", + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Description: "Tag pair list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Optional: true, + Description: "Tag Key.", + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: "Tag Value.", + }, + }, + }, + }, + }, + }, + }, + + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: "Whether to enable deletion protection.", + }, + + "unschedulable": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: "Whether the node is not schedulable by default. 
The native node is not aware of it and passes false by default.", + }, + + "native": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: "Native node pool creation parameters.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scaling": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: "Node pool scaling configuration.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_replicas": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Minimum number of replicas in node pool.", + }, + "max_replicas": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Maximum number of replicas in node pool.", + }, + "create_policy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Node pool expansion strategy. `ZoneEquality`: multiple availability zones are broken up; `ZonePriority`: the preferred availability zone takes precedence.", + }, + }, + }, + }, + "subnet_ids": { + Type: schema.TypeList, + Required: true, + Description: "Subnet list.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "instance_charge_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Node billing type. `PREPAID` is a yearly and monthly subscription, `POSTPAID_BY_HOUR` is a pay-as-you-go plan. The default is `POSTPAID_BY_HOUR`.", + }, + "system_disk": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Description: "System disk configuration.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Cloud disk type. Valid values: `CLOUD_PREMIUM`: Premium Cloud Storage, `CLOUD_SSD`: cloud SSD disk, `CLOUD_BSSD`: Basic SSD, `CLOUD_HSSD`: Enhanced SSD.", + }, + "disk_size": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Cloud disk size (G).", + }, + }, + }, + }, + "instance_types": { + Type: schema.TypeList, + Required: true, + Description: "Model list.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "security_group_ids": { + Type: schema.TypeList, + Required: true, + Description: "Security group list.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "auto_repair": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable self-healing ability.", + }, + "instance_charge_prepaid": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: "Billing configuration for yearly and monthly models.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "period": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Prepaid billing cycle, unit (month): 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 24, 36, 48, 60.", + }, + "renew_flag": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Prepaid renewal method:\n - `NOTIFY_AND_AUTO_RENEW`: Notify users of expiration and automatically renew (default).\n - `NOTIFY_AND_MANUAL_RENEW`: Notify users of expiration, but do not automatically renew.\n - `DISABLE_NOTIFY_AND_MANUAL_RENEW`: Do not notify users of expiration and do not automatically renew.", + }, + }, + }, + }, + "management": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: "Node pool management parameter settings.", + Elem: &schema.Resource{ + 
Schema: map[string]*schema.Schema{ + "nameservers": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "DNS configuration.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "hosts": { + Type: schema.TypeList, + Optional: true, + Description: "Hosts configuration.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "kernel_args": { + Type: schema.TypeList, + Optional: true, + Description: "Kernel parameter configuration.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "health_check_policy_name": { + Type: schema.TypeString, + Optional: true, + Description: "Fault self-healing rule name.", + }, + "host_name_pattern": { + Type: schema.TypeString, + Optional: true, + Description: "Native node pool hostName pattern string.", + }, + "kubelet_args": { + Type: schema.TypeList, + Optional: true, + Description: "Kubelet custom parameters.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "lifecycle": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: "Predefined scripts.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pre_init": { + Type: schema.TypeString, + Optional: true, + Description: "Custom script before node initialization.", + }, + "post_init": { + Type: schema.TypeString, + Optional: true, + Description: "Custom script after node initialization.", + }, + }, + }, + }, + "runtime_root_dir": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Runtime root directory.", + }, + "enable_autoscaling": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable elastic scaling.", + }, + "replicas": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Desired number of nodes.", + }, + "internet_accessible": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: "Public network bandwidth settings.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_bandwidth_out": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Maximum bandwidth output. Note: When chargeType is `TRAFFIC_POSTPAID_BY_HOUR` or `BANDWIDTH_POSTPAID_BY_HOUR`, the valid range is 1~100. When chargeType is `BANDWIDTH_PACKAGE`, the valid range is 1~2000.", + ValidateFunc: tccommon.ValidateIntegerMin(1), + }, + "charge_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Network billing method. Optional values are `TRAFFIC_POSTPAID_BY_HOUR`, `BANDWIDTH_POSTPAID_BY_HOUR` and `BANDWIDTH_PACKAGE`.", + }, + "bandwidth_package_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Bandwidth package ID. Note: When ChargeType is BANDWIDTH_PACKAGE, the value cannot be empty; otherwise, the value must be empty.", + }, + }, + }, + }, + "data_disks": { + Type: schema.TypeList, + Optional: true, + Description: "Native node pool data disk list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_type": { + Type: schema.TypeString, + Required: true, + Description: "Cloud disk type.
Valid values: `CLOUD_PREMIUM`: Premium Cloud Storage, `CLOUD_SSD`: cloud SSD disk, `CLOUD_BSSD`: Basic SSD, `CLOUD_HSSD`: Enhanced SSD, `CLOUD_TSSD`: Tremendous SSD, `LOCAL_NVME`: local NVME disk.", + }, + "file_system": { + Type: schema.TypeString, + Optional: true, + Description: "File system (ext3/ext4/xfs).", + }, + "disk_size": { + Type: schema.TypeInt, + Required: true, + Description: "Cloud disk size (G).", + }, + "auto_format_and_mount": { + Type: schema.TypeBool, + Required: true, + Description: "Whether to automatically format the disk and mount it.", + }, + "disk_partition": { + Type: schema.TypeString, + Optional: true, + Description: "Mount device name or partition name.", + }, + "mount_target": { + Type: schema.TypeString, + Optional: true, + Description: "Mount directory.", + }, + "encrypt": { + Type: schema.TypeString, + Optional: true, + Description: "Pass in this parameter to create an encrypted cloud disk. The value is fixed to `ENCRYPT`.", + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Description: "Custom key used when purchasing an encrypted disk. When this parameter is passed in, the `encrypt` parameter must not be empty.", + }, + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + Description: "Snapshot ID. If passed in, the cloud disk will be created based on this snapshot. The snapshot type must be a data disk snapshot.", + }, + "throughput_performance": { + Type: schema.TypeInt, + Optional: true, + Description: "Cloud disk performance, unit: MB/s. Use this parameter to purchase additional performance for the cloud disk.", + }, + }, + }, + }, + "key_ids": { + Type: schema.TypeList, + Optional: true, + Description: "Node pool SSH public key ID array.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + + "annotations": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Description: "Node Annotation List.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name in the map table.", + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: "Value in the map table.", + }, + }, + }, + }, + + "life_state": { + Type: schema.TypeString, + Computed: true, + Description: "Node pool status.", + }, + + "created_at": { + Type: schema.TypeString, + Computed: true, + Description: "Creation time.", + }, + }, + } +} + +func resourceTencentCloudKubernetesNativeNodePoolsCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_native_node_pools.create")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + var ( + clusterId string + nodePoolId string + ) + var ( + request = tkev20220501.NewCreateNodePoolRequest() + response = tkev20220501.NewCreateNodePoolResponse() + ) + + if v, ok := d.GetOk("cluster_id"); ok { + clusterId = v.(string) + } + + request.ClusterId = helper.String(clusterId) + + if v, ok := d.GetOk("name"); ok { + request.Name = helper.String(v.(string)) + } + + if v, ok := d.GetOk("type"); ok { + request.Type = helper.String(v.(string)) + } + + if v, ok := d.GetOk("labels"); ok { + for _, item := range v.([]interface{}) { + labelsMap := item.(map[string]interface{}) + label := tkev20220501.Label{} + if v, ok := labelsMap["name"]; ok { + label.Name = helper.String(v.(string)) + } + if v, ok := 
labelsMap["value"]; ok { + label.Value = helper.String(v.(string)) + } + request.Labels = append(request.Labels, &label) + } + } + + if v, ok := d.GetOk("taints"); ok { + for _, item := range v.([]interface{}) { + taintsMap := item.(map[string]interface{}) + taint := tkev20220501.Taint{} + if v, ok := taintsMap["key"]; ok { + taint.Key = helper.String(v.(string)) + } + if v, ok := taintsMap["value"]; ok { + taint.Value = helper.String(v.(string)) + } + if v, ok := taintsMap["effect"]; ok { + taint.Effect = helper.String(v.(string)) + } + request.Taints = append(request.Taints, &taint) + } + } + + if v, ok := d.GetOk("tags"); ok { + for _, item := range v.([]interface{}) { + tagsMap := item.(map[string]interface{}) + tagSpecification := tkev20220501.TagSpecification{} + if v, ok := tagsMap["resource_type"]; ok { + tagSpecification.ResourceType = helper.String(v.(string)) + } + if v, ok := tagsMap["tags"]; ok { + for _, item := range v.([]interface{}) { + tagsMap := item.(map[string]interface{}) + tag := tkev20220501.Tag{} + if v, ok := tagsMap["key"]; ok { + tag.Key = helper.String(v.(string)) + } + if v, ok := tagsMap["value"]; ok { + tag.Value = helper.String(v.(string)) + } + tagSpecification.Tags = append(tagSpecification.Tags, &tag) + } + } + request.Tags = append(request.Tags, &tagSpecification) + } + } + + if v, ok := d.GetOkExists("deletion_protection"); ok { + request.DeletionProtection = helper.Bool(v.(bool)) + } + + if v, ok := d.GetOkExists("unschedulable"); ok { + request.Unschedulable = helper.Bool(v.(bool)) + } + + if nativeMap, ok := helper.InterfacesHeadMap(d, "native"); ok { + createNativeNodePoolParam := tkev20220501.CreateNativeNodePoolParam{} + if scalingMap, ok := helper.ConvertInterfacesHeadToMap(nativeMap["scaling"]); ok { + machineSetScaling := tkev20220501.MachineSetScaling{} + if v, ok := scalingMap["min_replicas"]; ok { + machineSetScaling.MinReplicas = helper.IntInt64(v.(int)) + } + if v, ok := scalingMap["max_replicas"]; ok { + machineSetScaling.MaxReplicas = helper.IntInt64(v.(int)) + } + if v, ok := scalingMap["create_policy"]; ok { + machineSetScaling.CreatePolicy = helper.String(v.(string)) + } + createNativeNodePoolParam.Scaling = &machineSetScaling + } + if v, ok := nativeMap["subnet_ids"]; ok { + subnetIdsSet := v.([]interface{}) + for i := range subnetIdsSet { + subnetIds := subnetIdsSet[i].(string) + createNativeNodePoolParam.SubnetIds = append(createNativeNodePoolParam.SubnetIds, helper.String(subnetIds)) + } + } + if v, ok := nativeMap["instance_charge_type"]; ok { + createNativeNodePoolParam.InstanceChargeType = helper.String(v.(string)) + } + if systemDiskMap, ok := helper.ConvertInterfacesHeadToMap(nativeMap["system_disk"]); ok { + disk := tkev20220501.Disk{} + if v, ok := systemDiskMap["disk_type"]; ok { + disk.DiskType = helper.String(v.(string)) + } + if v, ok := systemDiskMap["disk_size"]; ok { + disk.DiskSize = helper.IntInt64(v.(int)) + } + createNativeNodePoolParam.SystemDisk = &disk + } + if v, ok := nativeMap["instance_types"]; ok { + instanceTypesSet := v.([]interface{}) + for i := range instanceTypesSet { + instanceTypes := instanceTypesSet[i].(string) + createNativeNodePoolParam.InstanceTypes = append(createNativeNodePoolParam.InstanceTypes, helper.String(instanceTypes)) + } + } + if v, ok := nativeMap["security_group_ids"]; ok { + securityGroupIdsSet := v.([]interface{}) + for i := range securityGroupIdsSet { + securityGroupIds := securityGroupIdsSet[i].(string) + createNativeNodePoolParam.SecurityGroupIds = 
append(createNativeNodePoolParam.SecurityGroupIds, helper.String(securityGroupIds)) + } + } + if v, ok := nativeMap["auto_repair"]; ok { + createNativeNodePoolParam.AutoRepair = helper.Bool(v.(bool)) + } + if instanceChargePrepaidMap, ok := helper.ConvertInterfacesHeadToMap(nativeMap["instance_charge_prepaid"]); ok { + instanceChargePrepaid := tkev20220501.InstanceChargePrepaid{} + if v, ok := instanceChargePrepaidMap["period"]; ok { + instanceChargePrepaid.Period = helper.IntUint64(v.(int)) + } + if v, ok := instanceChargePrepaidMap["renew_flag"]; ok { + instanceChargePrepaid.RenewFlag = helper.String(v.(string)) + } + createNativeNodePoolParam.InstanceChargePrepaid = &instanceChargePrepaid + } + if managementMap, ok := helper.ConvertInterfacesHeadToMap(nativeMap["management"]); ok { + managementConfig := tkev20220501.ManagementConfig{} + if v, ok := managementMap["nameservers"]; ok { + nameserversSet := v.([]interface{}) + for i := range nameserversSet { + nameservers := nameserversSet[i].(string) + managementConfig.Nameservers = append(managementConfig.Nameservers, helper.String(nameservers)) + } + } + if v, ok := managementMap["hosts"]; ok { + hostsSet := v.([]interface{}) + for i := range hostsSet { + hosts := hostsSet[i].(string) + managementConfig.Hosts = append(managementConfig.Hosts, helper.String(hosts)) + } + } + if v, ok := managementMap["kernel_args"]; ok { + kernelArgsSet := v.([]interface{}) + for i := range kernelArgsSet { + kernelArgs := kernelArgsSet[i].(string) + managementConfig.KernelArgs = append(managementConfig.KernelArgs, helper.String(kernelArgs)) + } + } + createNativeNodePoolParam.Management = &managementConfig + } + if v, ok := nativeMap["health_check_policy_name"]; ok { + createNativeNodePoolParam.HealthCheckPolicyName = helper.String(v.(string)) + } + if v, ok := nativeMap["host_name_pattern"]; ok { + createNativeNodePoolParam.HostNamePattern = helper.String(v.(string)) + } + if v, ok := nativeMap["kubelet_args"]; ok { + kubeletArgsSet := v.([]interface{}) + for i := range kubeletArgsSet { + kubeletArgs := kubeletArgsSet[i].(string) + createNativeNodePoolParam.KubeletArgs = append(createNativeNodePoolParam.KubeletArgs, helper.String(kubeletArgs)) + } + } + if lifecycleMap, ok := helper.ConvertInterfacesHeadToMap(nativeMap["lifecycle"]); ok { + lifecycleConfig := tkev20220501.LifecycleConfig{} + if v, ok := lifecycleMap["pre_init"]; ok { + lifecycleConfig.PreInit = helper.String(v.(string)) + } + if v, ok := lifecycleMap["post_init"]; ok { + lifecycleConfig.PostInit = helper.String(v.(string)) + } + createNativeNodePoolParam.Lifecycle = &lifecycleConfig + } + if v, ok := nativeMap["runtime_root_dir"]; ok { + createNativeNodePoolParam.RuntimeRootDir = helper.String(v.(string)) + } + if v, ok := nativeMap["enable_autoscaling"]; ok { + createNativeNodePoolParam.EnableAutoscaling = helper.Bool(v.(bool)) + } + if v, ok := nativeMap["replicas"]; ok { + createNativeNodePoolParam.Replicas = helper.IntInt64(v.(int)) + } + if internetAccessibleMap, ok := helper.ConvertInterfacesHeadToMap(nativeMap["internet_accessible"]); ok { + internetAccessible := tkev20220501.InternetAccessible{} + if v, ok := internetAccessibleMap["max_bandwidth_out"]; ok { + internetAccessible.MaxBandwidthOut = helper.IntInt64(v.(int)) + } + if v, ok := internetAccessibleMap["charge_type"]; ok { + internetAccessible.ChargeType = helper.String(v.(string)) + } + if v, ok := internetAccessibleMap["bandwidth_package_id"]; ok { + internetAccessible.BandwidthPackageId = helper.String(v.(string)) + } + 
createNativeNodePoolParam.InternetAccessible = &internetAccessible + } + if v, ok := nativeMap["data_disks"]; ok { + for _, item := range v.([]interface{}) { + dataDisksMap := item.(map[string]interface{}) + dataDisk := tkev20220501.DataDisk{} + if v, ok := dataDisksMap["disk_type"]; ok { + dataDisk.DiskType = helper.String(v.(string)) + } + if v, ok := dataDisksMap["file_system"]; ok { + dataDisk.FileSystem = helper.String(v.(string)) + } + if v, ok := dataDisksMap["disk_size"]; ok { + dataDisk.DiskSize = helper.IntInt64(v.(int)) + } + if v, ok := dataDisksMap["auto_format_and_mount"]; ok { + dataDisk.AutoFormatAndMount = helper.Bool(v.(bool)) + } + if v, ok := dataDisksMap["disk_partition"]; ok { + dataDisk.DiskPartition = helper.String(v.(string)) + } + if v, ok := dataDisksMap["mount_target"]; ok { + dataDisk.MountTarget = helper.String(v.(string)) + } + if v, ok := dataDisksMap["encrypt"]; ok { + dataDisk.Encrypt = helper.String(v.(string)) + } + if v, ok := dataDisksMap["kms_key_id"]; ok { + dataDisk.KmsKeyId = helper.String(v.(string)) + } + if v, ok := dataDisksMap["snapshot_id"]; ok { + dataDisk.SnapshotId = helper.String(v.(string)) + } + if v, ok := dataDisksMap["throughput_performance"]; ok { + dataDisk.ThroughputPerformance = helper.IntUint64(v.(int)) + } + createNativeNodePoolParam.DataDisks = append(createNativeNodePoolParam.DataDisks, &dataDisk) + } + } + if v, ok := nativeMap["key_ids"]; ok { + keyIdsSet := v.([]interface{}) + for i := range keyIdsSet { + keyIds := keyIdsSet[i].(string) + createNativeNodePoolParam.KeyIds = append(createNativeNodePoolParam.KeyIds, helper.String(keyIds)) + } + } + request.Native = &createNativeNodePoolParam + } + + if v, ok := d.GetOk("annotations"); ok { + for _, item := range v.(*schema.Set).List() { + annotationsMap := item.(map[string]interface{}) + annotation := tkev20220501.Annotation{} + if v, ok := annotationsMap["name"]; ok { + annotation.Name = helper.String(v.(string)) + } + if v, ok := annotationsMap["value"]; ok { + annotation.Value = helper.String(v.(string)) + } + request.Annotations = append(request.Annotations, &annotation) + } + } + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20220501Client().CreateNodePoolWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s create kubernetes native node pools failed, reason:%+v", logId, err) + return err + } + + nodePoolId = *response.Response.NodePoolId + + d.SetId(strings.Join([]string{clusterId, nodePoolId}, tccommon.FILED_SP)) + + return resourceTencentCloudKubernetesNativeNodePoolsRead(d, meta) +} + +func resourceTencentCloudKubernetesNativeNodePoolsRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_native_node_pools.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + clusterId := idSplit[0] 
+ nodePoolId := idSplit[1] + + _ = d.Set("cluster_id", clusterId) + + respData, err := service.DescribeKubernetesNativeNodePoolsById(ctx, clusterId, nodePoolId) + if err != nil { + return err + } + + if respData == nil { + d.SetId("") + log.Printf("[WARN]%s resource `kubernetes_native_node_pools` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil + } + tagsList := make([]map[string]interface{}, 0, len(respData.Tags)) + if respData.Tags != nil { + for _, tags := range respData.Tags { + tagsMap := map[string]interface{}{} + + if tags.ResourceType != nil { + tagsMap["resource_type"] = tags.ResourceType + } + + tagsList2 := make([]map[string]interface{}, 0, len(tags.Tags)) + if tags.Tags != nil { + for _, tags := range tags.Tags { + tagsMap2 := map[string]interface{}{} + + if tags.Key != nil { + tagsMap2["key"] = tags.Key + } + + if tags.Value != nil { + tagsMap2["value"] = tags.Value + } + + tagsList2 = append(tagsList2, tagsMap2) + } + + tagsMap["tags"] = tagsList2 + } + tagsList = append(tagsList, tagsMap) + } + + _ = d.Set("tags", tagsList) + } + + taintsList := make([]map[string]interface{}, 0, len(respData.Taints)) + if respData.Taints != nil { + for _, taints := range respData.Taints { + taintsMap := map[string]interface{}{} + + if taints.Key != nil { + taintsMap["key"] = taints.Key + } + + if taints.Value != nil { + taintsMap["value"] = taints.Value + } + + if taints.Effect != nil { + taintsMap["effect"] = taints.Effect + } + + taintsList = append(taintsList, taintsMap) + } + + _ = d.Set("taints", taintsList) + } + + if respData.DeletionProtection != nil { + _ = d.Set("deletion_protection", respData.DeletionProtection) + } + + if respData.Unschedulable != nil { + _ = d.Set("unschedulable", respData.Unschedulable) + } + + if respData.Type != nil { + _ = d.Set("type", respData.Type) + } + + labelsList := make([]map[string]interface{}, 0, len(respData.Labels)) + if respData.Labels != nil { + for _, labels := range respData.Labels { + labelsMap := map[string]interface{}{} + + if labels.Name != nil { + labelsMap["name"] = labels.Name + } + + if labels.Value != nil { + labelsMap["value"] = labels.Value + } + + labelsList = append(labelsList, labelsMap) + } + + _ = d.Set("labels", labelsList) + } + + if respData.LifeState != nil { + _ = d.Set("life_state", respData.LifeState) + } + + if respData.CreatedAt != nil { + _ = d.Set("created_at", respData.CreatedAt) + } + + if respData.Name != nil { + _ = d.Set("name", respData.Name) + } + + nativeMap := map[string]interface{}{} + + if respData.Native != nil { + scalingMap := map[string]interface{}{} + + if respData.Native.Scaling != nil { + if respData.Native.Scaling.MinReplicas != nil { + scalingMap["min_replicas"] = respData.Native.Scaling.MinReplicas + } + + if respData.Native.Scaling.MaxReplicas != nil { + scalingMap["max_replicas"] = respData.Native.Scaling.MaxReplicas + } + + if respData.Native.Scaling.CreatePolicy != nil { + scalingMap["create_policy"] = respData.Native.Scaling.CreatePolicy + } + + nativeMap["scaling"] = []interface{}{scalingMap} + } + + if respData.Native.SubnetIds != nil { + nativeMap["subnet_ids"] = respData.Native.SubnetIds + } + + if respData.Native.SecurityGroupIds != nil { + nativeMap["security_group_ids"] = respData.Native.SecurityGroupIds + } + + if respData.Native.AutoRepair != nil { + nativeMap["auto_repair"] = respData.Native.AutoRepair + } + + if respData.Native.InstanceChargeType != nil { + nativeMap["instance_charge_type"] = respData.Native.InstanceChargeType + } + + 
instanceChargePrepaidMap := map[string]interface{}{} + + if respData.Native.InstanceChargePrepaid != nil { + if respData.Native.InstanceChargePrepaid.Period != nil { + instanceChargePrepaidMap["period"] = respData.Native.InstanceChargePrepaid.Period + } + + if respData.Native.InstanceChargePrepaid.RenewFlag != nil { + instanceChargePrepaidMap["renew_flag"] = respData.Native.InstanceChargePrepaid.RenewFlag + } + + nativeMap["instance_charge_prepaid"] = []interface{}{instanceChargePrepaidMap} + } + + systemDiskMap := map[string]interface{}{} + + if respData.Native.SystemDisk != nil { + if respData.Native.SystemDisk.DiskType != nil { + systemDiskMap["disk_type"] = respData.Native.SystemDisk.DiskType + } + + if respData.Native.SystemDisk.DiskSize != nil { + systemDiskMap["disk_size"] = respData.Native.SystemDisk.DiskSize + } + + nativeMap["system_disk"] = []interface{}{systemDiskMap} + } + + if respData.Native.KeyIds != nil { + nativeMap["key_ids"] = respData.Native.KeyIds + } + + managementMap := map[string]interface{}{} + + if respData.Native.Management != nil { + if respData.Native.Management.Nameservers != nil { + managementMap["nameservers"] = respData.Native.Management.Nameservers + } + + if respData.Native.Management.Hosts != nil { + managementMap["hosts"] = respData.Native.Management.Hosts + } + + if respData.Native.Management.KernelArgs != nil { + managementMap["kernel_args"] = respData.Native.Management.KernelArgs + } + + nativeMap["management"] = []interface{}{managementMap} + } + + if respData.Native.HealthCheckPolicyName != nil { + nativeMap["health_check_policy_name"] = respData.Native.HealthCheckPolicyName + } + + if respData.Native.HostNamePattern != nil { + nativeMap["host_name_pattern"] = respData.Native.HostNamePattern + } + + if respData.Native.KubeletArgs != nil { + nativeMap["kubelet_args"] = respData.Native.KubeletArgs + } + + lifecycleMap := map[string]interface{}{} + + if respData.Native.Lifecycle != nil { + if respData.Native.Lifecycle.PreInit != nil { + lifecycleMap["pre_init"] = respData.Native.Lifecycle.PreInit + } + + if respData.Native.Lifecycle.PostInit != nil { + lifecycleMap["post_init"] = respData.Native.Lifecycle.PostInit + } + + nativeMap["lifecycle"] = []interface{}{lifecycleMap} + } + + if respData.Native.RuntimeRootDir != nil { + nativeMap["runtime_root_dir"] = respData.Native.RuntimeRootDir + } + + if respData.Native.EnableAutoscaling != nil { + nativeMap["enable_autoscaling"] = respData.Native.EnableAutoscaling + } + + if respData.Native.InstanceTypes != nil { + nativeMap["instance_types"] = respData.Native.InstanceTypes + } + + if respData.Native.Replicas != nil { + nativeMap["replicas"] = respData.Native.Replicas + } + + internetAccessibleMap := map[string]interface{}{} + + if respData.Native.InternetAccessible != nil { + if respData.Native.InternetAccessible.MaxBandwidthOut != nil { + internetAccessibleMap["max_bandwidth_out"] = respData.Native.InternetAccessible.MaxBandwidthOut + } + + if respData.Native.InternetAccessible.ChargeType != nil { + internetAccessibleMap["charge_type"] = respData.Native.InternetAccessible.ChargeType + } + + if respData.Native.InternetAccessible.BandwidthPackageId != nil { + internetAccessibleMap["bandwidth_package_id"] = respData.Native.InternetAccessible.BandwidthPackageId + } + + nativeMap["internet_accessible"] = []interface{}{internetAccessibleMap} + } + + dataDisksList := make([]map[string]interface{}, 0, len(respData.Native.DataDisks)) + if respData.Native.DataDisks != nil { + for _, dataDisks := range 
respData.Native.DataDisks { + dataDisksMap := map[string]interface{}{} + + if dataDisks.DiskType != nil { + dataDisksMap["disk_type"] = dataDisks.DiskType + } + + if dataDisks.FileSystem != nil { + dataDisksMap["file_system"] = dataDisks.FileSystem + } + + if dataDisks.DiskSize != nil { + dataDisksMap["disk_size"] = dataDisks.DiskSize + } + + if dataDisks.AutoFormatAndMount != nil { + dataDisksMap["auto_format_and_mount"] = dataDisks.AutoFormatAndMount + } + + if dataDisks.DiskPartition != nil { + dataDisksMap["disk_partition"] = dataDisks.DiskPartition + } + + if dataDisks.MountTarget != nil { + dataDisksMap["mount_target"] = dataDisks.MountTarget + } + + if dataDisks.Encrypt != nil { + dataDisksMap["encrypt"] = dataDisks.Encrypt + } + + if dataDisks.KmsKeyId != nil { + dataDisksMap["kms_key_id"] = dataDisks.KmsKeyId + } + + if dataDisks.SnapshotId != nil { + dataDisksMap["snapshot_id"] = dataDisks.SnapshotId + } + + if dataDisks.ThroughputPerformance != nil { + dataDisksMap["throughput_performance"] = dataDisks.ThroughputPerformance + } + + dataDisksList = append(dataDisksList, dataDisksMap) + } + + nativeMap["data_disks"] = dataDisksList + } + _ = d.Set("native", []interface{}{nativeMap}) + } + + annotationsList := make([]map[string]interface{}, 0, len(respData.Annotations)) + if respData.Annotations != nil { + for _, annotations := range respData.Annotations { + annotationsMap := map[string]interface{}{} + + if annotations.Name != nil { + annotationsMap["name"] = annotations.Name + } + + if annotations.Value != nil { + annotationsMap["value"] = annotations.Value + } + + annotationsList = append(annotationsList, annotationsMap) + } + + _ = d.Set("annotations", annotationsList) + } + + return nil +} + +func resourceTencentCloudKubernetesNativeNodePoolsUpdate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_native_node_pools.update")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + immutableArgs := []string{"type"} + for _, v := range immutableArgs { + if d.HasChange(v) { + return fmt.Errorf("argument `%s` cannot be changed", v) + } + } + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + clusterId := idSplit[0] + nodePoolId := idSplit[1] + + needChange := false + mutableArgs := []string{"name", "labels", "taints", "tags", "deletion_protection", "unschedulable", "native", "annotations"} + for _, v := range mutableArgs { + if d.HasChange(v) { + needChange = true + break + } + } + + if needChange { + request := tkev20220501.NewModifyNodePoolRequest() + + request.ClusterId = helper.String(clusterId) + + request.NodePoolId = helper.String(nodePoolId) + + if v, ok := d.GetOk("name"); ok { + request.Name = helper.String(v.(string)) + } + + if v, ok := d.GetOk("labels"); ok { + for _, item := range v.([]interface{}) { + labelsMap := item.(map[string]interface{}) + label := tkev20220501.Label{} + if v, ok := labelsMap["name"]; ok { + label.Name = helper.String(v.(string)) + } + if v, ok := labelsMap["value"]; ok { + label.Value = helper.String(v.(string)) + } + request.Labels = append(request.Labels, &label) + } + } + + if v, ok := d.GetOk("taints"); ok { + for _, item := range v.([]interface{}) { + taintsMap := item.(map[string]interface{}) + taint := tkev20220501.Taint{} + if v, ok := taintsMap["key"]; ok { 
+ taint.Key = helper.String(v.(string)) + } + if v, ok := taintsMap["value"]; ok { + taint.Value = helper.String(v.(string)) + } + if v, ok := taintsMap["effect"]; ok { + taint.Effect = helper.String(v.(string)) + } + request.Taints = append(request.Taints, &taint) + } + } + + if v, ok := d.GetOk("tags"); ok { + for _, item := range v.([]interface{}) { + tagsMap := item.(map[string]interface{}) + tagSpecification := tkev20220501.TagSpecification{} + if v, ok := tagsMap["resource_type"]; ok { + tagSpecification.ResourceType = helper.String(v.(string)) + } + if v, ok := tagsMap["tags"]; ok { + for _, item := range v.([]interface{}) { + tagsMap := item.(map[string]interface{}) + tag := tkev20220501.Tag{} + if v, ok := tagsMap["key"]; ok { + tag.Key = helper.String(v.(string)) + } + if v, ok := tagsMap["value"]; ok { + tag.Value = helper.String(v.(string)) + } + tagSpecification.Tags = append(tagSpecification.Tags, &tag) + } + } + request.Tags = append(request.Tags, &tagSpecification) + } + } + + if v, ok := d.GetOkExists("deletion_protection"); ok { + request.DeletionProtection = helper.Bool(v.(bool)) + } + + if v, ok := d.GetOkExists("unschedulable"); ok { + request.Unschedulable = helper.Bool(v.(bool)) + } + + if nativeMap, ok := helper.InterfacesHeadMap(d, "native"); ok { + updateNativeNodePoolParam := tkev20220501.UpdateNativeNodePoolParam{} + if scalingMap, ok := helper.ConvertInterfacesHeadToMap(nativeMap["scaling"]); ok { + machineSetScaling := tkev20220501.MachineSetScaling{} + if v, ok := scalingMap["min_replicas"]; ok { + machineSetScaling.MinReplicas = helper.IntInt64(v.(int)) + } + if v, ok := scalingMap["max_replicas"]; ok { + machineSetScaling.MaxReplicas = helper.IntInt64(v.(int)) + } + if v, ok := scalingMap["create_policy"]; ok { + machineSetScaling.CreatePolicy = helper.String(v.(string)) + } + updateNativeNodePoolParam.Scaling = &machineSetScaling + } + if v, ok := nativeMap["subnet_ids"]; ok { + subnetIdsSet := v.([]interface{}) + for i := range subnetIdsSet { + subnetIds := subnetIdsSet[i].(string) + updateNativeNodePoolParam.SubnetIds = append(updateNativeNodePoolParam.SubnetIds, helper.String(subnetIds)) + } + } + if v, ok := nativeMap["security_group_ids"]; ok { + securityGroupIdsSet := v.([]interface{}) + for i := range securityGroupIdsSet { + securityGroupIds := securityGroupIdsSet[i].(string) + updateNativeNodePoolParam.SecurityGroupIds = append(updateNativeNodePoolParam.SecurityGroupIds, helper.String(securityGroupIds)) + } + } + if v, ok := nativeMap["auto_repair"]; ok { + updateNativeNodePoolParam.AutoRepair = helper.Bool(v.(bool)) + } + if v, ok := nativeMap["instance_charge_type"]; ok { + updateNativeNodePoolParam.InstanceChargeType = helper.String(v.(string)) + } + if instanceChargePrepaidMap, ok := helper.ConvertInterfacesHeadToMap(nativeMap["instance_charge_prepaid"]); ok { + instanceChargePrepaid := tkev20220501.InstanceChargePrepaid{} + if v, ok := instanceChargePrepaidMap["period"]; ok { + instanceChargePrepaid.Period = helper.IntUint64(v.(int)) + } + if v, ok := instanceChargePrepaidMap["renew_flag"]; ok { + instanceChargePrepaid.RenewFlag = helper.String(v.(string)) + } + updateNativeNodePoolParam.InstanceChargePrepaid = &instanceChargePrepaid + } + if systemDiskMap, ok := helper.ConvertInterfacesHeadToMap(nativeMap["system_disk"]); ok { + disk := tkev20220501.Disk{} + if v, ok := systemDiskMap["disk_type"]; ok { + disk.DiskType = helper.String(v.(string)) + } + if v, ok := systemDiskMap["disk_size"]; ok { + disk.DiskSize = helper.IntInt64(v.(int)) + } 
+ updateNativeNodePoolParam.SystemDisk = &disk + } + if managementMap, ok := helper.ConvertInterfacesHeadToMap(nativeMap["management"]); ok { + managementConfig := tkev20220501.ManagementConfig{} + if v, ok := managementMap["nameservers"]; ok { + nameserversSet := v.([]interface{}) + for i := range nameserversSet { + nameservers := nameserversSet[i].(string) + managementConfig.Nameservers = append(managementConfig.Nameservers, helper.String(nameservers)) + } + } + if v, ok := managementMap["hosts"]; ok { + hostsSet := v.([]interface{}) + for i := range hostsSet { + hosts := hostsSet[i].(string) + managementConfig.Hosts = append(managementConfig.Hosts, helper.String(hosts)) + } + } + if v, ok := managementMap["kernel_args"]; ok { + kernelArgsSet := v.([]interface{}) + for i := range kernelArgsSet { + kernelArgs := kernelArgsSet[i].(string) + managementConfig.KernelArgs = append(managementConfig.KernelArgs, helper.String(kernelArgs)) + } + } + updateNativeNodePoolParam.Management = &managementConfig + } + if v, ok := nativeMap["health_check_policy_name"]; ok { + updateNativeNodePoolParam.HealthCheckPolicyName = helper.String(v.(string)) + } + if v, ok := nativeMap["host_name_pattern"]; ok { + updateNativeNodePoolParam.HostNamePattern = helper.String(v.(string)) + } + if v, ok := nativeMap["kubelet_args"]; ok { + kubeletArgsSet := v.([]interface{}) + for i := range kubeletArgsSet { + kubeletArgs := kubeletArgsSet[i].(string) + updateNativeNodePoolParam.KubeletArgs = append(updateNativeNodePoolParam.KubeletArgs, helper.String(kubeletArgs)) + } + } + if lifecycleMap, ok := helper.ConvertInterfacesHeadToMap(nativeMap["lifecycle"]); ok { + lifecycleConfig := tkev20220501.LifecycleConfig{} + if v, ok := lifecycleMap["pre_init"]; ok { + lifecycleConfig.PreInit = helper.String(v.(string)) + } + if v, ok := lifecycleMap["post_init"]; ok { + lifecycleConfig.PostInit = helper.String(v.(string)) + } + updateNativeNodePoolParam.Lifecycle = &lifecycleConfig + } + if v, ok := nativeMap["runtime_root_dir"]; ok { + updateNativeNodePoolParam.RuntimeRootDir = helper.String(v.(string)) + } + if v, ok := nativeMap["enable_autoscaling"]; ok { + updateNativeNodePoolParam.EnableAutoscaling = helper.Bool(v.(bool)) + } + if v, ok := nativeMap["instance_types"]; ok { + instanceTypesSet := v.([]interface{}) + for i := range instanceTypesSet { + instanceTypes := instanceTypesSet[i].(string) + updateNativeNodePoolParam.InstanceTypes = append(updateNativeNodePoolParam.InstanceTypes, helper.String(instanceTypes)) + } + } + if v, ok := nativeMap["replicas"]; ok { + updateNativeNodePoolParam.Replicas = helper.IntInt64(v.(int)) + } + if v, ok := nativeMap["data_disks"]; ok { + for _, item := range v.([]interface{}) { + dataDisksMap := item.(map[string]interface{}) + dataDisk := tkev20220501.DataDisk{} + if v, ok := dataDisksMap["disk_type"]; ok { + dataDisk.DiskType = helper.String(v.(string)) + } + if v, ok := dataDisksMap["file_system"]; ok { + dataDisk.FileSystem = helper.String(v.(string)) + } + if v, ok := dataDisksMap["disk_size"]; ok { + dataDisk.DiskSize = helper.IntInt64(v.(int)) + } + if v, ok := dataDisksMap["auto_format_and_mount"]; ok { + dataDisk.AutoFormatAndMount = helper.Bool(v.(bool)) + } + if v, ok := dataDisksMap["disk_partition"]; ok { + dataDisk.DiskPartition = helper.String(v.(string)) + } + if v, ok := dataDisksMap["mount_target"]; ok { + dataDisk.MountTarget = helper.String(v.(string)) + } + if v, ok := dataDisksMap["encrypt"]; ok { + dataDisk.Encrypt = helper.String(v.(string)) + } + if v, ok := 
dataDisksMap["kms_key_id"]; ok { + dataDisk.KmsKeyId = helper.String(v.(string)) + } + if v, ok := dataDisksMap["snapshot_id"]; ok { + dataDisk.SnapshotId = helper.String(v.(string)) + } + if v, ok := dataDisksMap["throughput_performance"]; ok { + dataDisk.ThroughputPerformance = helper.IntUint64(v.(int)) + } + updateNativeNodePoolParam.DataDisks = append(updateNativeNodePoolParam.DataDisks, &dataDisk) + } + } + if v, ok := nativeMap["key_ids"]; ok { + keyIdsSet := v.([]interface{}) + for i := range keyIdsSet { + keyIds := keyIdsSet[i].(string) + updateNativeNodePoolParam.KeyIds = append(updateNativeNodePoolParam.KeyIds, helper.String(keyIds)) + } + } + request.Native = &updateNativeNodePoolParam + } + + if v, ok := d.GetOk("annotations"); ok { + for _, item := range v.([]interface{}) { + annotationsMap := item.(map[string]interface{}) + annotation := tkev20220501.Annotation{} + if v, ok := annotationsMap["name"]; ok { + annotation.Name = helper.String(v.(string)) + } + if v, ok := annotationsMap["value"]; ok { + annotation.Value = helper.String(v.(string)) + } + request.Annotations = append(request.Annotations, &annotation) + } + } + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20220501Client().ModifyNodePoolWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s update kubernetes native node pools failed, reason:%+v", logId, err) + return err + } + } + + return resourceTencentCloudKubernetesNativeNodePoolsRead(d, meta) +} + +func resourceTencentCloudKubernetesNativeNodePoolsDelete(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_native_node_pools.delete")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + clusterId := idSplit[0] + nodePoolId := idSplit[1] + + var ( + request = tkev20220501.NewDeleteNodePoolRequest() + response = tkev20220501.NewDeleteNodePoolResponse() + ) + + request.ClusterId = helper.String(clusterId) + + request.NodePoolId = helper.String(nodePoolId) + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20220501Client().DeleteNodePoolWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s delete kubernetes native node pools failed, reason:%+v", logId, err) + return err + } + + _ = response + return nil +} diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools.md b/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools.md new file mode 100644 index 0000000000..55ec09b303 --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools.md @@ -0,0 +1,30 @@ +Provides a 
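+        // ModifyNodePool is retried on transient failures until tccommon.WriteRetryTimeout
+        // elapses; tccommon.RetryError decides which errors are retryable.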
+        err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
+            result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20220501Client().ModifyNodePoolWithContext(ctx, request)
+            if e != nil {
+                return tccommon.RetryError(e)
+            } else {
+                log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString())
+            }
+            return nil
+        })
+        if err != nil {
+            log.Printf("[CRITAL]%s update kubernetes native node pools failed, reason:%+v", logId, err)
+            return err
+        }
+    }
+
+    return resourceTencentCloudKubernetesNativeNodePoolsRead(d, meta)
+}
+
+func resourceTencentCloudKubernetesNativeNodePoolsDelete(d *schema.ResourceData, meta interface{}) error {
+    defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_native_node_pools.delete")()
+    defer tccommon.InconsistentCheck(d, meta)()
+
+    logId := tccommon.GetLogId(tccommon.ContextNil)
+    ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
+
+    idSplit := strings.Split(d.Id(), tccommon.FILED_SP)
+    if len(idSplit) != 2 {
+        return fmt.Errorf("id is broken,%s", d.Id())
+    }
+    clusterId := idSplit[0]
+    nodePoolId := idSplit[1]
+
+    var (
+        request  = tkev20220501.NewDeleteNodePoolRequest()
+        response = tkev20220501.NewDeleteNodePoolResponse()
+    )
+
+    request.ClusterId = helper.String(clusterId)
+
+    request.NodePoolId = helper.String(nodePoolId)
+
+    err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
+        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20220501Client().DeleteNodePoolWithContext(ctx, request)
+        if e != nil {
+            return tccommon.RetryError(e)
+        } else {
+            log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString())
+        }
+        response = result
+        return nil
+    })
+    if err != nil {
+        log.Printf("[CRITAL]%s delete kubernetes native node pools failed, reason:%+v", logId, err)
+        return err
+    }
+
+    _ = response
+    return nil
+}
diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools.md b/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools.md
new file mode 100644
index 0000000000..55ec09b303
--- /dev/null
+++ b/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools.md
@@ -0,0 +1,49 @@
+Provides a resource to create a TKE kubernetes native node pool
+
+Example Usage
+
+```hcl
+resource "tencentcloud_kubernetes_native_node_pools" "kubernetes_native_node_pools" {
+  cluster_id = "cls-xxxxxx"
+
+  taints {
+    key    = "key1"
+    value  = "value1"
+    effect = "NoSchedule"
+  }
+
+  tags {
+    resource_type = "machine"
+    tags {
+      key   = "tag-key"
+      value = "tag-value"
+    }
+  }
+
+  native {
+    instance_types     = ["SA2.MEDIUM2"]
+    subnet_ids         = ["subnet-xxxxxx"]
+    security_group_ids = ["sg-xxxxxx"]
+    replicas           = 2
+
+    system_disk {
+      disk_type = "CLOUD_SSD"
+      disk_size = 50
+    }
+
+    data_disks {
+      disk_type             = "CLOUD_PREMIUM"
+      disk_size             = 50
+      auto_format_and_mount = false
+    }
+  }
+}
+```
+
+Import
+
+tke kubernetes_native_node_pools can be imported using the id (the cluster ID and the node pool ID joined by `#`), e.g.
+
+```
+terraform import tencentcloud_kubernetes_native_node_pools.kubernetes_native_node_pools cls-xxxxxx#np-xxxxxx
+```
diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools_extension.go
new file mode 100644
index 0000000000..7fdf745226
--- /dev/null
+++ b/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools_extension.go
@@ -0,0 +1 @@
+package tke
diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools_test.go b/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools_test.go
new file mode 100644
index 0000000000..4d2e412684
--- /dev/null
+++ b/tencentcloud/services/tke/resource_tc_kubernetes_native_node_pools_test.go
@@ -0,0 +1,67 @@
+package tke
+
+import (
+    "testing"
+
+    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+
+    tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest"
+)
+
+func TestAccTencentCloudKubernetesNativeNodePoolsResource_basic(t *testing.T) {
+    t.Parallel()
+    resource.Test(t, resource.TestCase{
+        PreCheck: func() {
+            tcacctest.AccPreCheck(t)
+        },
+        Providers: tcacctest.AccProviders,
+        Steps: []resource.TestStep{{
+            Config: testAccKubernetesNativeNodePools,
+            Check:  resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_kubernetes_native_node_pools.kubernetes_native_node_pools", "id")),
+        }, {
+            ResourceName:      "tencentcloud_kubernetes_native_node_pools.kubernetes_native_node_pools",
+            ImportState:       true,
+            ImportStateVerify: true,
+        }},
+    })
+}
+
+const testAccKubernetesNativeNodePools = `
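+
+# The IDs below are placeholders; point cluster_id, subnet_ids and security_group_ids at real resources before running this test.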
+resource "tencentcloud_kubernetes_native_node_pools" "kubernetes_native_node_pools" {
+  cluster_id = "cls-xxxxxx"
+
+  taints {
+    key    = "key1"
+    value  = "value1"
+    effect = "NoSchedule"
+  }
+
+  tags {
+    resource_type = "machine"
+    tags {
+      key   = "tag-key"
+      value = "tag-value"
+    }
+  }
+
+  native {
+    instance_types     = ["SA2.MEDIUM2"]
+    subnet_ids         = ["subnet-xxxxxx"]
+    security_group_ids = ["sg-xxxxxx"]
+    replicas           = 2
+
+    system_disk {
+      disk_type = "CLOUD_SSD"
+      disk_size = 50
+    }
+
+    data_disks {
+      disk_type             = "CLOUD_PREMIUM"
+      disk_size             = 50
+      auto_format_and_mount = false
+    }
+  }
+}
+`
diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_node_pool.go b/tencentcloud/services/tke/resource_tc_kubernetes_node_pool.go
index c8af50c42d..b6cbfa8bc6 100644
--- a/tencentcloud/services/tke/resource_tc_kubernetes_node_pool.go
+++ b/tencentcloud/services/tke/resource_tc_kubernetes_node_pool.go
@@ -9,11 +9,10 @@ import (
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
 
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
-    svcas "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/as"
     svccvm "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/cvm"
 )
 
@@ -640,15 +639,15 @@ func resourceTencentCloudKubernetesNodePoolCreate(d *schema.ResourceData, meta i
         nodePoolId string
     )
     var (
-        request  = tke.NewCreateClusterNodePoolRequest()
-        response = tke.NewCreateClusterNodePoolResponse()
+        request  = tkev20180525.NewCreateClusterNodePoolRequest()
+        response = tkev20180525.NewCreateClusterNodePoolResponse()
     )
 
     if v, ok := d.GetOk("cluster_id"); ok {
         clusterId = v.(string)
     }
 
-    request.ClusterId = &clusterId
+    request.ClusterId = helper.String(clusterId)
 
     if v, ok := d.GetOkExists("enable_auto_scale"); ok {
         request.EnableAutoscale = helper.Bool(v.(bool))
@@ -661,7 +660,7 @@
     if v, ok := d.GetOk("taints"); ok {
         for _, item := range v.([]interface{}) {
             taintsMap := item.(map[string]interface{})
-            taint := tke.Taint{}
+            taint := tkev20180525.Taint{}
             if v, ok := taintsMap["key"]; ok {
                 taint.Key = helper.String(v.(string))
             }
@@ -684,7 +683,7 @@
     }
 
     err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
-        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().CreateClusterNodePoolWithContext(ctx, request)
+        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().CreateClusterNodePoolWithContext(ctx, request)
         if e != nil {
             return tccommon.RetryError(e)
         } else {
@@ -739,12 +738,10 @@
         return nil
     }
 
-    respData1, err := service.DescribeKubernetesNodePoolById1(ctx, clusterId, nodePoolId)
-    if err != nil {
-        return err
-    }
-
-    err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
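+    // respData1 is populated inside the retry closure; a nil value after a
+    // successful call means the node pool no longer exists upstream.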
+    var respData1 *tkev20180525.NodePool
+    reqErr1 := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
         result, e := service.DescribeKubernetesNodePoolById1(ctx, clusterId, nodePoolId)
         if e != nil {
             if err := resourceTencentCloudKubernetesNodePoolReadRequestOnError1(ctx, result, e); err != nil {
@@ -758,9 +753,9 @@
         respData1 = result
         return nil
     })
-    if err != nil {
-        log.Printf("[CRITAL]%s read kubernetes node pool failed, reason:%+v", logId, err)
-        return err
+    if reqErr1 != nil {
+        log.Printf("[CRITAL]%s read kubernetes node pool failed, reason:%+v", logId, reqErr1)
+        return reqErr1
     }
 
     if respData1 == nil {
@@ -855,12 +850,6 @@
 
     ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
 
-    immutableArgs := []string{"cluster_id"}
-    for _, v := range immutableArgs {
-        if d.HasChange(v) {
-            return fmt.Errorf("argument `%s` cannot be changed", v)
-        }
-    }
     idSplit := strings.Split(d.Id(), tccommon.FILED_SP)
     if len(idSplit) != 2 {
         return fmt.Errorf("id is broken,%s", d.Id())
@@ -882,11 +871,11 @@
     }
 
     if needChange {
-        request := tke.NewModifyClusterNodePoolRequest()
+        request := tkev20180525.NewModifyClusterNodePoolRequest()
 
-        request.ClusterId = &clusterId
+        request.ClusterId = helper.String(clusterId)
 
-        request.NodePoolId = &nodePoolId
+        request.NodePoolId = helper.String(nodePoolId)
 
         if v, ok := d.GetOk("name"); ok {
             request.Name = helper.String(v.(string))
@@ -903,7 +892,7 @@
         if v, ok := d.GetOk("taints"); ok {
             for _, item := range v.([]interface{}) {
                 taintsMap := item.(map[string]interface{})
-                taint := tke.Taint{}
+                taint := tkev20180525.Taint{}
                 if v, ok := taintsMap["key"]; ok {
                     taint.Key = helper.String(v.(string))
                 }
@@ -926,7 +915,7 @@
         }
 
         err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
-            result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterNodePoolWithContext(ctx, request)
+            result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ModifyClusterNodePoolWithContext(ctx, request)
             if e != nil {
                 return tccommon.RetryError(e)
             } else {
@@ -962,17 +951,13 @@
     nodePoolId := idSplit[1]
 
     var (
-        request  = tke.NewDeleteClusterNodePoolRequest()
-        response = tke.NewDeleteClusterNodePoolResponse()
+        request  = tkev20180525.NewDeleteClusterNodePoolRequest()
+        response = tkev20180525.NewDeleteClusterNodePoolResponse()
     )
 
-    if v, ok := d.GetOk("cluster_id"); ok {
-        clusterId = v.(string)
-    }
-
-    request.ClusterId = &clusterId
+    request.ClusterId = helper.String(clusterId)
 
-    request.NodePoolIds = []*string{&nodePoolId}
+    request.NodePoolIds = []*string{helper.String(nodePoolId)}
 
     if v, ok := d.GetOkExists("delete_keep_instance"); ok {
         request.KeepInstance = helper.Bool(v.(bool))
@@ -983,7 +968,7 @@
     }
 
     err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
-        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DeleteClusterNodePoolWithContext(ctx, request)
+        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DeleteClusterNodePoolWithContext(ctx, request)
         if e != nil {
             if err := resourceTencentCloudKubernetesNodePoolDeleteRequestOnError0(ctx, e); err != nil {
                 return err
@@ -996,7 +981,7 @@
         return nil
     })
     if err != nil {
-        log.Printf("[CRITAL]%s create kubernetes node pool failed, reason:%+v", logId, err)
+        log.Printf("[CRITAL]%s delete kubernetes node pool failed, reason:%+v", logId, err)
         return err
     }
 
diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker.go b/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker.go
index 2478b632bd..6f47c55340 100644
--- a/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker.go
+++ b/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker.go
@@ -9,7 +9,7 @@ import (
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
 
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
@@ -564,14 +564,14 @@
     instanceIdSet := idSplit[1]
 
     var (
-        request  = tke.NewDescribeClustersRequest()
-        response = tke.NewDescribeClustersResponse()
+        request  = tkev20180525.NewDescribeClustersRequest()
+        response = tkev20180525.NewDescribeClustersResponse()
     )
 
     request.ClusterIds = []*string{helper.String(clusterId)}
 
     err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
-        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DescribeClustersWithContext(ctx, request)
+        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DescribeClustersWithContext(ctx, request)
         if e != nil {
             return tccommon.RetryError(e)
         } else {
diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_extension.go b/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_extension.go
index b7112bdb46..040dc496a3 100644
--- a/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_extension.go
+++ b/tencentcloud/services/tke/resource_tc_kubernetes_scale_worker_extension.go
@@ -7,10 +7,9 @@ import (
     "strings"
     "time"
 
-    "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
-
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+    "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
+
     cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312"
     tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_serverless_node_pool.go b/tencentcloud/services/tke/resource_tc_kubernetes_serverless_node_pool.go
index 027e9da21b..9d691ead68 100644
--- a/tencentcloud/services/tke/resource_tc_kubernetes_serverless_node_pool.go
+++ b/tencentcloud/services/tke/resource_tc_kubernetes_serverless_node_pool.go
@@ -9,7 +9,7 @@ import (
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
+    tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525"
 
     tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
     "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
@@ -122,15 +122,15 @@ func resourceTencentCloudKubernetesServerlessNodePoolCreate(d *schema.ResourceDa
         nodePoolId string
     )
     var (
-        request  = tke.NewCreateClusterVirtualNodePoolRequest()
-        response = tke.NewCreateClusterVirtualNodePoolResponse()
+        request  = tkev20180525.NewCreateClusterVirtualNodePoolRequest()
+        response = tkev20180525.NewCreateClusterVirtualNodePoolResponse()
     )
 
     if v, ok := d.GetOk("cluster_id"); ok {
         clusterId = v.(string)
     }
 
-    request.ClusterId = &clusterId
+    request.ClusterId = helper.String(clusterId)
 
     if v, ok := d.GetOk("name"); ok {
         request.Name = helper.String(v.(string))
@@ -139,7 +139,7 @@ func resourceTencentCloudKubernetesServerlessNodePoolCreate(d *schema.ResourceDa
     if v, ok := d.GetOk("taints"); ok {
         for _, item := range v.([]interface{}) {
             taintsMap := item.(map[string]interface{})
-            taint := tke.Taint{}
+            taint := tkev20180525.Taint{}
             if v, ok := taintsMap["key"]; ok {
                 taint.Key = helper.String(v.(string))
             }
@@ -156,7 +156,7 @@ func resourceTencentCloudKubernetesServerlessNodePoolCreate(d *schema.ResourceDa
     if v, ok := d.GetOk("serverless_nodes"); ok {
         for _, item := range v.([]interface{}) {
             virtualNodesMap := item.(map[string]interface{})
-            virtualNodeSpec := tke.VirtualNodeSpec{}
+            virtualNodeSpec := tkev20180525.VirtualNodeSpec{}
             if v, ok := virtualNodesMap["display_name"]; ok {
                 virtualNodeSpec.DisplayName = helper.String(v.(string))
             }
@@ -172,7 +172,7 @@ func resourceTencentCloudKubernetesServerlessNodePoolCreate(d *schema.ResourceDa
     }
 
     err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
-        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().CreateClusterVirtualNodePoolWithContext(ctx, request)
+        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().CreateClusterVirtualNodePoolWithContext(ctx, request)
         if e != nil {
             return tccommon.RetryError(e)
         } else {
@@ -212,12 +212,8 @@ func resourceTencentCloudKubernetesServerlessNodePoolRead(d *schema.ResourceData
 
     _ = d.Set("cluster_id", clusterId)
 
-    respData, err := service.DescribeKubernetesServerlessNodePoolById(ctx, clusterId, nodePoolId)
-    if err != nil {
-        return err
-    }
-
-    err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
+    var respData *tkev20180525.VirtualNodePool
+    reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
         result, e := service.DescribeKubernetesServerlessNodePoolById(ctx, clusterId, nodePoolId)
         if e != nil {
             return tccommon.RetryError(e)
@@ -228,9 +224,9 @@ func resourceTencentCloudKubernetesServerlessNodePoolRead(d *schema.ResourceData
         respData = result
         return nil
     })
-    if err != nil {
-        log.Printf("[CRITAL]%s read kubernetes serverless node pool failed, reason:%+v", logId, err)
-        return err
+    if reqErr != nil {
+        log.Printf("[CRITAL]%s read kubernetes serverless node pool failed, reason:%+v", logId, reqErr)
+        return reqErr
     }
 
     if respData == nil {
@@ -284,12 +280,6 @@ func resourceTencentCloudKubernetesServerlessNodePoolUpdate(d *schema.ResourceDa
 
     ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
 
-    immutableArgs := []string{"cluster_id"}
-    for _, v := range immutableArgs {
-        if d.HasChange(v) {
-            return fmt.Errorf("argument `%s` cannot be changed", v)
-        }
-    }
     idSplit := strings.Split(d.Id(), tccommon.FILED_SP)
     if len(idSplit) != 2 {
         return fmt.Errorf("id is broken,%s", d.Id())
@@ -307,11 +297,11 @@ func resourceTencentCloudKubernetesServerlessNodePoolUpdate(d *schema.ResourceDa
     }
 
     if needChange {
-        request := tke.NewModifyClusterVirtualNodePoolRequest()
+        request := tkev20180525.NewModifyClusterVirtualNodePoolRequest()
 
-        request.ClusterId = &clusterId
+        request.ClusterId = helper.String(clusterId)
 
-        request.NodePoolId = &nodePoolId
+        request.NodePoolId = helper.String(nodePoolId)
 
         if v, ok := d.GetOk("name"); ok {
             request.Name = helper.String(v.(string))
@@ -320,7 +310,7 @@ func resourceTencentCloudKubernetesServerlessNodePoolUpdate(d *schema.ResourceDa
         if v, ok := d.GetOk("taints"); ok {
             for _, item := range v.([]interface{}) {
                 taintsMap := item.(map[string]interface{})
-                taint := tke.Taint{}
+                taint := tkev20180525.Taint{}
                 if v, ok := taintsMap["key"]; ok {
                     taint.Key = helper.String(v.(string))
                 }
@@ -339,7 +329,7 @@ func resourceTencentCloudKubernetesServerlessNodePoolUpdate(d *schema.ResourceDa
         }
 
         err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
-            result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().ModifyClusterVirtualNodePoolWithContext(ctx, request)
+            result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().ModifyClusterVirtualNodePoolWithContext(ctx, request)
             if e != nil {
                 return tccommon.RetryError(e)
             } else {
@@ -371,23 +361,19 @@ func resourceTencentCloudKubernetesServerlessNodePoolDelete(d *schema.ResourceDa
     nodePoolId := idSplit[1]
 
     var (
-        request  = tke.NewDeleteClusterVirtualNodePoolRequest()
-        response = tke.NewDeleteClusterVirtualNodePoolResponse()
+        request  = tkev20180525.NewDeleteClusterVirtualNodePoolRequest()
+        response = tkev20180525.NewDeleteClusterVirtualNodePoolResponse()
     )
 
-    if v, ok := d.GetOk("cluster_id"); ok {
-        clusterId = v.(string)
-    }
-
-    request.ClusterId = &clusterId
+    request.ClusterId = helper.String(clusterId)
 
-    request.NodePoolIds = []*string{&nodePoolId}
+    request.NodePoolIds = []*string{helper.String(nodePoolId)}
 
     force := true
     request.Force = &force
 
     err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
-        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeClient().DeleteClusterVirtualNodePoolWithContext(ctx, request)
+        result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DeleteClusterVirtualNodePoolWithContext(ctx, request)
         if e != nil {
             return tccommon.RetryError(e)
         } else {
@@ -397,7 +383,7 @@ func resourceTencentCloudKubernetesServerlessNodePoolDelete(d *schema.ResourceDa
         return nil
     })
     if err != nil {
-        log.Printf("[CRITAL]%s create kubernetes serverless node pool failed, reason:%+v", logId, err)
+        log.Printf("[CRITAL]%s delete kubernetes serverless node pool failed, reason:%+v", logId, err)
         return err
     }
 
diff --git a/tencentcloud/services/tke/service_tencentcloud_tke.go b/tencentcloud/services/tke/service_tencentcloud_tke.go
index 303b29b369..1647728a94 100644
--- a/tencentcloud/services/tke/service_tencentcloud_tke.go
+++ b/tencentcloud/services/tke/service_tencentcloud_tke.go
@@ -6,24 +6,23 @@ import (
     "log"
     "strings"
 
-    tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
-    svccvm "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/cvm"
-
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
     "github.com/pkg/errors"
     "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
     sdkErrors "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
     tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http"
     cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312"
     cwp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cwp/v20180228"
     tat "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tat/v20201028"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" tke2 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20220501" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/connectivity" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/ratelimit" + svccvm "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/services/cvm" ) type ClusterBasicSetting struct { @@ -1342,7 +1342,7 @@ func (me *TkeService) DescribeKubernetesAvailableClusterVersionsByFilter(ctx con ratelimit.Check(request.GetAction()) - response, err := me.client.UseTkeClient().DescribeAvailableClusterVersion(request) + response, err := me.client.UseTkeV20180525Client().DescribeAvailableClusterVersion(request) if err != nil { errRet = err return @@ -2734,7 +2734,7 @@ func (me *TkeService) DescribeKubernetesClusterInstancesByFilter(ctx context.Con for { request.Offset = &offset request.Limit = &limit - response, err := me.client.UseTkeClient().DescribeClusterInstances(request) + response, err := me.client.UseTkeV20180525Client().DescribeClusterInstances(request) if err != nil { errRet = err return @@ -2778,7 +2778,7 @@ func (me *TkeService) DescribeKubernetesClusterNodePoolsByFilter(ctx context.Con ratelimit.Check(request.GetAction()) - response, err := me.client.UseTkeClient().DescribeClusterNodePools(request) + response, err := me.client.UseTkeV20180525Client().DescribeClusterNodePools(request) if err != nil { errRet = err return @@ -2841,7 +2841,7 @@ func (me *TkeService) DescribeKubernetesClustersByFilter(ctx context.Context, pa return nil, err } - response, err := me.client.UseTkeClient().DescribeClusters(request) + response, err := me.client.UseTkeV20180525Client().DescribeClusters(request) if err != nil { errRet = err return @@ -2876,7 +2876,7 @@ func (me *TkeService) DescribeKubernetesClusterLevelsByFilter(ctx context.Contex ratelimit.Check(request.GetAction()) - response, err := me.client.UseTkeClient().DescribeClusterLevelAttribute(request) + response, err := me.client.UseTkeV20180525Client().DescribeClusterLevelAttribute(request) if err != nil { errRet = err return @@ -2915,7 +2915,7 @@ func (me *TkeService) DescribeKubernetesClusterCommonNamesByFilter(ctx context.C return nil, err } - response, err := me.client.UseTkeClient().DescribeClusterCommonNames(request) + response, err := me.client.UseTkeV20180525Client().DescribeClusterCommonNames(request) if err != nil { errRet = err return @@ -2950,7 +2950,7 @@ func (me *TkeService) DescribeKubernetesClusterAuthenticationOptionsByFilter(ctx ratelimit.Check(request.GetAction()) - response, err := me.client.UseTkeClient().DescribeClusterAuthenticationOptions(request) + response, err := me.client.UseTkeV20180525Client().DescribeClusterAuthenticationOptions(request) if err != nil { errRet = err return @@ -2991,7 +2991,7 @@ func (me *TkeService) DescribeKubernetesChartsByFilter(ctx context.Context, para ratelimit.Check(request.GetAction()) - response, err := me.client.UseTkeClient().GetTkeAppChartList(request) + response, err := me.client.UseTkeV20180525Client().GetTkeAppChartList(request) if err != nil { errRet = err return @@ -3020,7 +3020,7 @@ func (me *TkeService) DescribeKubernetesEncryptionProtectionById(ctx context.Con 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeEncryptionStatus(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeEncryptionStatus(request)
     if err != nil {
         errRet = err
         return
@@ -3045,7 +3045,7 @@ func (me *TkeService) DescribeKubernetesClusterAttachmentById(ctx context.Contex
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeClusters(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeClusters(request)
     if err != nil {
         errRet = err
         return
@@ -3074,7 +3074,7 @@ func (me *TkeService) DescribeKubernetesClusterAttachmentById1(ctx context.Conte
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseCvmClient().DescribeInstances(request)
+    response, err := me.client.UseCvmV20170312Client().DescribeInstances(request)
     if err != nil {
         errRet = err
         return
@@ -3104,7 +3104,7 @@ func (me *TkeService) DescribeKubernetesClusterAttachmentById2(ctx context.Conte
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeClusterInstances(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeClusterInstances(request)
     if err != nil {
         errRet = err
         return
@@ -3138,7 +3138,7 @@ func (me *TkeService) DescribeKubernetesBackupStorageLocationById(ctx context.Co
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeBackupStorageLocations(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeBackupStorageLocations(request)
     if err != nil {
         errRet = err
         return
@@ -3172,7 +3172,7 @@ func (me *TkeService) DescribeKubernetesClusterById(ctx context.Context, cluster
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeClusters(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeClusters(request)
     if err != nil {
         errRet = err
         return
@@ -3201,7 +3201,7 @@ func (me *TkeService) DescribeKubernetesClusterById1(ctx context.Context, cluste
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeClusterInstances(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeClusterInstances(request)
     if err != nil {
         errRet = err
         return
@@ -3226,7 +3226,7 @@ func (me *TkeService) DescribeKubernetesClusterById2(ctx context.Context, cluste
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeClusterSecurity(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeClusterSecurity(request)
     if err != nil {
         errRet = err
         return
@@ -3241,7 +3241,7 @@ func (me *TkeService) DescribeKubernetesNodePoolById(ctx context.Context, cluste
     logId := tccommon.GetLogId(ctx)
 
     request := tke.NewDescribeClustersRequest()
-    request.ClusterIds = []*string{&clusterId}
+    request.ClusterIds = []*string{helper.String(clusterId)}
 
     defer func() {
         if errRet != nil {
@@ -3251,14 +3251,14 @@ func (me *TkeService) DescribeKubernetesNodePoolById(ctx context.Context, cluste
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeClusters(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeClusters(request)
     if err != nil {
         errRet = err
         return
     }
     log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
 
-    if response == nil || len(response.Response.Clusters) < 1 {
+    if len(response.Response.Clusters) < 1 {
         return
     }
 
@@ -3270,8 +3270,8 @@ func (me *TkeService) DescribeKubernetesNodePoolById1(ctx context.Context, clust
     logId := tccommon.GetLogId(ctx)
 
     request := tke.NewDescribeClusterNodePoolDetailRequest()
-    request.ClusterId = &clusterId
-    request.NodePoolId = &nodePoolId
+    request.ClusterId = helper.String(clusterId)
+    request.NodePoolId = helper.String(nodePoolId)
 
     defer func() {
         if errRet != nil {
@@ -3281,14 +3281,14 @@ func (me *TkeService) DescribeKubernetesNodePoolById1(ctx context.Context, clust
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeClusterNodePoolDetail(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeClusterNodePoolDetail(request)
     if err != nil {
         errRet = err
         return
     }
     log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
 
-    if response == nil || response.Response.NodePool == nil {
+    if response.Response == nil {
         return
     }
 
@@ -3300,7 +3300,7 @@ func (me *TkeService) DescribeKubernetesServerlessNodePoolById(ctx context.Conte
     logId := tccommon.GetLogId(ctx)
 
     request := tke.NewDescribeClusterVirtualNodePoolsRequest()
-    request.ClusterId = &clusterId
+    request.ClusterId = helper.String(clusterId)
 
     defer func() {
         if errRet != nil {
@@ -3310,14 +3310,14 @@ func (me *TkeService) DescribeKubernetesServerlessNodePoolById(ctx context.Conte
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeClusterVirtualNodePools(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeClusterVirtualNodePools(request)
     if err != nil {
         errRet = err
         return
     }
     log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
 
-    if response == nil || len(response.Response.NodePoolSet) < 1 {
+    if len(response.Response.NodePoolSet) < 1 {
         return
     }
 
@@ -3334,10 +3334,12 @@ func (me *TkeService) DescribeKubernetesAuthAttachmentById(ctx context.Context,
     logId := tccommon.GetLogId(ctx)
 
     request := tke.NewDescribeClusterAuthenticationOptionsRequest()
-    request.ClusterId = &clusterId
+    request.ClusterId = helper.String(clusterId)
+
     if err := resourceTencentCloudKubernetesAuthAttachmentReadPostFillRequest0(ctx, request); err != nil {
         return nil, err
     }
+
     defer func() {
         if errRet != nil {
             log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error())
@@ -3346,13 +3348,19 @@ func (me *TkeService) DescribeKubernetesAuthAttachmentById(ctx context.Context,
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeClusterAuthenticationOptions(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeClusterAuthenticationOptions(request)
     if err != nil {
         errRet = err
         return
     }
     log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
 
+
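+    // Post-read hook from the resource extension; it can reject the response
+    // before it is handed back to the caller.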
+    if err := resourceTencentCloudKubernetesAuthAttachmentReadPostRequest0(ctx, request, response); err != nil {
+        return nil, err
+    }
+
     ret = response.Response
     return
 }
@@ -3403,7 +3409,7 @@ func (me *TkeService) DescribeKubernetesScaleWorkerById(ctx context.Context, clu
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTkeClient().DescribeClusters(request)
+    response, err := me.client.UseTkeV20180525Client().DescribeClusters(request)
     if err != nil {
         errRet = err
         return
@@ -3424,54 +3430,33 @@ func (me *TkeService) DescribeKubernetesScaleWorkerById1(ctx context.Context, cl
     request := tke.NewDescribeClusterInstancesRequest()
     request.ClusterId = helper.String(clusterId)
 
-    ret = &tke.DescribeClusterInstancesResponseParams{
-        InstanceSet: make([]*tke.Instance, 0),
-        TotalCount:  new(uint64),
-    }
-
     defer func() {
         if errRet != nil {
             log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error())
         }
     }()
 
-    var offset int64 = 0
-    var pageSize int64 = 100
-    for {
-        request.Offset = &offset
-        request.Limit = &pageSize
-        ratelimit.Check(request.GetAction())
-
-        response, err := me.client.UseTkeClient().DescribeClusterInstances(request)
-        if err != nil {
-            errRet = err
-            return
-        }
-        log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
-
-        if err := resourceTencentCloudKubernetesScaleWorkerReadPostRequest1(ctx, request, response); err != nil {
-            return nil, err
-        }
+    ratelimit.Check(request.GetAction())
 
-        if response == nil || len(response.Response.InstanceSet) < 1 {
-            break
-        }
-        count := len(response.Response.InstanceSet)
-        ret.InstanceSet = append(ret.InstanceSet, response.Response.InstanceSet...)
-        *ret.TotalCount += *helper.IntUint64(count)
+    response, err := me.client.UseTkeV20180525Client().DescribeClusterInstances(request)
+    if err != nil {
+        errRet = err
+        return
+    }
+    log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
 
-        if count < int(pageSize) {
-            break
-        }
-        offset += pageSize
+    if err := resourceTencentCloudKubernetesScaleWorkerReadPostRequest1(ctx, request, response); err != nil {
+        return nil, err
     }
+
+    ret = response.Response
     return
 }
 
 func (me *TkeService) DescribeKubernetesScaleWorkerById2(ctx context.Context) (ret *cvm.DescribeInstancesResponseParams, errRet error) {
     logId := tccommon.GetLogId(ctx)
 
     request := cvm.NewDescribeInstancesRequest()
 
     if err := resourceTencentCloudKubernetesScaleWorkerReadPostFillRequest2(ctx, request); err != nil {
         return nil, err
@@ -3485,7 +3470,7 @@
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseCvmClient().DescribeInstances(request)
+    response, err := me.client.UseCvmV20170312Client().DescribeInstances(request)
     if err != nil {
         errRet = err
         return
@@ -3576,7 +3561,7 @@
 
     ratelimit.Check(request.GetAction())
 
-    response, err := me.client.UseTke2Client().DescribeNodePools(request)
+    response, err := me.client.UseTkeV20220501Client().DescribeNodePools(request)
     if err != nil {
         errRet = err
         return
@@ -3590,3 +3575,91 @@
     ret = response.Response.NodePools
     return
 }
+
+func NewTke2Service(client *connectivity.TencentCloudClient) Tke2Service {
+    return Tke2Service{client: client}
+}
+
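+// DescribeKubernetesAddonAttachmentById tunnels the add-on query through the
+// TKE ForwardApplicationRequestV3 API; the request body is prepared by the
+// pre-request hook invoked inside.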
+func (me *TkeService) DescribeKubernetesAddonAttachmentById(ctx context.Context) (ret *tke.ForwardApplicationRequestV3ResponseParams, errRet error) {
+    logId := tccommon.GetLogId(ctx)
+
+    request := tke.NewForwardApplicationRequestV3Request()
+
+    defer func() {
+        if errRet != nil {
+            log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error())
+        }
+    }()
+
+    ratelimit.Check(request.GetAction())
+
+    if err := resourceTencentCloudKubernetesAddonAttachmentReadPreRequest0(ctx, request); err != nil {
+        return nil, err
+    }
+
+    response, err := me.client.UseTkeV20180525Client().ForwardApplicationRequestV3(request)
+    if err != nil {
+        errRet = err
+        return
+    }
+    log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
+
+    ret = response.Response
+    return
+}
+
+func (me *TkeService) DescribeKubernetesNativeNodePoolsById(ctx context.Context, clusterId string, nodePoolId string) (ret *tke2.NodePool, errRet error) {
+    logId := tccommon.GetLogId(ctx)
+
+    request := tke2.NewDescribeNodePoolsRequest()
+    request.ClusterId = helper.String(clusterId)
+    filter := &tke2.Filter{
+        Name:   helper.String("NodePoolsId"),
+        Values: []*string{helper.String(nodePoolId)},
+    }
+    request.Filters = append(request.Filters, filter)
+
+    defer func() {
+        if errRet != nil {
+            log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error())
+        }
+    }()
+
+    ratelimit.Check(request.GetAction())
+
+    var (
+        offset int64 = 0
+        limit  int64 = 20
+    )
+    var instances []*tke2.NodePool
+    for {
+        request.Offset = &offset
+        request.Limit = &limit
+        response, err := me.client.UseTkeV20220501Client().DescribeNodePools(request)
+        if err != nil {
+            errRet = err
+            return
+        }
+        log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
+
+        if response == nil || len(response.Response.NodePools) < 1 {
+            break
+        }
+        instances = append(instances, response.Response.NodePools...)
+        if len(response.Response.NodePools) < int(limit) {
+            break
+        }
+
+        offset += limit
+    }
+
+    if len(instances) < 1 {
+        return
+    }
+
+    ret = instances[0]
+    return
+}