From f99eab6b89edae1bd0be0df58db0a3e4be21d002 Mon Sep 17 00:00:00 2001 From: WeiMengXS Date: Mon, 23 Oct 2023 20:14:05 +0800 Subject: [PATCH 1/5] feat: engine --- tencentcloud/provider.go | 1 + tencentcloud/resource_tc_dlc_data_engine.go | 834 ++++++++++++++++++ .../resource_tc_dlc_data_engine_test.go | 73 ++ tencentcloud/service_tencentcloud_dlc.go | 67 ++ 4 files changed, 975 insertions(+) create mode 100644 tencentcloud/resource_tc_dlc_data_engine.go create mode 100644 tencentcloud/resource_tc_dlc_data_engine_test.go diff --git a/tencentcloud/provider.go b/tencentcloud/provider.go index 03e9081fc2..40e48c0f8a 100644 --- a/tencentcloud/provider.go +++ b/tencentcloud/provider.go @@ -3499,6 +3499,7 @@ func Provider() *schema.Provider { "tencentcloud_eb_put_events": resourceTencentCloudEbPutEvents(), "tencentcloud_eb_event_connector": resourceTencentCloudEbEventConnector(), "tencentcloud_dlc_user": resourceTencentCloudDlcUser(), + "tencentcloud_dlc_data_engine": resourceTencentCloudDlcDataEngine(), "tencentcloud_dlc_add_users_to_work_group_attachment": resourceTencentCloudDlcAddUsersToWorkGroupAttachment(), "tencentcloud_dlc_store_location_config": resourceTencentCloudDlcStoreLocationConfig(), "tencentcloud_dlc_work_group": resourceTencentCloudDlcWorkGroup(), diff --git a/tencentcloud/resource_tc_dlc_data_engine.go b/tencentcloud/resource_tc_dlc_data_engine.go new file mode 100644 index 0000000000..c07cc00726 --- /dev/null +++ b/tencentcloud/resource_tc_dlc_data_engine.go @@ -0,0 +1,834 @@ +/* +Provides a resource to create a dlc data_engine + +Example Usage + +```hcl +resource "tencentcloud_dlc_data_engine" "data_engine" { + engine_type = "spark" + data_engine_name = "testSpark" + cluster_type = "spark_cu" + mode = 2 + auto_resume = false + min_clusters = 1 + max_clusters = 10 + default_data_engine = false + cidr_block = "192.0.2.1/24" + message = "test spark" + pay_mode = 1 + time_span = 3600 + time_unit = "m" + auto_renew = 0 + auto_suspend = false + 
crontab_resume_suspend = 0 + crontab_resume_suspend_strategy { + resume_time = "1000000-08:00:00" + suspend_time = "" + suspend_strategy = + + } + engine_exec_type = "SQL" + max_concurrency = 5 + tolerable_queue_time = 0 + auto_suspend_time = 10 + resource_type = "Standard_CU" + data_engine_config_pairs = + image_version_name = "" + main_cluster_name = "testSpark" + elastic_switch = false + elastic_limit = 0 + session_resource_template { + driver_size = "small" + executor_size = "small" + executor_nums = 1 + executor_max_numbers = 1 + + } +} +``` + +Import + +dlc data_engine can be imported using the id, e.g. + +``` +terraform import tencentcloud_dlc_data_engine.data_engine data_engine_id +``` +*/ +package tencentcloud + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dlc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dlc/v20210125" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" + "log" + "time" +) + +func resourceTencentCloudDlcDataEngine() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudDlcDataEngineCreate, + Read: resourceTencentCloudDlcDataEngineRead, + Update: resourceTencentCloudDlcDataEngineUpdate, + Delete: resourceTencentCloudDlcDataEngineDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "engine_type": { + Required: true, + Type: schema.TypeString, + Description: "Engine type, only support: spark/presto.", + }, + + "data_engine_name": { + Required: true, + Type: schema.TypeString, + Description: "Engine name.", + }, + + "cluster_type": { + Required: true, + Type: schema.TypeString, + Description: "Engine cluster type, only support: spark_cu/presto_cu.", + }, + + "mode": { + Required: true, + Type: schema.TypeInt, + Description: "Engine mode, 
only support 1: ByAmount, 2: YearlyAndMonthly.", + }, + + "auto_resume": { + Required: true, + Type: schema.TypeBool, + Description: "Whether to automatically start the cluster, prepay not support.", + }, + + "min_clusters": { + Optional: true, + Type: schema.TypeInt, + Description: "Engine min size, greater than or equal to 1 and MaxClusters bigger than MinClusters.", + }, + + "max_clusters": { + Optional: true, + Type: schema.TypeInt, + Description: "Engine max cluster size, MaxClusters less than or equal to 10 and MaxClusters bigger than MinClusters.", + }, + + "default_data_engine": { + Optional: true, + Type: schema.TypeBool, + Description: "Whether it is the default virtual cluster.", + }, + + "cidr_block": { + Optional: true, + Type: schema.TypeString, + Description: "Engine VPC network segment, just like 192.0.2.1/24.", + }, + + "message": { + Optional: true, + Type: schema.TypeString, + Description: "Engine description information.", + }, + + "pay_mode": { + Optional: true, + Type: schema.TypeInt, + Description: "Engine pay mode type, only support 0: postPay, 1: prePay(default).", + }, + + "time_span": { + Optional: true, + Type: schema.TypeInt, + Description: "Engine TimeSpan, prePay: minimum of 1, representing one month of purchasing resources, with a maximum of 120, default 3600, postPay: fixed fee of 3600.", + }, + + "time_unit": { + Optional: true, + Type: schema.TypeString, + Description: "Engine TimeUnit, prePay: use m(default), postPay: use h.", + }, + + "auto_renew": { + Optional: true, + Type: schema.TypeInt, + Description: "Engine auto renew, only support 0: Default, 1: AutoRenewON, 2: AutoRenewOFF.", + }, + + "auto_suspend": { + Optional: true, + Type: schema.TypeBool, + Description: "Whether to automatically suspend the cluster, prepay not support.", + }, + + "crontab_resume_suspend": { + Optional: true, + Type: schema.TypeInt, + Description: "Engine crontab resume or suspend strategy, only support: 0: Wait(default), 1: Kill.", + }, + + 
"crontab_resume_suspend_strategy": { + Optional: true, + Type: schema.TypeList, + MaxItems: 1, + Description: "Engine auto suspend strategy, when AutoSuspend is true, CrontabResumeSuspend must stop.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resume_time": { + Type: schema.TypeString, + Optional: true, + Description: "Scheduled pull-up time: For example: 8 o&#39;clock on Monday is expressed as 1000000-08:00:00.", + }, + "suspend_time": { + Type: schema.TypeString, + Optional: true, + Description: "Scheduled suspension time: For example: 20 o&#39;clock on Monday is expressed as 1000000-20:00:00.", + }, + "suspend_strategy": { + Type: schema.TypeInt, + Optional: true, + Description: "Suspend configuration: 0 (default): wait for the task to end before suspending, 1: force suspend.", + }, + }, + }, + }, + + "engine_exec_type": { + Optional: true, + Type: schema.TypeString, + Description: "Engine exec type, only support SQL(default) or BATCH.", + }, + + "max_concurrency": { + Optional: true, + Type: schema.TypeInt, + Description: "Maximum number of concurrent tasks in a single cluster, default 5.", + }, + + "tolerable_queue_time": { + Optional: true, + Type: schema.TypeInt, + Description: "Tolerable queuing time, default 0. scaling may be triggered when tasks are queued for longer than the tolerable time. 
if this parameter is 0, it means that capacity expansion may be triggered immediately once a task is queued.", + }, + + "auto_suspend_time": { + Optional: true, + Type: schema.TypeInt, + Description: "Cluster automatic suspension time, default 10 minutes.", + }, + + "resource_type": { + Optional: true, + Type: schema.TypeString, + Description: "Engine resource type not match, only support: Standard_CU/Memory_CU(only BATCH ExecType).", + }, + + "data_engine_config_pairs": { + Optional: true, + Type: schema.TypeList, + Description: "Cluster advanced configuration.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config_item": { + Type: schema.TypeString, + Required: true, + Description: "Configuration items.", + }, + "config_value": { + Type: schema.TypeString, + Required: true, + Description: "Configuration value.", + }, + }, + }, + }, + + "image_version_name": { + Optional: true, + Type: schema.TypeString, + Description: "Cluster image version name. Such as SuperSQL-P 1.1; SuperSQL-S 3.2, etc., do not upload, and create a cluster with the latest mirror version by default.", + }, + + "main_cluster_name": { + Optional: true, + Type: schema.TypeString, + Description: "Primary cluster name, specified when creating a disaster recovery cluster.", + }, + + "elastic_switch": { + Optional: true, + Type: schema.TypeBool, + Description: "For spark Batch ExecType, yearly and monthly cluster whether to enable elasticity.", + }, + + "elastic_limit": { + Optional: true, + Type: schema.TypeInt, + Description: "For spark Batch ExecType, yearly and monthly cluster elastic limit.", + }, + + "session_resource_template": { + Optional: true, + Type: schema.TypeList, + MaxItems: 1, + Description: "For spark Batch ExecType, cluster session resource configuration template.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_size": { + Type: schema.TypeString, + Optional: true, + Description: "Engine driver size specification only supports: 
small/medium/large/xlarge/m.small/m.medium/m.large/m.xlarge.", + }, + "executor_size": { + Type: schema.TypeString, + Optional: true, + Description: "Engine executor size specification only supports: small/medium/large/xlarge/m.small/m.medium/m.large/m.xlarge.", + }, + "executor_nums": { + Type: schema.TypeInt, + Optional: true, + Description: "Specify the number of executors. The minimum value is 1 and the maximum value is less than the cluster specification.", + }, + "executor_max_numbers": { + Type: schema.TypeInt, + Optional: true, + Description: "Specify the executor max number (in a dynamic configuration scenario), the minimum value is 1, and the maximum value is less than the cluster specification (when ExecutorMaxNumbers is less than ExecutorNums, the value is set to ExecutorNums).", + }, + }, + }, + }, + }, + } +} + +func resourceTencentCloudDlcDataEngineCreate(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_dlc_data_engine.create")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + + var ( + request = dlc.NewCreateDataEngineRequest() + response = dlc.NewCreateDataEngineResponse() + dataEngineId string + dataEngineName string + ) + if v, ok := d.GetOk("engine_type"); ok { + request.EngineType = helper.String(v.(string)) + } + + if v, ok := d.GetOk("data_engine_name"); ok { + dataEngineName=v.(string) + request.DataEngineName = helper.String(v.(string)) + } + + if v, ok := d.GetOk("cluster_type"); ok { + request.ClusterType = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("mode"); ok { + request.Mode = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOkExists("auto_resume"); ok { + request.AutoResume = helper.Bool(v.(bool)) + } + + if v, ok := d.GetOkExists("min_clusters"); ok { + request.MinClusters = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOkExists("max_clusters"); ok { + request.MaxClusters = helper.IntInt64(v.(int)) + } + + if v, ok := 
d.GetOkExists("default_data_engine"); ok { + request.DefaultDataEngine = helper.Bool(v.(bool)) + } + + if v, ok := d.GetOk("cidr_block"); ok { + request.CidrBlock = helper.String(v.(string)) + } + + if v, ok := d.GetOk("message"); ok { + request.Message = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("pay_mode"); ok { + request.PayMode = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOkExists("time_span"); ok { + request.TimeSpan = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOk("time_unit"); ok { + request.TimeUnit = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("auto_renew"); ok { + request.AutoRenew = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOkExists("auto_suspend"); ok { + request.AutoSuspend = helper.Bool(v.(bool)) + } + + if v, ok := d.GetOkExists("crontab_resume_suspend"); ok { + request.CrontabResumeSuspend = helper.IntInt64(v.(int)) + } + + if dMap, ok := helper.InterfacesHeadMap(d, "crontab_resume_suspend_strategy"); ok { + crontabResumeSuspendStrategy := dlc.CrontabResumeSuspendStrategy{} + if v, ok := dMap["resume_time"]; ok { + crontabResumeSuspendStrategy.ResumeTime = helper.String(v.(string)) + } + if v, ok := dMap["suspend_time"]; ok { + crontabResumeSuspendStrategy.SuspendTime = helper.String(v.(string)) + } + if v, ok := dMap["suspend_strategy"]; ok { + crontabResumeSuspendStrategy.SuspendStrategy = helper.IntInt64(v.(int)) + } + request.CrontabResumeSuspendStrategy = &crontabResumeSuspendStrategy + } + + if v, ok := d.GetOk("engine_exec_type"); ok { + request.EngineExecType = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("max_concurrency"); ok { + request.MaxConcurrency = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOkExists("tolerable_queue_time"); ok { + request.TolerableQueueTime = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOkExists("auto_suspend_time"); ok { + request.AutoSuspendTime = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOk("resource_type"); ok { + 
request.ResourceType = helper.String(v.(string)) + } + if v, ok := d.GetOk("data_engine_config_pairs"); ok { + for _, item := range v.([]interface{}) { + dMap := item.(map[string]interface{}) + dataEngineConfigPair := dlc.DataEngineConfigPair{} + if v, ok := dMap["config_item"]; ok { + dataEngineConfigPair.ConfigItem = helper.String(v.(string)) + } + if v, ok := dMap["config_value"]; ok { + dataEngineConfigPair.ConfigValue = helper.String(v.(string)) + } + request.DataEngineConfigPairs = append(request.DataEngineConfigPairs, &dataEngineConfigPair) + } + } + + if v, ok := d.GetOk("image_version_name"); ok { + request.ImageVersionName = helper.String(v.(string)) + } + + if v, ok := d.GetOk("main_cluster_name"); ok { + request.MainClusterName = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("elastic_switch"); ok { + request.ElasticSwitch = helper.Bool(v.(bool)) + } + + if v, ok := d.GetOkExists("elastic_limit"); ok { + request.ElasticLimit = helper.IntInt64(v.(int)) + } + + if dMap, ok := helper.InterfacesHeadMap(d, "session_resource_template"); ok { + sessionResourceTemplate := dlc.SessionResourceTemplate{} + if v, ok := dMap["driver_size"]; ok { + sessionResourceTemplate.DriverSize = helper.String(v.(string)) + } + if v, ok := dMap["executor_size"]; ok { + sessionResourceTemplate.ExecutorSize = helper.String(v.(string)) + } + if v, ok := dMap["executor_nums"]; ok { + sessionResourceTemplate.ExecutorNums = helper.IntUint64(v.(int)) + } + if v, ok := dMap["executor_max_numbers"]; ok { + sessionResourceTemplate.ExecutorMaxNumbers = helper.IntUint64(v.(int)) + } + request.SessionResourceTemplate = &sessionResourceTemplate + } + + err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseDlcClient().CreateDataEngine(request) + if e != nil { + return retryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), 
request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s create dlc dataEngine failed, reason:%+v", logId, err) + return err + } + + dataEngineId = *response.Response.DataEngineId + d.SetId(dataEngineName+FILED_SP+dataEngineId) + + service := DlcService{client: meta.(*TencentCloudClient).apiV3Conn} + + conf := BuildStateChangeConf([]string{}, []string{"2"}, 5*readRetryTimeout, time.Second, service.DlcDataEngineStateRefreshFunc(d.Id(), []string{})) + + if _, e := conf.WaitForState(); e != nil { + return e + } + + return resourceTencentCloudDlcDataEngineRead(d, meta) +} + +func resourceTencentCloudDlcDataEngineRead(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_dlc_data_engine.read")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + + ctx := context.WithValue(context.TODO(), logIdKey, logId) + + service := DlcService{client: meta.(*TencentCloudClient).apiV3Conn} + + dataEngineId := d.Id() + + dataEngine, err := service.DescribeDlcDataEngineById(ctx, dataEngineId) + if err != nil { + return err + } + + if dataEngine == nil { + d.SetId("") + log.Printf("[WARN]%s resource `DlcDataEngine` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil + } + + if dataEngine.EngineType != nil { + _ = d.Set("engine_type", dataEngine.EngineType) + } + + if dataEngine.DataEngineName != nil { + _ = d.Set("data_engine_name", dataEngine.DataEngineName) + } + + if dataEngine.ClusterType != nil { + _ = d.Set("cluster_type", dataEngine.ClusterType) + } + + if dataEngine.Mode != nil { + _ = d.Set("mode", dataEngine.Mode) + } + + if dataEngine.AutoResume != nil { + _ = d.Set("auto_resume", dataEngine.AutoResume) + } + + if dataEngine.MinClusters != nil { + _ = d.Set("min_clusters", dataEngine.MinClusters) + } + + if dataEngine.MaxClusters != nil { + _ = d.Set("max_clusters", dataEngine.MaxClusters) + } + + if 
dataEngine.DefaultDataEngine != nil { + _ = d.Set("default_data_engine", dataEngine.DefaultDataEngine) + } + + if dataEngine.CidrBlock != nil { + _ = d.Set("cidr_block", dataEngine.CidrBlock) + } + + if dataEngine.Message != nil { + _ = d.Set("message", dataEngine.Message) + } + + if dataEngine.PayMode != nil { + _ = d.Set("pay_mode", dataEngine.PayMode) + } + + if dataEngine.TimeSpan != nil { + _ = d.Set("time_span", dataEngine.TimeSpan) + } + + if dataEngine.TimeUnit != nil { + _ = d.Set("time_unit", dataEngine.TimeUnit) + } + + if dataEngine.AutoRenew != nil { + _ = d.Set("auto_renew", dataEngine.AutoRenew) + } + + if dataEngine.AutoSuspend != nil { + _ = d.Set("auto_suspend", dataEngine.AutoSuspend) + } + + if dataEngine.CrontabResumeSuspend != nil { + _ = d.Set("crontab_resume_suspend", dataEngine.CrontabResumeSuspend) + } + + if dataEngine.CrontabResumeSuspendStrategy != nil { + crontabResumeSuspendStrategyMap := map[string]interface{}{} + + if dataEngine.CrontabResumeSuspendStrategy.ResumeTime != nil { + crontabResumeSuspendStrategyMap["resume_time"] = dataEngine.CrontabResumeSuspendStrategy.ResumeTime + } + + if dataEngine.CrontabResumeSuspendStrategy.SuspendTime != nil { + crontabResumeSuspendStrategyMap["suspend_time"] = dataEngine.CrontabResumeSuspendStrategy.SuspendTime + } + + if dataEngine.CrontabResumeSuspendStrategy.SuspendStrategy != nil { + crontabResumeSuspendStrategyMap["suspend_strategy"] = dataEngine.CrontabResumeSuspendStrategy.SuspendStrategy + } + + _ = d.Set("crontab_resume_suspend_strategy", []interface{}{crontabResumeSuspendStrategyMap}) + } + + if dataEngine.EngineExecType != nil { + _ = d.Set("engine_exec_type", dataEngine.EngineExecType) + } + + if dataEngine.MaxConcurrency != nil { + _ = d.Set("max_concurrency", dataEngine.MaxConcurrency) + } + + if dataEngine.TolerableQueueTime != nil { + _ = d.Set("tolerable_queue_time", dataEngine.TolerableQueueTime) + } + + if dataEngine.AutoSuspendTime != nil { + _ = d.Set("auto_suspend_time", 
dataEngine.AutoSuspendTime) + } + + if dataEngine.ResourceType != nil { + _ = d.Set("resource_type", dataEngine.ResourceType) + } + + if dataEngine.DataEngineConfigPairs != nil { + _ = d.Set("data_engine_config_pairs", dataEngine.DataEngineConfigPairs) + } + + if dataEngine.ImageVersionName != nil { + _ = d.Set("image_version_name", dataEngine.ImageVersionName) + } + + if dataEngine.MainClusterName != nil { + _ = d.Set("main_cluster_name", dataEngine.MainClusterName) + } + + if dataEngine.ElasticSwitch != nil { + _ = d.Set("elastic_switch", dataEngine.ElasticSwitch) + } + + if dataEngine.ElasticLimit != nil { + _ = d.Set("elastic_limit", dataEngine.ElasticLimit) + } + + if dataEngine.SessionResourceTemplate != nil { + sessionResourceTemplateMap := map[string]interface{}{} + + if dataEngine.SessionResourceTemplate.DriverSize != nil { + sessionResourceTemplateMap["driver_size"] = dataEngine.SessionResourceTemplate.DriverSize + } + + if dataEngine.SessionResourceTemplate.ExecutorSize != nil { + sessionResourceTemplateMap["executor_size"] = dataEngine.SessionResourceTemplate.ExecutorSize + } + + if dataEngine.SessionResourceTemplate.ExecutorNums != nil { + sessionResourceTemplateMap["executor_nums"] = dataEngine.SessionResourceTemplate.ExecutorNums + } + + if dataEngine.SessionResourceTemplate.ExecutorMaxNumbers != nil { + sessionResourceTemplateMap["executor_max_numbers"] = dataEngine.SessionResourceTemplate.ExecutorMaxNumbers + } + + _ = d.Set("session_resource_template", []interface{}{sessionResourceTemplateMap}) + } + + return nil +} + +func resourceTencentCloudDlcDataEngineUpdate(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_dlc_data_engine.update")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + + request := dlc.NewUpdateDataEngineRequest() + + dataEngineId := d.Id() + + request.DataEngineId = &dataEngineId + + immutableArgs := []string{"engine_type", "data_engine_name", "cluster_type", "mode", "auto_resume", 
"min_clusters", "max_clusters", "default_data_engine", "cidr_block", "message", "pay_mode", "time_span", "time_unit", "auto_renew", "auto_suspend", "crontab_resume_suspend", "crontab_resume_suspend_strategy", "engine_exec_type", "max_concurrency", "tolerable_queue_time", "auto_suspend_time", "resource_type", "data_engine_config_pairs", "image_version_name", "main_cluster_name", "elastic_switch", "elastic_limit", "session_resource_template"} + + for _, v := range immutableArgs { + if d.HasChange(v) { + return fmt.Errorf("argument `%s` cannot be changed", v) + } + } + + if d.HasChange("data_engine_name") { + if v, ok := d.GetOk("data_engine_name"); ok { + request.DataEngineName = helper.String(v.(string)) + } + } + + if d.HasChange("auto_resume") { + if v, ok := d.GetOkExists("auto_resume"); ok { + request.AutoResume = helper.Bool(v.(bool)) + } + } + + if d.HasChange("min_clusters") { + if v, ok := d.GetOkExists("min_clusters"); ok { + request.MinClusters = helper.IntInt64(v.(int)) + } + } + + if d.HasChange("max_clusters") { + if v, ok := d.GetOkExists("max_clusters"); ok { + request.MaxClusters = helper.IntInt64(v.(int)) + } + } + + if d.HasChange("message") { + if v, ok := d.GetOk("message"); ok { + request.Message = helper.String(v.(string)) + } + } + + if d.HasChange("auto_suspend") { + if v, ok := d.GetOkExists("auto_suspend"); ok { + request.AutoSuspend = helper.Bool(v.(bool)) + } + } + + if d.HasChange("crontab_resume_suspend") { + if v, ok := d.GetOkExists("crontab_resume_suspend"); ok { + request.CrontabResumeSuspend = helper.IntInt64(v.(int)) + } + } + + if d.HasChange("crontab_resume_suspend_strategy") { + if dMap, ok := helper.InterfacesHeadMap(d, "crontab_resume_suspend_strategy"); ok { + crontabResumeSuspendStrategy := dlc.CrontabResumeSuspendStrategy{} + if v, ok := dMap["resume_time"]; ok { + crontabResumeSuspendStrategy.ResumeTime = helper.String(v.(string)) + } + if v, ok := dMap["suspend_time"]; ok { + crontabResumeSuspendStrategy.SuspendTime = 
helper.String(v.(string)) + } + if v, ok := dMap["suspend_strategy"]; ok { + crontabResumeSuspendStrategy.SuspendStrategy = helper.IntInt64(v.(int)) + } + request.CrontabResumeSuspendStrategy = &crontabResumeSuspendStrategy + } + } + + if d.HasChange("max_concurrency") { + if v, ok := d.GetOkExists("max_concurrency"); ok { + request.MaxConcurrency = helper.IntInt64(v.(int)) + } + } + + if d.HasChange("tolerable_queue_time") { + if v, ok := d.GetOkExists("tolerable_queue_time"); ok { + request.TolerableQueueTime = helper.IntInt64(v.(int)) + } + } + + if d.HasChange("auto_suspend_time") { + if v, ok := d.GetOkExists("auto_suspend_time"); ok { + request.AutoSuspendTime = helper.IntInt64(v.(int)) + } + } + + if d.HasChange("elastic_switch") { + if v, ok := d.GetOkExists("elastic_switch"); ok { + request.ElasticSwitch = helper.Bool(v.(bool)) + } + } + + if d.HasChange("elastic_limit") { + if v, ok := d.GetOkExists("elastic_limit"); ok { + request.ElasticLimit = helper.IntInt64(v.(int)) + } + } + + if d.HasChange("session_resource_template") { + if dMap, ok := helper.InterfacesHeadMap(d, "session_resource_template"); ok { + sessionResourceTemplate := dlc.SessionResourceTemplate{} + if v, ok := dMap["driver_size"]; ok { + sessionResourceTemplate.DriverSize = helper.String(v.(string)) + } + if v, ok := dMap["executor_size"]; ok { + sessionResourceTemplate.ExecutorSize = helper.String(v.(string)) + } + if v, ok := dMap["executor_nums"]; ok { + sessionResourceTemplate.ExecutorNums = helper.IntUint64(v.(int)) + } + if v, ok := dMap["executor_max_numbers"]; ok { + sessionResourceTemplate.ExecutorMaxNumbers = helper.IntUint64(v.(int)) + } + request.SessionResourceTemplate = &sessionResourceTemplate + } + } + + err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseDlcClient().UpdateDataEngine(request) + if e != nil { + return retryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body 
[%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s update dlc dataEngine failed, reason:%+v", logId, err) + return err + } + + service := DlcService{client: meta.(*TencentCloudClient).apiV3Conn} + + conf := BuildStateChangeConf([]string{}, []string{"2"}, 5*readRetryTimeout, time.Second, service.DlcDataEngineStateRefreshFunc(d.Id(), []string{})) + + if _, e := conf.WaitForState(); e != nil { + return e + } + + return resourceTencentCloudDlcDataEngineRead(d, meta) +} + +func resourceTencentCloudDlcDataEngineDelete(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_dlc_data_engine.delete")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + ctx := context.WithValue(context.TODO(), logIdKey, logId) + + service := DlcService{client: meta.(*TencentCloudClient).apiV3Conn} + dataEngineId := d.Id() + + if err := service.DeleteDlcDataEngineById(ctx, dataEngineId); err != nil { + return err + } + + return nil +} diff --git a/tencentcloud/resource_tc_dlc_data_engine_test.go b/tencentcloud/resource_tc_dlc_data_engine_test.go new file mode 100644 index 0000000000..e1bc3065da --- /dev/null +++ b/tencentcloud/resource_tc_dlc_data_engine_test.go @@ -0,0 +1,73 @@ +package tencentcloud + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "testing" +) + +func TestAccTencentCloudDlcDataEngineResource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDlcDataEngine, + Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_dlc_data_engine.data_engine", "id")), + }, + { + ResourceName: "tencentcloud_dlc_data_engine.data_engine", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + 
+const testAccDlcDataEngine = ` + +resource "tencentcloud_dlc_data_engine" "data_engine" { + engine_type = "spark" + data_engine_name = "testSpark" + cluster_type = "spark_cu" + mode = 2 + auto_resume = false + min_clusters = 1 + max_clusters = 10 + default_data_engine = false + cidr_block = "192.0.2.1/24" + message = "test spark" + pay_mode = 1 + time_span = 3600 + time_unit = "m" + auto_renew = 0 + auto_suspend = false + crontab_resume_suspend = 0 + crontab_resume_suspend_strategy { + resume_time = "1000000-08:00:00" + suspend_time = "" + suspend_strategy = 0 + + } + engine_exec_type = "SQL" + max_concurrency = 5 + tolerable_queue_time = 0 + auto_suspend_time = 10 + resource_type = "Standard_CU" + image_version_name = "" + main_cluster_name = "testSpark" + elastic_switch = false + elastic_limit = 0 + session_resource_template { + driver_size = "small" + executor_size = "small" + executor_nums = 1 + executor_max_numbers = 1 + + } +} + +` diff --git a/tencentcloud/service_tencentcloud_dlc.go b/tencentcloud/service_tencentcloud_dlc.go index 4db300b09f..503bd6d77c 100644 --- a/tencentcloud/service_tencentcloud_dlc.go +++ b/tencentcloud/service_tencentcloud_dlc.go @@ -336,3 +336,70 @@ func (me *DlcService) DescribeDlcDescribeUserInfoByFilter(ctx context.Context, p describeUserInfo = response.Response.UserInfo return } + +func (me *DlcService) DescribeDlcDataEngineById(ctx context.Context, dataEngineId string) (dataEngine *dlc.DataEngineInfo, errRet error) { + logId := getLogId(ctx) + + request := dlc.NewDescribeDataEnginesRequest() + request. 
Filters = []*dlc.Filter{{Name: helper.String("data-engine-name"), Values: []*string{&dataEngineId}}} + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseDlcClient().DescribeDataEngines(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if len(response.Response.DataEngineInfo) < 1 { + return + } + + dataEngine = response.Response.DataEngineInfo[0] + return +} + +func (me *DlcService) DeleteDlcDataEngineById(ctx context.Context, dataEngineId string) (errRet error) { + logId := getLogId(ctx) + + request := dlc.NewDeleteDataEngineRequest() + request.DataEngineId = &dataEngineId + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseDlcClient().DeleteDataEngine(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + return +} + +func (me *DlcService) DlcDataEngineStateRefreshFunc(dataEngineId string, failStates []string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + ctx := contextNil + + object, err := me.DescribeDlcDataEngineById(ctx, dataEngineId) + + if err != nil { + return nil, "", err + } + + return object, helper.Int64ToStr(*object.State), nil + } +} From 775a967614fb2eff03b972bf692f9e3ac50bb9f2 Mon Sep 17 00:00:00 2001 From: WeiMengXS Date: Tue, 24 Oct 2023 20:51:41 +0800 Subject: [PATCH 2/5] feat: fmt and doc --- ..._check_data_engine_image_can_be_upgrade.go | 103 ++++++++ 
...k_data_engine_image_can_be_upgrade_test.go | 31 +++ tencentcloud/provider.go | 11 +- tencentcloud/resource_tc_dlc_data_engine.go | 223 ++++++++++-------- .../resource_tc_dlc_data_engine_test.go | 111 ++++++--- ...lc_rollback_data_engine_image_operation.go | 123 ++++++++++ ...llback_data_engine_image_operation_test.go | 37 +++ tencentcloud/service_tencentcloud_dlc.go | 51 ++-- ..._engine_image_can_be_upgrade.html.markdown | 36 +++ website/docs/r/dlc_data_engine.html.markdown | 105 +++++++++ ..._data_engine_image_operation.html.markdown | 50 ++++ website/tencentcloud.erb | 9 + 12 files changed, 733 insertions(+), 157 deletions(-) create mode 100644 tencentcloud/data_source_tc_dlc_check_data_engine_image_can_be_upgrade.go create mode 100644 tencentcloud/data_source_tc_dlc_check_data_engine_image_can_be_upgrade_test.go create mode 100644 tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation.go create mode 100644 tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation_test.go create mode 100644 website/docs/d/dlc_check_data_engine_image_can_be_upgrade.html.markdown create mode 100644 website/docs/r/dlc_data_engine.html.markdown create mode 100644 website/docs/r/dlc_rollback_data_engine_image_operation.html.markdown diff --git a/tencentcloud/data_source_tc_dlc_check_data_engine_image_can_be_upgrade.go b/tencentcloud/data_source_tc_dlc_check_data_engine_image_can_be_upgrade.go new file mode 100644 index 0000000000..e2a2a27b59 --- /dev/null +++ b/tencentcloud/data_source_tc_dlc_check_data_engine_image_can_be_upgrade.go @@ -0,0 +1,103 @@ +/* +Use this data source to query detailed information of dlc check_data_engine_image_can_be_upgrade + +Example Usage + +```hcl +data "tencentcloud_dlc_check_data_engine_image_can_be_upgrade" "check_data_engine_image_can_be_upgrade" { + data_engine_id = "DataEngine-cgkvbas6" + } +``` +*/ +package tencentcloud + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dlc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dlc/v20210125" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func dataSourceTencentCloudDlcCheckDataEngineImageCanBeUpgrade() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTencentCloudDlcCheckDataEngineImageCanBeUpgradeRead, + Schema: map[string]*schema.Schema{ + "data_engine_id": { + Required: true, + Type: schema.TypeString, + Description: "Engine unique id.", + }, + + "child_image_version_id": { + Computed: true, + Type: schema.TypeString, + Description: "The latest image version id that can be upgraded.", + }, + + "is_upgrade": { + Computed: true, + Type: schema.TypeBool, + Description: "Is it possible to upgrade.", + }, + + "result_output_file": { + Type: schema.TypeString, + Optional: true, + Description: "Used to save results.", + }, + }, + } +} + +func dataSourceTencentCloudDlcCheckDataEngineImageCanBeUpgradeRead(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("data_source.tencentcloud_dlc_check_data_engine_image_can_be_upgrade.read")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + + ctx := context.WithValue(context.TODO(), logIdKey, logId) + var dataEngineId string + paramMap := make(map[string]interface{}) + if v, ok := d.GetOk("data_engine_id"); ok { + dataEngineId = v.(string) + paramMap["DataEngineId"] = helper.String(v.(string)) + } + + service := DlcService{client: meta.(*TencentCloudClient).apiV3Conn} + var checkResult *dlc.CheckDataEngineImageCanBeUpgradeResponseParams + + err := resource.Retry(readRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeDlcCheckDataEngineImageCanBeUpgradeByFilter(ctx, paramMap) + if e != nil { + return retryError(e) + } + checkResult = result + return nil + }) + if err != nil { + return err + } + var data = 
make(map[string]interface{}, 0) + + if checkResult.ChildImageVersionId != nil { + _ = d.Set("child_image_version_id", checkResult.ChildImageVersionId) + data["child_image_version_id"] = checkResult.ChildImageVersionId + } + + if checkResult.IsUpgrade != nil { + _ = d.Set("is_upgrade", checkResult.IsUpgrade) + data["is_upgrade"] = checkResult.IsUpgrade + + } + + d.SetId(dataEngineId) + output, ok := d.GetOk("result_output_file") + if ok && output.(string) != "" { + if e := writeToFile(output.(string), data); e != nil { + return e + } + } + return nil +} diff --git a/tencentcloud/data_source_tc_dlc_check_data_engine_image_can_be_upgrade_test.go b/tencentcloud/data_source_tc_dlc_check_data_engine_image_can_be_upgrade_test.go new file mode 100644 index 0000000000..11e8bb3cc5 --- /dev/null +++ b/tencentcloud/data_source_tc_dlc_check_data_engine_image_can_be_upgrade_test.go @@ -0,0 +1,31 @@ +package tencentcloud + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccTencentCloudDlcCheckDataEngineImageCanBeUpgradeDataSource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDlcCheckDataEngineImageCanBeUpgradeDataSource, + Check: resource.ComposeTestCheckFunc(testAccCheckTencentCloudDataSourceID("data.tencentcloud_dlc_check_data_engine_image_can_be_upgrade.check_data_engine_image_can_be_upgrade")), + }, + }, + }) +} + +const testAccDlcCheckDataEngineImageCanBeUpgradeDataSource = ` + +data "tencentcloud_dlc_check_data_engine_image_can_be_upgrade" "check_data_engine_image_can_be_upgrade" { + data_engine_id = "DataEngine-cgkvbas6" + } + +` diff --git a/tencentcloud/provider.go b/tencentcloud/provider.go index 40e48c0f8a..b7f7099a45 100644 --- a/tencentcloud/provider.go +++ b/tencentcloud/provider.go @@ -1515,7 +1515,6 @@ Tencent Cloud Organization (TCO) 
tencentcloud_organization_org_member_email tencentcloud_organization_org_member_auth_identity_attachment tencentcloud_organization_policy_sub_account_attachment - tencentcloud_organization_org_member_policy_attachment tencentcloud_organization_quit_organization_operation TDSQL-C for PostgreSQL(TDCPG) @@ -1832,10 +1831,13 @@ Data Lake Compute(DLC) tencentcloud_dlc_describe_user_info tencentcloud_dlc_describe_user_roles tencentcloud_dlc_check_data_engine_image_can_be_rollback + tencentcloud_dlc_check_data_engine_image_can_be_upgrade Resource tencentcloud_dlc_work_group tencentcloud_dlc_user + tencentcloud_dlc_data_engine + tencentcloud_dlc_rollback_data_engine_image_operation tencentcloud_dlc_add_users_to_work_group_attachment tencentcloud_dlc_store_location_config tencentcloud_dlc_suspend_resume_data_engine @@ -2692,6 +2694,7 @@ func Provider() *schema.Provider { "tencentcloud_cam_secret_last_used_time": dataSourceTencentCloudCamSecretLastUsedTime(), "tencentcloud_cam_policy_granting_service_access": dataSourceTencentCloudCamPolicyGrantingServiceAccess(), "tencentcloud_dlc_check_data_engine_image_can_be_rollback": dataSourceTencentCloudDlcCheckDataEngineImageCanBeRollback(), + "tencentcloud_dlc_check_data_engine_image_can_be_upgrade": dataSourceTencentCloudDlcCheckDataEngineImageCanBeUpgrade(), "tencentcloud_dlc_describe_user_type": dataSourceTencentCloudDlcDescribeUserType(), "tencentcloud_dlc_describe_user_info": dataSourceTencentCloudDlcDescribeUserInfo(), "tencentcloud_dlc_describe_user_roles": dataSourceTencentCloudDlcDescribeUserRoles(), @@ -3302,7 +3305,6 @@ func Provider() *schema.Provider { "tencentcloud_organization_instance": resourceTencentCloudOrganizationOrganization(), "tencentcloud_organization_policy_sub_account_attachment": resourceTencentCloudOrganizationPolicySubAccountAttachment(), "tencentcloud_organization_org_member_auth_identity_attachment": resourceTencentCloudOrganizationOrgMemberAuthIdentityAttachment(), - 
"tencentcloud_organization_org_member_policy_attachment": resourceTencentCloudOrganizationOrgMemberPolicyAttachment(), "tencentcloud_dbbrain_sql_filter": resourceTencentCloudDbbrainSqlFilter(), "tencentcloud_dbbrain_security_audit_log_export_task": resourceTencentCloudDbbrainSecurityAuditLogExportTask(), "tencentcloud_dbbrain_db_diag_report_task": resourceTencentCloudDbbrainDbDiagReportTask(), @@ -3499,11 +3501,12 @@ func Provider() *schema.Provider { "tencentcloud_eb_put_events": resourceTencentCloudEbPutEvents(), "tencentcloud_eb_event_connector": resourceTencentCloudEbEventConnector(), "tencentcloud_dlc_user": resourceTencentCloudDlcUser(), + "tencentcloud_dlc_work_group": resourceTencentCloudDlcWorkGroup(), "tencentcloud_dlc_data_engine": resourceTencentCloudDlcDataEngine(), + "tencentcloud_dlc_suspend_resume_data_engine": resourceTencentCloudDlcSuspendResumeDataEngine(), + "tencentcloud_dlc_rollback_data_engine_image_operation": resourceTencentCloudDlcRollbackDataEngineImageOperation(), "tencentcloud_dlc_add_users_to_work_group_attachment": resourceTencentCloudDlcAddUsersToWorkGroupAttachment(), "tencentcloud_dlc_store_location_config": resourceTencentCloudDlcStoreLocationConfig(), - "tencentcloud_dlc_work_group": resourceTencentCloudDlcWorkGroup(), - "tencentcloud_dlc_suspend_resume_data_engine": resourceTencentCloudDlcSuspendResumeDataEngine(), "tencentcloud_wedata_rule_template": resourceTencentCloudWedataRuleTemplate(), "tencentcloud_waf_custom_rule": resourceTencentCloudWafCustomRule(), "tencentcloud_waf_custom_white_rule": resourceTencentCloudWafCustomWhiteRule(), diff --git a/tencentcloud/resource_tc_dlc_data_engine.go b/tencentcloud/resource_tc_dlc_data_engine.go index c07cc00726..2f89e78ce6 100644 --- a/tencentcloud/resource_tc_dlc_data_engine.go +++ b/tencentcloud/resource_tc_dlc_data_engine.go @@ -8,42 +8,20 @@ resource "tencentcloud_dlc_data_engine" "data_engine" { engine_type = "spark" data_engine_name = "testSpark" cluster_type = "spark_cu" - 
mode = 2 + mode = 1 auto_resume = false + size = 16 + pay_mode = 0 min_clusters = 1 - max_clusters = 10 + max_clusters = 1 default_data_engine = false - cidr_block = "192.0.2.1/24" - message = "test spark" - pay_mode = 1 - time_span = 3600 - time_unit = "m" - auto_renew = 0 + cidr_block = "10.255.0.0/16" + message = "test spark1" + time_span = 1 + time_unit = "h" auto_suspend = false crontab_resume_suspend = 0 - crontab_resume_suspend_strategy { - resume_time = "1000000-08:00:00" - suspend_time = "" - suspend_strategy = - - } - engine_exec_type = "SQL" - max_concurrency = 5 - tolerable_queue_time = 0 - auto_suspend_time = 10 - resource_type = "Standard_CU" - data_engine_config_pairs = - image_version_name = "" - main_cluster_name = "testSpark" - elastic_switch = false - elastic_limit = 0 - session_resource_template { - driver_size = "small" - executor_size = "small" - executor_nums = 1 - executor_max_numbers = 1 - - } + engine_exec_type = "BATCH" } ``` @@ -60,12 +38,13 @@ package tencentcloud import ( "context" "fmt" + "log" + "strings" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" dlc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dlc/v20210125" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" - "log" - "time" ) func resourceTencentCloudDlcDataEngine() *schema.Resource { @@ -108,6 +87,12 @@ func resourceTencentCloudDlcDataEngine() *schema.Resource { Description: "Whether to automatically start the cluster, prepay not support.", }, + "size": { + Optional: true, + Type: schema.TypeInt, + Description: "Cluster size. 
Required when updating.", + }, + "min_clusters": { Optional: true, Type: schema.TypeInt, @@ -117,7 +102,7 @@ func resourceTencentCloudDlcDataEngine() *schema.Resource { "max_clusters": { Optional: true, Type: schema.TypeInt, - Description: "Engine max cluster size, MaxClusters less than or equal to 10 and MaxClusters bigger than MinClusters.", + Description: "Engine max cluster size, MaxClusters less than or equal to 10 and MaxClusters bigger than MinClusters.", }, "default_data_engine": { @@ -176,6 +161,7 @@ func resourceTencentCloudDlcDataEngine() *schema.Resource { "crontab_resume_suspend_strategy": { Optional: true, + Computed: true, Type: schema.TypeList, MaxItems: 1, Description: "Engine auto suspend strategy, when AutoSuspend is true, CrontabResumeSuspend must stop.", @@ -208,6 +194,7 @@ func resourceTencentCloudDlcDataEngine() *schema.Resource { "max_concurrency": { Optional: true, + Computed: true, Type: schema.TypeInt, Description: "Maximum number of concurrent tasks in a single cluster, default 5.", }, @@ -220,12 +207,14 @@ func resourceTencentCloudDlcDataEngine() *schema.Resource { "auto_suspend_time": { Optional: true, + Computed: true, Type: schema.TypeInt, Description: "Cluster automatic suspension time, default 10 minutes.", }, "resource_type": { Optional: true, + Computed: true, Type: schema.TypeString, Description: "Engine resource type not match, only support: Standard_CU/Memory_CU(only BATCH ExecType).", }, @@ -252,6 +241,7 @@ func resourceTencentCloudDlcDataEngine() *schema.Resource { "image_version_name": { Optional: true, + Computed: true, Type: schema.TypeString, Description: "Cluster image version name. 
Such as SuperSQL-P 1.1; SuperSQL-S 3.2, etc., do not upload, and create a cluster with the latest mirror version by default.", }, @@ -276,6 +266,7 @@ func resourceTencentCloudDlcDataEngine() *schema.Resource { "session_resource_template": { Optional: true, + Computed: true, Type: schema.TypeList, MaxItems: 1, Description: "For spark Batch ExecType, cluster session resource configuration template.", @@ -315,9 +306,9 @@ func resourceTencentCloudDlcDataEngineCreate(d *schema.ResourceData, meta interf logId := getLogId(contextNil) var ( - request = dlc.NewCreateDataEngineRequest() - response = dlc.NewCreateDataEngineResponse() - dataEngineId string + request = dlc.NewCreateDataEngineRequest() + response = dlc.NewCreateDataEngineResponse() + dataEngineId string dataEngineName string ) if v, ok := d.GetOk("engine_type"); ok { @@ -325,7 +316,7 @@ func resourceTencentCloudDlcDataEngineCreate(d *schema.ResourceData, meta interf } if v, ok := d.GetOk("data_engine_name"); ok { - dataEngineName=v.(string) + dataEngineName = v.(string) request.DataEngineName = helper.String(v.(string)) } @@ -341,6 +332,10 @@ func resourceTencentCloudDlcDataEngineCreate(d *schema.ResourceData, meta interf request.AutoResume = helper.Bool(v.(bool)) } + if v, ok := d.GetOkExists("size"); ok { + request.Size = helper.IntInt64(v.(int)) + } + if v, ok := d.GetOkExists("min_clusters"); ok { request.MinClusters = helper.IntInt64(v.(int)) } @@ -481,14 +476,35 @@ func resourceTencentCloudDlcDataEngineCreate(d *schema.ResourceData, meta interf } dataEngineId = *response.Response.DataEngineId - d.SetId(dataEngineName+FILED_SP+dataEngineId) + d.SetId(dataEngineName + FILED_SP + dataEngineId) - service := DlcService{client: meta.(*TencentCloudClient).apiV3Conn} - - conf := BuildStateChangeConf([]string{}, []string{2}, 5*readRetryTimeout, time.Second, service.DlcDataEngineStateRefreshFunc(d.Id(), []string{})) - - if _, e := conf.WaitForState(); e != nil { - return e + describeRequest := 
dlc.NewDescribeDataEngineRequest() + describeRequest.DataEngineName = helper.String(dataEngineName) + err = resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseDlcClient().DescribeDataEngine(describeRequest) + if e != nil { + return retryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, describeRequest.GetAction(), + describeRequest.ToJsonString(), result.ToJsonString()) + } + if result == nil || result.Response == nil || result.Response.DataEngine == nil { + e = fmt.Errorf("[DEBUG]%s api[%s] resopse is null, request body [%s], response body [%s]\n", logId, + describeRequest.GetAction(), describeRequest.ToJsonString(), result.ToJsonString()) + log.Println(e) + return resource.RetryableError(e) + } + if *result.Response.DataEngine.State != int64(2) && *result.Response.DataEngine.State != int64(1) { + e = fmt.Errorf("[DEBUG]%s api[%s] status [%v] not ready , request body [%s], response body [%s]\n", + logId, describeRequest.GetAction(), *result.Response.DataEngine.State, describeRequest.ToJsonString(), result.ToJsonString()) + log.Println(e) + return resource.RetryableError(e) + } + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s create dlc dataEngine failed, reason:%+v", logId, err) + return err } return resourceTencentCloudDlcDataEngineRead(d, meta) @@ -504,9 +520,12 @@ func resourceTencentCloudDlcDataEngineRead(d *schema.ResourceData, meta interfac service := DlcService{client: meta.(*TencentCloudClient).apiV3Conn} - dataEngineId := d.Id() - - dataEngine, err := service.DescribeDlcDataEngineById(ctx, dataEngineId) + idSplit := strings.Split(d.Id(), FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + dataEngineName := idSplit[0] + dataEngine, err := service.DescribeDlcDataEngineByName(ctx, dataEngineName) if err != nil { return err } @@ -557,22 +576,6 @@ func 
resourceTencentCloudDlcDataEngineRead(d *schema.ResourceData, meta interfac _ = d.Set("message", dataEngine.Message) } - if dataEngine.PayMode != nil { - _ = d.Set("pay_mode", dataEngine.PayMode) - } - - if dataEngine.TimeSpan != nil { - _ = d.Set("time_span", dataEngine.TimeSpan) - } - - if dataEngine.TimeUnit != nil { - _ = d.Set("time_unit", dataEngine.TimeUnit) - } - - if dataEngine.AutoRenew != nil { - _ = d.Set("auto_renew", dataEngine.AutoRenew) - } - if dataEngine.AutoSuspend != nil { _ = d.Set("auto_suspend", dataEngine.AutoSuspend) } @@ -619,18 +622,10 @@ func resourceTencentCloudDlcDataEngineRead(d *schema.ResourceData, meta interfac _ = d.Set("resource_type", dataEngine.ResourceType) } - if dataEngine. != nil { - _ = d.Set("data_engine_config_pairs", dataEngine.DataEngineConfigPairs) - } - if dataEngine.ImageVersionName != nil { _ = d.Set("image_version_name", dataEngine.ImageVersionName) } - if dataEngine.MainClusterName != nil { - _ = d.Set("main_cluster_name", dataEngine.MainClusterName) - } - if dataEngine.ElasticSwitch != nil { _ = d.Set("elastic_switch", dataEngine.ElasticSwitch) } @@ -672,11 +667,16 @@ func resourceTencentCloudDlcDataEngineUpdate(d *schema.ResourceData, meta interf request := dlc.NewUpdateDataEngineRequest() - dataEngineId := d.Id() - - request.DataEngineId = &dataEngineId + idSplit := strings.Split(d.Id(), FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + dataEngineName := idSplit[0] + request.DataEngineName = &dataEngineName - immutableArgs := []string{"engine_type", "data_engine_name", "cluster_type", "mode", "auto_resume", "min_clusters", "max_clusters", "default_data_engine", "cidr_block", "message", "pay_mode", "time_span", "time_unit", "auto_renew", "auto_suspend", "crontab_resume_suspend", "crontab_resume_suspend_strategy", "engine_exec_type", "max_concurrency", "tolerable_queue_time", "auto_suspend_time", "resource_type", "data_engine_config_pairs", "image_version_name", 
"main_cluster_name", "elastic_switch", "elastic_limit", "session_resource_template"} + immutableArgs := []string{"engine_type", "data_engine_name", "cluster_type", "mode", "default_data_engine", "cidr_block", + "pay_mode", "time_span", "time_unit", "auto_renew", "engine_exec_type", "tolerable_queue_time", + "resource_type", "data_engine_config_pairs", "image_version_name", "main_cluster_name"} for _, v := range immutableArgs { if d.HasChange(v) { @@ -684,34 +684,24 @@ func resourceTencentCloudDlcDataEngineUpdate(d *schema.ResourceData, meta interf } } - if d.HasChange("data_engine_name") { - if v, ok := d.GetOk("data_engine_name"); ok { - request.DataEngineName = helper.String(v.(string)) - } + if v, ok := d.GetOkExists("auto_resume"); ok { + request.AutoResume = helper.Bool(v.(bool)) } - if d.HasChange("auto_resume") { - if v, ok := d.GetOkExists("auto_resume"); ok { - request.AutoResume = helper.Bool(v.(bool)) - } + if v, ok := d.GetOkExists("size"); ok { + request.Size = helper.IntInt64(v.(int)) } - if d.HasChange("min_clusters") { - if v, ok := d.GetOkExists("min_clusters"); ok { - request.MinClusters = helper.IntInt64(v.(int)) - } + if v, ok := d.GetOkExists("min_clusters"); ok { + request.MinClusters = helper.IntInt64(v.(int)) } - if d.HasChange("max_clusters") { - if v, ok := d.GetOkExists("max_clusters"); ok { - request.MaxClusters = helper.IntInt64(v.(int)) - } + if v, ok := d.GetOkExists("max_clusters"); ok { + request.MaxClusters = helper.IntInt64(v.(int)) } - if d.HasChange("message") { - if v, ok := d.GetOk("message"); ok { - request.Message = helper.String(v.(string)) - } + if v, ok := d.GetOk("message"); ok { + request.Message = helper.String(v.(string)) } if d.HasChange("auto_suspend") { @@ -805,12 +795,33 @@ func resourceTencentCloudDlcDataEngineUpdate(d *schema.ResourceData, meta interf return err } - service := DlcService{client: meta.(*TencentCloudClient).apiV3Conn} - - conf := BuildStateChangeConf([]string{}, []string{"2"}, 5*readRetryTimeout, 
time.Second, service.DlcDataEngineStateRefreshFunc(d.Id(), []string{})) - - if _, e := conf.WaitForState(); e != nil { - return e + describeRequest := dlc.NewDescribeDataEngineRequest() + describeRequest.DataEngineName = helper.String(dataEngineName) + err = resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseDlcClient().DescribeDataEngine(describeRequest) + if e != nil { + return retryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, describeRequest.GetAction(), + describeRequest.ToJsonString(), result.ToJsonString()) + } + if result == nil || result.Response == nil || result.Response.DataEngine == nil { + e = fmt.Errorf("[DEBUG]%s api[%s] resopse is null, request body [%s], response body [%s]\n", logId, + request.GetAction(), request.ToJsonString(), result.ToJsonString()) + log.Println(e) + return resource.RetryableError(e) + } + if *result.Response.DataEngine.State != int64(2) && *result.Response.DataEngine.State != int64(1) { + e = fmt.Errorf("[DEBUG]%s api[%s] status [%v] not ready , request body [%s], response body [%s]\n", + logId, describeRequest.GetAction(), *result.Response.DataEngine.State, describeRequest.ToJsonString(), result.ToJsonString()) + log.Println(e) + return resource.RetryableError(e) + } + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s update dlc dataEngine failed, reason:%+v", logId, err) + return err } return resourceTencentCloudDlcDataEngineRead(d, meta) @@ -824,9 +835,13 @@ func resourceTencentCloudDlcDataEngineDelete(d *schema.ResourceData, meta interf ctx := context.WithValue(context.TODO(), logIdKey, logId) service := DlcService{client: meta.(*TencentCloudClient).apiV3Conn} - dataEngineId := d.Id() + idSplit := strings.Split(d.Id(), FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + dataEngineName := idSplit[0] - if err := service.DeleteDlcDataEngineById(ctx, 
dataEngineId); err != nil { + if err := service.DeleteDlcDataEngineByName(ctx, dataEngineName); err != nil { return err } diff --git a/tencentcloud/resource_tc_dlc_data_engine_test.go b/tencentcloud/resource_tc_dlc_data_engine_test.go index e1bc3065da..93b53aeee7 100644 --- a/tencentcloud/resource_tc_dlc_data_engine_test.go +++ b/tencentcloud/resource_tc_dlc_data_engine_test.go @@ -1,8 +1,9 @@ package tencentcloud import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccTencentCloudDlcDataEngineResource_basic(t *testing.T) { @@ -15,12 +16,53 @@ func TestAccTencentCloudDlcDataEngineResource_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDlcDataEngine, - Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_dlc_data_engine.data_engine", "id")), + Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_dlc_data_engine.data_engine", "id"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "engine_type", "spark"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "data_engine_name", "testSpark"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "cluster_type", "spark_cu"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "mode", "1"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "auto_resume", "false"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "size", "16"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "pay_mode", "0"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "min_clusters", "1"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "max_clusters", "1"), + 
resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "default_data_engine", "false"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "cidr_block", "10.255.0.0/16"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "message", "test spark1"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "time_span", "1"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "time_unit", "h"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "auto_suspend", "false"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "crontab_resume_suspend", "0"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "engine_exec_type", "BATCH"), + ), + }, { + Config: testAccDlcDataEngineUpdate, + Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_dlc_data_engine.data_engine", "id"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "engine_type", "spark"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "data_engine_name", "testSpark"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "cluster_type", "spark_cu"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "mode", "1"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "auto_resume", "false"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "size", "16"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "pay_mode", "0"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "min_clusters", "1"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "max_clusters", "1"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", 
"default_data_engine", "false"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "cidr_block", "10.255.0.0/16"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "message", "test spark"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "time_span", "1"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "time_unit", "h"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "auto_suspend", "false"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "crontab_resume_suspend", "0"), + resource.TestCheckResourceAttr("tencentcloud_dlc_data_engine.data_engine", "engine_exec_type", "BATCH"), + ), }, + { - ResourceName: "tencentcloud_dlc_data_engine.data_engine", - ImportState: true, - ImportStateVerify: true, + ResourceName: "tencentcloud_dlc_data_engine.data_engine", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"pay_mode", "size", "time_span", "time_unit"}, }, }, }) @@ -32,42 +74,43 @@ resource "tencentcloud_dlc_data_engine" "data_engine" { engine_type = "spark" data_engine_name = "testSpark" cluster_type = "spark_cu" - mode = 2 + mode = 1 auto_resume = false + size = 16 + pay_mode = 0 min_clusters = 1 - max_clusters = 10 + max_clusters = 1 default_data_engine = false - cidr_block = "192.0.2.1/24" - message = "test spark" - pay_mode = 1 - time_span = 3600 - time_unit = "m" - auto_renew = 0 + cidr_block = "10.255.0.0/16" + message = "test spark1" + time_span = 1 + time_unit = "h" auto_suspend = false crontab_resume_suspend = 0 - crontab_resume_suspend_strategy { - resume_time = "1000000-08:00:00" - suspend_time = "" - suspend_strategy = + engine_exec_type = "BATCH" +} - } - engine_exec_type = "SQL" - max_concurrency = 5 - tolerable_queue_time = 0 - auto_suspend_time = 10 - resource_type = "Standard_CU" - data_engine_config_pairs = - image_version_name = "" - 
main_cluster_name = "testSpark" - elastic_switch = false - elastic_limit = 0 - session_resource_template { - driver_size = "small" - executor_size = "small" - executor_nums = 1 - executor_max_numbers = 1 +` +const testAccDlcDataEngineUpdate = ` - } +resource "tencentcloud_dlc_data_engine" "data_engine" { + engine_type = "spark" + data_engine_name = "testSpark" + cluster_type = "spark_cu" + mode = 1 + auto_resume = false + size = 16 + pay_mode = 0 + min_clusters = 1 + max_clusters = 1 + default_data_engine = false + cidr_block = "10.255.0.0/16" + message = "test spark" + time_span = 1 + time_unit = "h" + auto_suspend = false + crontab_resume_suspend = 0 + engine_exec_type = "BATCH" } ` diff --git a/tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation.go b/tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation.go new file mode 100644 index 0000000000..2151749c8a --- /dev/null +++ b/tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation.go @@ -0,0 +1,123 @@ +/* +Provides a resource to create a dlc rollback_data_engine_image + +Example Usage + +```hcl +data "tencentcloud_dlc_check_data_engine_image_can_be_rollback" "check_data_engine_image_can_be_rollback" { + data_engine_id = "DataEngine-cgkvbas6" +} +resource "tencentcloud_dlc_rollback_data_engine_image_operation" "rollback_data_engine_image" { + data_engine_id = "DataEngine-cgkvbas6" + from_record_id = data.tencentcloud_dlc_check_data_engine_image_can_be_rollback.check_data_engine_image_can_be_rollback.from_record_id + to_record_id = data.tencentcloud_dlc_check_data_engine_image_can_be_rollback.check_data_engine_image_can_be_rollback.to_record_id +} +``` + +Import + +dlc rollback_data_engine_image can be imported using the id, e.g. 
+ +``` +terraform import tencentcloud_dlc_rollback_data_engine_image_operation.rollback_data_engine_image rollback_data_engine_image_id +``` +*/ +package tencentcloud + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dlc "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dlc/v20210125" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func resourceTencentCloudDlcRollbackDataEngineImageOperation() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudDlcRollbackDataEngineImageCreateOperation, + Read: resourceTencentCloudDlcRollbackDataEngineImageReadOperation, + Delete: resourceTencentCloudDlcRollbackDataEngineImageDeleteOperation, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "data_engine_id": { + Required: true, + ForceNew: true, + Type: schema.TypeString, + Description: "Engine unique id.", + }, + + "from_record_id": { + Optional: true, + ForceNew: true, + Type: schema.TypeString, + Description: "Log record id before rollback.", + }, + + "to_record_id": { + Optional: true, + ForceNew: true, + Type: schema.TypeString, + Description: "Log record id after rollback.", + }, + }, + } +} + +func resourceTencentCloudDlcRollbackDataEngineImageCreateOperation(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_dlc_rollback_data_engine_image_operation.create")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + + var ( + request = dlc.NewRollbackDataEngineImageRequest() + dataEngineId string + ) + if v, ok := d.GetOk("data_engine_id"); ok { + dataEngineId = v.(string) + request.DataEngineId = helper.String(v.(string)) + } + + if v, ok := d.GetOk("from_record_id"); ok { + request.FromRecordId = helper.String(v.(string)) + 
} + + if v, ok := d.GetOk("to_record_id"); ok { + request.ToRecordId = helper.String(v.(string)) + } + + err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseDlcClient().RollbackDataEngineImage(request) + if e != nil { + return retryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s operate dlc rollbackDataEngineImage failed, reason:%+v", logId, err) + return err + } + + d.SetId(dataEngineId) + + return resourceTencentCloudDlcRollbackDataEngineImageReadOperation(d, meta) +} + +func resourceTencentCloudDlcRollbackDataEngineImageReadOperation(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_dlc_rollback_data_engine_image_operation.read")() + defer inconsistentCheck(d, meta)() + + return nil +} + +func resourceTencentCloudDlcRollbackDataEngineImageDeleteOperation(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_dlc_rollback_data_engine_image_operation.delete")() + defer inconsistentCheck(d, meta)() + + return nil +} diff --git a/tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation_test.go b/tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation_test.go new file mode 100644 index 0000000000..112fe2bc69 --- /dev/null +++ b/tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation_test.go @@ -0,0 +1,37 @@ +package tencentcloud + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccTencentCloudDlcRollbackDataEngineImageResource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: 
testAccDlcRollbackDataEngineImage, + Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_dlc_rollback_data_engine_image_operation.rollback_data_engine_image", "id"), + resource.TestCheckResourceAttr("tencentcloud_dlc_rollback_data_engine_image_operation.rollback_data_engine_image", "data_engine_id", "DataEngine-cgkvbas6"), + resource.TestCheckResourceAttrSet("tencentcloud_dlc_rollback_data_engine_image_operation.rollback_data_engine_image", "from_record_id"), + resource.TestCheckResourceAttrSet("tencentcloud_dlc_rollback_data_engine_image_operation.rollback_data_engine_image", "to_record_id")), + }, + }, + }) +} + +const testAccDlcRollbackDataEngineImage = ` +data "tencentcloud_dlc_check_data_engine_image_can_be_rollback" "check_data_engine_image_can_be_rollback" { + data_engine_id = "DataEngine-cgkvbas6" +} +resource "tencentcloud_dlc_rollback_data_engine_image_operation" "rollback_data_engine_image" { + data_engine_id = "DataEngine-cgkvbas6" + from_record_id = data.tencentcloud_dlc_check_data_engine_image_can_be_rollback.check_data_engine_image_can_be_rollback.from_record_id + to_record_id = data.tencentcloud_dlc_check_data_engine_image_can_be_rollback.check_data_engine_image_can_be_rollback.to_record_id +} +` diff --git a/tencentcloud/service_tencentcloud_dlc.go b/tencentcloud/service_tencentcloud_dlc.go index 503bd6d77c..23662784a4 100644 --- a/tencentcloud/service_tencentcloud_dlc.go +++ b/tencentcloud/service_tencentcloud_dlc.go @@ -337,12 +337,14 @@ func (me *DlcService) DescribeDlcDescribeUserInfoByFilter(ctx context.Context, p return } -func (me *DlcService) DescribeDlcDataEngineById(ctx context.Context, dataEngineId string) (dataEngine *dlc.DataEngineInfo, errRet error) { +func (me *DlcService) DescribeDlcDataEngineByName(ctx context.Context, dataEngineName string) (dataEngine *dlc.DataEngineInfo, errRet error) { logId := getLogId(ctx) request := dlc.NewDescribeDataEnginesRequest() - request. 
= &dataEngineId - + item := &dlc.Filter{ + Name: helper.String("data-engine-name"), + Values: []*string{helper.String(dataEngineName)}} + request.Filters = []*dlc.Filter{item} defer func() { if errRet != nil { log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) @@ -358,19 +360,19 @@ func (me *DlcService) DescribeDlcDataEngineById(ctx context.Context, dataEngineI } log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) - if len(response.Response.DataEngineInfo) < 1 { + if response == nil || response.Response == nil || len(response.Response.DataEngines) < 1 { return } - dataEngine = response.Response.DataEngineInfo[0] + dataEngine = response.Response.DataEngines[0] return } -func (me *DlcService) DeleteDlcDataEngineById(ctx context.Context, dataEngineId string) (errRet error) { +func (me *DlcService) DeleteDlcDataEngineByName(ctx context.Context, dataEngineName string) (errRet error) { logId := getLogId(ctx) request := dlc.NewDeleteDataEngineRequest() - request.DataEngineId = &dataEngineId + request.DataEngineNames = []*string{&dataEngineName} defer func() { if errRet != nil { @@ -389,17 +391,36 @@ func (me *DlcService) DeleteDlcDataEngineById(ctx context.Context, dataEngineId return } +func (me *DlcService) DescribeDlcCheckDataEngineImageCanBeUpgradeByFilter(ctx context.Context, param map[string]interface{}) (checkDataEngineImageCanBeUpgrade *dlc.CheckDataEngineImageCanBeUpgradeResponseParams, errRet error) { + var ( + logId = getLogId(ctx) + request = dlc.NewCheckDataEngineImageCanBeUpgradeRequest() + ) -func (me *DlcService) DlcDataEngineStateRefreshFunc(dataEngineId string, failStates []string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - ctx := contextNil - - object, err := me.DescribeDataEngine(ctx, dataEngineId) + defer func() { + if errRet 
!= nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() - if err != nil { - return nil, "", err + for k, v := range param { + if k == "DataEngineId" { + request.DataEngineId = v.(*string) } + } + + ratelimit.Check(request.GetAction()) - return object, helper.PString(object.State), nil + response, err := me.client.UseDlcClient().CheckDataEngineImageCanBeUpgrade(request) + if err != nil { + errRet = err + return } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || response.Response == nil { + return + } + checkDataEngineImageCanBeUpgrade = response.Response + return } diff --git a/website/docs/d/dlc_check_data_engine_image_can_be_upgrade.html.markdown b/website/docs/d/dlc_check_data_engine_image_can_be_upgrade.html.markdown new file mode 100644 index 0000000000..73cc13707a --- /dev/null +++ b/website/docs/d/dlc_check_data_engine_image_can_be_upgrade.html.markdown @@ -0,0 +1,36 @@ +--- +subcategory: "Data Lake Compute(DLC)" +layout: "tencentcloud" +page_title: "TencentCloud: tencentcloud_dlc_check_data_engine_image_can_be_upgrade" +sidebar_current: "docs-tencentcloud-datasource-dlc_check_data_engine_image_can_be_upgrade" +description: |- + Use this data source to query detailed information of dlc check_data_engine_image_can_be_upgrade +--- + +# tencentcloud_dlc_check_data_engine_image_can_be_upgrade + +Use this data source to query detailed information of dlc check_data_engine_image_can_be_upgrade + +## Example Usage + +```hcl +data "tencentcloud_dlc_check_data_engine_image_can_be_upgrade" "check_data_engine_image_can_be_upgrade" { + data_engine_id = "DataEngine-cgkvbas6" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `data_engine_id` - (Required, String) Engine unique id. 
+* `result_output_file` - (Optional, String) Used to save results. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `child_image_version_id` - The latest image version id that can be upgraded. +* `is_upgrade` - Is it possible to upgrade. + + diff --git a/website/docs/r/dlc_data_engine.html.markdown b/website/docs/r/dlc_data_engine.html.markdown new file mode 100644 index 0000000000..20e8d1aaee --- /dev/null +++ b/website/docs/r/dlc_data_engine.html.markdown @@ -0,0 +1,105 @@ +--- +subcategory: "Data Lake Compute(DLC)" +layout: "tencentcloud" +page_title: "TencentCloud: tencentcloud_dlc_data_engine" +sidebar_current: "docs-tencentcloud-resource-dlc_data_engine" +description: |- + Provides a resource to create a dlc data_engine +--- + +# tencentcloud_dlc_data_engine + +Provides a resource to create a dlc data_engine + +## Example Usage + +```hcl +resource "tencentcloud_dlc_data_engine" "data_engine" { + engine_type = "spark" + data_engine_name = "testSpark" + cluster_type = "spark_cu" + mode = 1 + auto_resume = false + size = 16 + pay_mode = 0 + min_clusters = 1 + max_clusters = 1 + default_data_engine = false + cidr_block = "10.255.0.0/16" + message = "test spark1" + time_span = 1 + time_unit = "h" + auto_suspend = false + crontab_resume_suspend = 0 + engine_exec_type = "BATCH" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `auto_resume` - (Required, Bool) Whether to automatically start the cluster, prepay not support. +* `cluster_type` - (Required, String) Engine cluster type, only support: spark_cu/presto_cu. +* `data_engine_name` - (Required, String) Engine name. +* `engine_type` - (Required, String) Engine type, only support: spark/presto. +* `mode` - (Required, Int) Engine mode, only support 1: ByAmount, 2: YearlyAndMonthly. +* `auto_renew` - (Optional, Int) Engine auto renew, only support 0: Default, 1: AutoRenewON, 2: AutoRenewOFF. 
+* `auto_suspend_time` - (Optional, Int) Cluster automatic suspension time, default 10 minutes.
+* `auto_suspend` - (Optional, Bool) Whether to automatically suspend the cluster; not supported for prepaid engines.
+* `cidr_block` - (Optional, String) Engine VPC network segment, e.g. 192.0.2.1/24.
+* `crontab_resume_suspend_strategy` - (Optional, List) Engine scheduled suspend/resume strategy; when `auto_suspend` is true, the crontab strategy must be disabled.
+* `crontab_resume_suspend` - (Optional, Int) Engine crontab resume or suspend strategy, only support: 0: Wait(default), 1: Kill.
+* `data_engine_config_pairs` - (Optional, List) Cluster advanced configuration.
+* `default_data_engine` - (Optional, Bool) Whether it is the default virtual cluster.
+* `elastic_limit` - (Optional, Int) For spark Batch ExecType, yearly and monthly cluster elastic limit.
+* `elastic_switch` - (Optional, Bool) For spark Batch ExecType, whether to enable elasticity for a yearly/monthly cluster.
+* `engine_exec_type` - (Optional, String) Engine exec type, only support SQL(default) or BATCH.
+* `image_version_name` - (Optional, String) Cluster image version name, such as SuperSQL-P 1.1 or SuperSQL-S 3.2. If not specified, the cluster is created with the latest image version by default.
+* `main_cluster_name` - (Optional, String) Primary cluster name, specified when creating a disaster recovery cluster.
+* `max_clusters` - (Optional, Int) Engine maximum cluster size; MaxClusters must be less than or equal to 10 and greater than MinClusters.
+* `max_concurrency` - (Optional, Int) Maximum number of concurrent tasks in a single cluster, default 5.
+* `message` - (Optional, String) Engine description information.
+* `min_clusters` - (Optional, Int) Engine minimum cluster size; must be greater than or equal to 1, and MaxClusters must be greater than MinClusters.
+* `pay_mode` - (Optional, Int) Engine pay mode type, only support 0: postPay, 1: prePay(default).
+* `resource_type` - (Optional, String) Engine resource type, only supports: Standard_CU/Memory_CU (Memory_CU only for BATCH ExecType).
+* `session_resource_template` - (Optional, List) For spark Batch ExecType, cluster session resource configuration template.
+* `size` - (Optional, Int) Cluster size. Required when updating.
+* `time_span` - (Optional, Int) Engine TimeSpan, prePay: minimum of 1, representing one month of purchasing resources, with a maximum of 120, default 3600, postPay: fixed fee of 3600.
+* `time_unit` - (Optional, String) Engine TimeUnit, prePay: use m(default), postPay: use h.
+* `tolerable_queue_time` - (Optional, Int) Tolerable queuing time, default 0. Scaling may be triggered when tasks are queued for longer than the tolerable time. If this parameter is 0, capacity expansion may be triggered immediately once a task is queued.
+
+The `crontab_resume_suspend_strategy` object supports the following:
+
+* `resume_time` - (Optional, String) Scheduled pull-up time: For example: 8 o&#39;clock on Monday is expressed as 1000000-08:00:00.
+* `suspend_strategy` - (Optional, Int) Suspend configuration: 0 (default): wait for the task to end before suspending, 1: force suspend.
+* `suspend_time` - (Optional, String) Scheduled suspension time: For example: 20 o&#39;clock on Monday is expressed as 1000000-20:00:00.
+
+The `data_engine_config_pairs` object supports the following:
+
+* `config_item` - (Required, String) Configuration items.
+* `config_value` - (Required, String) Configuration value.
+
+The `session_resource_template` object supports the following:
+
+* `driver_size` - (Optional, String) Engine driver size specification only supports: small/medium/large/xlarge/m.small/m.medium/m.large/m.xlarge.
+* `executor_max_numbers` - (Optional, Int) Specify the executor max number (in a dynamic configuration scenario), the minimum value is 1, and the maximum value is less than the cluster specification (when ExecutorMaxNumbers is less than ExecutorNums, the value is set to ExecutorNums). +* `executor_nums` - (Optional, Int) Specify the number of executors. The minimum value is 1 and the maximum value is less than the cluster specification. +* `executor_size` - (Optional, String) Engine executor size specification only supports: small/medium/large/xlarge/m.small/m.medium/m.large/m.xlarge. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - ID of the resource. + + + +## Import + +dlc data_engine can be imported using the id, e.g. + +``` +terraform import tencentcloud_dlc_data_engine.data_engine data_engine_id +``` + diff --git a/website/docs/r/dlc_rollback_data_engine_image_operation.html.markdown b/website/docs/r/dlc_rollback_data_engine_image_operation.html.markdown new file mode 100644 index 0000000000..99add0879c --- /dev/null +++ b/website/docs/r/dlc_rollback_data_engine_image_operation.html.markdown @@ -0,0 +1,50 @@ +--- +subcategory: "Data Lake Compute(DLC)" +layout: "tencentcloud" +page_title: "TencentCloud: tencentcloud_dlc_rollback_data_engine_image_operation" +sidebar_current: "docs-tencentcloud-resource-dlc_rollback_data_engine_image_operation" +description: |- + Provides a resource to create a dlc rollback_data_engine_image +--- + +# tencentcloud_dlc_rollback_data_engine_image_operation + +Provides a resource to create a dlc rollback_data_engine_image + +## Example Usage + +```hcl +data "tencentcloud_dlc_check_data_engine_image_can_be_rollback" "check_data_engine_image_can_be_rollback" { + data_engine_id = "DataEngine-cgkvbas6" +} +resource "tencentcloud_dlc_rollback_data_engine_image_operation" "rollback_data_engine_image" { + data_engine_id = "DataEngine-cgkvbas6" + from_record_id = 
data.tencentcloud_dlc_check_data_engine_image_can_be_rollback.check_data_engine_image_can_be_rollback.from_record_id + to_record_id = data.tencentcloud_dlc_check_data_engine_image_can_be_rollback.check_data_engine_image_can_be_rollback.to_record_id +} +``` + +## Argument Reference + +The following arguments are supported: + +* `data_engine_id` - (Required, String, ForceNew) Engine unique id. +* `from_record_id` - (Optional, String, ForceNew) Log record id before rollback. +* `to_record_id` - (Optional, String, ForceNew) Log record id after rollback. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - ID of the resource. + + + +## Import + +dlc rollback_data_engine_image can be imported using the id, e.g. + +``` +terraform import tencentcloud_dlc_rollback_data_engine_image_operation.rollback_data_engine_image rollback_data_engine_image_id +``` + diff --git a/website/tencentcloud.erb b/website/tencentcloud.erb index 891d49cd18..324c25a208 100644 --- a/website/tencentcloud.erb +++ b/website/tencentcloud.erb @@ -1773,6 +1773,9 @@
  • tencentcloud_dlc_describe_user_type
  • +
  • + tencentcloud_dlc_check_data_engine_image_can_be_upgrade +
  • @@ -1781,6 +1784,12 @@
  • tencentcloud_dlc_add_users_to_work_group_attachment
  • +
  • + tencentcloud_dlc_data_engine +
  • +
  • + tencentcloud_dlc_rollback_data_engine_image_operation +
  • tencentcloud_dlc_store_location_config
  • From 8ec6dc5abac479b6fc354a50884af310fb741713 Mon Sep 17 00:00:00 2001 From: WeiMengXS Date: Tue, 24 Oct 2023 20:53:14 +0800 Subject: [PATCH 3/5] feat: changelog --- .changelog/2245.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .changelog/2245.txt diff --git a/.changelog/2245.txt b/.changelog/2245.txt new file mode 100644 index 0000000000..57829b7b94 --- /dev/null +++ b/.changelog/2245.txt @@ -0,0 +1,11 @@ +```release-note:new-data-source +tencentcloud_dlc_check_data_engine_image_can_be_upgrade +``` + +```release-note:new-resource +tencentcloud_dlc_data_engine +``` + +```release-note:new-resource +tencentcloud_dlc_rollback_data_engine_image_operation +``` \ No newline at end of file From 1ceef400cb1fe82a463144aa0784e9159895920c Mon Sep 17 00:00:00 2001 From: WeiMengXS Date: Tue, 24 Oct 2023 21:05:11 +0800 Subject: [PATCH 4/5] feat: changelog --- tencentcloud/provider.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tencentcloud/provider.go b/tencentcloud/provider.go index b7f7099a45..da6b18e634 100644 --- a/tencentcloud/provider.go +++ b/tencentcloud/provider.go @@ -1514,6 +1514,7 @@ Tencent Cloud Organization (TCO) tencentcloud_organization_org_identity tencentcloud_organization_org_member_email tencentcloud_organization_org_member_auth_identity_attachment + tencentcloud_organization_org_member_policy_attachment tencentcloud_organization_policy_sub_account_attachment tencentcloud_organization_quit_organization_operation @@ -3305,6 +3306,7 @@ func Provider() *schema.Provider { "tencentcloud_organization_instance": resourceTencentCloudOrganizationOrganization(), "tencentcloud_organization_policy_sub_account_attachment": resourceTencentCloudOrganizationPolicySubAccountAttachment(), "tencentcloud_organization_org_member_auth_identity_attachment": resourceTencentCloudOrganizationOrgMemberAuthIdentityAttachment(), + "tencentcloud_organization_org_member_policy_attachment": resourceTencentCloudOrganizationOrgMemberPolicyAttachment(), 
"tencentcloud_dbbrain_sql_filter": resourceTencentCloudDbbrainSqlFilter(), "tencentcloud_dbbrain_security_audit_log_export_task": resourceTencentCloudDbbrainSecurityAuditLogExportTask(), "tencentcloud_dbbrain_db_diag_report_task": resourceTencentCloudDbbrainDbDiagReportTask(), From 213160365bdec3c7b4905accbd6e0f90d93cbe54 Mon Sep 17 00:00:00 2001 From: WeiMengXS Date: Tue, 24 Oct 2023 21:46:01 +0800 Subject: [PATCH 5/5] feat: changelog --- ...resource_tc_dlc_rollback_data_engine_image_operation_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation_test.go b/tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation_test.go index 112fe2bc69..ab9ee7cf8c 100644 --- a/tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation_test.go +++ b/tencentcloud/resource_tc_dlc_rollback_data_engine_image_operation_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) -func TestAccTencentCloudDlcRollbackDataEngineImageResource_basic(t *testing.T) { +func TestAccTencentCloudDlcRollbackDataEngineImageOperationResource_basic(t *testing.T) { t.Parallel() resource.Test(t, resource.TestCase{ PreCheck: func() {