
Commit 2c2e81e

add
1 parent 5b9ee31 commit 2c2e81e

1 file changed: +226 -38 lines changed

tencentcloud/services/cvm/resource_tc_instance.go

Lines changed: 226 additions & 38 deletions
@@ -2,7 +2,9 @@ package cvm
 
 import (
 	"context"
+	"crypto/sha256"
 	"encoding/base64"
+	"encoding/hex"
 	"fmt"
 	"log"
 	"sort"
@@ -17,6 +19,7 @@ import (
 
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	cbs "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cbs/v20170312"
 	sdkErrors "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
 	cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312"
 
@@ -270,9 +273,8 @@ func ResourceTencentCloudInstance() *schema.Resource {
 						Description: "Data disk type. For more information about limits on different data disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: LOCAL_BASIC: local disk, LOCAL_SSD: local SSD disk, LOCAL_NVME: local NVME disk, specified in the InstanceType, LOCAL_PRO: local HDD disk, specified in the InstanceType, CLOUD_BASIC: HDD cloud disk, CLOUD_PREMIUM: Premium Cloud Storage, CLOUD_SSD: SSD, CLOUD_HSSD: Enhanced SSD, CLOUD_TSSD: Tremendous SSD, CLOUD_BSSD: Balanced SSD.",
 					},
 					"data_disk_size": {
-						Type:     schema.TypeInt,
-						Required: true,
-						//ForceNew: true,
+						Type:        schema.TypeInt,
+						Required:    true,
 						Description: "Size of the data disk, and unit is GB.",
 					},
 					"data_disk_name": {
@@ -1008,16 +1010,28 @@ func resourceTencentCloudInstanceRead(d *schema.ResourceData, meta interface{})
 	}
 
 	// set data_disks
-	var hasDataDisks, isCombineDataDisks bool
+	var hasDataDisks, isCombineDataDisks, hasDataDisksName bool
 	dataDiskList := make([]map[string]interface{}, 0, len(instance.DataDisks))
 	diskSizeMap := map[string]*uint64{}
 	diskOrderMap := make(map[string]int)
 
-	if _, ok := d.GetOk("data_disks"); ok {
+	if v, ok := d.GetOk("data_disks"); ok {
 		hasDataDisks = true
+		// check whether any data disk has a name configured
+		dataDisks := v.([]interface{})
+		for _, item := range dataDisks {
+			value := item.(map[string]interface{})
+			if v, ok := value["data_disk_name"]; ok && v != nil {
+				diskName := v.(string)
+				if diskName != "" {
+					hasDataDisksName = true
+				}
+			}
+		}
 	}
 
-	if len(instance.DataDisks) > 0 {
+	// scene without configured disk names
+	if len(instance.DataDisks) > 0 && !hasDataDisksName {
 		var diskIds []*string
 		for i := range instance.DataDisks {
 			id := instance.DataDisks[i].DiskId
@@ -1065,48 +1079,206 @@ func resourceTencentCloudInstanceRead(d *schema.ResourceData, meta interface{})
 		if err != nil {
 			return err
 		}
-	}
 
-	tmpDataDisks := make([]interface{}, 0, len(instance.DataDisks))
-	if v, ok := d.GetOk("data_disks"); ok {
-		tmpDataDisks = v.([]interface{})
-	}
+		tmpDataDisks := make([]interface{}, 0, len(instance.DataDisks))
+		if v, ok := d.GetOk("data_disks"); ok {
+			tmpDataDisks = v.([]interface{})
+		}
+
+		for index, disk := range instance.DataDisks {
+			dataDisk := make(map[string]interface{}, 5)
+			dataDisk["data_disk_id"] = disk.DiskId
+			if disk.DiskId == nil {
+				dataDisk["data_disk_size"] = disk.DiskSize
+			} else if size, ok := diskSizeMap[*disk.DiskId]; ok {
+				dataDisk["data_disk_size"] = size
+			}
 
-	for index, disk := range instance.DataDisks {
-		dataDisk := make(map[string]interface{}, 5)
-		dataDisk["data_disk_id"] = disk.DiskId
-		if disk.DiskId == nil {
-			dataDisk["data_disk_size"] = disk.DiskSize
-		} else if size, ok := diskSizeMap[*disk.DiskId]; ok {
-			dataDisk["data_disk_size"] = size
+			dataDisk["delete_with_instance_prepaid"] = false
+			if len(tmpDataDisks) == len(instance.DataDisks) {
+				tmpDataDisk := tmpDataDisks[index].(map[string]interface{})
+				if deleteWithInstancePrepaid, ok := tmpDataDisk["delete_with_instance_prepaid"]; ok {
+					deleteWithInstancePrepaidBool := deleteWithInstancePrepaid.(bool)
+					dataDisk["delete_with_instance_prepaid"] = deleteWithInstancePrepaidBool
+				}
+			}
+
+			dataDisk["data_disk_type"] = disk.DiskType
+			dataDisk["data_disk_snapshot_id"] = disk.SnapshotId
+			dataDisk["delete_with_instance"] = disk.DeleteWithInstance
+			dataDisk["encrypt"] = disk.Encrypt
+			dataDisk["throughput_performance"] = disk.ThroughputPerformance
+			dataDiskList = append(dataDiskList, dataDisk)
 		}
 
-		dataDisk["delete_with_instance_prepaid"] = false
-		if len(tmpDataDisks) == len(instance.DataDisks) {
-			tmpDataDisk := tmpDataDisks[index].(map[string]interface{})
-			if deleteWithInstancePrepaid, ok := tmpDataDisk["delete_with_instance_prepaid"]; ok {
-				deleteWithInstancePrepaidBool := deleteWithInstancePrepaid.(bool)
-				dataDisk["delete_with_instance_prepaid"] = deleteWithInstancePrepaidBool
+		if hasDataDisks && !isCombineDataDisks {
+			sort.SliceStable(dataDiskList, func(idx1, idx2 int) bool {
+				dataDiskIdIdx1 := *dataDiskList[idx1]["data_disk_id"].(*string)
+				dataDiskIdIdx2 := *dataDiskList[idx2]["data_disk_id"].(*string)
+				return diskOrderMap[dataDiskIdIdx1] < diskOrderMap[dataDiskIdIdx2]
+			})
+		}
+
+		_ = d.Set("data_disks", dataDiskList)
+	} else if len(instance.DataDisks) > 0 && hasDataDisksName {
+		// scene with configured disk names
+		dDiskHash := make([]map[string]interface{}, 0)
+		// get source disk hash
+		if v, ok := d.GetOk("data_disks"); ok {
+			dataDisks := v.([]interface{})
+			for index, item := range dataDisks {
+				value := item.(map[string]interface{})
+				tmpMap := make(map[string]interface{})
+				diskName := string(index)
+				diskType := value["data_disk_type"].(string)
+				diskSize := int64(value["data_disk_size"].(int))
+				deleteWithInstance := value["delete_with_instance"].(bool)
+				encrypt := value["encrypt"].(bool)
+				if v, ok := value["data_disk_name"].(string); ok && v != "" {
+					diskName = v
+				}
+
+				diskObj := diskHash{
+					diskType:           diskType,
+					diskSize:           diskSize,
+					deleteWithInstance: deleteWithInstance,
+					encrypt:            encrypt,
+				}
+
+				// set hash
+				tmpMap[diskName] = getDataDiskHash(diskObj)
+				tmpMap["index"] = index
+				tmpMap["flag"] = 0
+				dDiskHash = append(dDiskHash, tmpMap)
 			}
 		}
 
-		dataDisk["data_disk_type"] = disk.DiskType
-		dataDisk["data_disk_snapshot_id"] = disk.SnapshotId
-		dataDisk["delete_with_instance"] = disk.DeleteWithInstance
-		dataDisk["encrypt"] = disk.Encrypt
-		dataDisk["throughput_performance"] = disk.ThroughputPerformance
-		dataDiskList = append(dataDiskList, dataDisk)
-	}
+		tmpDataDiskMap := make(map[int]interface{}, 0)
+		var diskIds []*string
+		var cbsDisks []*cbs.Disk
+		for i := range instance.DataDisks {
+			id := instance.DataDisks[i].DiskId
+			if id == nil {
+				continue
+			}
+
+			if strings.HasPrefix(*id, "disk-") {
+				diskIds = append(diskIds, id)
+			}
+		}
+
+		err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
+			cbsDisks, err = cbsService.DescribeDiskList(ctx, diskIds)
+			if err != nil {
+				return resource.NonRetryableError(err)
+			}
 
-	if hasDataDisks && !isCombineDataDisks {
-		sort.SliceStable(dataDiskList, func(idx1, idx2 int) bool {
-			dataDiskIdIdx1 := *dataDiskList[idx1]["data_disk_id"].(*string)
-			dataDiskIdIdx2 := *dataDiskList[idx2]["data_disk_id"].(*string)
-			return diskOrderMap[dataDiskIdIdx1] < diskOrderMap[dataDiskIdIdx2]
+			for i := range cbsDisks {
+				disk := cbsDisks[i]
+				if *disk.DiskState == "EXPANDING" {
+					return resource.RetryableError(fmt.Errorf("data_disk[%d] is expanding", i))
+				}
+			}
+
+			return nil
 		})
-	}
 
-	_ = d.Set("data_disks", dataDiskList)
+		if err != nil {
+			return err
+		}
+
+		// update instance DataDisks
+		for _, cvmDisk := range instance.DataDisks {
+			for _, cbsDisk := range cbsDisks {
+				if *cvmDisk.DiskId == *cbsDisk.DiskId {
+					dName := *cbsDisk.DiskName
+					cvmDisk.DiskName = &dName
+					break
+				}
+			}
+		}
+
+		// has disk name first
+		for _, disk := range instance.DataDisks {
+			for _, hashItem := range dDiskHash {
+				diskName := *disk.DiskName
+				tmpHash := getDataDiskHash(diskHash{
+					diskType:           *disk.DiskType,
+					diskSize:           *disk.DiskSize,
+					deleteWithInstance: *disk.DeleteWithInstance,
+					encrypt:            *disk.Encrypt,
+				})
+
+				// get disk name
+				if _, ok := hashItem[diskName]; ok {
+					// check hash and flag
+					if tmpHash == hashItem[diskName] && hashItem["flag"] == 0 {
+						dataDisk := make(map[string]interface{}, 5)
+						dataDisk["data_disk_id"] = disk.DiskId
+						dataDisk["data_disk_size"] = disk.DiskSize
+						dataDisk["data_disk_name"] = disk.DiskName
+						dataDisk["data_disk_type"] = disk.DiskType
+						dataDisk["data_disk_snapshot_id"] = disk.SnapshotId
+						dataDisk["delete_with_instance"] = disk.DeleteWithInstance
+						dataDisk["encrypt"] = disk.Encrypt
+						dataDisk["throughput_performance"] = disk.ThroughputPerformance
+						tmpDataDiskMap[hashItem["index"].(int)] = dataDisk
+						hashItem["flag"] = 1
+						break
+					}
+				}
+			}
+		}
+
+		// no disk name last
+		for _, disk := range instance.DataDisks {
+			for index, hashItem := range dDiskHash {
+				tmpHash := getDataDiskHash(diskHash{
+					diskType:           *disk.DiskType,
+					diskSize:           *disk.DiskSize,
+					deleteWithInstance: *disk.DeleteWithInstance,
+					encrypt:            *disk.Encrypt,
+				})
+
+				// check hash and flag
+				if tmpHash == hashItem[string(index)] && hashItem["flag"] == 0 {
+					dataDisk := make(map[string]interface{}, 5)
+					dataDisk["data_disk_id"] = disk.DiskId
+					dataDisk["data_disk_size"] = disk.DiskSize
+					dataDisk["data_disk_name"] = disk.DiskName
+					dataDisk["data_disk_type"] = disk.DiskType
+					dataDisk["data_disk_snapshot_id"] = disk.SnapshotId
+					dataDisk["delete_with_instance"] = disk.DeleteWithInstance
+					dataDisk["encrypt"] = disk.Encrypt
+					dataDisk["throughput_performance"] = disk.ThroughputPerformance
+					tmpDataDiskMap[hashItem["index"].(int)] = dataDisk
+					hashItem["flag"] = 1
+					break
+				}
+			}
+		}
+
+		keys := make([]int, 0, len(tmpDataDiskMap))
+		for k := range tmpDataDiskMap {
+			keys = append(keys, k)
+		}
+
+		sort.Ints(keys)
+		for _, v := range keys {
+			tmpDataDisk := tmpDataDiskMap[v].(map[string]interface{})
+			dataDiskList = append(dataDiskList, tmpDataDisk)
+		}
+
+		_ = d.Set("data_disks", dataDiskList)
+	} else {
+		_ = d.Set("data_disks", dataDiskList)
+	}
 
 	if len(instance.PrivateIpAddresses) > 0 {
 		_ = d.Set("private_ip", instance.PrivateIpAddresses[0])
@@ -2160,3 +2332,19 @@ func waitIpRelease(ctx context.Context, vpcService vpc.VpcService, instance *cvm
 
 	return nil
 }
+
+type diskHash struct {
+	diskType           string
+	diskSize           int64
+	deleteWithInstance bool
+	encrypt            bool
+}
+
+func getDataDiskHash(obj diskHash) string {
+	h := sha256.New()
+	h.Write([]byte(obj.diskType))
+	h.Write([]byte(fmt.Sprintf("%d", obj.diskSize)))
+	h.Write([]byte(fmt.Sprintf("%t", obj.deleteWithInstance)))
+	h.Write([]byte(fmt.Sprintf("%t", obj.encrypt)))
+	return hex.EncodeToString(h.Sum(nil))
+}
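
For reference, the new read logic above fingerprints each configured data_disks block with getDataDiskHash (SHA-256 over disk type, size, delete_with_instance and encrypt), then matches the disks returned by the API back to those blocks: first by CBS disk name plus hash, then by hash for blocks without a configured name, and finally re-emits the result in configuration order. Below is a minimal, self-contained sketch of that reconciliation; diskHash and getDataDiskHash are taken from this commit, while configDisk, apiDisk, matchByNameThenIndex and the sample values are hypothetical stand-ins rather than the provider's real schema and SDK types.

// Standalone sketch (package main) of the reconciliation above: diskHash and
// getDataDiskHash are copied from the commit; configDisk, apiDisk,
// matchByNameThenIndex and the sample values are hypothetical stand-ins for
// the provider's schema data and SDK structs.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"
)

type diskHash struct {
	diskType           string
	diskSize           int64
	deleteWithInstance bool
	encrypt            bool
}

func getDataDiskHash(obj diskHash) string {
	h := sha256.New()
	h.Write([]byte(obj.diskType))
	h.Write([]byte(fmt.Sprintf("%d", obj.diskSize)))
	h.Write([]byte(fmt.Sprintf("%t", obj.deleteWithInstance)))
	h.Write([]byte(fmt.Sprintf("%t", obj.encrypt)))
	return hex.EncodeToString(h.Sum(nil))
}

// configDisk stands in for one configured data_disks block (name may be empty).
type configDisk struct {
	name string
	hash diskHash
}

// apiDisk stands in for a disk attached to the instance as reported by the API.
type apiDisk struct {
	id, name string
	hash     diskHash
}

// matchByNameThenIndex loosely mirrors the two-pass matching in the read
// function: pass 1 claims API disks whose CBS name and attribute hash match a
// named block; pass 2 assigns the remaining disks to unmatched blocks by hash
// only. The result is keyed by config index so state keeps the config order.
func matchByNameThenIndex(cfg []configDisk, api []apiDisk) map[int]apiDisk {
	matched := make(map[int]apiDisk)
	claimed := make(map[string]bool)
	for _, d := range api { // pass 1: name + hash
		for i, c := range cfg {
			if _, used := matched[i]; used {
				continue
			}
			if c.name != "" && c.name == d.name && getDataDiskHash(c.hash) == getDataDiskHash(d.hash) {
				matched[i], claimed[d.id] = d, true
				break
			}
		}
	}
	for _, d := range api { // pass 2: remaining disks, hash only
		if claimed[d.id] {
			continue
		}
		for i, c := range cfg {
			if _, used := matched[i]; used {
				continue
			}
			if getDataDiskHash(c.hash) == getDataDiskHash(d.hash) {
				matched[i] = d
				break
			}
		}
	}
	return matched
}

func main() {
	ssd := diskHash{diskType: "CLOUD_SSD", diskSize: 100, deleteWithInstance: true}
	hdd := diskHash{diskType: "CLOUD_PREMIUM", diskSize: 50, deleteWithInstance: true}
	cfg := []configDisk{{name: "logs", hash: ssd}, {name: "", hash: hdd}}
	api := []apiDisk{{id: "disk-bbb", hash: hdd}, {id: "disk-aaa", name: "logs", hash: ssd}}

	matched := matchByNameThenIndex(cfg, api)
	keys := make([]int, 0, len(matched))
	for k := range matched {
		keys = append(keys, k)
	}
	sort.Ints(keys) // emit in configuration order, like sort.Ints(keys) in the read path
	for _, k := range keys {
		fmt.Printf("data_disks[%d] -> %s\n", k, matched[k].id)
	}
}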
