From 99e9b763e5477d17af0b4e6aaa0220aefe33a87a Mon Sep 17 00:00:00 2001
From: Sam Serdlow
Date: Fri, 10 Jan 2025 16:19:46 +0000
Subject: [PATCH 01/10] Removing alpha disk from tests.

---
 test/e2e/tests/single_zone_e2e_test.go | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go
index 802099150..53a635144 100644
--- a/test/e2e/tests/single_zone_e2e_test.go
+++ b/test/e2e/tests/single_zone_e2e_test.go
@@ -923,7 +923,7 @@ var _ = Describe("GCE PD CSI Driver", func() {
 			Expect(err).To(BeNil(), "DeleteVolume failed")

 			// Validate Disk Deleted
-			_, err = computeAlphaService.Disks.Get(p, zone, volName).Do()
+			_, err = computeService.Disks.Get(p, zone, volName).Do()
 			Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
 		}()
 	})
@@ -1738,12 +1738,9 @@ func createAndValidateUniqueZonalMultiWriterDisk(client *remote.CsiClient, proje
 	Expect(cloudDisk.Status).To(Equal(readyState))
 	Expect(cloudDisk.SizeGb).To(Equal(defaultMwSizeGb))
 	Expect(cloudDisk.Name).To(Equal(volName))
+	Expect(cloudDisk.MultiWriter).To(Equal(true))
 	disk.validate(cloudDisk)

-	alphaDisk, err := computeAlphaService.Disks.Get(project, zone, volName).Do()
-	Expect(err).To(BeNil(), "Failed to get cloud disk using alpha API")
-	Expect(alphaDisk.MultiWriter).To(Equal(true))
-
 	return volName, volume.VolumeId
 }

From 4edbfb1e4f266cbe4677ade6d17f96d9381f29b3 Mon Sep 17 00:00:00 2001
From: Sam Serdlow
Date: Fri, 10 Jan 2025 21:20:39 +0000
Subject: [PATCH 02/10] Changes set up the test to run with hyperdisk extreme.

---
 test/e2e/tests/setup_e2e_test.go | 4 +-
 test/e2e/tests/single_zone_e2e_test.go | 215 ++++++++++++-------------
 2 files changed, 109 insertions(+), 110 deletions(-)

diff --git a/test/e2e/tests/setup_e2e_test.go b/test/e2e/tests/setup_e2e_test.go
index 2e1c8cf51..46cdb38ff 100644
--- a/test/e2e/tests/setup_e2e_test.go
+++ b/test/e2e/tests/setup_e2e_test.go
@@ -40,9 +40,9 @@ var (
 	serviceAccount = flag.String("service-account", "", "Service account to bring up instance with")
 	vmNamePrefix = flag.String("vm-name-prefix", "gce-pd-csi-e2e", "VM name prefix")
 	architecture = flag.String("arch", "amd64", "Architecture pd csi driver build on")
-	minCpuPlatform = flag.String("min-cpu-platform", "AMD Milan", "Minimum CPU architecture")
+	minCpuPlatform = flag.String("min-cpu-platform", "cascadelake", "Minimum CPU architecture")
 	zones = flag.String("zones", "us-east4-a,us-east4-c", "Zones to run tests in.
If there are multiple zones, separate each by comma") - machineType = flag.String("machine-type", "n2d-standard-2", "Type of machine to provision instance on") + machineType = flag.String("machine-type", "n2-standard-80", "Type of machine to provision instance on") imageURL = flag.String("image-url", "projects/ubuntu-os-cloud/global/images/family/ubuntu-minimal-2404-lts-amd64", "OS image url to get image from") runInProw = flag.Bool("run-in-prow", false, "If true, use a Boskos loaned project and special CI service accounts and ssh keys") deleteInstances = flag.Bool("delete-instances", false, "Delete the instances after tests run") diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go index 53a635144..696962130 100644 --- a/test/e2e/tests/single_zone_e2e_test.go +++ b/test/e2e/tests/single_zone_e2e_test.go @@ -40,7 +40,7 @@ import ( compute "google.golang.org/api/compute/v1" "google.golang.org/api/iterator" kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" - fieldmask "google.golang.org/genproto/protobuf/field_mask" + // fieldmask "google.golang.org/genproto/protobuf/field_mask" ) const ( @@ -608,124 +608,124 @@ var _ = Describe("GCE PD CSI Driver", func() { }() }) - DescribeTable("Should create CMEK key, go through volume lifecycle, validate behavior on key revoke and restore", - func(diskType string) { - ctx := context.Background() - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() + // DescribeTable("Should create CMEK key, go through volume lifecycle, validate behavior on key revoke and restore", + // func(diskType string) { + // ctx := context.Background() + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - controllerInstance := testContext.Instance - controllerClient := testContext.Client + // controllerInstance := testContext.Instance + // controllerClient := testContext.Client - p, z, _ := controllerInstance.GetIdentity() - locationID := "global" + // p, z, _ := controllerInstance.GetIdentity() + // locationID := "global" - // The resource name of the key rings. - parentName := fmt.Sprintf("projects/%s/locations/%s", p, locationID) - keyRingId := "gce-pd-csi-test-ring" + // // The resource name of the key rings. 
+ // parentName := fmt.Sprintf("projects/%s/locations/%s", p, locationID) + // keyRingId := "gce-pd-csi-test-ring" - key, keyVersions := setupKeyRing(ctx, parentName, keyRingId) + // key, keyVersions := setupKeyRing(ctx, parentName, keyRingId) - // Defer deletion of all key versions - // https://cloud.google.com/kms/docs/destroy-restore - defer func() { - for _, keyVersion := range keyVersions { - destroyKeyReq := &kmspb.DestroyCryptoKeyVersionRequest{ - Name: keyVersion, - } - _, err := kmsClient.DestroyCryptoKeyVersion(ctx, destroyKeyReq) - Expect(err).To(BeNil(), "Failed to destroy crypto key version: %v", keyVersion) - } - }() + // // Defer deletion of all key versions + // // https://cloud.google.com/kms/docs/destroy-restore + // defer func() { + // for _, keyVersion := range keyVersions { + // destroyKeyReq := &kmspb.DestroyCryptoKeyVersionRequest{ + // Name: keyVersion, + // } + // _, err := kmsClient.DestroyCryptoKeyVersion(ctx, destroyKeyReq) + // Expect(err).To(BeNil(), "Failed to destroy crypto key version: %v", keyVersion) + // } + // }() - // Go through volume lifecycle using CMEK-ed PD Create Disk - disk := typeToDisk[diskType] - volName := testNamePrefix + string(uuid.NewUUID()) - params := merge(disk.params, map[string]string{ - common.ParameterKeyDiskEncryptionKmsKey: key.Name, - }) - topology := &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ - { - Segments: map[string]string{common.TopologyKeyZone: z}, - }, - }, - } + // // Go through volume lifecycle using CMEK-ed PD Create Disk + // disk := typeToDisk[diskType] + // volName := testNamePrefix + string(uuid.NewUUID()) + // params := merge(disk.params, map[string]string{ + // common.ParameterKeyDiskEncryptionKmsKey: key.Name, + // }) + // topology := &csi.TopologyRequirement{ + // Requisite: []*csi.Topology{ + // { + // Segments: map[string]string{common.TopologyKeyZone: z}, + // }, + // }, + // } - diskSize := defaultSizeGb - if diskType == extremeDiskType { - diskSize = defaultExtremeSizeGb - } - volume, err := controllerClient.CreateVolume(volName, params, diskSize, topology, nil) - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + // diskSize := defaultSizeGb + // if diskType == extremeDiskType { + // diskSize = defaultExtremeSizeGb + // } + // volume, err := controllerClient.CreateVolume(volName, params, diskSize, topology, nil) + // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - // Validate Disk Created - cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(diskSize)) - Expect(cloudDisk.Name).To(Equal(volName)) - disk.validate(cloudDisk) + // // Validate Disk Created + // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + // Expect(err).To(BeNil(), "Could not get disk from cloud directly") + // Expect(cloudDisk.Status).To(Equal(readyState)) + // Expect(cloudDisk.SizeGb).To(Equal(diskSize)) + // Expect(cloudDisk.Name).To(Equal(volName)) + // disk.validate(cloudDisk) - defer func() { - // Delete Disk - err = controllerClient.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "DeleteVolume failed") + // defer func() { + // // Delete Disk + // err = controllerClient.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "DeleteVolume failed") - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected 
disk to not be found") - }() + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() - // Test disk works - err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - Expect(err).To(BeNil(), "Failed to go through volume lifecycle before revoking CMEK key") + // // Test disk works + // err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) + // Expect(err).To(BeNil(), "Failed to go through volume lifecycle before revoking CMEK key") - // Revoke CMEK key - // https://cloud.google.com/kms/docs/enable-disable + // // Revoke CMEK key + // // https://cloud.google.com/kms/docs/enable-disable - for _, keyVersion := range keyVersions { - disableReq := &kmspb.UpdateCryptoKeyVersionRequest{ - CryptoKeyVersion: &kmspb.CryptoKeyVersion{ - Name: keyVersion, - State: kmspb.CryptoKeyVersion_DISABLED, - }, - UpdateMask: &fieldmask.FieldMask{ - Paths: []string{"state"}, - }, - } - _, err = kmsClient.UpdateCryptoKeyVersion(ctx, disableReq) - Expect(err).To(BeNil(), "Failed to disable crypto key") - } + // for _, keyVersion := range keyVersions { + // disableReq := &kmspb.UpdateCryptoKeyVersionRequest{ + // CryptoKeyVersion: &kmspb.CryptoKeyVersion{ + // Name: keyVersion, + // State: kmspb.CryptoKeyVersion_DISABLED, + // }, + // UpdateMask: &fieldmask.FieldMask{ + // Paths: []string{"state"}, + // }, + // } + // _, err = kmsClient.UpdateCryptoKeyVersion(ctx, disableReq) + // Expect(err).To(BeNil(), "Failed to disable crypto key") + // } - // Make sure attach of PD fails - err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - Expect(err).ToNot(BeNil(), "Volume lifecycle should have failed, but succeeded") + // // Make sure attach of PD fails + // err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) + // Expect(err).ToNot(BeNil(), "Volume lifecycle should have failed, but succeeded") - // Restore CMEK key - for _, keyVersion := range keyVersions { - enableReq := &kmspb.UpdateCryptoKeyVersionRequest{ - CryptoKeyVersion: &kmspb.CryptoKeyVersion{ - Name: keyVersion, - State: kmspb.CryptoKeyVersion_ENABLED, - }, - UpdateMask: &fieldmask.FieldMask{ - Paths: []string{"state"}, - }, - } - _, err = kmsClient.UpdateCryptoKeyVersion(ctx, enableReq) - Expect(err).To(BeNil(), "Failed to enable crypto key") - } + // // Restore CMEK key + // for _, keyVersion := range keyVersions { + // enableReq := &kmspb.UpdateCryptoKeyVersionRequest{ + // CryptoKeyVersion: &kmspb.CryptoKeyVersion{ + // Name: keyVersion, + // State: kmspb.CryptoKeyVersion_ENABLED, + // }, + // UpdateMask: &fieldmask.FieldMask{ + // Paths: []string{"state"}, + // }, + // } + // _, err = kmsClient.UpdateCryptoKeyVersion(ctx, enableReq) + // Expect(err).To(BeNil(), "Failed to enable crypto key") + // } - // The controller publish failure in above step would set a backoff condition on the node. Wait suffcient amount of time for the driver to accept new controller publish requests. 
- time.Sleep(time.Second) - // Make sure attach of PD succeeds - err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - Expect(err).To(BeNil(), "Failed to go through volume lifecycle after restoring CMEK key") - }, - Entry("on pd-standard", standardDiskType), - Entry("on pd-extreme", extremeDiskType), - ) + // // The controller publish failure in above step would set a backoff condition on the node. Wait suffcient amount of time for the driver to accept new controller publish requests. + // time.Sleep(time.Second) + // // Make sure attach of PD succeeds + // err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) + // Expect(err).To(BeNil(), "Failed to go through volume lifecycle after restoring CMEK key") + // }, + // Entry("on pd-standard", standardDiskType), + // Entry("on pd-extreme", extremeDiskType), + // ) It("Should create disks, attach them places, and verify List returns correct results", func() { Expect(testContexts).ToNot(BeEmpty()) @@ -911,11 +911,10 @@ var _ = Describe("GCE PD CSI Driver", func() { p, _, _ := testContext.Instance.GetIdentity() client := testContext.Client - // Hardcode to us-east1-a while feature is in alpha - zone := "us-east1-a" + zone := "us-east1-b" // Create and Validate Disk - volName, volID := createAndValidateUniqueZonalMultiWriterDisk(client, p, zone, hdbDiskType) + volName, volID := createAndValidateUniqueZonalMultiWriterDisk(client, p, zone, hdxDiskType) defer func() { // Delete Disk @@ -936,7 +935,7 @@ var _ = Describe("GCE PD CSI Driver", func() { instance := testContext.Instance // Create and Validate Disk - volName, volID := createAndValidateUniqueZonalMultiWriterDisk(client, p, z, hdbDiskType) + volName, volID := createAndValidateUniqueZonalMultiWriterDisk(client, p, z, hdxDiskType) defer func() { // Delete Disk @@ -1738,7 +1737,7 @@ func createAndValidateUniqueZonalMultiWriterDisk(client *remote.CsiClient, proje Expect(cloudDisk.Status).To(Equal(readyState)) Expect(cloudDisk.SizeGb).To(Equal(defaultMwSizeGb)) Expect(cloudDisk.Name).To(Equal(volName)) - Expect(cloudDisk.MultiWriter).To(Equal(true)) + Expect(cloudDisk.AccessMode).To(Equal("READ_WRITE_MANY")) disk.validate(cloudDisk) return volName, volume.VolumeId From bb6832ec72484cb53ddb7e52425c2feba50bc593 Mon Sep 17 00:00:00 2001 From: Sam Serdlow Date: Fri, 10 Jan 2025 22:45:47 +0000 Subject: [PATCH 03/10] Updates the disk type back to balanced, as HDX doesn't support multi writer. 
--- test/e2e/tests/single_zone_e2e_test.go | 212 ++++++++++++------------- 1 file changed, 106 insertions(+), 106 deletions(-) diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go index 696962130..adb085af7 100644 --- a/test/e2e/tests/single_zone_e2e_test.go +++ b/test/e2e/tests/single_zone_e2e_test.go @@ -40,7 +40,7 @@ import ( compute "google.golang.org/api/compute/v1" "google.golang.org/api/iterator" kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" - // fieldmask "google.golang.org/genproto/protobuf/field_mask" + fieldmask "google.golang.org/genproto/protobuf/field_mask" ) const ( @@ -608,124 +608,124 @@ var _ = Describe("GCE PD CSI Driver", func() { }() }) - // DescribeTable("Should create CMEK key, go through volume lifecycle, validate behavior on key revoke and restore", - // func(diskType string) { - // ctx := context.Background() - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + DescribeTable("Should create CMEK key, go through volume lifecycle, validate behavior on key revoke and restore", + func(diskType string) { + ctx := context.Background() + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // controllerInstance := testContext.Instance - // controllerClient := testContext.Client + controllerInstance := testContext.Instance + controllerClient := testContext.Client - // p, z, _ := controllerInstance.GetIdentity() - // locationID := "global" + p, z, _ := controllerInstance.GetIdentity() + locationID := "global" - // // The resource name of the key rings. - // parentName := fmt.Sprintf("projects/%s/locations/%s", p, locationID) - // keyRingId := "gce-pd-csi-test-ring" + // The resource name of the key rings. + parentName := fmt.Sprintf("projects/%s/locations/%s", p, locationID) + keyRingId := "gce-pd-csi-test-ring" - // key, keyVersions := setupKeyRing(ctx, parentName, keyRingId) + key, keyVersions := setupKeyRing(ctx, parentName, keyRingId) - // // Defer deletion of all key versions - // // https://cloud.google.com/kms/docs/destroy-restore - // defer func() { - // for _, keyVersion := range keyVersions { - // destroyKeyReq := &kmspb.DestroyCryptoKeyVersionRequest{ - // Name: keyVersion, - // } - // _, err := kmsClient.DestroyCryptoKeyVersion(ctx, destroyKeyReq) - // Expect(err).To(BeNil(), "Failed to destroy crypto key version: %v", keyVersion) - // } - // }() + // Defer deletion of all key versions + // https://cloud.google.com/kms/docs/destroy-restore + defer func() { + for _, keyVersion := range keyVersions { + destroyKeyReq := &kmspb.DestroyCryptoKeyVersionRequest{ + Name: keyVersion, + } + _, err := kmsClient.DestroyCryptoKeyVersion(ctx, destroyKeyReq) + Expect(err).To(BeNil(), "Failed to destroy crypto key version: %v", keyVersion) + } + }() - // // Go through volume lifecycle using CMEK-ed PD Create Disk - // disk := typeToDisk[diskType] - // volName := testNamePrefix + string(uuid.NewUUID()) - // params := merge(disk.params, map[string]string{ - // common.ParameterKeyDiskEncryptionKmsKey: key.Name, - // }) - // topology := &csi.TopologyRequirement{ - // Requisite: []*csi.Topology{ - // { - // Segments: map[string]string{common.TopologyKeyZone: z}, - // }, - // }, - // } + // Go through volume lifecycle using CMEK-ed PD Create Disk + disk := typeToDisk[diskType] + volName := testNamePrefix + string(uuid.NewUUID()) + params := merge(disk.params, map[string]string{ + common.ParameterKeyDiskEncryptionKmsKey: key.Name, + }) + topology := 
&csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: z}, + }, + }, + } - // diskSize := defaultSizeGb - // if diskType == extremeDiskType { - // diskSize = defaultExtremeSizeGb - // } - // volume, err := controllerClient.CreateVolume(volName, params, diskSize, topology, nil) - // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + diskSize := defaultSizeGb + if diskType == extremeDiskType { + diskSize = defaultExtremeSizeGb + } + volume, err := controllerClient.CreateVolume(volName, params, diskSize, topology, nil) + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - // // Validate Disk Created - // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - // Expect(err).To(BeNil(), "Could not get disk from cloud directly") - // Expect(cloudDisk.Status).To(Equal(readyState)) - // Expect(cloudDisk.SizeGb).To(Equal(diskSize)) - // Expect(cloudDisk.Name).To(Equal(volName)) - // disk.validate(cloudDisk) + // Validate Disk Created + cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(diskSize)) + Expect(cloudDisk.Name).To(Equal(volName)) + disk.validate(cloudDisk) - // defer func() { - // // Delete Disk - // err = controllerClient.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "DeleteVolume failed") + defer func() { + // Delete Disk + err = controllerClient.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() - // // Test disk works - // err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - // Expect(err).To(BeNil(), "Failed to go through volume lifecycle before revoking CMEK key") + // Test disk works + err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) + Expect(err).To(BeNil(), "Failed to go through volume lifecycle before revoking CMEK key") - // // Revoke CMEK key - // // https://cloud.google.com/kms/docs/enable-disable + // Revoke CMEK key + // https://cloud.google.com/kms/docs/enable-disable - // for _, keyVersion := range keyVersions { - // disableReq := &kmspb.UpdateCryptoKeyVersionRequest{ - // CryptoKeyVersion: &kmspb.CryptoKeyVersion{ - // Name: keyVersion, - // State: kmspb.CryptoKeyVersion_DISABLED, - // }, - // UpdateMask: &fieldmask.FieldMask{ - // Paths: []string{"state"}, - // }, - // } - // _, err = kmsClient.UpdateCryptoKeyVersion(ctx, disableReq) - // Expect(err).To(BeNil(), "Failed to disable crypto key") - // } + for _, keyVersion := range keyVersions { + disableReq := &kmspb.UpdateCryptoKeyVersionRequest{ + CryptoKeyVersion: &kmspb.CryptoKeyVersion{ + Name: keyVersion, + State: kmspb.CryptoKeyVersion_DISABLED, + }, + UpdateMask: &fieldmask.FieldMask{ + Paths: []string{"state"}, + }, + } + _, err = kmsClient.UpdateCryptoKeyVersion(ctx, disableReq) + Expect(err).To(BeNil(), "Failed to disable crypto key") + } - // // Make sure attach of PD fails - // err = 
testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - // Expect(err).ToNot(BeNil(), "Volume lifecycle should have failed, but succeeded") + // Make sure attach of PD fails + err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) + Expect(err).ToNot(BeNil(), "Volume lifecycle should have failed, but succeeded") - // // Restore CMEK key - // for _, keyVersion := range keyVersions { - // enableReq := &kmspb.UpdateCryptoKeyVersionRequest{ - // CryptoKeyVersion: &kmspb.CryptoKeyVersion{ - // Name: keyVersion, - // State: kmspb.CryptoKeyVersion_ENABLED, - // }, - // UpdateMask: &fieldmask.FieldMask{ - // Paths: []string{"state"}, - // }, - // } - // _, err = kmsClient.UpdateCryptoKeyVersion(ctx, enableReq) - // Expect(err).To(BeNil(), "Failed to enable crypto key") - // } + // Restore CMEK key + for _, keyVersion := range keyVersions { + enableReq := &kmspb.UpdateCryptoKeyVersionRequest{ + CryptoKeyVersion: &kmspb.CryptoKeyVersion{ + Name: keyVersion, + State: kmspb.CryptoKeyVersion_ENABLED, + }, + UpdateMask: &fieldmask.FieldMask{ + Paths: []string{"state"}, + }, + } + _, err = kmsClient.UpdateCryptoKeyVersion(ctx, enableReq) + Expect(err).To(BeNil(), "Failed to enable crypto key") + } - // // The controller publish failure in above step would set a backoff condition on the node. Wait suffcient amount of time for the driver to accept new controller publish requests. - // time.Sleep(time.Second) - // // Make sure attach of PD succeeds - // err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - // Expect(err).To(BeNil(), "Failed to go through volume lifecycle after restoring CMEK key") - // }, - // Entry("on pd-standard", standardDiskType), - // Entry("on pd-extreme", extremeDiskType), - // ) + // The controller publish failure in above step would set a backoff condition on the node. Wait suffcient amount of time for the driver to accept new controller publish requests. 
+			time.Sleep(time.Second)
+			// Make sure attach of PD succeeds
+			err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */)
+			Expect(err).To(BeNil(), "Failed to go through volume lifecycle after restoring CMEK key")
+		},
+		Entry("on pd-standard", standardDiskType),
+		Entry("on pd-extreme", extremeDiskType),
+	)

 	It("Should create disks, attach them places, and verify List returns correct results", func() {
 		Expect(testContexts).ToNot(BeEmpty())
@@ -914,7 +914,7 @@ var _ = Describe("GCE PD CSI Driver", func() {
 		zone := "us-east1-b"

 		// Create and Validate Disk
-		volName, volID := createAndValidateUniqueZonalMultiWriterDisk(client, p, zone, hdxDiskType)
+		volName, volID := createAndValidateUniqueZonalMultiWriterDisk(client, p, zone, hdbDiskType)

 		defer func() {
 			// Delete Disk
@@ -935,7 +935,7 @@ var _ = Describe("GCE PD CSI Driver", func() {
 		instance := testContext.Instance

 		// Create and Validate Disk
-		volName, volID := createAndValidateUniqueZonalMultiWriterDisk(client, p, z, hdxDiskType)
+		volName, volID := createAndValidateUniqueZonalMultiWriterDisk(client, p, z, hdbDiskType)

 		defer func() {
 			// Delete Disk
@@ -1709,7 +1709,7 @@ func createAndValidateUniqueZonalMultiWriterDisk(client *remote.CsiClient, proje

 	disk := typeToDisk[diskType]
 	disk.params[common.ParameterAccessMode] = "READ_WRITE_MANY"
-
+// .AccessMode
 	volName := testNamePrefix + string(uuid.NewUUID())

 	volume, err := client.CreateVolumeWithCaps(volName, disk.params, defaultMwSizeGb, &csi.TopologyRequirement{

From d06c270de2c668198217a23e74b760b5ec837023 Mon Sep 17 00:00:00 2001
From: Sam Serdlow
Date: Fri, 10 Jan 2025 22:51:56 +0000
Subject: [PATCH 04/10] Moving over to m1-megamem as that's the only type of machine that can support all needed disk types.

---
 test/e2e/tests/setup_e2e_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/e2e/tests/setup_e2e_test.go b/test/e2e/tests/setup_e2e_test.go
index 46cdb38ff..d2ecfdbdc 100644
--- a/test/e2e/tests/setup_e2e_test.go
+++ b/test/e2e/tests/setup_e2e_test.go
@@ -40,9 +40,9 @@ var (
 	serviceAccount = flag.String("service-account", "", "Service account to bring up instance with")
 	vmNamePrefix = flag.String("vm-name-prefix", "gce-pd-csi-e2e", "VM name prefix")
 	architecture = flag.String("arch", "amd64", "Architecture pd csi driver build on")
-	minCpuPlatform = flag.String("min-cpu-platform", "cascadelake", "Minimum CPU architecture")
+	minCpuPlatform = flag.String("min-cpu-platform", "skylake", "Minimum CPU architecture")
 	zones = flag.String("zones", "us-east4-a,us-east4-c", "Zones to run tests in. If there are multiple zones, separate each by comma")
-	machineType = flag.String("machine-type", "n2-standard-80", "Type of machine to provision instance on")
+	machineType = flag.String("machine-type", "m1-megamem-96", "Type of machine to provision instance on")
 	imageURL = flag.String("image-url", "projects/ubuntu-os-cloud/global/images/family/ubuntu-minimal-2404-lts-amd64", "OS image url to get image from")
 	runInProw = flag.Bool("run-in-prow", false, "If true, use a Boskos loaned project and special CI service accounts and ssh keys")
 	deleteInstances = flag.Bool("delete-instances", false, "Delete the instances after tests run")

From f6ca09c43aa6a65e7f62be06d244d5b00c879e8f Mon Sep 17 00:00:00 2001
From: Sam Serdlow
Date: Mon, 13 Jan 2025 18:19:19 +0000
Subject: [PATCH 05/10] Changes update the tests to use two contexts, one for multiwriter and one for the existing tests.

This was deemed necessary as only some disks can support multi-writer, and only some VM shapes can support said disks.
---
 test/e2e/tests/setup_e2e_test.go | 47 +-
 test/e2e/tests/single_zone_e2e_test.go | 2343 ++++++++++++------------
 test/run-e2e-local.sh | 4 +-
 3 files changed, 1211 insertions(+), 1183 deletions(-)

diff --git a/test/e2e/tests/setup_e2e_test.go b/test/e2e/tests/setup_e2e_test.go
index d2ecfdbdc..e6af641b8 100644
--- a/test/e2e/tests/setup_e2e_test.go
+++ b/test/e2e/tests/setup_e2e_test.go
@@ -35,14 +35,17 @@ import (
 	remote "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/remote"
 )

+// Multi-writer is only supported on M3, C3, and N4 https://cloud.google.com/compute/docs/disks/sharing-disks-between-vms#hd-multi-writer
 var (
 	project = flag.String("project", "", "Project to run tests in")
 	serviceAccount = flag.String("service-account", "", "Service account to bring up instance with")
 	vmNamePrefix = flag.String("vm-name-prefix", "gce-pd-csi-e2e", "VM name prefix")
 	architecture = flag.String("arch", "amd64", "Architecture pd csi driver build on")
-	minCpuPlatform = flag.String("min-cpu-platform", "skylake", "Minimum CPU architecture")
+	minCpuPlatform = flag.String("min-cpu-platform", "rome", "Minimum CPU architecture")
+	mwMinCpuPlatform = flag.String("min-cpu-platform-mw", "sapphirerapids", "Minimum CPU architecture for multiwriter tests")
 	zones = flag.String("zones", "us-east4-a,us-east4-c", "Zones to run tests in. If there are multiple zones, separate each by comma")
-	machineType = flag.String("machine-type", "m1-megamem-96", "Type of machine to provision instance on")
+	machineType = flag.String("machine-type", "n2d-standard-4", "Type of machine to provision instance on")
+	mwMachineType = flag.String("mw-machine-type", "c3-standard-4", "Type of machine to provision instance for multiwriter tests")
 	imageURL = flag.String("image-url", "projects/ubuntu-os-cloud/global/images/family/ubuntu-minimal-2404-lts-amd64", "OS image url to get image from")
 	runInProw = flag.Bool("run-in-prow", false, "If true, use a Boskos loaned project and special CI service accounts and ssh keys")
 	deleteInstances = flag.Bool("delete-instances", false, "Delete the instances after tests run")
@@ -50,11 +53,12 @@ var (
 	extraDriverFlags = flag.String("extra-driver-flags", "", "Extra flags to pass to the driver")
 	enableConfidentialCompute = flag.Bool("enable-confidential-compute", false, "Create VMs with confidential compute mode.
This uses NVMe devices") - testContexts = []*remote.TestContext{} - computeService *compute.Service - computeAlphaService *computealpha.Service - computeBetaService *computebeta.Service - kmsClient *cloudkms.KeyManagementClient + testContexts = []*remote.TestContext{} + multiWriterTestContexts = []*remote.TestContext{} + computeService *compute.Service + computeAlphaService *computealpha.Service + computeBetaService *computebeta.Service + kmsClient *cloudkms.KeyManagementClient ) func init() { @@ -70,7 +74,9 @@ func TestE2E(t *testing.T) { var _ = BeforeSuite(func() { var err error tcc := make(chan *remote.TestContext) + mwTcc := make(chan *remote.TestContext) defer close(tcc) + defer close(mwTcc) zones := strings.Split(*zones, ",") @@ -101,13 +107,16 @@ var _ = BeforeSuite(func() { for _, zone := range zones { go func(curZone string) { defer GinkgoRecover() - tcc <- NewTestContext(curZone) + tcc <- NewTestContext(curZone, *machineType, *minCpuPlatform) + mwTcc <- NewTestContext(curZone, *mwMachineType, *mwMinCpuPlatform) }(zone) } for i := 0; i < len(zones); i++ { tc := <-tcc testContexts = append(testContexts, tc) + mwTc := <-mwTcc + multiWriterTestContexts = append(multiWriterTestContexts, mwTc) klog.Infof("Added TestContext for node %s", tc.Instance.GetName()) } }) @@ -120,6 +129,13 @@ var _ = AfterSuite(func() { tc.Instance.DeleteInstance() } } + for _, mwTc := range multiWriterTestContexts { + err := remote.TeardownDriverAndClient(mwTc) + Expect(err).To(BeNil(), "Multiwriter Teardown Driver and Client failed with error") + if *deleteInstances { + mwTc.Instance.DeleteInstance() + } + } }) func notEmpty(v string) bool { @@ -133,17 +149,19 @@ func getDriverConfig() testutils.DriverConfig { } } -func NewTestContext(zone string) *remote.TestContext { - nodeID := fmt.Sprintf("%s-%s", *vmNamePrefix, zone) +// Could do a multi writer optional variable here. That'd force the mincpu platfor and machinetype to a specific thing +// Create a new context, and have the multi writer contexts run on only that? 
+func NewTestContext(zone string, machineType string, minCpuPlatform string) *remote.TestContext { + nodeID := fmt.Sprintf("%s-%s-%s", *vmNamePrefix, zone, machineType) klog.Infof("Setting up node %s", nodeID) instanceConfig := remote.InstanceConfig{ Project: *project, Architecture: *architecture, - MinCpuPlatform: *minCpuPlatform, + MinCpuPlatform: minCpuPlatform, Zone: zone, Name: nodeID, - MachineType: *machineType, + MachineType: machineType, ServiceAccount: *serviceAccount, ImageURL: *imageURL, CloudtopHost: *cloudtopHost, @@ -185,3 +203,8 @@ func getRandomTestContext() *remote.TestContext { rn := rand.Intn(len(testContexts)) return testContexts[rn] } +func getRandomMwTestContext() *remote.TestContext { + Expect(multiWriterTestContexts).ToNot(BeEmpty()) + rn := rand.Intn(len(multiWriterTestContexts)) + return multiWriterTestContexts[rn] +} diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go index adb085af7..6e039e617 100644 --- a/test/e2e/tests/single_zone_e2e_test.go +++ b/test/e2e/tests/single_zone_e2e_test.go @@ -18,7 +18,6 @@ import ( "context" "fmt" "os" - "path/filepath" "regexp" "strconv" "strings" @@ -26,9 +25,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common" - "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/deviceutils" gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute" testutils "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/e2e/utils" "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/remote" @@ -40,7 +37,6 @@ import ( compute "google.golang.org/api/compute/v1" "google.golang.org/api/iterator" kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" - fieldmask "google.golang.org/genproto/protobuf/field_mask" ) const ( @@ -80,209 +76,215 @@ const ( var _ = Describe("GCE PD CSI Driver", func() { - It("Should get reasonable volume limits from nodes with NodeGetInfo", func() { - testContext := getRandomTestContext() - resp, err := testContext.Client.NodeGetInfo() - Expect(err).To(BeNil()) - volumeLimit := resp.GetMaxVolumesPerNode() - Expect(volumeLimit).To(Equal(defaultVolumeLimit)) - }) + // ReportAfterEach(func(report SpecReport) { + // customFormat := fmt.Sprintf("%s | %s", report.State, report.FullText()) + // client.SendReport(customFormat) + // }) + // It("Should get reasonable volume limits from nodes with NodeGetInfo", func() { + // testContext := getRandomTestContext() + // resp, err := testContext.Client.NodeGetInfo() + // Expect(err).To(BeNil()) + // volumeLimit := resp.GetMaxVolumesPerNode() + // Expect(volumeLimit).To(Equal(defaultVolumeLimit)) + // }) - It("[NVMe] Should create->attach->stage->mount volume and check if it is writable, then unmount->unstage->detach->delete and check disk is deleted", func() { - testContext := getRandomTestContext() + // It("[NVMe] Should create->attach->stage->mount volume and check if it is writable, then unmount->unstage->detach->delete and check disk is deleted", func() { + // testContext := getRandomTestContext() - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client - instance := testContext.Instance + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client + // instance := testContext.Instance - // Create Disk - volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // // Create Disk + // volName, volID := 
createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - defer func() { - // Delete Disk - err := client.DeleteVolume(volID) - Expect(err).To(BeNil(), "DeleteVolume failed") + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(volID) + // Expect(err).To(BeNil(), "DeleteVolume failed") - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() - // Attach Disk - err := testAttachWriteReadDetach(volID, volName, instance, client, false /* readOnly */) - Expect(err).To(BeNil(), "Failed to go through volume lifecycle") - }) + // // Attach Disk + // err := testAttachWriteReadDetach(volID, volName, instance, client, false /* readOnly */) + // Expect(err).To(BeNil(), "Failed to go through volume lifecycle") + // }) - It("Should automatically fix the symlink between /dev/* and /dev/by-id if the disk does not match", func() { - testContext := getRandomTestContext() + // It("Should automatically fix the symlink between /dev/* and /dev/by-id if the disk does not match", func() { + // testContext := getRandomTestContext() - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client - instance := testContext.Instance + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client + // instance := testContext.Instance - // Create Disk - volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // // Create Disk + // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - defer func() { - // Delete Disk - err := client.DeleteVolume(volID) - Expect(err).To(BeNil(), "DeleteVolume failed") + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(volID) + // Expect(err).To(BeNil(), "DeleteVolume failed") - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() - // Attach Disk - err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) - Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID()) + // // Attach Disk + // err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) + // Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID()) - defer func() { - // Detach Disk - err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) - if err != nil { - klog.Errorf("Failed to detach disk: %v", err) - } + // defer func() { + // // Detach Disk + // err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) + // if err != nil { + // klog.Errorf("Failed to detach disk: %v", err) + // } - }() + // }() - // MESS UP THE symlink - devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") - for _, devicePath := range devicePaths { - err = testutils.RmAll(instance, devicePath) - Expect(err).To(BeNil(), "failed to remove /dev/by-id 
folder") - err = testutils.Symlink(instance, "/dev/null", devicePath) - Expect(err).To(BeNil(), "failed to add invalid symlink /dev/by-id folder") - } + // // MESS UP THE symlink + // devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") + // for _, devicePath := range devicePaths { + // err = testutils.RmAll(instance, devicePath) + // Expect(err).To(BeNil(), "failed to remove /dev/by-id folder") + // err = testutils.Symlink(instance, "/dev/null", devicePath) + // Expect(err).To(BeNil(), "failed to add invalid symlink /dev/by-id folder") + // } - // Stage Disk - stageDir := filepath.Join("/tmp/", volName, "stage") - err = client.NodeStageExt4Volume(volID, stageDir) - Expect(err).To(BeNil(), "failed to repair /dev/by-id symlink and stage volume") - - // Validate that the link is correct - var validated bool - for _, devicePath := range devicePaths { - validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) - Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) - if validated { - break - } - } - Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) + // // Stage Disk + // stageDir := filepath.Join("/tmp/", volName, "stage") + // err = client.NodeStageExt4Volume(volID, stageDir) + // Expect(err).To(BeNil(), "failed to repair /dev/by-id symlink and stage volume") + + // // Validate that the link is correct + // var validated bool + // for _, devicePath := range devicePaths { + // validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) + // Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) + // if validated { + // break + // } + // } + // Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) - defer func() { - // Unstage Disk - err = client.NodeUnstageVolume(volID, stageDir) - if err != nil { - klog.Errorf("Failed to unstage volume: %v", err) - } - fp := filepath.Join("/tmp/", volName) - err = testutils.RmAll(instance, fp) - if err != nil { - klog.Errorf("Failed to rm file path %s: %v", fp, err) - } - }() - }) + // defer func() { + // // Unstage Disk + // err = client.NodeUnstageVolume(volID, stageDir) + // if err != nil { + // klog.Errorf("Failed to unstage volume: %v", err) + // } + // fp := filepath.Join("/tmp/", volName) + // err = testutils.RmAll(instance, fp) + // if err != nil { + // klog.Errorf("Failed to rm file path %s: %v", fp, err) + // } + // }() + // }) - It("[NVMe] Should automatically add a symlink between /dev/* and /dev/by-id if disk is not found", func() { - testContext := getRandomTestContext() + // It("[NVMe] Should automatically add a symlink between /dev/* and /dev/by-id if disk is not found", func() { + // testContext := getRandomTestContext() - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client - instance := testContext.Instance + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client + // instance := testContext.Instance - // Create Disk - volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // // Create Disk + // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - defer func() { - // Delete Disk - err := client.DeleteVolume(volID) - Expect(err).To(BeNil(), "DeleteVolume failed") + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(volID) + // Expect(err).To(BeNil(), 
"DeleteVolume failed") - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() - // Attach Disk - err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) - Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID()) + // // Attach Disk + // err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) + // Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID()) - defer func() { - // Detach Disk - err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) - if err != nil { - klog.Errorf("Failed to detach disk: %v", err) - } + // defer func() { + // // Detach Disk + // err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) + // if err != nil { + // klog.Errorf("Failed to detach disk: %v", err) + // } - }() + // }() - // DELETE THE symlink - devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") - for _, devicePath := range devicePaths { - err = testutils.RmAll(instance, devicePath) - Expect(err).To(BeNil(), "failed to remove /dev/by-id folder") - } + // // DELETE THE symlink + // devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") + // for _, devicePath := range devicePaths { + // err = testutils.RmAll(instance, devicePath) + // Expect(err).To(BeNil(), "failed to remove /dev/by-id folder") + // } - // Stage Disk - stageDir := filepath.Join("/tmp/", volName, "stage") - err = client.NodeStageExt4Volume(volID, stageDir) - Expect(err).To(BeNil(), "failed to repair /dev/by-id symlink and stage volume") - - // Validate that the link is correct - var validated bool - for _, devicePath := range devicePaths { - validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) - Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) - if validated { - break - } - } - Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) + // // Stage Disk + // stageDir := filepath.Join("/tmp/", volName, "stage") + // err = client.NodeStageExt4Volume(volID, stageDir) + // Expect(err).To(BeNil(), "failed to repair /dev/by-id symlink and stage volume") + + // // Validate that the link is correct + // var validated bool + // for _, devicePath := range devicePaths { + // validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) + // Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) + // if validated { + // break + // } + // } + // Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) - defer func() { - // Unstage Disk - err = client.NodeUnstageVolume(volID, stageDir) - if err != nil { - klog.Errorf("Failed to unstage volume: %v", err) - } - fp := filepath.Join("/tmp/", volName) - err = testutils.RmAll(instance, fp) - if err != nil { - klog.Errorf("Failed to rm file path %s: %v", fp, err) - } - }() - }) + // defer func() { + // // Unstage Disk + // err = client.NodeUnstageVolume(volID, stageDir) + // if err != nil { 
+ // klog.Errorf("Failed to unstage volume: %v", err) + // } + // fp := filepath.Join("/tmp/", volName) + // err = testutils.RmAll(instance, fp) + // if err != nil { + // klog.Errorf("Failed to rm file path %s: %v", fp, err) + // } + // }() + // }) - It("Should create disks in correct zones when topology is specified", func() { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() + // It("Should create disks in correct zones when topology is specified", func() { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - p, _, _ := testContext.Instance.GetIdentity() + // p, _, _ := testContext.Instance.GetIdentity() - zones := []string{"us-central1-c", "us-central1-b", "us-central1-a"} + // zones := []string{"us-central1-c", "us-central1-b", "us-central1-a"} - for _, zone := range zones { - volName := testNamePrefix + string(uuid.NewUUID()) - topReq := &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ - { - Segments: map[string]string{common.TopologyKeyZone: zone}, - }, - }, - } - volume, err := testContext.Client.CreateVolume(volName, nil, defaultSizeGb, topReq, nil) - Expect(err).To(BeNil(), "Failed to create volume") - defer func() { - err = testContext.Client.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "Failed to delete volume") - }() + // for _, zone := range zones { + // volName := testNamePrefix + string(uuid.NewUUID()) + // topReq := &csi.TopologyRequirement{ + // Requisite: []*csi.Topology{ + // { + // Segments: map[string]string{common.TopologyKeyZone: zone}, + // }, + // }, + // } + // volume, err := testContext.Client.CreateVolume(volName, nil, defaultSizeGb, topReq, nil) + // Expect(err).To(BeNil(), "Failed to create volume") + // defer func() { + // err = testContext.Client.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "Failed to delete volume") + // }() - _, err = computeService.Disks.Get(p, zone, volName).Do() - Expect(err).To(BeNil(), "Could not find disk in correct zone") - } - }) + // _, err = computeService.Disks.Get(p, zone, volName).Do() + // Expect(err).To(BeNil(), "Could not find disk in correct zone") + // } + // }) + + /******************/ // TODO(hime): Enable this test once all release branches contain the fix from PR#1708. // It("Should return InvalidArgument when disk size exceeds limit", func() { // // If this returns a different error code (like Unknown), the error wrapping logic in #1708 has regressed. 
@@ -311,602 +313,603 @@ var _ = Describe("GCE PD CSI Driver", func() { // } // }) - DescribeTable("Should complete entire disk lifecycle with underspecified volume ID", - func(diskType string) { - testContext := getRandomTestContext() - - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client - instance := testContext.Instance - - volName, _ := createAndValidateUniqueZonalDisk(client, p, z, diskType) + /******************/ + // DescribeTable("Should complete entire disk lifecycle with underspecified volume ID", + // func(diskType string) { + // testContext := getRandomTestContext() - underSpecifiedID := common.GenerateUnderspecifiedVolumeID(volName, true /* isZonal */) + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client + // instance := testContext.Instance - defer func() { - // Delete Disk - err := client.DeleteVolume(underSpecifiedID) - Expect(err).To(BeNil(), "DeleteVolume failed") + // volName, _ := createAndValidateUniqueZonalDisk(client, p, z, diskType) - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() + // underSpecifiedID := common.GenerateUnderspecifiedVolumeID(volName, true /* isZonal */) - // Attach Disk - err := testAttachWriteReadDetach(underSpecifiedID, volName, instance, client, false /* readOnly */) - Expect(err).To(BeNil(), "Failed to go through volume lifecycle") - }, - Entry("on pd-standard", standardDiskType), - Entry("on pd-extreme", extremeDiskType), - Entry("on hyperdisk-throughput", hdtDiskType), - Entry("on pd-ssd", ssdDiskType), - ) - - DescribeTable("[NVMe] Should complete publish/unpublish lifecycle with underspecified volume ID and missing volume", - func(diskType string) { - testContext := getRandomTestContext() - - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client - instance := testContext.Instance - - // Create Disk - volName, _ := createAndValidateUniqueZonalDisk(client, p, z, diskType) - underSpecifiedID := common.GenerateUnderspecifiedVolumeID(volName, true /* isZonal */) - - defer func() { - // Detach Disk - err := instance.DetachDisk(volName) - Expect(err).To(BeNil(), "DetachDisk failed") - - // Delete Disk - err = client.DeleteVolume(underSpecifiedID) - Expect(err).To(BeNil(), "DeleteVolume failed") - - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - - // Unpublish Disk - err = client.ControllerUnpublishVolume(underSpecifiedID, instance.GetNodeID()) - Expect(err).To(BeNil(), "ControllerUnpublishVolume failed") - }() - - // Attach Disk - err := client.ControllerPublishVolumeReadWrite(underSpecifiedID, instance.GetNodeID(), false /* forceAttach */) - Expect(err).To(BeNil(), "ControllerPublishVolume failed") - }, - Entry("on pd-standard", standardDiskType), - Entry("on pd-extreme", extremeDiskType), - ) + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(underSpecifiedID) + // Expect(err).To(BeNil(), "DeleteVolume failed") - It("Should successfully create RePD in two zones in the drivers region when none are specified", func() { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() - - controllerInstance := testContext.Instance - controllerClient := testContext.Client - - p, z, _ := controllerInstance.GetIdentity() - - region, err := common.GetRegionFromZones([]string{z}) - 
Expect(err).To(BeNil(), "Failed to get region from zones") - - // Create Disk - volName := testNamePrefix + string(uuid.NewUUID()) - volume, err := controllerClient.CreateVolume(volName, map[string]string{ - common.ParameterKeyReplicationType: "regional-pd", - }, defaultRepdSizeGb, nil, nil) - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // Validate Disk Created - cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) - Expect(cloudDisk.Name).To(Equal(volName)) - Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) - for _, replicaZone := range cloudDisk.ReplicaZones { - actualZone := zoneFromURL(replicaZone) - gotRegion, err := common.GetRegionFromZones([]string{actualZone}) - Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) - Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") - } - defer func() { - // Delete Disk - controllerClient.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "DeleteVolume failed") + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() - // Validate Disk Deleted - _, err = computeService.RegionDisks.Get(p, region, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() - }) + // // Attach Disk + // err := testAttachWriteReadDetach(underSpecifiedID, volName, instance, client, false /* readOnly */) + // Expect(err).To(BeNil(), "Failed to go through volume lifecycle") + // }, + // Entry("on pd-standard", standardDiskType), + // Entry("on pd-extreme", extremeDiskType), + // Entry("on hyperdisk-throughput", hdtDiskType), + // Entry("on pd-ssd", ssdDiskType), + // ) - DescribeTable("Should create and delete disk with default zone", - func(diskType string) { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() + // DescribeTable("[NVMe] Should complete publish/unpublish lifecycle with underspecified volume ID and missing volume", + // func(diskType string) { + // testContext := getRandomTestContext() - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client + // instance := testContext.Instance - // Create Disk - disk := typeToDisk[diskType] - volName := testNamePrefix + string(uuid.NewUUID()) + // // Create Disk + // volName, _ := createAndValidateUniqueZonalDisk(client, p, z, diskType) + // underSpecifiedID := common.GenerateUnderspecifiedVolumeID(volName, true /* isZonal */) - diskSize := defaultSizeGb - if diskType == extremeDiskType { - diskSize = defaultExtremeSizeGb - } + // defer func() { + // // Detach Disk + // err := instance.DetachDisk(volName) + // Expect(err).To(BeNil(), "DetachDisk failed") - volume, err := client.CreateVolume(volName, disk.params, diskSize, nil, nil) + // // Delete Disk + // err = client.DeleteVolume(underSpecifiedID) + // Expect(err).To(BeNil(), "DeleteVolume failed") - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, 
"notFound")).To(BeTrue(), "Expected disk to not be found") - // Validate Disk Created - cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(diskSize)) - Expect(cloudDisk.Name).To(Equal(volName)) - disk.validate(cloudDisk) + // // Unpublish Disk + // err = client.ControllerUnpublishVolume(underSpecifiedID, instance.GetNodeID()) + // Expect(err).To(BeNil(), "ControllerUnpublishVolume failed") + // }() - defer func() { - // Delete Disk - client.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "DeleteVolume failed") + // // Attach Disk + // err := client.ControllerPublishVolumeReadWrite(underSpecifiedID, instance.GetNodeID(), false /* forceAttach */) + // Expect(err).To(BeNil(), "ControllerPublishVolume failed") + // }, + // Entry("on pd-standard", standardDiskType), + // Entry("on pd-extreme", extremeDiskType), + // ) - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() - }, - Entry("on pd-standard", standardDiskType), - Entry("on pd-extreme", extremeDiskType), - ) + // It("Should successfully create RePD in two zones in the drivers region when none are specified", func() { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - DescribeTable("Should create and delete pd-extreme disk with default iops", - func(diskType string) { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() + // controllerInstance := testContext.Instance + // controllerClient := testContext.Client + + // p, z, _ := controllerInstance.GetIdentity() + + // region, err := common.GetRegionFromZones([]string{z}) + // Expect(err).To(BeNil(), "Failed to get region from zones") + + // // Create Disk + // volName := testNamePrefix + string(uuid.NewUUID()) + // volume, err := controllerClient.CreateVolume(volName, map[string]string{ + // common.ParameterKeyReplicationType: "regional-pd", + // }, defaultRepdSizeGb, nil, nil) + // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // // Validate Disk Created + // cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() + // Expect(err).To(BeNil(), "Could not get disk from cloud directly") + // Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + // Expect(cloudDisk.Status).To(Equal(readyState)) + // Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) + // Expect(cloudDisk.Name).To(Equal(volName)) + // Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) + // for _, replicaZone := range cloudDisk.ReplicaZones { + // actualZone := zoneFromURL(replicaZone) + // gotRegion, err := common.GetRegionFromZones([]string{actualZone}) + // Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) + // Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") + // } + // defer func() { + // // Delete Disk + // controllerClient.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "DeleteVolume failed") + + // // Validate Disk Deleted + // _, err = computeService.RegionDisks.Get(p, region, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() + // }) - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client + // DescribeTable("Should create and delete 
disk with default zone", + // func(diskType string) { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - // Create Disk - diskParams := map[string]string{ - common.ParameterKeyType: diskType, - } - volName := testNamePrefix + string(uuid.NewUUID()) + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client - diskSize := defaultExtremeSizeGb + // // Create Disk + // disk := typeToDisk[diskType] + // volName := testNamePrefix + string(uuid.NewUUID()) - volume, err := client.CreateVolume(volName, diskParams, diskSize, nil, nil) + // diskSize := defaultSizeGb + // if diskType == extremeDiskType { + // diskSize = defaultExtremeSizeGb + // } - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + // volume, err := client.CreateVolume(volName, disk.params, diskSize, nil, nil) - // Validate Disk Created - cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(defaultExtremeSizeGb)) - Expect(cloudDisk.Type).To(ContainSubstring(extremeDiskType)) - Expect(cloudDisk.ProvisionedIops).To(Equal(provisionedIOPSOnCreateDefaultInt)) - Expect(cloudDisk.Name).To(Equal(volName)) + // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - defer func() { - // Delete Disk - client.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "DeleteVolume failed") + // // Validate Disk Created + // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + // Expect(err).To(BeNil(), "Could not get disk from cloud directly") + // Expect(cloudDisk.Status).To(Equal(readyState)) + // Expect(cloudDisk.SizeGb).To(Equal(diskSize)) + // Expect(cloudDisk.Name).To(Equal(volName)) + // disk.validate(cloudDisk) - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() - }, - Entry("on pd-extreme", extremeDiskType), - ) + // defer func() { + // // Delete Disk + // client.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "DeleteVolume failed") - DescribeTable("Should create and delete disk with labels", - func(diskType string) { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() + // }, + // Entry("on pd-standard", standardDiskType), + // Entry("on pd-extreme", extremeDiskType), + // ) + + // DescribeTable("Should create and delete pd-extreme disk with default iops", + // func(diskType string) { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() + + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client + + // // Create Disk + // diskParams := map[string]string{ + // common.ParameterKeyType: diskType, + // } + // volName := testNamePrefix + string(uuid.NewUUID()) - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client + // diskSize := defaultExtremeSizeGb - // Create Disk - disk := typeToDisk[diskType] - volName := testNamePrefix + string(uuid.NewUUID()) - params := merge(disk.params, map[string]string{ - common.ParameterKeyLabels: "key1=value1,key2=value2", - }) + // volume, err := client.CreateVolume(volName, diskParams, diskSize, nil, nil) - diskSize 
:= defaultSizeGb - if diskType == extremeDiskType { - diskSize = defaultExtremeSizeGb - } - volume, err := client.CreateVolume(volName, params, diskSize, nil, nil) - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - // Validate Disk Created - cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(diskSize)) - Expect(cloudDisk.Labels).To(Equal(map[string]string{ - "key1": "value1", - "key2": "value2", - // The label below is added as an --extra-label driver command line argument. - testutils.DiskLabelKey: testutils.DiskLabelValue, - })) - Expect(cloudDisk.Name).To(Equal(volName)) - disk.validate(cloudDisk) + // // Validate Disk Created + // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + // Expect(err).To(BeNil(), "Could not get disk from cloud directly") + // Expect(cloudDisk.Status).To(Equal(readyState)) + // Expect(cloudDisk.SizeGb).To(Equal(defaultExtremeSizeGb)) + // Expect(cloudDisk.Type).To(ContainSubstring(extremeDiskType)) + // Expect(cloudDisk.ProvisionedIops).To(Equal(provisionedIOPSOnCreateDefaultInt)) + // Expect(cloudDisk.Name).To(Equal(volName)) - defer func() { - // Delete Disk - err := client.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "DeleteVolume failed") + // defer func() { + // // Delete Disk + // client.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "DeleteVolume failed") - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() - }, - Entry("on pd-standard", standardDiskType), - Entry("on pd-extreme", extremeDiskType), - ) + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() + // }, + // Entry("on pd-extreme", extremeDiskType), + // ) - It("Should create and delete snapshot for the volume with default zone", func() { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() + // DescribeTable("Should create and delete disk with labels", + // func(diskType string) { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client - volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - - // Create Snapshot - snapshotName := testNamePrefix + string(uuid.NewUUID()) - snapshotID, err := client.CreateSnapshot(snapshotName, volID, nil) - Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) + // // Create Disk + // disk := typeToDisk[diskType] + // volName := testNamePrefix + string(uuid.NewUUID()) + // params := merge(disk.params, map[string]string{ + // common.ParameterKeyLabels: "key1=value1,key2=value2", + // }) - // Validate Snapshot Created - snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - Expect(snapshot.Name).To(Equal(snapshotName)) + // diskSize := defaultSizeGb + // if diskType == extremeDiskType { + // diskSize = defaultExtremeSizeGb + // } + // volume, err := 
client.CreateVolume(volName, params, diskSize, nil, nil) + // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // // Validate Disk Created + // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + // Expect(err).To(BeNil(), "Could not get disk from cloud directly") + // Expect(cloudDisk.Status).To(Equal(readyState)) + // Expect(cloudDisk.SizeGb).To(Equal(diskSize)) + // Expect(cloudDisk.Labels).To(Equal(map[string]string{ + // "key1": "value1", + // "key2": "value2", + // // The label below is added as an --extra-label driver command line argument. + // testutils.DiskLabelKey: testutils.DiskLabelValue, + // })) + // Expect(cloudDisk.Name).To(Equal(volName)) + // disk.validate(cloudDisk) - err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { - snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - if snapshot.Status == "READY" { - return true, nil - } - return false, nil - }) - Expect(err).To(BeNil(), "Could not wait for snapshot be ready") + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "DeleteVolume failed") - defer func() { - // Delete Disk - err := client.DeleteVolume(volID) - Expect(err).To(BeNil(), "DeleteVolume failed") + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() + // }, + // Entry("on pd-standard", standardDiskType), + // Entry("on pd-extreme", extremeDiskType), + // ) - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // It("Should create and delete snapshot for the volume with default zone", func() { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - // Delete Snapshot - err = client.DeleteSnapshot(snapshotID) - Expect(err).To(BeNil(), "DeleteSnapshot failed") + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client - // Validate Snapshot Deleted - _, err = computeService.Snapshots.Get(p, snapshotName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") - }() - }) + // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - DescribeTable("Should create CMEK key, go through volume lifecycle, validate behavior on key revoke and restore", - func(diskType string) { - ctx := context.Background() - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() + // // Create Snapshot + // snapshotName := testNamePrefix + string(uuid.NewUUID()) + // snapshotID, err := client.CreateSnapshot(snapshotName, volID, nil) + // Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) - controllerInstance := testContext.Instance - controllerClient := testContext.Client + // // Validate Snapshot Created + // snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + // Expect(snapshot.Name).To(Equal(snapshotName)) - p, z, _ := controllerInstance.GetIdentity() - locationID := "global" + // err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { + // snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + // Expect(err).To(BeNil(), "Could not get snapshot 
from cloud directly") + // if snapshot.Status == "READY" { + // return true, nil + // } + // return false, nil + // }) + // Expect(err).To(BeNil(), "Could not wait for snapshot be ready") + + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(volID) + // Expect(err).To(BeNil(), "DeleteVolume failed") + + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + + // // Delete Snapshot + // err = client.DeleteSnapshot(snapshotID) + // Expect(err).To(BeNil(), "DeleteSnapshot failed") + + // // Validate Snapshot Deleted + // _, err = computeService.Snapshots.Get(p, snapshotName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") + // }() + // }) - // The resource name of the key rings. - parentName := fmt.Sprintf("projects/%s/locations/%s", p, locationID) - keyRingId := "gce-pd-csi-test-ring" + // DescribeTable("Should create CMEK key, go through volume lifecycle, validate behavior on key revoke and restore", + // func(diskType string) { + // ctx := context.Background() + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - key, keyVersions := setupKeyRing(ctx, parentName, keyRingId) + // controllerInstance := testContext.Instance + // controllerClient := testContext.Client - // Defer deletion of all key versions - // https://cloud.google.com/kms/docs/destroy-restore - defer func() { - for _, keyVersion := range keyVersions { - destroyKeyReq := &kmspb.DestroyCryptoKeyVersionRequest{ - Name: keyVersion, - } - _, err := kmsClient.DestroyCryptoKeyVersion(ctx, destroyKeyReq) - Expect(err).To(BeNil(), "Failed to destroy crypto key version: %v", keyVersion) - } - }() + // p, z, _ := controllerInstance.GetIdentity() + // locationID := "global" - // Go through volume lifecycle using CMEK-ed PD Create Disk - disk := typeToDisk[diskType] - volName := testNamePrefix + string(uuid.NewUUID()) - params := merge(disk.params, map[string]string{ - common.ParameterKeyDiskEncryptionKmsKey: key.Name, - }) - topology := &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ - { - Segments: map[string]string{common.TopologyKeyZone: z}, - }, - }, - } + // // The resource name of the key rings. 
+ // parentName := fmt.Sprintf("projects/%s/locations/%s", p, locationID) + // keyRingId := "gce-pd-csi-test-ring" - diskSize := defaultSizeGb - if diskType == extremeDiskType { - diskSize = defaultExtremeSizeGb - } - volume, err := controllerClient.CreateVolume(volName, params, diskSize, topology, nil) - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + // key, keyVersions := setupKeyRing(ctx, parentName, keyRingId) - // Validate Disk Created - cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(diskSize)) - Expect(cloudDisk.Name).To(Equal(volName)) - disk.validate(cloudDisk) + // // Defer deletion of all key versions + // // https://cloud.google.com/kms/docs/destroy-restore + // defer func() { + // for _, keyVersion := range keyVersions { + // destroyKeyReq := &kmspb.DestroyCryptoKeyVersionRequest{ + // Name: keyVersion, + // } + // _, err := kmsClient.DestroyCryptoKeyVersion(ctx, destroyKeyReq) + // Expect(err).To(BeNil(), "Failed to destroy crypto key version: %v", keyVersion) + // } + // }() - defer func() { - // Delete Disk - err = controllerClient.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "DeleteVolume failed") + // // Go through volume lifecycle using CMEK-ed PD Create Disk + // disk := typeToDisk[diskType] + // volName := testNamePrefix + string(uuid.NewUUID()) + // params := merge(disk.params, map[string]string{ + // common.ParameterKeyDiskEncryptionKmsKey: key.Name, + // }) + // topology := &csi.TopologyRequirement{ + // Requisite: []*csi.Topology{ + // { + // Segments: map[string]string{common.TopologyKeyZone: z}, + // }, + // }, + // } - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() + // diskSize := defaultSizeGb + // if diskType == extremeDiskType { + // diskSize = defaultExtremeSizeGb + // } + // volume, err := controllerClient.CreateVolume(volName, params, diskSize, topology, nil) + // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - // Test disk works - err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - Expect(err).To(BeNil(), "Failed to go through volume lifecycle before revoking CMEK key") - - // Revoke CMEK key - // https://cloud.google.com/kms/docs/enable-disable - - for _, keyVersion := range keyVersions { - disableReq := &kmspb.UpdateCryptoKeyVersionRequest{ - CryptoKeyVersion: &kmspb.CryptoKeyVersion{ - Name: keyVersion, - State: kmspb.CryptoKeyVersion_DISABLED, - }, - UpdateMask: &fieldmask.FieldMask{ - Paths: []string{"state"}, - }, - } - _, err = kmsClient.UpdateCryptoKeyVersion(ctx, disableReq) - Expect(err).To(BeNil(), "Failed to disable crypto key") - } + // // Validate Disk Created + // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + // Expect(err).To(BeNil(), "Could not get disk from cloud directly") + // Expect(cloudDisk.Status).To(Equal(readyState)) + // Expect(cloudDisk.SizeGb).To(Equal(diskSize)) + // Expect(cloudDisk.Name).To(Equal(volName)) + // disk.validate(cloudDisk) - // Make sure attach of PD fails - err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - Expect(err).ToNot(BeNil(), "Volume lifecycle should have failed, but succeeded") - - // Restore CMEK key - 
for _, keyVersion := range keyVersions { - enableReq := &kmspb.UpdateCryptoKeyVersionRequest{ - CryptoKeyVersion: &kmspb.CryptoKeyVersion{ - Name: keyVersion, - State: kmspb.CryptoKeyVersion_ENABLED, - }, - UpdateMask: &fieldmask.FieldMask{ - Paths: []string{"state"}, - }, - } - _, err = kmsClient.UpdateCryptoKeyVersion(ctx, enableReq) - Expect(err).To(BeNil(), "Failed to enable crypto key") - } + // defer func() { + // // Delete Disk + // err = controllerClient.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "DeleteVolume failed") - // The controller publish failure in above step would set a backoff condition on the node. Wait suffcient amount of time for the driver to accept new controller publish requests. - time.Sleep(time.Second) - // Make sure attach of PD succeeds - err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - Expect(err).To(BeNil(), "Failed to go through volume lifecycle after restoring CMEK key") - }, - Entry("on pd-standard", standardDiskType), - Entry("on pd-extreme", extremeDiskType), - ) + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() - It("Should create disks, attach them places, and verify List returns correct results", func() { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() + // // Test disk works + // err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) + // Expect(err).To(BeNil(), "Failed to go through volume lifecycle before revoking CMEK key") - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client + // // Revoke CMEK key + // // https://cloud.google.com/kms/docs/enable-disable - nodeID := testContext.Instance.GetNodeID() + // for _, keyVersion := range keyVersions { + // disableReq := &kmspb.UpdateCryptoKeyVersionRequest{ + // CryptoKeyVersion: &kmspb.CryptoKeyVersion{ + // Name: keyVersion, + // State: kmspb.CryptoKeyVersion_DISABLED, + // }, + // UpdateMask: &fieldmask.FieldMask{ + // Paths: []string{"state"}, + // }, + // } + // _, err = kmsClient.UpdateCryptoKeyVersion(ctx, disableReq) + // Expect(err).To(BeNil(), "Failed to disable crypto key") + // } - _, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - defer deleteVolumeOrError(client, volID) + // // Make sure attach of PD fails + // err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) + // Expect(err).ToNot(BeNil(), "Volume lifecycle should have failed, but succeeded") - _, secondVolID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - defer deleteVolumeOrError(client, secondVolID) + // // Restore CMEK key + // for _, keyVersion := range keyVersions { + // enableReq := &kmspb.UpdateCryptoKeyVersionRequest{ + // CryptoKeyVersion: &kmspb.CryptoKeyVersion{ + // Name: keyVersion, + // State: kmspb.CryptoKeyVersion_ENABLED, + // }, + // UpdateMask: &fieldmask.FieldMask{ + // Paths: []string{"state"}, + // }, + // } + // _, err = kmsClient.UpdateCryptoKeyVersion(ctx, enableReq) + // Expect(err).To(BeNil(), "Failed to enable crypto key") + // } - // Attach volID to current instance - err := client.ControllerPublishVolumeReadWrite(volID, nodeID, false /* forceAttach */) - Expect(err).To(BeNil(), "Failed ControllerPublishVolume") - defer 
client.ControllerUnpublishVolume(volID, nodeID) + // // The controller publish failure in above step would set a backoff condition on the node. Wait suffcient amount of time for the driver to accept new controller publish requests. + // time.Sleep(time.Second) + // // Make sure attach of PD succeeds + // err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) + // Expect(err).To(BeNil(), "Failed to go through volume lifecycle after restoring CMEK key") + // }, + // Entry("on pd-standard", standardDiskType), + // Entry("on pd-extreme", extremeDiskType), + // ) + + // It("Should create disks, attach them places, and verify List returns correct results", func() { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - // List Volumes - volsToNodes, err := client.ListVolumes() - Expect(err).To(BeNil(), "Failed ListVolumes") + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client - // Verify - Expect(volsToNodes[volID]).ToNot(BeNil(), "Couldn't find attached nodes for vol") - Expect(volsToNodes[volID]).To(ContainElement(nodeID), "Couldn't find node in attached nodes for vol") - Expect(volsToNodes[secondVolID]).To(BeNil(), "Second vol ID attached nodes not nil") - }) + // nodeID := testContext.Instance.GetNodeID() - It("Should create and delete snapshot for RePD in two zones ", func() { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() - - controllerInstance := testContext.Instance - controllerClient := testContext.Client - - p, z, _ := controllerInstance.GetIdentity() - - region, err := common.GetRegionFromZones([]string{z}) - Expect(err).To(BeNil(), "Failed to get region from zones") - - // Create Disk - volName := testNamePrefix + string(uuid.NewUUID()) - volume, err := controllerClient.CreateVolume(volName, map[string]string{ - common.ParameterKeyReplicationType: "regional-pd", - }, defaultRepdSizeGb, nil, nil) - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // Validate Disk Created - cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) - Expect(cloudDisk.Name).To(Equal(volName)) - Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) - for _, replicaZone := range cloudDisk.ReplicaZones { - actualZone := zoneFromURL(replicaZone) - gotRegion, err := common.GetRegionFromZones([]string{actualZone}) - Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) - Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") - } + // _, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // defer deleteVolumeOrError(client, volID) - // Create Snapshot - snapshotName := testNamePrefix + string(uuid.NewUUID()) - snapshotID, err := controllerClient.CreateSnapshot(snapshotName, volume.VolumeId, nil) - Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) + // _, secondVolID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // defer deleteVolumeOrError(client, secondVolID) - // Validate Snapshot Created - snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - 
Expect(snapshot.Name).To(Equal(snapshotName)) + // // Attach volID to current instance + // err := client.ControllerPublishVolumeReadWrite(volID, nodeID, false /* forceAttach */) + // Expect(err).To(BeNil(), "Failed ControllerPublishVolume") + // defer client.ControllerUnpublishVolume(volID, nodeID) - err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { - snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - if snapshot.Status == "READY" { - return true, nil - } - return false, nil - }) - Expect(err).To(BeNil(), "Could not wait for snapshot be ready") + // // List Volumes + // volsToNodes, err := client.ListVolumes() + // Expect(err).To(BeNil(), "Failed ListVolumes") - defer func() { - // Delete Disk - err := controllerClient.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "DeleteVolume failed") + // // Verify + // Expect(volsToNodes[volID]).ToNot(BeNil(), "Couldn't find attached nodes for vol") + // Expect(volsToNodes[volID]).To(ContainElement(nodeID), "Couldn't find node in attached nodes for vol") + // Expect(volsToNodes[secondVolID]).To(BeNil(), "Second vol ID attached nodes not nil") + // }) - // Validate Disk Deleted - _, err = computeService.RegionDisks.Get(p, region, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // It("Should create and delete snapshot for RePD in two zones ", func() { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - // Delete Snapshot - err = controllerClient.DeleteSnapshot(snapshotID) - Expect(err).To(BeNil(), "DeleteSnapshot failed") + // controllerInstance := testContext.Instance + // controllerClient := testContext.Client + + // p, z, _ := controllerInstance.GetIdentity() + + // region, err := common.GetRegionFromZones([]string{z}) + // Expect(err).To(BeNil(), "Failed to get region from zones") + + // // Create Disk + // volName := testNamePrefix + string(uuid.NewUUID()) + // volume, err := controllerClient.CreateVolume(volName, map[string]string{ + // common.ParameterKeyReplicationType: "regional-pd", + // }, defaultRepdSizeGb, nil, nil) + // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // // Validate Disk Created + // cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() + // Expect(err).To(BeNil(), "Could not get disk from cloud directly") + // Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + // Expect(cloudDisk.Status).To(Equal(readyState)) + // Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) + // Expect(cloudDisk.Name).To(Equal(volName)) + // Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) + // for _, replicaZone := range cloudDisk.ReplicaZones { + // actualZone := zoneFromURL(replicaZone) + // gotRegion, err := common.GetRegionFromZones([]string{actualZone}) + // Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) + // Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") + // } - // Validate Snapshot Deleted - _, err = computeService.Snapshots.Get(p, snapshotName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") - }() - }) + // // Create Snapshot + // snapshotName := testNamePrefix + string(uuid.NewUUID()) + // snapshotID, err := controllerClient.CreateSnapshot(snapshotName, volume.VolumeId, nil) + // Expect(err).To(BeNil(), "CreateSnapshot failed with 
error: %v", err) + + // // Validate Snapshot Created + // snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + // Expect(snapshot.Name).To(Equal(snapshotName)) + + // err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { + // snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + // if snapshot.Status == "READY" { + // return true, nil + // } + // return false, nil + // }) + // Expect(err).To(BeNil(), "Could not wait for snapshot be ready") + + // defer func() { + // // Delete Disk + // err := controllerClient.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "DeleteVolume failed") + + // // Validate Disk Deleted + // _, err = computeService.RegionDisks.Get(p, region, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + + // // Delete Snapshot + // err = controllerClient.DeleteSnapshot(snapshotID) + // Expect(err).To(BeNil(), "DeleteSnapshot failed") + + // // Validate Snapshot Deleted + // _, err = computeService.Snapshots.Get(p, snapshotName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") + // }() + // }) - It("Should get correct VolumeStats for Block", func() { - testContext := getRandomTestContext() + // It("Should get correct VolumeStats for Block", func() { + // testContext := getRandomTestContext() - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client - instance := testContext.Instance + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client + // instance := testContext.Instance - volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - defer func() { - // Delete Disk - err := client.DeleteVolume(volID) - Expect(err).To(BeNil(), "DeleteVolume failed") + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(volID) + // Expect(err).To(BeNil(), "DeleteVolume failed") - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() - verifyVolumeStats := func(a *verifyArgs) error { - available, capacity, used, inodesFree, inodes, inodesUsed, err := client.NodeGetVolumeStats(volID, a.publishDir) - if err != nil { - return fmt.Errorf("failed to get node volume stats: %v", err.Error()) - } - if available != 0 || capacity != common.GbToBytes(defaultSizeGb) || used != 0 || - inodesFree != 0 || inodes != 0 || inodesUsed != 0 { - return fmt.Errorf("got: available %v, capacity %v, used %v, inodesFree %v, inodes %v, inodesUsed %v -- expected: capacity = %v, available = 0, used = 0, inodesFree = 0, inodes = 0 , inodesUsed = 0", - available, capacity, used, inodesFree, inodes, inodesUsed, common.GbToBytes(defaultSizeGb)) - } - return nil - } + // verifyVolumeStats := func(a *verifyArgs) error { + // available, capacity, used, inodesFree, inodes, inodesUsed, err := client.NodeGetVolumeStats(volID, a.publishDir) + // if err != nil { + // return fmt.Errorf("failed to get node volume stats: %v", err.Error()) + // } + 
// if available != 0 || capacity != common.GbToBytes(defaultSizeGb) || used != 0 || + // inodesFree != 0 || inodes != 0 || inodesUsed != 0 { + // return fmt.Errorf("got: available %v, capacity %v, used %v, inodesFree %v, inodes %v, inodesUsed %v -- expected: capacity = %v, available = 0, used = 0, inodesFree = 0, inodes = 0 , inodesUsed = 0", + // available, capacity, used, inodesFree, inodes, inodesUsed, common.GbToBytes(defaultSizeGb)) + // } + // return nil + // } - // Attach Disk - err := testLifecycleWithVerify(volID, volName, instance, client, false /* readOnly */, true /* block */, verifyVolumeStats, nil) - Expect(err).To(BeNil(), "Failed to go through volume lifecycle") - }) + // // Attach Disk + // err := testLifecycleWithVerify(volID, volName, instance, client, false /* readOnly */, true /* block */, verifyVolumeStats, nil) + // Expect(err).To(BeNil(), "Failed to go through volume lifecycle") + // }) - It("Should get correct VolumeStats", func() { - testContext := getRandomTestContext() + // It("Should get correct VolumeStats", func() { + // testContext := getRandomTestContext() - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client - instance := testContext.Instance + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client + // instance := testContext.Instance - volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - defer func() { - // Delete Disk - err := client.DeleteVolume(volID) - Expect(err).To(BeNil(), "DeleteVolume failed") + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(volID) + // Expect(err).To(BeNil(), "DeleteVolume failed") - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() - verifyVolumeStats := func(a *verifyArgs) error { - available, capacity, used, inodesFree, inodes, inodesUsed, err := client.NodeGetVolumeStats(volID, a.publishDir) - if err != nil { - return fmt.Errorf("failed to get node volume stats: %v", err.Error()) - } - if !equalWithinEpsilon(available, common.GbToBytes(defaultSizeGb), defaultEpsilon) || !equalWithinEpsilon(capacity, common.GbToBytes(defaultSizeGb), defaultEpsilon) || !equalWithinEpsilon(used, 0, defaultEpsilon) || - inodesFree == 0 || inodes == 0 || inodesUsed == 0 { - return fmt.Errorf("got: available %v, capacity %v, used %v, inodesFree %v, inodes %v, inodesUsed %v -- expected: available ~= %v, capacity ~= %v, used = 0, inodesFree != 0, inodes != 0 , inodesUsed != 0", - available, capacity, used, inodesFree, inodes, inodesUsed, common.GbToBytes(defaultSizeGb), common.GbToBytes(defaultSizeGb)) - } - return nil - } + // verifyVolumeStats := func(a *verifyArgs) error { + // available, capacity, used, inodesFree, inodes, inodesUsed, err := client.NodeGetVolumeStats(volID, a.publishDir) + // if err != nil { + // return fmt.Errorf("failed to get node volume stats: %v", err.Error()) + // } + // if !equalWithinEpsilon(available, common.GbToBytes(defaultSizeGb), defaultEpsilon) || !equalWithinEpsilon(capacity, common.GbToBytes(defaultSizeGb), defaultEpsilon) || !equalWithinEpsilon(used, 0, defaultEpsilon) || + // inodesFree == 0 || inodes == 0 || 
inodesUsed == 0 { + // return fmt.Errorf("got: available %v, capacity %v, used %v, inodesFree %v, inodes %v, inodesUsed %v -- expected: available ~= %v, capacity ~= %v, used = 0, inodesFree != 0, inodes != 0 , inodesUsed != 0", + // available, capacity, used, inodesFree, inodes, inodesUsed, common.GbToBytes(defaultSizeGb), common.GbToBytes(defaultSizeGb)) + // } + // return nil + // } - // Attach Disk - err := testLifecycleWithVerify(volID, volName, instance, client, false /* readOnly */, false /* fs */, verifyVolumeStats, nil) - Expect(err).To(BeNil(), "Failed to go through volume lifecycle") - }) + // // Attach Disk + // err := testLifecycleWithVerify(volID, volName, instance, client, false /* readOnly */, false /* fs */, verifyVolumeStats, nil) + // Expect(err).To(BeNil(), "Failed to go through volume lifecycle") + // }) It("Should create and delete multi-writer disk", func() { Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() + testContext := getRandomMwTestContext() p, _, _ := testContext.Instance.GetIdentity() client := testContext.Client @@ -928,7 +931,7 @@ var _ = Describe("GCE PD CSI Driver", func() { }) It("Should complete entire disk lifecycle with multi-writer disk", func() { - testContext := getRandomTestContext() + testContext := getRandomMwTestContext() p, z, _ := testContext.Instance.GetIdentity() client := testContext.Client @@ -970,545 +973,545 @@ var _ = Describe("GCE PD CSI Driver", func() { Expect(err).To(BeNil(), "Failed to go through volume lifecycle") }) - DescribeTable("Should successfully create disk with PVC/PV tags", - func(diskType string) { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() - - controllerInstance := testContext.Instance - controllerClient := testContext.Client - - diskSize := defaultSizeGb - if diskType == extremeDiskType { - diskSize = defaultExtremeSizeGb - } - - p, z, _ := controllerInstance.GetIdentity() - - // Create Disk - disk := typeToDisk[diskType] - volName := testNamePrefix + string(uuid.NewUUID()) - params := merge(disk.params, map[string]string{ - common.ParameterKeyPVCName: "test-pvc", - common.ParameterKeyPVCNamespace: "test-pvc-namespace", - common.ParameterKeyPVName: "test-pv-name", - }) - volume, err := controllerClient.CreateVolume(volName, params, diskSize, nil /* topReq */, nil) - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // Validate Disk Created - cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(diskSize)) - Expect(cloudDisk.Name).To(Equal(volName)) - Expect(cloudDisk.Description).To(Equal("{\"kubernetes.io/created-for/pv/name\":\"test-pv-name\",\"kubernetes.io/created-for/pvc/name\":\"test-pvc\",\"kubernetes.io/created-for/pvc/namespace\":\"test-pvc-namespace\",\"storage.gke.io/created-by\":\"pd.csi.storage.gke.io\"}")) - disk.validate(cloudDisk) - - defer func() { - // Delete Disk - controllerClient.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "DeleteVolume failed") - - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() - }, - Entry("on pd-standard", standardDiskType), - Entry("on pd-extreme", extremeDiskType), - ) - - // Use the region of the test location. 
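	// Editorial sketch, not part of this patch: the multi-writer tests above now call
	// getRandomMwTestContext() instead of getRandomTestContext(), but the helper's
	// definition is outside this hunk. Assuming the e2e setup keeps a separate pool of
	// multi-writer-capable contexts (the mwTestContexts name and the remote.TestContext
	// type are assumptions that mirror the existing testContexts / getRandomTestContext
	// pattern, and "math/rand" is assumed to be imported by the test package), the helper
	// presumably looks roughly like the following; this is a sketch under those
	// assumptions, not the patch author's actual implementation.

	// mwTestContexts is a hypothetical pool, populated during test setup, of contexts
	// whose VMs support multi-writer (hyperdisk) volumes.
	var mwTestContexts []*remote.TestContext

	// getRandomMwTestContext mirrors getRandomTestContext but draws from the
	// multi-writer pool; setup is expected to have populated mwTestContexts first.
	func getRandomMwTestContext() *remote.TestContext {
		return mwTestContexts[rand.Intn(len(mwTestContexts))]
	}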
- It("Should successfully create snapshot with storage locations", func() { - testContext := getRandomTestContext() + // DescribeTable("Should successfully create disk with PVC/PV tags", + // func(diskType string) { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client + // controllerInstance := testContext.Instance + // controllerClient := testContext.Client - // Create Disk - volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - - // Create Snapshot - snapshotName := testNamePrefix + string(uuid.NewUUID()) + // diskSize := defaultSizeGb + // if diskType == extremeDiskType { + // diskSize = defaultExtremeSizeGb + // } - // Convert GCP zone to region, e.g. us-central1-a => us-central1 - // This is safe because we hardcode the zones. - snapshotLocation := z[:len(z)-2] + // p, z, _ := controllerInstance.GetIdentity() - snapshotParams := map[string]string{ - common.ParameterKeyStorageLocations: snapshotLocation, - common.ParameterKeyVolumeSnapshotName: "test-volumesnapshot-name", - common.ParameterKeyVolumeSnapshotNamespace: "test-volumesnapshot-namespace", - common.ParameterKeyVolumeSnapshotContentName: "test-volumesnapshotcontent-name", - } - snapshotID, err := client.CreateSnapshot(snapshotName, volID, snapshotParams) - Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) - - // Validate Snapshot Created - snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - Expect(snapshot.Name).To(Equal(snapshotName)) - Expect(snapshot.Description).To(Equal("{\"kubernetes.io/created-for/volumesnapshot/name\":\"test-volumesnapshot-name\",\"kubernetes.io/created-for/volumesnapshot/namespace\":\"test-volumesnapshot-namespace\",\"kubernetes.io/created-for/volumesnapshotcontent/name\":\"test-volumesnapshotcontent-name\",\"storage.gke.io/created-by\":\"pd.csi.storage.gke.io\"}")) - - err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { - snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - if snapshot.Status == "READY" { - return true, nil - } - return false, nil - }) - Expect(err).To(BeNil(), "Could not wait for snapshot be ready") + // // Create Disk + // disk := typeToDisk[diskType] + // volName := testNamePrefix + string(uuid.NewUUID()) + // params := merge(disk.params, map[string]string{ + // common.ParameterKeyPVCName: "test-pvc", + // common.ParameterKeyPVCNamespace: "test-pvc-namespace", + // common.ParameterKeyPVName: "test-pv-name", + // }) + // volume, err := controllerClient.CreateVolume(volName, params, diskSize, nil /* topReq */, nil) + // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // // Validate Disk Created + // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + // Expect(err).To(BeNil(), "Could not get disk from cloud directly") + // Expect(cloudDisk.Status).To(Equal(readyState)) + // Expect(cloudDisk.SizeGb).To(Equal(diskSize)) + // Expect(cloudDisk.Name).To(Equal(volName)) + // Expect(cloudDisk.Description).To(Equal("{\"kubernetes.io/created-for/pv/name\":\"test-pv-name\",\"kubernetes.io/created-for/pvc/name\":\"test-pvc\",\"kubernetes.io/created-for/pvc/namespace\":\"test-pvc-namespace\",\"storage.gke.io/created-by\":\"pd.csi.storage.gke.io\"}")) + // disk.validate(cloudDisk) - defer func() { - // 
Delete Disk - err := client.DeleteVolume(volID) - Expect(err).To(BeNil(), "DeleteVolume failed") + // defer func() { + // // Delete Disk + // controllerClient.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "DeleteVolume failed") - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() + // }, + // Entry("on pd-standard", standardDiskType), + // Entry("on pd-extreme", extremeDiskType), + // ) - // Delete Snapshot - err = client.DeleteSnapshot(snapshotID) - Expect(err).To(BeNil(), "DeleteSnapshot failed") + // // Use the region of the test location. + // It("Should successfully create snapshot with storage locations", func() { + // testContext := getRandomTestContext() - // Validate Snapshot Deleted - _, err = computeService.Snapshots.Get(p, snapshotName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") - }() - }) + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client - // Use the region of the test location. - It("Should successfully create snapshot backed by disk image", func() { - testContext := getRandomTestContext() + // // Create Disk + // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client + // // Create Snapshot + // snapshotName := testNamePrefix + string(uuid.NewUUID()) - // Create Disk - volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // // Convert GCP zone to region, e.g. us-central1-a => us-central1 + // // This is safe because we hardcode the zones. 
+ // snapshotLocation := z[:len(z)-2] - // Create Snapshot - snapshotName := testNamePrefix + string(uuid.NewUUID()) - testImageFamily := "test-family" + // snapshotParams := map[string]string{ + // common.ParameterKeyStorageLocations: snapshotLocation, + // common.ParameterKeyVolumeSnapshotName: "test-volumesnapshot-name", + // common.ParameterKeyVolumeSnapshotNamespace: "test-volumesnapshot-namespace", + // common.ParameterKeyVolumeSnapshotContentName: "test-volumesnapshotcontent-name", + // } + // snapshotID, err := client.CreateSnapshot(snapshotName, volID, snapshotParams) + // Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) + + // // Validate Snapshot Created + // snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + // Expect(snapshot.Name).To(Equal(snapshotName)) + // Expect(snapshot.Description).To(Equal("{\"kubernetes.io/created-for/volumesnapshot/name\":\"test-volumesnapshot-name\",\"kubernetes.io/created-for/volumesnapshot/namespace\":\"test-volumesnapshot-namespace\",\"kubernetes.io/created-for/volumesnapshotcontent/name\":\"test-volumesnapshotcontent-name\",\"storage.gke.io/created-by\":\"pd.csi.storage.gke.io\"}")) + + // err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { + // snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + // if snapshot.Status == "READY" { + // return true, nil + // } + // return false, nil + // }) + // Expect(err).To(BeNil(), "Could not wait for snapshot be ready") + + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(volID) + // Expect(err).To(BeNil(), "DeleteVolume failed") + + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + + // // Delete Snapshot + // err = client.DeleteSnapshot(snapshotID) + // Expect(err).To(BeNil(), "DeleteSnapshot failed") + + // // Validate Snapshot Deleted + // _, err = computeService.Snapshots.Get(p, snapshotName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") + // }() + // }) - snapshotParams := map[string]string{common.ParameterKeySnapshotType: common.DiskImageType, common.ParameterKeyImageFamily: testImageFamily} - snapshotID, err := client.CreateSnapshot(snapshotName, volID, snapshotParams) - Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) + // // Use the region of the test location. 
+ // It("Should successfully create snapshot backed by disk image", func() { + // testContext := getRandomTestContext() - // Validate Snapshot Created - snapshot, err := computeService.Images.Get(p, snapshotName).Do() - Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - Expect(snapshot.Name).To(Equal(snapshotName)) + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client - err = wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { - snapshot, err := computeService.Images.Get(p, snapshotName).Do() - Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - if snapshot.Status == "READY" { - return true, nil - } - return false, nil - }) - Expect(err).To(BeNil(), "Could not wait for snapshot be ready") + // // Create Disk + // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // Check Snapshot Type - snapshot, err = computeService.Images.Get(p, snapshotName).Do() - Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - _, snapshotType, _, err := common.SnapshotIDToProjectKey(cleanSelfLink(snapshot.SelfLink)) - Expect(err).To(BeNil(), "Failed to parse snapshot ID") - Expect(snapshotType).To(Equal(common.DiskImageType), "Expected images type in snapshot ID") + // // Create Snapshot + // snapshotName := testNamePrefix + string(uuid.NewUUID()) + // testImageFamily := "test-family" - defer func() { - // Delete Disk - err := client.DeleteVolume(volID) - Expect(err).To(BeNil(), "DeleteVolume failed") + // snapshotParams := map[string]string{common.ParameterKeySnapshotType: common.DiskImageType, common.ParameterKeyImageFamily: testImageFamily} + // snapshotID, err := client.CreateSnapshot(snapshotName, volID, snapshotParams) + // Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // // Validate Snapshot Created + // snapshot, err := computeService.Images.Get(p, snapshotName).Do() + // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + // Expect(snapshot.Name).To(Equal(snapshotName)) - // Delete Snapshot - err = client.DeleteSnapshot(snapshotID) - Expect(err).To(BeNil(), "DeleteSnapshot failed") + // err = wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { + // snapshot, err := computeService.Images.Get(p, snapshotName).Do() + // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + // if snapshot.Status == "READY" { + // return true, nil + // } + // return false, nil + // }) + // Expect(err).To(BeNil(), "Could not wait for snapshot be ready") + + // // Check Snapshot Type + // snapshot, err = computeService.Images.Get(p, snapshotName).Do() + // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + // _, snapshotType, _, err := common.SnapshotIDToProjectKey(cleanSelfLink(snapshot.SelfLink)) + // Expect(err).To(BeNil(), "Failed to parse snapshot ID") + // Expect(snapshotType).To(Equal(common.DiskImageType), "Expected images type in snapshot ID") + + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(volID) + // Expect(err).To(BeNil(), "DeleteVolume failed") + + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + + // // Delete Snapshot + // err = client.DeleteSnapshot(snapshotID) 
+ // Expect(err).To(BeNil(), "DeleteSnapshot failed") + + // // Validate Snapshot Deleted + // _, err = computeService.Images.Get(p, snapshotName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") + // }() + // }) - // Validate Snapshot Deleted - _, err = computeService.Images.Get(p, snapshotName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") - }() - }) + // It("Should successfully create zonal PD from a zonal PD VolumeContentSource", func() { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - It("Should successfully create zonal PD from a zonal PD VolumeContentSource", func() { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() - - controllerInstance := testContext.Instance - controllerClient := testContext.Client - - p, z, _ := controllerInstance.GetIdentity() - - // Create Source Disk - _, srcVolID := createAndValidateUniqueZonalDisk(controllerClient, p, z, standardDiskType) - - // Create Disk - volName := testNamePrefix + string(uuid.NewUUID()) - volume, err := controllerClient.CreateVolume(volName, map[string]string{ - common.ParameterKeyReplicationType: "none", - }, defaultSizeGb, - &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ - { - Segments: map[string]string{common.TopologyKeyZone: z}, - }, - }, - }, - &csi.VolumeContentSource{ - Type: &csi.VolumeContentSource_Volume{ - Volume: &csi.VolumeContentSource_VolumeSource{ - VolumeId: srcVolID, - }, - }, - }) - - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // Validate Disk Created - cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb)) - Expect(cloudDisk.Name).To(Equal(volName)) - // Validate the the clone disk zone matches the source disk zone. 
- _, srcKey, err := common.VolumeIDToKey(srcVolID) - Expect(err).To(BeNil(), "Could not get source volume key from id") - Expect(zoneFromURL(cloudDisk.Zone)).To(Equal(srcKey.Zone)) - defer func() { - // Delete Disk - controllerClient.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "DeleteVolume failed") + // controllerInstance := testContext.Instance + // controllerClient := testContext.Client - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() - }) + // p, z, _ := controllerInstance.GetIdentity() - It("Should successfully create RePD from a zonal PD VolumeContentSource", func() { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() - - controllerInstance := testContext.Instance - controllerClient := testContext.Client - - p, z, _ := controllerInstance.GetIdentity() - - region, err := common.GetRegionFromZones([]string{z}) - Expect(err).To(BeNil(), "Failed to get region from zones") - - // Create Source Disk - srcVolName := testNamePrefix + string(uuid.NewUUID()) - srcVolume, err := controllerClient.CreateVolume(srcVolName, map[string]string{ - common.ParameterKeyReplicationType: "none", - }, defaultRepdSizeGb, nil, nil) - // Create Disk - volName := testNamePrefix + string(uuid.NewUUID()) - volume, err := controllerClient.CreateVolume(volName, map[string]string{ - common.ParameterKeyReplicationType: "regional-pd", - }, defaultRepdSizeGb, nil, - &csi.VolumeContentSource{ - Type: &csi.VolumeContentSource_Volume{ - Volume: &csi.VolumeContentSource_VolumeSource{ - VolumeId: srcVolume.VolumeId, - }, - }, - }) - - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // Validate Disk Created - cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) - Expect(cloudDisk.Name).To(Equal(volName)) - Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) - replicaZonesCompatible := false - _, srcKey, err := common.VolumeIDToKey(srcVolume.VolumeId) - Expect(err).To(BeNil(), "Could not get source volume key from id") - for _, replicaZone := range cloudDisk.ReplicaZones { - actualZone := zoneFromURL(replicaZone) - if actualZone == srcKey.Zone { - replicaZonesCompatible = true - } - gotRegion, err := common.GetRegionFromZones([]string{actualZone}) - Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) - Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") - } - // Validate that one of the replicaZones of the clone matches the zone of the source disk. 
- Expect(replicaZonesCompatible).To(Equal(true)) - defer func() { - // Delete Disk - controllerClient.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "DeleteVolume failed") + // // Create Source Disk + // _, srcVolID := createAndValidateUniqueZonalDisk(controllerClient, p, z, standardDiskType) - // Validate Disk Deleted - _, err = computeService.RegionDisks.Get(p, region, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() - }) + // // Create Disk + // volName := testNamePrefix + string(uuid.NewUUID()) + // volume, err := controllerClient.CreateVolume(volName, map[string]string{ + // common.ParameterKeyReplicationType: "none", + // }, defaultSizeGb, + // &csi.TopologyRequirement{ + // Requisite: []*csi.Topology{ + // { + // Segments: map[string]string{common.TopologyKeyZone: z}, + // }, + // }, + // }, + // &csi.VolumeContentSource{ + // Type: &csi.VolumeContentSource_Volume{ + // Volume: &csi.VolumeContentSource_VolumeSource{ + // VolumeId: srcVolID, + // }, + // }, + // }) + + // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // // Validate Disk Created + // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + // Expect(err).To(BeNil(), "Could not get disk from cloud directly") + // Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + // Expect(cloudDisk.Status).To(Equal(readyState)) + // Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb)) + // Expect(cloudDisk.Name).To(Equal(volName)) + // // Validate the the clone disk zone matches the source disk zone. + // _, srcKey, err := common.VolumeIDToKey(srcVolID) + // Expect(err).To(BeNil(), "Could not get source volume key from id") + // Expect(zoneFromURL(cloudDisk.Zone)).To(Equal(srcKey.Zone)) + // defer func() { + // // Delete Disk + // controllerClient.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "DeleteVolume failed") + + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() + // }) - It("Should successfully create RePD from a RePD VolumeContentSource", func() { - Expect(testContexts).ToNot(BeEmpty()) - testContext := getRandomTestContext() - - controllerInstance := testContext.Instance - controllerClient := testContext.Client - - p, z, _ := controllerInstance.GetIdentity() - - region, err := common.GetRegionFromZones([]string{z}) - Expect(err).To(BeNil(), "Failed to get region from zones") - - // Create Source Disk - srcVolName := testNamePrefix + string(uuid.NewUUID()) - srcVolume, err := controllerClient.CreateVolume(srcVolName, map[string]string{ - common.ParameterKeyReplicationType: "regional-pd", - }, defaultRepdSizeGb, nil, nil) - // Create Disk - volName := testNamePrefix + string(uuid.NewUUID()) - volume, err := controllerClient.CreateVolume(volName, map[string]string{ - common.ParameterKeyReplicationType: "regional-pd", - }, defaultRepdSizeGb, nil, - &csi.VolumeContentSource{ - Type: &csi.VolumeContentSource_Volume{ - Volume: &csi.VolumeContentSource_VolumeSource{ - VolumeId: srcVolume.VolumeId, - }, - }, - }) - - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // Validate Disk Created - cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - Expect(cloudDisk.Status).To(Equal(readyState)) - 
Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) - Expect(cloudDisk.Name).To(Equal(volName)) - Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) - // Validate that the replicaZones of the clone match the replicaZones of the source disk. - srcCloudDisk, err := computeService.RegionDisks.Get(p, region, srcVolName).Do() - Expect(err).To(BeNil(), "Could not get source disk from cloud directly") - Expect(srcCloudDisk.ReplicaZones).To(Equal(cloudDisk.ReplicaZones)) - for _, replicaZone := range cloudDisk.ReplicaZones { - actualZone := zoneFromURL(replicaZone) - gotRegion, err := common.GetRegionFromZones([]string{actualZone}) - Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) - Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") - } - defer func() { - // Delete Disk - controllerClient.DeleteVolume(volume.VolumeId) - Expect(err).To(BeNil(), "DeleteVolume failed") + // It("Should successfully create RePD from a zonal PD VolumeContentSource", func() { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - // Validate Disk Deleted - _, err = computeService.RegionDisks.Get(p, region, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() - }) + // controllerInstance := testContext.Instance + // controllerClient := testContext.Client + + // p, z, _ := controllerInstance.GetIdentity() + + // region, err := common.GetRegionFromZones([]string{z}) + // Expect(err).To(BeNil(), "Failed to get region from zones") + + // // Create Source Disk + // srcVolName := testNamePrefix + string(uuid.NewUUID()) + // srcVolume, err := controllerClient.CreateVolume(srcVolName, map[string]string{ + // common.ParameterKeyReplicationType: "none", + // }, defaultRepdSizeGb, nil, nil) + // // Create Disk + // volName := testNamePrefix + string(uuid.NewUUID()) + // volume, err := controllerClient.CreateVolume(volName, map[string]string{ + // common.ParameterKeyReplicationType: "regional-pd", + // }, defaultRepdSizeGb, nil, + // &csi.VolumeContentSource{ + // Type: &csi.VolumeContentSource_Volume{ + // Volume: &csi.VolumeContentSource_VolumeSource{ + // VolumeId: srcVolume.VolumeId, + // }, + // }, + // }) + + // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // // Validate Disk Created + // cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() + // Expect(err).To(BeNil(), "Could not get disk from cloud directly") + // Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + // Expect(cloudDisk.Status).To(Equal(readyState)) + // Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) + // Expect(cloudDisk.Name).To(Equal(volName)) + // Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) + // replicaZonesCompatible := false + // _, srcKey, err := common.VolumeIDToKey(srcVolume.VolumeId) + // Expect(err).To(BeNil(), "Could not get source volume key from id") + // for _, replicaZone := range cloudDisk.ReplicaZones { + // actualZone := zoneFromURL(replicaZone) + // if actualZone == srcKey.Zone { + // replicaZonesCompatible = true + // } + // gotRegion, err := common.GetRegionFromZones([]string{actualZone}) + // Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) + // Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") + // } + // // Validate that one of the replicaZones of the clone matches the zone of the source disk. 
+ // Expect(replicaZonesCompatible).To(Equal(true)) + // defer func() { + // // Delete Disk + // controllerClient.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "DeleteVolume failed") + + // // Validate Disk Deleted + // _, err = computeService.RegionDisks.Get(p, region, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() + // }) - It("Should pass if valid compute endpoint is passed in", func() { - // gets instance set up w/o compute-endpoint set from test setup - _, err := getRandomTestContext().Client.ListVolumes() - Expect(err).To(BeNil(), "no error expected when passed valid compute url") + // It("Should successfully create RePD from a RePD VolumeContentSource", func() { + // Expect(testContexts).ToNot(BeEmpty()) + // testContext := getRandomTestContext() - i := getRandomTestContext().Instance + // controllerInstance := testContext.Instance + // controllerClient := testContext.Client + + // p, z, _ := controllerInstance.GetIdentity() + + // region, err := common.GetRegionFromZones([]string{z}) + // Expect(err).To(BeNil(), "Failed to get region from zones") + + // // Create Source Disk + // srcVolName := testNamePrefix + string(uuid.NewUUID()) + // srcVolume, err := controllerClient.CreateVolume(srcVolName, map[string]string{ + // common.ParameterKeyReplicationType: "regional-pd", + // }, defaultRepdSizeGb, nil, nil) + // // Create Disk + // volName := testNamePrefix + string(uuid.NewUUID()) + // volume, err := controllerClient.CreateVolume(volName, map[string]string{ + // common.ParameterKeyReplicationType: "regional-pd", + // }, defaultRepdSizeGb, nil, + // &csi.VolumeContentSource{ + // Type: &csi.VolumeContentSource_Volume{ + // Volume: &csi.VolumeContentSource_VolumeSource{ + // VolumeId: srcVolume.VolumeId, + // }, + // }, + // }) + + // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // // Validate Disk Created + // cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() + // Expect(err).To(BeNil(), "Could not get disk from cloud directly") + // Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + // Expect(cloudDisk.Status).To(Equal(readyState)) + // Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) + // Expect(cloudDisk.Name).To(Equal(volName)) + // Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) + // // Validate that the replicaZones of the clone match the replicaZones of the source disk. 
+ // srcCloudDisk, err := computeService.RegionDisks.Get(p, region, srcVolName).Do() + // Expect(err).To(BeNil(), "Could not get source disk from cloud directly") + // Expect(srcCloudDisk.ReplicaZones).To(Equal(cloudDisk.ReplicaZones)) + // for _, replicaZone := range cloudDisk.ReplicaZones { + // actualZone := zoneFromURL(replicaZone) + // gotRegion, err := common.GetRegionFromZones([]string{actualZone}) + // Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) + // Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") + // } + // defer func() { + // // Delete Disk + // controllerClient.DeleteVolume(volume.VolumeId) + // Expect(err).To(BeNil(), "DeleteVolume failed") + + // // Validate Disk Deleted + // _, err = computeService.RegionDisks.Get(p, region, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() + // }) - // Create new driver and client with valid, empty endpoint - klog.Infof("Setup driver with empty compute endpoint %s\n", i.GetName()) - tcEmpty, err := testutils.GCEClientAndDriverSetup(i, getDriverConfig()) - if err != nil { - klog.Fatalf("Failed to set up Test Context for instance %v: %v", i.GetName(), err) - } - _, err = tcEmpty.Client.ListVolumes() + // It("Should pass if valid compute endpoint is passed in", func() { + // // gets instance set up w/o compute-endpoint set from test setup + // _, err := getRandomTestContext().Client.ListVolumes() + // Expect(err).To(BeNil(), "no error expected when passed valid compute url") - Expect(err).To(BeNil(), "no error expected when passed empty compute url") + // i := getRandomTestContext().Instance - // Create new driver and client w/ valid, passed-in endpoint - driverConfig := getDriverConfig() - driverConfig.ComputeEndpoint = "https://compute.googleapis.com" - tcValid, err := testutils.GCEClientAndDriverSetup(i, driverConfig) - if err != nil { - klog.Fatalf("Failed to set up Test Context for instance %v: %v", i.GetName(), err) - } - _, err = tcValid.Client.ListVolumes() + // // Create new driver and client with valid, empty endpoint + // klog.Infof("Setup driver with empty compute endpoint %s\n", i.GetName()) + // tcEmpty, err := testutils.GCEClientAndDriverSetup(i, getDriverConfig()) + // if err != nil { + // klog.Fatalf("Failed to set up Test Context for instance %v: %v", i.GetName(), err) + // } + // _, err = tcEmpty.Client.ListVolumes() - Expect(err).To(BeNil(), "no error expected when passed valid compute url") - }) + // Expect(err).To(BeNil(), "no error expected when passed empty compute url") - It("[NVMe] Should update readahead if read_ahead_kb passed on mount", func() { - testContext := getRandomTestContext() + // // Create new driver and client w/ valid, passed-in endpoint + // driverConfig := getDriverConfig() + // driverConfig.ComputeEndpoint = "https://compute.googleapis.com" + // tcValid, err := testutils.GCEClientAndDriverSetup(i, driverConfig) + // if err != nil { + // klog.Fatalf("Failed to set up Test Context for instance %v: %v", i.GetName(), err) + // } + // _, err = tcValid.Client.ListVolumes() - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client - instance := testContext.Instance + // Expect(err).To(BeNil(), "no error expected when passed valid compute url") + // }) - // Create Disk - volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // It("[NVMe] Should update readahead if read_ahead_kb passed on mount", func() { 
+ // testContext := getRandomTestContext() - defer func() { - // Delete Disk - err := client.DeleteVolume(volID) - Expect(err).To(BeNil(), "DeleteVolume failed") + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client + // instance := testContext.Instance - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() + // // Create Disk + // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // Attach Disk - err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) - Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID(), err) + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(volID) + // Expect(err).To(BeNil(), "DeleteVolume failed") - defer func() { - // Detach Disk - err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) - if err != nil { - klog.Errorf("Failed to detach disk: %v", err) - } + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() - }() + // // Attach Disk + // err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) + // Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID(), err) - // Stage Disk - stageDir := filepath.Join("/tmp/", volName, "stage") - expectedReadAheadKB := "4096" - volCap := &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{ - MountFlags: []string{fmt.Sprintf("read_ahead_kb=%s", expectedReadAheadKB)}, - }, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - } - err = client.NodeStageVolume(volID, stageDir, volCap) - Expect(err).To(BeNil(), "failed to stage volume: %v", err) - - // Validate that the link is correct - var validated bool - var devName string - devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") - for _, devicePath := range devicePaths { - validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) - Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) - if validated { - devFsPath, err := instance.SSH("find", devicePath, "-printf", "'%l'") - Expect(err).To(BeNil(), "Failed to symlink devicePath") - devFsPathPieces := strings.Split(devFsPath, "/") - devName = devFsPathPieces[len(devFsPathPieces)-1] - break - } - } - Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) - actualReadAheadKBStr, err := instance.SSH("cat", fmt.Sprintf("/sys/block/%s/queue/read_ahead_kb", devName)) - actualReadAheadKB := strings.TrimSpace(actualReadAheadKBStr) - Expect(err).To(BeNil(), "Failed to read read_ahead_kb: %v", err) - Expect(actualReadAheadKB).To(Equal(expectedReadAheadKB), "unexpected read_ahead_kb") + // defer func() { + // // Detach Disk + // err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) + // if err != nil { + // klog.Errorf("Failed to detach disk: %v", err) + // } - defer func() { - // Unstage Disk - err = client.NodeUnstageVolume(volID, stageDir) - if err != nil { - klog.Errorf("Failed to unstage volume: 
%v", err) - } - fp := filepath.Join("/tmp/", volName) - err = testutils.RmAll(instance, fp) - if err != nil { - klog.Errorf("Failed to rm file path %s: %v", fp, err) - } - }() - }) + // }() - It("Should block unstage if filesystem mounted", func() { - testContext := getRandomTestContext() + // // Stage Disk + // stageDir := filepath.Join("/tmp/", volName, "stage") + // expectedReadAheadKB := "4096" + // volCap := &csi.VolumeCapability{ + // AccessType: &csi.VolumeCapability_Mount{ + // Mount: &csi.VolumeCapability_MountVolume{ + // MountFlags: []string{fmt.Sprintf("read_ahead_kb=%s", expectedReadAheadKB)}, + // }, + // }, + // AccessMode: &csi.VolumeCapability_AccessMode{ + // Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + // }, + // } + // err = client.NodeStageVolume(volID, stageDir, volCap) + // Expect(err).To(BeNil(), "failed to stage volume: %v", err) + + // // Validate that the link is correct + // var validated bool + // var devName string + // devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") + // for _, devicePath := range devicePaths { + // validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) + // Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) + // if validated { + // devFsPath, err := instance.SSH("find", devicePath, "-printf", "'%l'") + // Expect(err).To(BeNil(), "Failed to symlink devicePath") + // devFsPathPieces := strings.Split(devFsPath, "/") + // devName = devFsPathPieces[len(devFsPathPieces)-1] + // break + // } + // } + // Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) + // actualReadAheadKBStr, err := instance.SSH("cat", fmt.Sprintf("/sys/block/%s/queue/read_ahead_kb", devName)) + // actualReadAheadKB := strings.TrimSpace(actualReadAheadKBStr) + // Expect(err).To(BeNil(), "Failed to read read_ahead_kb: %v", err) + // Expect(actualReadAheadKB).To(Equal(expectedReadAheadKB), "unexpected read_ahead_kb") + + // defer func() { + // // Unstage Disk + // err = client.NodeUnstageVolume(volID, stageDir) + // if err != nil { + // klog.Errorf("Failed to unstage volume: %v", err) + // } + // fp := filepath.Join("/tmp/", volName) + // err = testutils.RmAll(instance, fp) + // if err != nil { + // klog.Errorf("Failed to rm file path %s: %v", fp, err) + // } + // }() + // }) - p, z, _ := testContext.Instance.GetIdentity() - client := testContext.Client - instance := testContext.Instance + // It("Should block unstage if filesystem mounted", func() { + // testContext := getRandomTestContext() - // Create Disk - volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // p, z, _ := testContext.Instance.GetIdentity() + // client := testContext.Client + // instance := testContext.Instance - defer func() { - // Delete Disk - err := client.DeleteVolume(volID) - Expect(err).To(BeNil(), "DeleteVolume failed") + // // Create Disk + // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // Validate Disk Deleted - _, err = computeService.Disks.Get(p, z, volName).Do() - Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - }() + // defer func() { + // // Delete Disk + // err := client.DeleteVolume(volID) + // Expect(err).To(BeNil(), "DeleteVolume failed") - // Attach Disk - err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) - Expect(err).To(BeNil(), "ControllerPublishVolume failed 
with error for disk %v on node %v: %v", volID, instance.GetNodeID(), err) + // // Validate Disk Deleted + // _, err = computeService.Disks.Get(p, z, volName).Do() + // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // }() - defer func() { - // Detach Disk - err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) - if err != nil { - klog.Errorf("Failed to detach disk: %v", err) - } - }() + // // Attach Disk + // err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) + // Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID(), err) - // Stage Disk - stageDir := filepath.Join("/tmp/", volName, "stage") - err = client.NodeStageExt4Volume(volID, stageDir) - Expect(err).To(BeNil(), "failed to stage volume: %v", err) - - // Create private bind mount - boundMountStageDir := filepath.Join("/tmp/bindmount", volName, "bindmount") - boundMountStageMkdirOutput, err := instance.SSH("mkdir", "-p", boundMountStageDir) - Expect(err).To(BeNil(), "mkdir failed on instance %v: output: %v: %v", instance.GetNodeID(), boundMountStageMkdirOutput, err) - bindMountOutput, err := instance.SSH("mount", "--rbind", "--make-private", stageDir, boundMountStageDir) - Expect(err).To(BeNil(), "Bind mount failed on instance %v: output: %v: %v", instance.GetNodeID(), bindMountOutput, err) - - privateBindMountRemoved := false - unmountAndRmPrivateBindMount := func() { - if !privateBindMountRemoved { - // Umount and delete private mount staging directory - bindUmountOutput, err := instance.SSH("umount", boundMountStageDir) - Expect(err).To(BeNil(), "Bind mount failed on instance %v: output: %v: %v", instance.GetNodeID(), bindUmountOutput, err) - err = testutils.RmAll(instance, boundMountStageDir) - Expect(err).To(BeNil(), "Failed to rm mount stage dir %s: %v", boundMountStageDir, err) - } - privateBindMountRemoved = true - } + // defer func() { + // // Detach Disk + // err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) + // if err != nil { + // klog.Errorf("Failed to detach disk: %v", err) + // } + // }() + + // // Stage Disk + // stageDir := filepath.Join("/tmp/", volName, "stage") + // err = client.NodeStageExt4Volume(volID, stageDir) + // Expect(err).To(BeNil(), "failed to stage volume: %v", err) + + // // Create private bind mount + // boundMountStageDir := filepath.Join("/tmp/bindmount", volName, "bindmount") + // boundMountStageMkdirOutput, err := instance.SSH("mkdir", "-p", boundMountStageDir) + // Expect(err).To(BeNil(), "mkdir failed on instance %v: output: %v: %v", instance.GetNodeID(), boundMountStageMkdirOutput, err) + // bindMountOutput, err := instance.SSH("mount", "--rbind", "--make-private", stageDir, boundMountStageDir) + // Expect(err).To(BeNil(), "Bind mount failed on instance %v: output: %v: %v", instance.GetNodeID(), bindMountOutput, err) + + // privateBindMountRemoved := false + // unmountAndRmPrivateBindMount := func() { + // if !privateBindMountRemoved { + // // Umount and delete private mount staging directory + // bindUmountOutput, err := instance.SSH("umount", boundMountStageDir) + // Expect(err).To(BeNil(), "Bind mount failed on instance %v: output: %v: %v", instance.GetNodeID(), bindUmountOutput, err) + // err = testutils.RmAll(instance, boundMountStageDir) + // Expect(err).To(BeNil(), "Failed to rm mount stage dir %s: %v", boundMountStageDir, err) + // } + // privateBindMountRemoved = true + // } - defer func() { - 
unmountAndRmPrivateBindMount() - }() + // defer func() { + // unmountAndRmPrivateBindMount() + // }() - // Unstage Disk - err = client.NodeUnstageVolume(volID, stageDir) - Expect(err).ToNot(BeNil(), "Expected failure during unstage") - Expect(err).To(MatchError(ContainSubstring(("is still in use")))) + // // Unstage Disk + // err = client.NodeUnstageVolume(volID, stageDir) + // Expect(err).ToNot(BeNil(), "Expected failure during unstage") + // Expect(err).To(MatchError(ContainSubstring(("is still in use")))) - // Unmount private bind mount and try again - unmountAndRmPrivateBindMount() + // // Unmount private bind mount and try again + // unmountAndRmPrivateBindMount() - // Unstage Disk - err = client.NodeUnstageVolume(volID, stageDir) - Expect(err).To(BeNil(), "Failed to unstage volume: %v", err) - fp := filepath.Join("/tmp/", volName) - err = testutils.RmAll(instance, fp) - Expect(err).To(BeNil(), "Failed to rm file path %s: %v", fp, err) - }) + // // Unstage Disk + // err = client.NodeUnstageVolume(volID, stageDir) + // Expect(err).To(BeNil(), "Failed to unstage volume: %v", err) + // fp := filepath.Join("/tmp/", volName) + // err = testutils.RmAll(instance, fp) + // Expect(err).To(BeNil(), "Failed to rm file path %s: %v", fp, err) + // }) type multiZoneTestConfig struct { diskType string @@ -1709,7 +1712,7 @@ func createAndValidateUniqueZonalMultiWriterDisk(client *remote.CsiClient, proje disk := typeToDisk[diskType] disk.params[common.ParameterAccessMode] = "READ_WRITE_MANY" -// .AccessMode + // .AccessMode volName := testNamePrefix + string(uuid.NewUUID()) volume, err := client.CreateVolumeWithCaps(volName, disk.params, defaultMwSizeGb, &csi.TopologyRequirement{ diff --git a/test/run-e2e-local.sh b/test/run-e2e-local.sh index aaf9a65d5..9a86240e4 100755 --- a/test/run-e2e-local.sh +++ b/test/run-e2e-local.sh @@ -16,4 +16,6 @@ if hostname | grep -q c.googlers.com ; then CLOUDTOP_HOST=--cloudtop-host fi -ginkgo --v "test/e2e/tests" -- --project "${PROJECT}" --service-account "${IAM_NAME}" "${CLOUDTOP_HOST}" --v=6 --logtostderr $@ + + +ginkgo --v --progress "test/e2e/tests" -- --project "${PROJECT}" --service-account "${IAM_NAME}" "${CLOUDTOP_HOST}" --v=6 --logtostderr $@ From 712b072a91dae9c4045c2cb5ab48b1310dd65d39 Mon Sep 17 00:00:00 2001 From: Sam Serdlow Date: Mon, 13 Jan 2025 18:19:19 +0000 Subject: [PATCH 06/10] Changes update the tests to use two contexts, one for multiwriter and one for the existing tests. This was deemed necessary as only some disks can support multi-writer, and only some VM shapes can support said disks. 
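
A rough sketch of the intended split (the type, pool names, and machine-shape
strings below are illustrative assumptions, not taken from this patch): specs
that exercise multi-writer disks draw from a second pool of instances
provisioned on a VM shape that can attach those disks, while everything else
keeps using the existing pool.

    package main

    import "fmt"

    // testContext is a minimal stand-in for the repo's remote test context type.
    type testContext struct {
        machineType string
    }

    var (
        // Default pool, provisioned on the usual VM shape (placeholder name).
        testContexts = []*testContext{{machineType: "default-shape"}}
        // Second pool on a shape that can attach multi-writer-capable disks
        // such as hyperdisk-balanced; the shape name here is a placeholder.
        mwTestContexts = []*testContext{{machineType: "mw-capable-shape"}}
    )

    // contextFor routes a spec to the pool whose VM shape supports its disk type.
    func contextFor(diskType string) *testContext {
        if diskType == "hyperdisk-balanced" {
            return mwTestContexts[0]
        }
        return testContexts[0]
    }

    func main() {
        fmt.Println(contextFor("hyperdisk-balanced").machineType)
    }

In the actual specs the routing is done by having the multi-writer tests call
the multi-writer variant of the random-context helper (getRandomMwTestContext),
as seen in the hunks below.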
--- test/e2e/tests/single_zone_e2e_test.go | 38 ++++++++++---------------- 1 file changed, 15 insertions(+), 23 deletions(-) diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go index 6e039e617..8be5d9cd9 100644 --- a/test/e2e/tests/single_zone_e2e_test.go +++ b/test/e2e/tests/single_zone_e2e_test.go @@ -37,6 +37,7 @@ import ( compute "google.golang.org/api/compute/v1" "google.golang.org/api/iterator" kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" + fieldmask "google.golang.org/genproto/protobuf/field_mask" ) const ( @@ -75,18 +76,13 @@ const ( ) var _ = Describe("GCE PD CSI Driver", func() { - - // ReportAfterEach(func(report SpecReport) { - // customFormat := fmt.Sprintf("%s | %s", report.State, report.FullText()) - // client.SendReport(customFormat) - // }) - // It("Should get reasonable volume limits from nodes with NodeGetInfo", func() { - // testContext := getRandomTestContext() - // resp, err := testContext.Client.NodeGetInfo() - // Expect(err).To(BeNil()) - // volumeLimit := resp.GetMaxVolumesPerNode() - // Expect(volumeLimit).To(Equal(defaultVolumeLimit)) - // }) + It("Should get reasonable volume limits from nodes with NodeGetInfo", func() { + testContext := getRandomTestContext() + resp, err := testContext.Client.NodeGetInfo() + Expect(err).To(BeNil()) + volumeLimit := resp.GetMaxVolumesPerNode() + Expect(volumeLimit).To(Equal(defaultVolumeLimit)) + }) // It("[NVMe] Should create->attach->stage->mount volume and check if it is writable, then unmount->unstage->detach->delete and check disk is deleted", func() { // testContext := getRandomTestContext() @@ -279,12 +275,11 @@ var _ = Describe("GCE PD CSI Driver", func() { // Expect(err).To(BeNil(), "Failed to delete volume") // }() - // _, err = computeService.Disks.Get(p, zone, volName).Do() - // Expect(err).To(BeNil(), "Could not find disk in correct zone") - // } - // }) + _, err = computeService.Disks.Get(p, zone, volName).Do() + Expect(err).To(BeNil(), "Could not find disk in correct zone") + } + }) - /******************/ // TODO(hime): Enable this test once all release branches contain the fix from PR#1708. // It("Should return InvalidArgument when disk size exceeds limit", func() { // // If this returns a different error code (like Unknown), the error wrapping logic in #1708 has regressed. 
@@ -911,13 +906,10 @@ var _ = Describe("GCE PD CSI Driver", func() { Expect(testContexts).ToNot(BeEmpty()) testContext := getRandomMwTestContext() - p, _, _ := testContext.Instance.GetIdentity() + p, z, _ := testContext.Instance.GetIdentity() client := testContext.Client - - zone := "us-east1-b" - // Create and Validate Disk - volName, volID := createAndValidateUniqueZonalMultiWriterDisk(client, p, zone, hdbDiskType) + volName, volID := createAndValidateUniqueZonalMultiWriterDisk(client, p, z, hdbDiskType) defer func() { // Delete Disk @@ -925,7 +917,7 @@ var _ = Describe("GCE PD CSI Driver", func() { Expect(err).To(BeNil(), "DeleteVolume failed") // Validate Disk Deleted - _, err = computeService.Disks.Get(p, zone, volName).Do() + _, err = computeService.Disks.Get(p, z, volName).Do() Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") }() }) From ca165599488c640a720631a98facd39c6f42c5bb Mon Sep 17 00:00:00 2001 From: Sam Serdlow Date: Mon, 13 Jan 2025 19:49:23 +0000 Subject: [PATCH 07/10] Fixing some git oddness --- test/e2e/tests/single_zone_e2e_test.go | 2308 ++++++++++++------------ 1 file changed, 1155 insertions(+), 1153 deletions(-) diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go index 8be5d9cd9..c5dbcef20 100644 --- a/test/e2e/tests/single_zone_e2e_test.go +++ b/test/e2e/tests/single_zone_e2e_test.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "os" + "path/filepath" "regexp" "strconv" "strings" @@ -25,7 +26,9 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common" + "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/deviceutils" gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute" testutils "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/e2e/utils" "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/remote" @@ -84,196 +87,196 @@ var _ = Describe("GCE PD CSI Driver", func() { Expect(volumeLimit).To(Equal(defaultVolumeLimit)) }) - // It("[NVMe] Should create->attach->stage->mount volume and check if it is writable, then unmount->unstage->detach->delete and check disk is deleted", func() { - // testContext := getRandomTestContext() + It("[NVMe] Should create->attach->stage->mount volume and check if it is writable, then unmount->unstage->detach->delete and check disk is deleted", func() { + testContext := getRandomTestContext() - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client - // instance := testContext.Instance + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + instance := testContext.Instance - // // Create Disk - // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // Create Disk + volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(volID) - // Expect(err).To(BeNil(), "DeleteVolume failed") + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk 
to not be found") + }() - // // Attach Disk - // err := testAttachWriteReadDetach(volID, volName, instance, client, false /* readOnly */) - // Expect(err).To(BeNil(), "Failed to go through volume lifecycle") - // }) + // Attach Disk + err := testAttachWriteReadDetach(volID, volName, instance, client, false /* readOnly */) + Expect(err).To(BeNil(), "Failed to go through volume lifecycle") + }) - // It("Should automatically fix the symlink between /dev/* and /dev/by-id if the disk does not match", func() { - // testContext := getRandomTestContext() + It("Should automatically fix the symlink between /dev/* and /dev/by-id if the disk does not match", func() { + testContext := getRandomTestContext() - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client - // instance := testContext.Instance + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + instance := testContext.Instance - // // Create Disk - // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // Create Disk + volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(volID) - // Expect(err).To(BeNil(), "DeleteVolume failed") + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() - // // Attach Disk - // err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) - // Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID()) + // Attach Disk + err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) + Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID()) - // defer func() { - // // Detach Disk - // err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) - // if err != nil { - // klog.Errorf("Failed to detach disk: %v", err) - // } + defer func() { + // Detach Disk + err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) + if err != nil { + klog.Errorf("Failed to detach disk: %v", err) + } - // }() + }() - // // MESS UP THE symlink - // devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") - // for _, devicePath := range devicePaths { - // err = testutils.RmAll(instance, devicePath) - // Expect(err).To(BeNil(), "failed to remove /dev/by-id folder") - // err = testutils.Symlink(instance, "/dev/null", devicePath) - // Expect(err).To(BeNil(), "failed to add invalid symlink /dev/by-id folder") - // } + // MESS UP THE symlink + devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") + for _, devicePath := range devicePaths { + err = testutils.RmAll(instance, devicePath) + Expect(err).To(BeNil(), "failed to remove /dev/by-id folder") + err = testutils.Symlink(instance, "/dev/null", devicePath) + Expect(err).To(BeNil(), "failed to add invalid symlink /dev/by-id folder") + } - // // Stage Disk - // stageDir := filepath.Join("/tmp/", volName, "stage") - // err = 
client.NodeStageExt4Volume(volID, stageDir) - // Expect(err).To(BeNil(), "failed to repair /dev/by-id symlink and stage volume") - - // // Validate that the link is correct - // var validated bool - // for _, devicePath := range devicePaths { - // validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) - // Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) - // if validated { - // break - // } - // } - // Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) + // Stage Disk + stageDir := filepath.Join("/tmp/", volName, "stage") + err = client.NodeStageExt4Volume(volID, stageDir) + Expect(err).To(BeNil(), "failed to repair /dev/by-id symlink and stage volume") + + // Validate that the link is correct + var validated bool + for _, devicePath := range devicePaths { + validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) + Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) + if validated { + break + } + } + Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) - // defer func() { - // // Unstage Disk - // err = client.NodeUnstageVolume(volID, stageDir) - // if err != nil { - // klog.Errorf("Failed to unstage volume: %v", err) - // } - // fp := filepath.Join("/tmp/", volName) - // err = testutils.RmAll(instance, fp) - // if err != nil { - // klog.Errorf("Failed to rm file path %s: %v", fp, err) - // } - // }() - // }) + defer func() { + // Unstage Disk + err = client.NodeUnstageVolume(volID, stageDir) + if err != nil { + klog.Errorf("Failed to unstage volume: %v", err) + } + fp := filepath.Join("/tmp/", volName) + err = testutils.RmAll(instance, fp) + if err != nil { + klog.Errorf("Failed to rm file path %s: %v", fp, err) + } + }() + }) - // It("[NVMe] Should automatically add a symlink between /dev/* and /dev/by-id if disk is not found", func() { - // testContext := getRandomTestContext() + It("[NVMe] Should automatically add a symlink between /dev/* and /dev/by-id if disk is not found", func() { + testContext := getRandomTestContext() - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client - // instance := testContext.Instance + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + instance := testContext.Instance - // // Create Disk - // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // Create Disk + volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(volID) - // Expect(err).To(BeNil(), "DeleteVolume failed") + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() - // // Attach Disk - // err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) - // Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID()) + // Attach Disk + 
err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) + Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID()) - // defer func() { - // // Detach Disk - // err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) - // if err != nil { - // klog.Errorf("Failed to detach disk: %v", err) - // } + defer func() { + // Detach Disk + err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) + if err != nil { + klog.Errorf("Failed to detach disk: %v", err) + } - // }() + }() - // // DELETE THE symlink - // devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") - // for _, devicePath := range devicePaths { - // err = testutils.RmAll(instance, devicePath) - // Expect(err).To(BeNil(), "failed to remove /dev/by-id folder") - // } + // DELETE THE symlink + devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") + for _, devicePath := range devicePaths { + err = testutils.RmAll(instance, devicePath) + Expect(err).To(BeNil(), "failed to remove /dev/by-id folder") + } - // // Stage Disk - // stageDir := filepath.Join("/tmp/", volName, "stage") - // err = client.NodeStageExt4Volume(volID, stageDir) - // Expect(err).To(BeNil(), "failed to repair /dev/by-id symlink and stage volume") - - // // Validate that the link is correct - // var validated bool - // for _, devicePath := range devicePaths { - // validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) - // Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) - // if validated { - // break - // } - // } - // Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) + // Stage Disk + stageDir := filepath.Join("/tmp/", volName, "stage") + err = client.NodeStageExt4Volume(volID, stageDir) + Expect(err).To(BeNil(), "failed to repair /dev/by-id symlink and stage volume") + + // Validate that the link is correct + var validated bool + for _, devicePath := range devicePaths { + validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) + Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) + if validated { + break + } + } + Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) - // defer func() { - // // Unstage Disk - // err = client.NodeUnstageVolume(volID, stageDir) - // if err != nil { - // klog.Errorf("Failed to unstage volume: %v", err) - // } - // fp := filepath.Join("/tmp/", volName) - // err = testutils.RmAll(instance, fp) - // if err != nil { - // klog.Errorf("Failed to rm file path %s: %v", fp, err) - // } - // }() - // }) + defer func() { + // Unstage Disk + err = client.NodeUnstageVolume(volID, stageDir) + if err != nil { + klog.Errorf("Failed to unstage volume: %v", err) + } + fp := filepath.Join("/tmp/", volName) + err = testutils.RmAll(instance, fp) + if err != nil { + klog.Errorf("Failed to rm file path %s: %v", fp, err) + } + }() + }) - // It("Should create disks in correct zones when topology is specified", func() { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + It("Should create disks in correct zones when topology is specified", func() { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // p, _, _ := testContext.Instance.GetIdentity() + p, _, _ := 
testContext.Instance.GetIdentity() - // zones := []string{"us-central1-c", "us-central1-b", "us-central1-a"} + zones := []string{"us-central1-c", "us-central1-b", "us-central1-a"} - // for _, zone := range zones { - // volName := testNamePrefix + string(uuid.NewUUID()) - // topReq := &csi.TopologyRequirement{ - // Requisite: []*csi.Topology{ - // { - // Segments: map[string]string{common.TopologyKeyZone: zone}, - // }, - // }, - // } - // volume, err := testContext.Client.CreateVolume(volName, nil, defaultSizeGb, topReq, nil) - // Expect(err).To(BeNil(), "Failed to create volume") - // defer func() { - // err = testContext.Client.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "Failed to delete volume") - // }() + for _, zone := range zones { + volName := testNamePrefix + string(uuid.NewUUID()) + topReq := &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: zone}, + }, + }, + } + volume, err := testContext.Client.CreateVolume(volName, nil, defaultSizeGb, topReq, nil) + Expect(err).To(BeNil(), "Failed to create volume") + defer func() { + err = testContext.Client.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "Failed to delete volume") + }() _, err = computeService.Disks.Get(p, zone, volName).Do() Expect(err).To(BeNil(), "Could not find disk in correct zone") @@ -308,599 +311,598 @@ var _ = Describe("GCE PD CSI Driver", func() { // } // }) - /******************/ - // DescribeTable("Should complete entire disk lifecycle with underspecified volume ID", - // func(diskType string) { - // testContext := getRandomTestContext() + DescribeTable("Should complete entire disk lifecycle with underspecified volume ID", + func(diskType string) { + testContext := getRandomTestContext() - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client - // instance := testContext.Instance + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + instance := testContext.Instance - // volName, _ := createAndValidateUniqueZonalDisk(client, p, z, diskType) + volName, _ := createAndValidateUniqueZonalDisk(client, p, z, diskType) - // underSpecifiedID := common.GenerateUnderspecifiedVolumeID(volName, true /* isZonal */) + underSpecifiedID := common.GenerateUnderspecifiedVolumeID(volName, true /* isZonal */) - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(underSpecifiedID) - // Expect(err).To(BeNil(), "DeleteVolume failed") + defer func() { + // Delete Disk + err := client.DeleteVolume(underSpecifiedID) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() - // // Attach Disk - // err := testAttachWriteReadDetach(underSpecifiedID, volName, instance, client, false /* readOnly */) - // Expect(err).To(BeNil(), "Failed to go through volume lifecycle") - // }, - // Entry("on pd-standard", standardDiskType), - // Entry("on pd-extreme", extremeDiskType), - // Entry("on hyperdisk-throughput", hdtDiskType), - // Entry("on pd-ssd", ssdDiskType), - // ) + // Attach Disk + err := testAttachWriteReadDetach(underSpecifiedID, volName, instance, client, false /* readOnly */) + Expect(err).To(BeNil(), "Failed to go through volume 
lifecycle") + }, + Entry("on pd-standard", standardDiskType), + Entry("on pd-extreme", extremeDiskType), + Entry("on hyperdisk-throughput", hdtDiskType), + Entry("on pd-ssd", ssdDiskType), + ) - // DescribeTable("[NVMe] Should complete publish/unpublish lifecycle with underspecified volume ID and missing volume", - // func(diskType string) { - // testContext := getRandomTestContext() + DescribeTable("[NVMe] Should complete publish/unpublish lifecycle with underspecified volume ID and missing volume", + func(diskType string) { + testContext := getRandomTestContext() - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client - // instance := testContext.Instance + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + instance := testContext.Instance - // // Create Disk - // volName, _ := createAndValidateUniqueZonalDisk(client, p, z, diskType) - // underSpecifiedID := common.GenerateUnderspecifiedVolumeID(volName, true /* isZonal */) + // Create Disk + volName, _ := createAndValidateUniqueZonalDisk(client, p, z, diskType) + underSpecifiedID := common.GenerateUnderspecifiedVolumeID(volName, true /* isZonal */) - // defer func() { - // // Detach Disk - // err := instance.DetachDisk(volName) - // Expect(err).To(BeNil(), "DetachDisk failed") + defer func() { + // Detach Disk + err := instance.DetachDisk(volName) + Expect(err).To(BeNil(), "DetachDisk failed") - // // Delete Disk - // err = client.DeleteVolume(underSpecifiedID) - // Expect(err).To(BeNil(), "DeleteVolume failed") + // Delete Disk + err = client.DeleteVolume(underSpecifiedID) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // // Unpublish Disk - // err = client.ControllerUnpublishVolume(underSpecifiedID, instance.GetNodeID()) - // Expect(err).To(BeNil(), "ControllerUnpublishVolume failed") - // }() + // Unpublish Disk + err = client.ControllerUnpublishVolume(underSpecifiedID, instance.GetNodeID()) + Expect(err).To(BeNil(), "ControllerUnpublishVolume failed") + }() - // // Attach Disk - // err := client.ControllerPublishVolumeReadWrite(underSpecifiedID, instance.GetNodeID(), false /* forceAttach */) - // Expect(err).To(BeNil(), "ControllerPublishVolume failed") - // }, - // Entry("on pd-standard", standardDiskType), - // Entry("on pd-extreme", extremeDiskType), - // ) + // Attach Disk + err := client.ControllerPublishVolumeReadWrite(underSpecifiedID, instance.GetNodeID(), false /* forceAttach */) + Expect(err).To(BeNil(), "ControllerPublishVolume failed") + }, + Entry("on pd-standard", standardDiskType), + Entry("on pd-extreme", extremeDiskType), + ) - // It("Should successfully create RePD in two zones in the drivers region when none are specified", func() { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + It("Should successfully create RePD in two zones in the drivers region when none are specified", func() { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // controllerInstance := testContext.Instance - // controllerClient := testContext.Client - - // p, z, _ := controllerInstance.GetIdentity() - - // region, err := common.GetRegionFromZones([]string{z}) - // 
Expect(err).To(BeNil(), "Failed to get region from zones") - - // // Create Disk - // volName := testNamePrefix + string(uuid.NewUUID()) - // volume, err := controllerClient.CreateVolume(volName, map[string]string{ - // common.ParameterKeyReplicationType: "regional-pd", - // }, defaultRepdSizeGb, nil, nil) - // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // // Validate Disk Created - // cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() - // Expect(err).To(BeNil(), "Could not get disk from cloud directly") - // Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - // Expect(cloudDisk.Status).To(Equal(readyState)) - // Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) - // Expect(cloudDisk.Name).To(Equal(volName)) - // Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) - // for _, replicaZone := range cloudDisk.ReplicaZones { - // actualZone := zoneFromURL(replicaZone) - // gotRegion, err := common.GetRegionFromZones([]string{actualZone}) - // Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) - // Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") - // } - // defer func() { - // // Delete Disk - // controllerClient.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "DeleteVolume failed") - - // // Validate Disk Deleted - // _, err = computeService.RegionDisks.Get(p, region, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() - // }) + controllerInstance := testContext.Instance + controllerClient := testContext.Client + + p, z, _ := controllerInstance.GetIdentity() + + region, err := common.GetRegionFromZones([]string{z}) + Expect(err).To(BeNil(), "Failed to get region from zones") + + // Create Disk + volName := testNamePrefix + string(uuid.NewUUID()) + volume, err := controllerClient.CreateVolume(volName, map[string]string{ + common.ParameterKeyReplicationType: "regional-pd", + }, defaultRepdSizeGb, nil, nil) + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // Validate Disk Created + cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) + Expect(cloudDisk.Name).To(Equal(volName)) + Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) + for _, replicaZone := range cloudDisk.ReplicaZones { + actualZone := zoneFromURL(replicaZone) + gotRegion, err := common.GetRegionFromZones([]string{actualZone}) + Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) + Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") + } + defer func() { + // Delete Disk + controllerClient.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "DeleteVolume failed") - // DescribeTable("Should create and delete disk with default zone", - // func(diskType string) { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + // Validate Disk Deleted + _, err = computeService.RegionDisks.Get(p, region, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + }) - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client + DescribeTable("Should create and delete 
disk with default zone", + func(diskType string) { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // // Create Disk - // disk := typeToDisk[diskType] - // volName := testNamePrefix + string(uuid.NewUUID()) + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client - // diskSize := defaultSizeGb - // if diskType == extremeDiskType { - // diskSize = defaultExtremeSizeGb - // } + // Create Disk + disk := typeToDisk[diskType] + volName := testNamePrefix + string(uuid.NewUUID()) - // volume, err := client.CreateVolume(volName, disk.params, diskSize, nil, nil) + diskSize := defaultSizeGb + if diskType == extremeDiskType { + diskSize = defaultExtremeSizeGb + } - // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + volume, err := client.CreateVolume(volName, disk.params, diskSize, nil, nil) - // // Validate Disk Created - // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - // Expect(err).To(BeNil(), "Could not get disk from cloud directly") - // Expect(cloudDisk.Status).To(Equal(readyState)) - // Expect(cloudDisk.SizeGb).To(Equal(diskSize)) - // Expect(cloudDisk.Name).To(Equal(volName)) - // disk.validate(cloudDisk) + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - // defer func() { - // // Delete Disk - // client.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "DeleteVolume failed") + // Validate Disk Created + cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(diskSize)) + Expect(cloudDisk.Name).To(Equal(volName)) + disk.validate(cloudDisk) - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() - // }, - // Entry("on pd-standard", standardDiskType), - // Entry("on pd-extreme", extremeDiskType), - // ) - - // DescribeTable("Should create and delete pd-extreme disk with default iops", - // func(diskType string) { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() - - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client - - // // Create Disk - // diskParams := map[string]string{ - // common.ParameterKeyType: diskType, - // } - // volName := testNamePrefix + string(uuid.NewUUID()) + defer func() { + // Delete Disk + client.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "DeleteVolume failed") - // diskSize := defaultExtremeSizeGb + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + }, + Entry("on pd-standard", standardDiskType), + Entry("on pd-extreme", extremeDiskType), + ) - // volume, err := client.CreateVolume(volName, diskParams, diskSize, nil, nil) + DescribeTable("Should create and delete pd-extreme disk with default iops", + func(diskType string) { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client - // // Validate Disk Created - // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - // Expect(err).To(BeNil(), "Could not get disk from cloud directly") - // 
Expect(cloudDisk.Status).To(Equal(readyState)) - // Expect(cloudDisk.SizeGb).To(Equal(defaultExtremeSizeGb)) - // Expect(cloudDisk.Type).To(ContainSubstring(extremeDiskType)) - // Expect(cloudDisk.ProvisionedIops).To(Equal(provisionedIOPSOnCreateDefaultInt)) - // Expect(cloudDisk.Name).To(Equal(volName)) + // Create Disk + diskParams := map[string]string{ + common.ParameterKeyType: diskType, + } + volName := testNamePrefix + string(uuid.NewUUID()) - // defer func() { - // // Delete Disk - // client.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "DeleteVolume failed") + diskSize := defaultExtremeSizeGb - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() - // }, - // Entry("on pd-extreme", extremeDiskType), - // ) + volume, err := client.CreateVolume(volName, diskParams, diskSize, nil, nil) - // DescribeTable("Should create and delete disk with labels", - // func(diskType string) { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client + // Validate Disk Created + cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(defaultExtremeSizeGb)) + Expect(cloudDisk.Type).To(ContainSubstring(extremeDiskType)) + Expect(cloudDisk.ProvisionedIops).To(Equal(provisionedIOPSOnCreateDefaultInt)) + Expect(cloudDisk.Name).To(Equal(volName)) - // // Create Disk - // disk := typeToDisk[diskType] - // volName := testNamePrefix + string(uuid.NewUUID()) - // params := merge(disk.params, map[string]string{ - // common.ParameterKeyLabels: "key1=value1,key2=value2", - // }) + defer func() { + // Delete Disk + client.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "DeleteVolume failed") - // diskSize := defaultSizeGb - // if diskType == extremeDiskType { - // diskSize = defaultExtremeSizeGb - // } - // volume, err := client.CreateVolume(volName, params, diskSize, nil, nil) - // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // // Validate Disk Created - // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - // Expect(err).To(BeNil(), "Could not get disk from cloud directly") - // Expect(cloudDisk.Status).To(Equal(readyState)) - // Expect(cloudDisk.SizeGb).To(Equal(diskSize)) - // Expect(cloudDisk.Labels).To(Equal(map[string]string{ - // "key1": "value1", - // "key2": "value2", - // // The label below is added as an --extra-label driver command line argument. 
- // testutils.DiskLabelKey: testutils.DiskLabelValue, - // })) - // Expect(cloudDisk.Name).To(Equal(volName)) - // disk.validate(cloudDisk) + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + }, + Entry("on pd-extreme", extremeDiskType), + ) - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "DeleteVolume failed") + DescribeTable("Should create and delete disk with labels", + func(diskType string) { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() - // }, - // Entry("on pd-standard", standardDiskType), - // Entry("on pd-extreme", extremeDiskType), - // ) + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client - // It("Should create and delete snapshot for the volume with default zone", func() { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + // Create Disk + disk := typeToDisk[diskType] + volName := testNamePrefix + string(uuid.NewUUID()) + params := merge(disk.params, map[string]string{ + common.ParameterKeyLabels: "key1=value1,key2=value2", + }) - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client + diskSize := defaultSizeGb + if diskType == extremeDiskType { + diskSize = defaultExtremeSizeGb + } + volume, err := client.CreateVolume(volName, params, diskSize, nil, nil) + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // Validate Disk Created + cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(diskSize)) + Expect(cloudDisk.Labels).To(Equal(map[string]string{ + "key1": "value1", + "key2": "value2", + // The label below is added as an --extra-label driver command line argument. 
+ testutils.DiskLabelKey: testutils.DiskLabelValue, + })) + Expect(cloudDisk.Name).To(Equal(volName)) + disk.validate(cloudDisk) - // // Create Snapshot - // snapshotName := testNamePrefix + string(uuid.NewUUID()) - // snapshotID, err := client.CreateSnapshot(snapshotName, volID, nil) - // Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) + defer func() { + // Delete Disk + err := client.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Validate Snapshot Created - // snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - // Expect(snapshot.Name).To(Equal(snapshotName)) + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + }, + Entry("on pd-standard", standardDiskType), + Entry("on pd-extreme", extremeDiskType), + ) - // err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { - // snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - // if snapshot.Status == "READY" { - // return true, nil - // } - // return false, nil - // }) - // Expect(err).To(BeNil(), "Could not wait for snapshot be ready") - - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(volID) - // Expect(err).To(BeNil(), "DeleteVolume failed") - - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - - // // Delete Snapshot - // err = client.DeleteSnapshot(snapshotID) - // Expect(err).To(BeNil(), "DeleteSnapshot failed") - - // // Validate Snapshot Deleted - // _, err = computeService.Snapshots.Get(p, snapshotName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") - // }() - // }) + It("Should create and delete snapshot for the volume with default zone", func() { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // DescribeTable("Should create CMEK key, go through volume lifecycle, validate behavior on key revoke and restore", - // func(diskType string) { - // ctx := context.Background() - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client - // controllerInstance := testContext.Instance - // controllerClient := testContext.Client + volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // p, z, _ := controllerInstance.GetIdentity() - // locationID := "global" + // Create Snapshot + snapshotName := testNamePrefix + string(uuid.NewUUID()) + snapshotID, err := client.CreateSnapshot(snapshotName, volID, nil) + Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) - // // The resource name of the key rings. 
- // parentName := fmt.Sprintf("projects/%s/locations/%s", p, locationID) - // keyRingId := "gce-pd-csi-test-ring" + // Validate Snapshot Created + snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + Expect(snapshot.Name).To(Equal(snapshotName)) - // key, keyVersions := setupKeyRing(ctx, parentName, keyRingId) + err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { + snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + if snapshot.Status == "READY" { + return true, nil + } + return false, nil + }) + Expect(err).To(BeNil(), "Could not wait for snapshot be ready") - // // Defer deletion of all key versions - // // https://cloud.google.com/kms/docs/destroy-restore - // defer func() { - // for _, keyVersion := range keyVersions { - // destroyKeyReq := &kmspb.DestroyCryptoKeyVersionRequest{ - // Name: keyVersion, - // } - // _, err := kmsClient.DestroyCryptoKeyVersion(ctx, destroyKeyReq) - // Expect(err).To(BeNil(), "Failed to destroy crypto key version: %v", keyVersion) - // } - // }() + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Go through volume lifecycle using CMEK-ed PD Create Disk - // disk := typeToDisk[diskType] - // volName := testNamePrefix + string(uuid.NewUUID()) - // params := merge(disk.params, map[string]string{ - // common.ParameterKeyDiskEncryptionKmsKey: key.Name, - // }) - // topology := &csi.TopologyRequirement{ - // Requisite: []*csi.Topology{ - // { - // Segments: map[string]string{common.TopologyKeyZone: z}, - // }, - // }, - // } + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // diskSize := defaultSizeGb - // if diskType == extremeDiskType { - // diskSize = defaultExtremeSizeGb - // } - // volume, err := controllerClient.CreateVolume(volName, params, diskSize, topology, nil) - // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + // Delete Snapshot + err = client.DeleteSnapshot(snapshotID) + Expect(err).To(BeNil(), "DeleteSnapshot failed") - // // Validate Disk Created - // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - // Expect(err).To(BeNil(), "Could not get disk from cloud directly") - // Expect(cloudDisk.Status).To(Equal(readyState)) - // Expect(cloudDisk.SizeGb).To(Equal(diskSize)) - // Expect(cloudDisk.Name).To(Equal(volName)) - // disk.validate(cloudDisk) + // Validate Snapshot Deleted + _, err = computeService.Snapshots.Get(p, snapshotName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") + }() + }) - // defer func() { - // // Delete Disk - // err = controllerClient.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "DeleteVolume failed") + DescribeTable("Should create CMEK key, go through volume lifecycle, validate behavior on key revoke and restore", + func(diskType string) { + ctx := context.Background() + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() + controllerInstance := testContext.Instance + controllerClient := testContext.Client - // // Test disk 
works - // err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - // Expect(err).To(BeNil(), "Failed to go through volume lifecycle before revoking CMEK key") + p, z, _ := controllerInstance.GetIdentity() + locationID := "global" - // // Revoke CMEK key - // // https://cloud.google.com/kms/docs/enable-disable + // The resource name of the key rings. + parentName := fmt.Sprintf("projects/%s/locations/%s", p, locationID) + keyRingId := "gce-pd-csi-test-ring" - // for _, keyVersion := range keyVersions { - // disableReq := &kmspb.UpdateCryptoKeyVersionRequest{ - // CryptoKeyVersion: &kmspb.CryptoKeyVersion{ - // Name: keyVersion, - // State: kmspb.CryptoKeyVersion_DISABLED, - // }, - // UpdateMask: &fieldmask.FieldMask{ - // Paths: []string{"state"}, - // }, - // } - // _, err = kmsClient.UpdateCryptoKeyVersion(ctx, disableReq) - // Expect(err).To(BeNil(), "Failed to disable crypto key") - // } + key, keyVersions := setupKeyRing(ctx, parentName, keyRingId) - // // Make sure attach of PD fails - // err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - // Expect(err).ToNot(BeNil(), "Volume lifecycle should have failed, but succeeded") + // Defer deletion of all key versions + // https://cloud.google.com/kms/docs/destroy-restore + defer func() { + for _, keyVersion := range keyVersions { + destroyKeyReq := &kmspb.DestroyCryptoKeyVersionRequest{ + Name: keyVersion, + } + _, err := kmsClient.DestroyCryptoKeyVersion(ctx, destroyKeyReq) + Expect(err).To(BeNil(), "Failed to destroy crypto key version: %v", keyVersion) + } + }() - // // Restore CMEK key - // for _, keyVersion := range keyVersions { - // enableReq := &kmspb.UpdateCryptoKeyVersionRequest{ - // CryptoKeyVersion: &kmspb.CryptoKeyVersion{ - // Name: keyVersion, - // State: kmspb.CryptoKeyVersion_ENABLED, - // }, - // UpdateMask: &fieldmask.FieldMask{ - // Paths: []string{"state"}, - // }, - // } - // _, err = kmsClient.UpdateCryptoKeyVersion(ctx, enableReq) - // Expect(err).To(BeNil(), "Failed to enable crypto key") - // } + // Go through volume lifecycle using CMEK-ed PD Create Disk + disk := typeToDisk[diskType] + volName := testNamePrefix + string(uuid.NewUUID()) + params := merge(disk.params, map[string]string{ + common.ParameterKeyDiskEncryptionKmsKey: key.Name, + }) + topology := &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: z}, + }, + }, + } - // // The controller publish failure in above step would set a backoff condition on the node. Wait suffcient amount of time for the driver to accept new controller publish requests. 
- // time.Sleep(time.Second) - // // Make sure attach of PD succeeds - // err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) - // Expect(err).To(BeNil(), "Failed to go through volume lifecycle after restoring CMEK key") - // }, - // Entry("on pd-standard", standardDiskType), - // Entry("on pd-extreme", extremeDiskType), - // ) - - // It("Should create disks, attach them places, and verify List returns correct results", func() { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + diskSize := defaultSizeGb + if diskType == extremeDiskType { + diskSize = defaultExtremeSizeGb + } + volume, err := controllerClient.CreateVolume(volName, params, diskSize, topology, nil) + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client + // Validate Disk Created + cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(diskSize)) + Expect(cloudDisk.Name).To(Equal(volName)) + disk.validate(cloudDisk) - // nodeID := testContext.Instance.GetNodeID() + defer func() { + // Delete Disk + err = controllerClient.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "DeleteVolume failed") - // _, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // defer deleteVolumeOrError(client, volID) + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() - // _, secondVolID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // defer deleteVolumeOrError(client, secondVolID) + // Test disk works + err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) + Expect(err).To(BeNil(), "Failed to go through volume lifecycle before revoking CMEK key") + + // Revoke CMEK key + // https://cloud.google.com/kms/docs/enable-disable + + for _, keyVersion := range keyVersions { + disableReq := &kmspb.UpdateCryptoKeyVersionRequest{ + CryptoKeyVersion: &kmspb.CryptoKeyVersion{ + Name: keyVersion, + State: kmspb.CryptoKeyVersion_DISABLED, + }, + UpdateMask: &fieldmask.FieldMask{ + Paths: []string{"state"}, + }, + } + _, err = kmsClient.UpdateCryptoKeyVersion(ctx, disableReq) + Expect(err).To(BeNil(), "Failed to disable crypto key") + } - // // Attach volID to current instance - // err := client.ControllerPublishVolumeReadWrite(volID, nodeID, false /* forceAttach */) - // Expect(err).To(BeNil(), "Failed ControllerPublishVolume") - // defer client.ControllerUnpublishVolume(volID, nodeID) + // Make sure attach of PD fails + err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) + Expect(err).ToNot(BeNil(), "Volume lifecycle should have failed, but succeeded") + + // Restore CMEK key + for _, keyVersion := range keyVersions { + enableReq := &kmspb.UpdateCryptoKeyVersionRequest{ + CryptoKeyVersion: &kmspb.CryptoKeyVersion{ + Name: keyVersion, + State: kmspb.CryptoKeyVersion_ENABLED, + }, + UpdateMask: &fieldmask.FieldMask{ + Paths: []string{"state"}, + }, + } + _, err = kmsClient.UpdateCryptoKeyVersion(ctx, enableReq) + Expect(err).To(BeNil(), "Failed to enable crypto key") + } - // // List Volumes - // 
volsToNodes, err := client.ListVolumes() - // Expect(err).To(BeNil(), "Failed ListVolumes") + // The controller publish failure in above step would set a backoff condition on the node. Wait suffcient amount of time for the driver to accept new controller publish requests. + time.Sleep(time.Second) + // Make sure attach of PD succeeds + err = testAttachWriteReadDetach(volume.VolumeId, volName, controllerInstance, controllerClient, false /* readOnly */) + Expect(err).To(BeNil(), "Failed to go through volume lifecycle after restoring CMEK key") + }, + Entry("on pd-standard", standardDiskType), + Entry("on pd-extreme", extremeDiskType), + ) - // // Verify - // Expect(volsToNodes[volID]).ToNot(BeNil(), "Couldn't find attached nodes for vol") - // Expect(volsToNodes[volID]).To(ContainElement(nodeID), "Couldn't find node in attached nodes for vol") - // Expect(volsToNodes[secondVolID]).To(BeNil(), "Second vol ID attached nodes not nil") - // }) + It("Should create disks, attach them places, and verify List returns correct results", func() { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // It("Should create and delete snapshot for RePD in two zones ", func() { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + + nodeID := testContext.Instance.GetNodeID() + + _, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + defer deleteVolumeOrError(client, volID) + + _, secondVolID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + defer deleteVolumeOrError(client, secondVolID) + + // Attach volID to current instance + err := client.ControllerPublishVolumeReadWrite(volID, nodeID, false /* forceAttach */) + Expect(err).To(BeNil(), "Failed ControllerPublishVolume") + defer client.ControllerUnpublishVolume(volID, nodeID) + + // List Volumes + volsToNodes, err := client.ListVolumes() + Expect(err).To(BeNil(), "Failed ListVolumes") + + // Verify + Expect(volsToNodes[volID]).ToNot(BeNil(), "Couldn't find attached nodes for vol") + Expect(volsToNodes[volID]).To(ContainElement(nodeID), "Couldn't find node in attached nodes for vol") + Expect(volsToNodes[secondVolID]).To(BeNil(), "Second vol ID attached nodes not nil") + }) + + It("Should create and delete snapshot for RePD in two zones ", func() { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() + + controllerInstance := testContext.Instance + controllerClient := testContext.Client + + p, z, _ := controllerInstance.GetIdentity() + + region, err := common.GetRegionFromZones([]string{z}) + Expect(err).To(BeNil(), "Failed to get region from zones") + + // Create Disk + volName := testNamePrefix + string(uuid.NewUUID()) + volume, err := controllerClient.CreateVolume(volName, map[string]string{ + common.ParameterKeyReplicationType: "regional-pd", + }, defaultRepdSizeGb, nil, nil) + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // Validate Disk Created + cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) + Expect(cloudDisk.Name).To(Equal(volName)) + Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) + for _, replicaZone := range cloudDisk.ReplicaZones { + actualZone := 
zoneFromURL(replicaZone) + gotRegion, err := common.GetRegionFromZones([]string{actualZone}) + Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) + Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") + } + + // Create Snapshot + snapshotName := testNamePrefix + string(uuid.NewUUID()) + snapshotID, err := controllerClient.CreateSnapshot(snapshotName, volume.VolumeId, nil) + Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) + + // Validate Snapshot Created + snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + Expect(snapshot.Name).To(Equal(snapshotName)) + + err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { + snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + if snapshot.Status == "READY" { + return true, nil + } + return false, nil + }) + Expect(err).To(BeNil(), "Could not wait for snapshot be ready") + + defer func() { + // Delete Disk + err := controllerClient.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "DeleteVolume failed") + + // Validate Disk Deleted + _, err = computeService.RegionDisks.Get(p, region, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // controllerInstance := testContext.Instance - // controllerClient := testContext.Client - - // p, z, _ := controllerInstance.GetIdentity() - - // region, err := common.GetRegionFromZones([]string{z}) - // Expect(err).To(BeNil(), "Failed to get region from zones") - - // // Create Disk - // volName := testNamePrefix + string(uuid.NewUUID()) - // volume, err := controllerClient.CreateVolume(volName, map[string]string{ - // common.ParameterKeyReplicationType: "regional-pd", - // }, defaultRepdSizeGb, nil, nil) - // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // // Validate Disk Created - // cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() - // Expect(err).To(BeNil(), "Could not get disk from cloud directly") - // Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - // Expect(cloudDisk.Status).To(Equal(readyState)) - // Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) - // Expect(cloudDisk.Name).To(Equal(volName)) - // Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) - // for _, replicaZone := range cloudDisk.ReplicaZones { - // actualZone := zoneFromURL(replicaZone) - // gotRegion, err := common.GetRegionFromZones([]string{actualZone}) - // Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) - // Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") - // } + // Delete Snapshot + err = controllerClient.DeleteSnapshot(snapshotID) + Expect(err).To(BeNil(), "DeleteSnapshot failed") - // // Create Snapshot - // snapshotName := testNamePrefix + string(uuid.NewUUID()) - // snapshotID, err := controllerClient.CreateSnapshot(snapshotName, volume.VolumeId, nil) - // Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) - - // // Validate Snapshot Created - // snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - // Expect(snapshot.Name).To(Equal(snapshotName)) - - // err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { - // 
snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - // if snapshot.Status == "READY" { - // return true, nil - // } - // return false, nil - // }) - // Expect(err).To(BeNil(), "Could not wait for snapshot be ready") - - // defer func() { - // // Delete Disk - // err := controllerClient.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "DeleteVolume failed") - - // // Validate Disk Deleted - // _, err = computeService.RegionDisks.Get(p, region, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - - // // Delete Snapshot - // err = controllerClient.DeleteSnapshot(snapshotID) - // Expect(err).To(BeNil(), "DeleteSnapshot failed") - - // // Validate Snapshot Deleted - // _, err = computeService.Snapshots.Get(p, snapshotName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") - // }() - // }) + // Validate Snapshot Deleted + _, err = computeService.Snapshots.Get(p, snapshotName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") + }() + }) - // It("Should get correct VolumeStats for Block", func() { - // testContext := getRandomTestContext() + It("Should get correct VolumeStats for Block", func() { + testContext := getRandomTestContext() - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client - // instance := testContext.Instance + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + instance := testContext.Instance - // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(volID) - // Expect(err).To(BeNil(), "DeleteVolume failed") + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() - // verifyVolumeStats := func(a *verifyArgs) error { - // available, capacity, used, inodesFree, inodes, inodesUsed, err := client.NodeGetVolumeStats(volID, a.publishDir) - // if err != nil { - // return fmt.Errorf("failed to get node volume stats: %v", err.Error()) - // } - // if available != 0 || capacity != common.GbToBytes(defaultSizeGb) || used != 0 || - // inodesFree != 0 || inodes != 0 || inodesUsed != 0 { - // return fmt.Errorf("got: available %v, capacity %v, used %v, inodesFree %v, inodes %v, inodesUsed %v -- expected: capacity = %v, available = 0, used = 0, inodesFree = 0, inodes = 0 , inodesUsed = 0", - // available, capacity, used, inodesFree, inodes, inodesUsed, common.GbToBytes(defaultSizeGb)) - // } - // return nil - // } + verifyVolumeStats := func(a *verifyArgs) error { + available, capacity, used, inodesFree, inodes, inodesUsed, err := client.NodeGetVolumeStats(volID, a.publishDir) + if err != nil { + return fmt.Errorf("failed to get node volume stats: %v", err.Error()) + } + if available != 0 || capacity != common.GbToBytes(defaultSizeGb) || used != 0 || + inodesFree != 0 || inodes != 0 || 
inodesUsed != 0 { + return fmt.Errorf("got: available %v, capacity %v, used %v, inodesFree %v, inodes %v, inodesUsed %v -- expected: capacity = %v, available = 0, used = 0, inodesFree = 0, inodes = 0 , inodesUsed = 0", + available, capacity, used, inodesFree, inodes, inodesUsed, common.GbToBytes(defaultSizeGb)) + } + return nil + } - // // Attach Disk - // err := testLifecycleWithVerify(volID, volName, instance, client, false /* readOnly */, true /* block */, verifyVolumeStats, nil) - // Expect(err).To(BeNil(), "Failed to go through volume lifecycle") - // }) + // Attach Disk + err := testLifecycleWithVerify(volID, volName, instance, client, false /* readOnly */, true /* block */, verifyVolumeStats, nil) + Expect(err).To(BeNil(), "Failed to go through volume lifecycle") + }) - // It("Should get correct VolumeStats", func() { - // testContext := getRandomTestContext() + It("Should get correct VolumeStats", func() { + testContext := getRandomTestContext() - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client - // instance := testContext.Instance + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + instance := testContext.Instance - // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(volID) - // Expect(err).To(BeNil(), "DeleteVolume failed") + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() - // verifyVolumeStats := func(a *verifyArgs) error { - // available, capacity, used, inodesFree, inodes, inodesUsed, err := client.NodeGetVolumeStats(volID, a.publishDir) - // if err != nil { - // return fmt.Errorf("failed to get node volume stats: %v", err.Error()) - // } - // if !equalWithinEpsilon(available, common.GbToBytes(defaultSizeGb), defaultEpsilon) || !equalWithinEpsilon(capacity, common.GbToBytes(defaultSizeGb), defaultEpsilon) || !equalWithinEpsilon(used, 0, defaultEpsilon) || - // inodesFree == 0 || inodes == 0 || inodesUsed == 0 { - // return fmt.Errorf("got: available %v, capacity %v, used %v, inodesFree %v, inodes %v, inodesUsed %v -- expected: available ~= %v, capacity ~= %v, used = 0, inodesFree != 0, inodes != 0 , inodesUsed != 0", - // available, capacity, used, inodesFree, inodes, inodesUsed, common.GbToBytes(defaultSizeGb), common.GbToBytes(defaultSizeGb)) - // } - // return nil - // } + verifyVolumeStats := func(a *verifyArgs) error { + available, capacity, used, inodesFree, inodes, inodesUsed, err := client.NodeGetVolumeStats(volID, a.publishDir) + if err != nil { + return fmt.Errorf("failed to get node volume stats: %v", err.Error()) + } + if !equalWithinEpsilon(available, common.GbToBytes(defaultSizeGb), defaultEpsilon) || !equalWithinEpsilon(capacity, common.GbToBytes(defaultSizeGb), defaultEpsilon) || !equalWithinEpsilon(used, 0, defaultEpsilon) || + inodesFree == 0 || inodes == 0 || inodesUsed == 0 { + return fmt.Errorf("got: available %v, capacity %v, used %v, inodesFree %v, inodes %v, inodesUsed %v -- 
expected: available ~= %v, capacity ~= %v, used = 0, inodesFree != 0, inodes != 0 , inodesUsed != 0", + available, capacity, used, inodesFree, inodes, inodesUsed, common.GbToBytes(defaultSizeGb), common.GbToBytes(defaultSizeGb)) + } + return nil + } - // // Attach Disk - // err := testLifecycleWithVerify(volID, volName, instance, client, false /* readOnly */, false /* fs */, verifyVolumeStats, nil) - // Expect(err).To(BeNil(), "Failed to go through volume lifecycle") - // }) + // Attach Disk + err := testLifecycleWithVerify(volID, volName, instance, client, false /* readOnly */, false /* fs */, verifyVolumeStats, nil) + Expect(err).To(BeNil(), "Failed to go through volume lifecycle") + }) It("Should create and delete multi-writer disk", func() { Expect(testContexts).ToNot(BeEmpty()) @@ -965,545 +967,545 @@ var _ = Describe("GCE PD CSI Driver", func() { Expect(err).To(BeNil(), "Failed to go through volume lifecycle") }) - // DescribeTable("Should successfully create disk with PVC/PV tags", - // func(diskType string) { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + DescribeTable("Should successfully create disk with PVC/PV tags", + func(diskType string) { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // controllerInstance := testContext.Instance - // controllerClient := testContext.Client + controllerInstance := testContext.Instance + controllerClient := testContext.Client - // diskSize := defaultSizeGb - // if diskType == extremeDiskType { - // diskSize = defaultExtremeSizeGb - // } + diskSize := defaultSizeGb + if diskType == extremeDiskType { + diskSize = defaultExtremeSizeGb + } - // p, z, _ := controllerInstance.GetIdentity() + p, z, _ := controllerInstance.GetIdentity() - // // Create Disk - // disk := typeToDisk[diskType] - // volName := testNamePrefix + string(uuid.NewUUID()) - // params := merge(disk.params, map[string]string{ - // common.ParameterKeyPVCName: "test-pvc", - // common.ParameterKeyPVCNamespace: "test-pvc-namespace", - // common.ParameterKeyPVName: "test-pv-name", - // }) - // volume, err := controllerClient.CreateVolume(volName, params, diskSize, nil /* topReq */, nil) - // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // // Validate Disk Created - // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - // Expect(err).To(BeNil(), "Could not get disk from cloud directly") - // Expect(cloudDisk.Status).To(Equal(readyState)) - // Expect(cloudDisk.SizeGb).To(Equal(diskSize)) - // Expect(cloudDisk.Name).To(Equal(volName)) - // Expect(cloudDisk.Description).To(Equal("{\"kubernetes.io/created-for/pv/name\":\"test-pv-name\",\"kubernetes.io/created-for/pvc/name\":\"test-pvc\",\"kubernetes.io/created-for/pvc/namespace\":\"test-pvc-namespace\",\"storage.gke.io/created-by\":\"pd.csi.storage.gke.io\"}")) - // disk.validate(cloudDisk) + // Create Disk + disk := typeToDisk[diskType] + volName := testNamePrefix + string(uuid.NewUUID()) + params := merge(disk.params, map[string]string{ + common.ParameterKeyPVCName: "test-pvc", + common.ParameterKeyPVCNamespace: "test-pvc-namespace", + common.ParameterKeyPVName: "test-pv-name", + }) + volume, err := controllerClient.CreateVolume(volName, params, diskSize, nil /* topReq */, nil) + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // Validate Disk Created + cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + 
Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(diskSize)) + Expect(cloudDisk.Name).To(Equal(volName)) + Expect(cloudDisk.Description).To(Equal("{\"kubernetes.io/created-for/pv/name\":\"test-pv-name\",\"kubernetes.io/created-for/pvc/name\":\"test-pvc\",\"kubernetes.io/created-for/pvc/namespace\":\"test-pvc-namespace\",\"storage.gke.io/created-by\":\"pd.csi.storage.gke.io\"}")) + disk.validate(cloudDisk) - // defer func() { - // // Delete Disk - // controllerClient.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "DeleteVolume failed") + defer func() { + // Delete Disk + controllerClient.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() - // }, - // Entry("on pd-standard", standardDiskType), - // Entry("on pd-extreme", extremeDiskType), - // ) + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + }, + Entry("on pd-standard", standardDiskType), + Entry("on pd-extreme", extremeDiskType), + ) - // // Use the region of the test location. - // It("Should successfully create snapshot with storage locations", func() { - // testContext := getRandomTestContext() + // Use the region of the test location. + It("Should successfully create snapshot with storage locations", func() { + testContext := getRandomTestContext() - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client - // // Create Disk - // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // Create Disk + volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // // Create Snapshot - // snapshotName := testNamePrefix + string(uuid.NewUUID()) + // Create Snapshot + snapshotName := testNamePrefix + string(uuid.NewUUID()) - // // Convert GCP zone to region, e.g. us-central1-a => us-central1 - // // This is safe because we hardcode the zones. - // snapshotLocation := z[:len(z)-2] + // Convert GCP zone to region, e.g. us-central1-a => us-central1 + // This is safe because we hardcode the zones. 
+ snapshotLocation := z[:len(z)-2] - // snapshotParams := map[string]string{ - // common.ParameterKeyStorageLocations: snapshotLocation, - // common.ParameterKeyVolumeSnapshotName: "test-volumesnapshot-name", - // common.ParameterKeyVolumeSnapshotNamespace: "test-volumesnapshot-namespace", - // common.ParameterKeyVolumeSnapshotContentName: "test-volumesnapshotcontent-name", - // } - // snapshotID, err := client.CreateSnapshot(snapshotName, volID, snapshotParams) - // Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) - - // // Validate Snapshot Created - // snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - // Expect(snapshot.Name).To(Equal(snapshotName)) - // Expect(snapshot.Description).To(Equal("{\"kubernetes.io/created-for/volumesnapshot/name\":\"test-volumesnapshot-name\",\"kubernetes.io/created-for/volumesnapshot/namespace\":\"test-volumesnapshot-namespace\",\"kubernetes.io/created-for/volumesnapshotcontent/name\":\"test-volumesnapshotcontent-name\",\"storage.gke.io/created-by\":\"pd.csi.storage.gke.io\"}")) - - // err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { - // snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() - // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - // if snapshot.Status == "READY" { - // return true, nil - // } - // return false, nil - // }) - // Expect(err).To(BeNil(), "Could not wait for snapshot be ready") - - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(volID) - // Expect(err).To(BeNil(), "DeleteVolume failed") - - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - - // // Delete Snapshot - // err = client.DeleteSnapshot(snapshotID) - // Expect(err).To(BeNil(), "DeleteSnapshot failed") - - // // Validate Snapshot Deleted - // _, err = computeService.Snapshots.Get(p, snapshotName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") - // }() - // }) + snapshotParams := map[string]string{ + common.ParameterKeyStorageLocations: snapshotLocation, + common.ParameterKeyVolumeSnapshotName: "test-volumesnapshot-name", + common.ParameterKeyVolumeSnapshotNamespace: "test-volumesnapshot-namespace", + common.ParameterKeyVolumeSnapshotContentName: "test-volumesnapshotcontent-name", + } + snapshotID, err := client.CreateSnapshot(snapshotName, volID, snapshotParams) + Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) + + // Validate Snapshot Created + snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + Expect(snapshot.Name).To(Equal(snapshotName)) + Expect(snapshot.Description).To(Equal("{\"kubernetes.io/created-for/volumesnapshot/name\":\"test-volumesnapshot-name\",\"kubernetes.io/created-for/volumesnapshot/namespace\":\"test-volumesnapshot-namespace\",\"kubernetes.io/created-for/volumesnapshotcontent/name\":\"test-volumesnapshotcontent-name\",\"storage.gke.io/created-by\":\"pd.csi.storage.gke.io\"}")) + + err = wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) { + snapshot, err := computeService.Snapshots.Get(p, snapshotName).Do() + Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + if snapshot.Status == "READY" { + return true, nil + } + return false, nil + }) + 
Expect(err).To(BeNil(), "Could not wait for snapshot be ready") - // // Use the region of the test location. - // It("Should successfully create snapshot backed by disk image", func() { - // testContext := getRandomTestContext() + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // // Create Disk - // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // Delete Snapshot + err = client.DeleteSnapshot(snapshotID) + Expect(err).To(BeNil(), "DeleteSnapshot failed") - // // Create Snapshot - // snapshotName := testNamePrefix + string(uuid.NewUUID()) - // testImageFamily := "test-family" + // Validate Snapshot Deleted + _, err = computeService.Snapshots.Get(p, snapshotName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") + }() + }) - // snapshotParams := map[string]string{common.ParameterKeySnapshotType: common.DiskImageType, common.ParameterKeyImageFamily: testImageFamily} - // snapshotID, err := client.CreateSnapshot(snapshotName, volID, snapshotParams) - // Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) + // Use the region of the test location. + It("Should successfully create snapshot backed by disk image", func() { + testContext := getRandomTestContext() - // // Validate Snapshot Created - // snapshot, err := computeService.Images.Get(p, snapshotName).Do() - // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - // Expect(snapshot.Name).To(Equal(snapshotName)) + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client - // err = wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { - // snapshot, err := computeService.Images.Get(p, snapshotName).Do() - // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - // if snapshot.Status == "READY" { - // return true, nil - // } - // return false, nil - // }) - // Expect(err).To(BeNil(), "Could not wait for snapshot be ready") - - // // Check Snapshot Type - // snapshot, err = computeService.Images.Get(p, snapshotName).Do() - // Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") - // _, snapshotType, _, err := common.SnapshotIDToProjectKey(cleanSelfLink(snapshot.SelfLink)) - // Expect(err).To(BeNil(), "Failed to parse snapshot ID") - // Expect(snapshotType).To(Equal(common.DiskImageType), "Expected images type in snapshot ID") - - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(volID) - // Expect(err).To(BeNil(), "DeleteVolume failed") - - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - - // // Delete Snapshot - // err = client.DeleteSnapshot(snapshotID) - // Expect(err).To(BeNil(), "DeleteSnapshot failed") - - // // Validate Snapshot Deleted - // _, err = computeService.Images.Get(p, snapshotName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") - // }() - // }) + // Create Disk + volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // It("Should successfully create zonal PD from a zonal PD 
VolumeContentSource", func() { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + // Create Snapshot + snapshotName := testNamePrefix + string(uuid.NewUUID()) + testImageFamily := "test-family" - // controllerInstance := testContext.Instance - // controllerClient := testContext.Client + snapshotParams := map[string]string{common.ParameterKeySnapshotType: common.DiskImageType, common.ParameterKeyImageFamily: testImageFamily} + snapshotID, err := client.CreateSnapshot(snapshotName, volID, snapshotParams) + Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) - // p, z, _ := controllerInstance.GetIdentity() + // Validate Snapshot Created + snapshot, err := computeService.Images.Get(p, snapshotName).Do() + Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + Expect(snapshot.Name).To(Equal(snapshotName)) - // // Create Source Disk - // _, srcVolID := createAndValidateUniqueZonalDisk(controllerClient, p, z, standardDiskType) + err = wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { + snapshot, err := computeService.Images.Get(p, snapshotName).Do() + Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + if snapshot.Status == "READY" { + return true, nil + } + return false, nil + }) + Expect(err).To(BeNil(), "Could not wait for snapshot be ready") - // // Create Disk - // volName := testNamePrefix + string(uuid.NewUUID()) - // volume, err := controllerClient.CreateVolume(volName, map[string]string{ - // common.ParameterKeyReplicationType: "none", - // }, defaultSizeGb, - // &csi.TopologyRequirement{ - // Requisite: []*csi.Topology{ - // { - // Segments: map[string]string{common.TopologyKeyZone: z}, - // }, - // }, - // }, - // &csi.VolumeContentSource{ - // Type: &csi.VolumeContentSource_Volume{ - // Volume: &csi.VolumeContentSource_VolumeSource{ - // VolumeId: srcVolID, - // }, - // }, - // }) - - // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // // Validate Disk Created - // cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - // Expect(err).To(BeNil(), "Could not get disk from cloud directly") - // Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - // Expect(cloudDisk.Status).To(Equal(readyState)) - // Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb)) - // Expect(cloudDisk.Name).To(Equal(volName)) - // // Validate the the clone disk zone matches the source disk zone. 
- // _, srcKey, err := common.VolumeIDToKey(srcVolID) - // Expect(err).To(BeNil(), "Could not get source volume key from id") - // Expect(zoneFromURL(cloudDisk.Zone)).To(Equal(srcKey.Zone)) - // defer func() { - // // Delete Disk - // controllerClient.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "DeleteVolume failed") - - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() - // }) + // Check Snapshot Type + snapshot, err = computeService.Images.Get(p, snapshotName).Do() + Expect(err).To(BeNil(), "Could not get snapshot from cloud directly") + _, snapshotType, _, err := common.SnapshotIDToProjectKey(cleanSelfLink(snapshot.SelfLink)) + Expect(err).To(BeNil(), "Failed to parse snapshot ID") + Expect(snapshotType).To(Equal(common.DiskImageType), "Expected images type in snapshot ID") - // It("Should successfully create RePD from a zonal PD VolumeContentSource", func() { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") - // controllerInstance := testContext.Instance - // controllerClient := testContext.Client - - // p, z, _ := controllerInstance.GetIdentity() - - // region, err := common.GetRegionFromZones([]string{z}) - // Expect(err).To(BeNil(), "Failed to get region from zones") - - // // Create Source Disk - // srcVolName := testNamePrefix + string(uuid.NewUUID()) - // srcVolume, err := controllerClient.CreateVolume(srcVolName, map[string]string{ - // common.ParameterKeyReplicationType: "none", - // }, defaultRepdSizeGb, nil, nil) - // // Create Disk - // volName := testNamePrefix + string(uuid.NewUUID()) - // volume, err := controllerClient.CreateVolume(volName, map[string]string{ - // common.ParameterKeyReplicationType: "regional-pd", - // }, defaultRepdSizeGb, nil, - // &csi.VolumeContentSource{ - // Type: &csi.VolumeContentSource_Volume{ - // Volume: &csi.VolumeContentSource_VolumeSource{ - // VolumeId: srcVolume.VolumeId, - // }, - // }, - // }) - - // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // // Validate Disk Created - // cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() - // Expect(err).To(BeNil(), "Could not get disk from cloud directly") - // Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - // Expect(cloudDisk.Status).To(Equal(readyState)) - // Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) - // Expect(cloudDisk.Name).To(Equal(volName)) - // Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) - // replicaZonesCompatible := false - // _, srcKey, err := common.VolumeIDToKey(srcVolume.VolumeId) - // Expect(err).To(BeNil(), "Could not get source volume key from id") - // for _, replicaZone := range cloudDisk.ReplicaZones { - // actualZone := zoneFromURL(replicaZone) - // if actualZone == srcKey.Zone { - // replicaZonesCompatible = true - // } - // gotRegion, err := common.GetRegionFromZones([]string{actualZone}) - // Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) - // Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") - // } - // // Validate that one of the replicaZones of the clone matches the zone of the source disk. 
- // Expect(replicaZonesCompatible).To(Equal(true)) - // defer func() { - // // Delete Disk - // controllerClient.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "DeleteVolume failed") - - // // Validate Disk Deleted - // _, err = computeService.RegionDisks.Get(p, region, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() - // }) + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // It("Should successfully create RePD from a RePD VolumeContentSource", func() { - // Expect(testContexts).ToNot(BeEmpty()) - // testContext := getRandomTestContext() + // Delete Snapshot + err = client.DeleteSnapshot(snapshotID) + Expect(err).To(BeNil(), "DeleteSnapshot failed") - // controllerInstance := testContext.Instance - // controllerClient := testContext.Client - - // p, z, _ := controllerInstance.GetIdentity() - - // region, err := common.GetRegionFromZones([]string{z}) - // Expect(err).To(BeNil(), "Failed to get region from zones") - - // // Create Source Disk - // srcVolName := testNamePrefix + string(uuid.NewUUID()) - // srcVolume, err := controllerClient.CreateVolume(srcVolName, map[string]string{ - // common.ParameterKeyReplicationType: "regional-pd", - // }, defaultRepdSizeGb, nil, nil) - // // Create Disk - // volName := testNamePrefix + string(uuid.NewUUID()) - // volume, err := controllerClient.CreateVolume(volName, map[string]string{ - // common.ParameterKeyReplicationType: "regional-pd", - // }, defaultRepdSizeGb, nil, - // &csi.VolumeContentSource{ - // Type: &csi.VolumeContentSource_Volume{ - // Volume: &csi.VolumeContentSource_VolumeSource{ - // VolumeId: srcVolume.VolumeId, - // }, - // }, - // }) - - // Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // // Validate Disk Created - // cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() - // Expect(err).To(BeNil(), "Could not get disk from cloud directly") - // Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - // Expect(cloudDisk.Status).To(Equal(readyState)) - // Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) - // Expect(cloudDisk.Name).To(Equal(volName)) - // Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) - // // Validate that the replicaZones of the clone match the replicaZones of the source disk. 
- // srcCloudDisk, err := computeService.RegionDisks.Get(p, region, srcVolName).Do() - // Expect(err).To(BeNil(), "Could not get source disk from cloud directly") - // Expect(srcCloudDisk.ReplicaZones).To(Equal(cloudDisk.ReplicaZones)) - // for _, replicaZone := range cloudDisk.ReplicaZones { - // actualZone := zoneFromURL(replicaZone) - // gotRegion, err := common.GetRegionFromZones([]string{actualZone}) - // Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) - // Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") - // } - // defer func() { - // // Delete Disk - // controllerClient.DeleteVolume(volume.VolumeId) - // Expect(err).To(BeNil(), "DeleteVolume failed") - - // // Validate Disk Deleted - // _, err = computeService.RegionDisks.Get(p, region, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() - // }) + // Validate Snapshot Deleted + _, err = computeService.Images.Get(p, snapshotName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") + }() + }) - // It("Should pass if valid compute endpoint is passed in", func() { - // // gets instance set up w/o compute-endpoint set from test setup - // _, err := getRandomTestContext().Client.ListVolumes() - // Expect(err).To(BeNil(), "no error expected when passed valid compute url") + It("Should successfully create zonal PD from a zonal PD VolumeContentSource", func() { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // i := getRandomTestContext().Instance + controllerInstance := testContext.Instance + controllerClient := testContext.Client - // // Create new driver and client with valid, empty endpoint - // klog.Infof("Setup driver with empty compute endpoint %s\n", i.GetName()) - // tcEmpty, err := testutils.GCEClientAndDriverSetup(i, getDriverConfig()) - // if err != nil { - // klog.Fatalf("Failed to set up Test Context for instance %v: %v", i.GetName(), err) - // } - // _, err = tcEmpty.Client.ListVolumes() + p, z, _ := controllerInstance.GetIdentity() - // Expect(err).To(BeNil(), "no error expected when passed empty compute url") + // Create Source Disk + _, srcVolID := createAndValidateUniqueZonalDisk(controllerClient, p, z, standardDiskType) - // // Create new driver and client w/ valid, passed-in endpoint - // driverConfig := getDriverConfig() - // driverConfig.ComputeEndpoint = "https://compute.googleapis.com" - // tcValid, err := testutils.GCEClientAndDriverSetup(i, driverConfig) - // if err != nil { - // klog.Fatalf("Failed to set up Test Context for instance %v: %v", i.GetName(), err) - // } - // _, err = tcValid.Client.ListVolumes() + // Create Disk + volName := testNamePrefix + string(uuid.NewUUID()) + volume, err := controllerClient.CreateVolume(volName, map[string]string{ + common.ParameterKeyReplicationType: "none", + }, defaultSizeGb, + &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: z}, + }, + }, + }, + &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Volume{ + Volume: &csi.VolumeContentSource_VolumeSource{ + VolumeId: srcVolID, + }, + }, + }) + + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // Validate Disk Created + cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + 
Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb)) + Expect(cloudDisk.Name).To(Equal(volName)) + // Validate the the clone disk zone matches the source disk zone. + _, srcKey, err := common.VolumeIDToKey(srcVolID) + Expect(err).To(BeNil(), "Could not get source volume key from id") + Expect(zoneFromURL(cloudDisk.Zone)).To(Equal(srcKey.Zone)) + defer func() { + // Delete Disk + controllerClient.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "DeleteVolume failed") - // Expect(err).To(BeNil(), "no error expected when passed valid compute url") - // }) + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + }) - // It("[NVMe] Should update readahead if read_ahead_kb passed on mount", func() { - // testContext := getRandomTestContext() + It("Should successfully create RePD from a zonal PD VolumeContentSource", func() { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client - // instance := testContext.Instance + controllerInstance := testContext.Instance + controllerClient := testContext.Client + + p, z, _ := controllerInstance.GetIdentity() + + region, err := common.GetRegionFromZones([]string{z}) + Expect(err).To(BeNil(), "Failed to get region from zones") + + // Create Source Disk + srcVolName := testNamePrefix + string(uuid.NewUUID()) + srcVolume, err := controllerClient.CreateVolume(srcVolName, map[string]string{ + common.ParameterKeyReplicationType: "none", + }, defaultRepdSizeGb, nil, nil) + // Create Disk + volName := testNamePrefix + string(uuid.NewUUID()) + volume, err := controllerClient.CreateVolume(volName, map[string]string{ + common.ParameterKeyReplicationType: "regional-pd", + }, defaultRepdSizeGb, nil, + &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Volume{ + Volume: &csi.VolumeContentSource_VolumeSource{ + VolumeId: srcVolume.VolumeId, + }, + }, + }) + + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // Validate Disk Created + cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) + Expect(cloudDisk.Name).To(Equal(volName)) + Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) + replicaZonesCompatible := false + _, srcKey, err := common.VolumeIDToKey(srcVolume.VolumeId) + Expect(err).To(BeNil(), "Could not get source volume key from id") + for _, replicaZone := range cloudDisk.ReplicaZones { + actualZone := zoneFromURL(replicaZone) + if actualZone == srcKey.Zone { + replicaZonesCompatible = true + } + gotRegion, err := common.GetRegionFromZones([]string{actualZone}) + Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) + Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") + } + // Validate that one of the replicaZones of the clone matches the zone of the source disk. 
+ Expect(replicaZonesCompatible).To(Equal(true)) + defer func() { + // Delete Disk + controllerClient.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Create Disk - // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + // Validate Disk Deleted + _, err = computeService.RegionDisks.Get(p, region, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + }) - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(volID) - // Expect(err).To(BeNil(), "DeleteVolume failed") + It("Should successfully create RePD from a RePD VolumeContentSource", func() { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() + controllerInstance := testContext.Instance + controllerClient := testContext.Client + + p, z, _ := controllerInstance.GetIdentity() + + region, err := common.GetRegionFromZones([]string{z}) + Expect(err).To(BeNil(), "Failed to get region from zones") + + // Create Source Disk + srcVolName := testNamePrefix + string(uuid.NewUUID()) + srcVolume, err := controllerClient.CreateVolume(srcVolName, map[string]string{ + common.ParameterKeyReplicationType: "regional-pd", + }, defaultRepdSizeGb, nil, nil) + // Create Disk + volName := testNamePrefix + string(uuid.NewUUID()) + volume, err := controllerClient.CreateVolume(volName, map[string]string{ + common.ParameterKeyReplicationType: "regional-pd", + }, defaultRepdSizeGb, nil, + &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Volume{ + Volume: &csi.VolumeContentSource_VolumeSource{ + VolumeId: srcVolume.VolumeId, + }, + }, + }) + + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // Validate Disk Created + cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) + Expect(cloudDisk.Name).To(Equal(volName)) + Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) + // Validate that the replicaZones of the clone match the replicaZones of the source disk. 
+ srcCloudDisk, err := computeService.RegionDisks.Get(p, region, srcVolName).Do() + Expect(err).To(BeNil(), "Could not get source disk from cloud directly") + Expect(srcCloudDisk.ReplicaZones).To(Equal(cloudDisk.ReplicaZones)) + for _, replicaZone := range cloudDisk.ReplicaZones { + actualZone := zoneFromURL(replicaZone) + gotRegion, err := common.GetRegionFromZones([]string{actualZone}) + Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) + Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") + } + defer func() { + // Delete Disk + controllerClient.DeleteVolume(volume.VolumeId) + Expect(err).To(BeNil(), "DeleteVolume failed") - // // Attach Disk - // err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) - // Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID(), err) + // Validate Disk Deleted + _, err = computeService.RegionDisks.Get(p, region, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + }) - // defer func() { - // // Detach Disk - // err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) - // if err != nil { - // klog.Errorf("Failed to detach disk: %v", err) - // } + It("Should pass if valid compute endpoint is passed in", func() { + // gets instance set up w/o compute-endpoint set from test setup + _, err := getRandomTestContext().Client.ListVolumes() + Expect(err).To(BeNil(), "no error expected when passed valid compute url") - // }() + i := getRandomTestContext().Instance - // // Stage Disk - // stageDir := filepath.Join("/tmp/", volName, "stage") - // expectedReadAheadKB := "4096" - // volCap := &csi.VolumeCapability{ - // AccessType: &csi.VolumeCapability_Mount{ - // Mount: &csi.VolumeCapability_MountVolume{ - // MountFlags: []string{fmt.Sprintf("read_ahead_kb=%s", expectedReadAheadKB)}, - // }, - // }, - // AccessMode: &csi.VolumeCapability_AccessMode{ - // Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - // }, - // } - // err = client.NodeStageVolume(volID, stageDir, volCap) - // Expect(err).To(BeNil(), "failed to stage volume: %v", err) - - // // Validate that the link is correct - // var validated bool - // var devName string - // devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") - // for _, devicePath := range devicePaths { - // validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) - // Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) - // if validated { - // devFsPath, err := instance.SSH("find", devicePath, "-printf", "'%l'") - // Expect(err).To(BeNil(), "Failed to symlink devicePath") - // devFsPathPieces := strings.Split(devFsPath, "/") - // devName = devFsPathPieces[len(devFsPathPieces)-1] - // break - // } - // } - // Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) - // actualReadAheadKBStr, err := instance.SSH("cat", fmt.Sprintf("/sys/block/%s/queue/read_ahead_kb", devName)) - // actualReadAheadKB := strings.TrimSpace(actualReadAheadKBStr) - // Expect(err).To(BeNil(), "Failed to read read_ahead_kb: %v", err) - // Expect(actualReadAheadKB).To(Equal(expectedReadAheadKB), "unexpected read_ahead_kb") - - // defer func() { - // // Unstage Disk - // err = client.NodeUnstageVolume(volID, stageDir) - // if err != nil { - // 
klog.Errorf("Failed to unstage volume: %v", err) - // } - // fp := filepath.Join("/tmp/", volName) - // err = testutils.RmAll(instance, fp) - // if err != nil { - // klog.Errorf("Failed to rm file path %s: %v", fp, err) - // } - // }() - // }) + // Create new driver and client with valid, empty endpoint + klog.Infof("Setup driver with empty compute endpoint %s\n", i.GetName()) + tcEmpty, err := testutils.GCEClientAndDriverSetup(i, getDriverConfig()) + if err != nil { + klog.Fatalf("Failed to set up Test Context for instance %v: %v", i.GetName(), err) + } + _, err = tcEmpty.Client.ListVolumes() - // It("Should block unstage if filesystem mounted", func() { - // testContext := getRandomTestContext() + Expect(err).To(BeNil(), "no error expected when passed empty compute url") - // p, z, _ := testContext.Instance.GetIdentity() - // client := testContext.Client - // instance := testContext.Instance + // Create new driver and client w/ valid, passed-in endpoint + driverConfig := getDriverConfig() + driverConfig.ComputeEndpoint = "https://compute.googleapis.com" + tcValid, err := testutils.GCEClientAndDriverSetup(i, driverConfig) + if err != nil { + klog.Fatalf("Failed to set up Test Context for instance %v: %v", i.GetName(), err) + } + _, err = tcValid.Client.ListVolumes() - // // Create Disk - // volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + Expect(err).To(BeNil(), "no error expected when passed valid compute url") + }) - // defer func() { - // // Delete Disk - // err := client.DeleteVolume(volID) - // Expect(err).To(BeNil(), "DeleteVolume failed") + It("[NVMe] Should update readahead if read_ahead_kb passed on mount", func() { + testContext := getRandomTestContext() - // // Validate Disk Deleted - // _, err = computeService.Disks.Get(p, z, volName).Do() - // Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") - // }() + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + instance := testContext.Instance - // // Attach Disk - // err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) - // Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID(), err) + // Create Disk + volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) - // defer func() { - // // Detach Disk - // err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) - // if err != nil { - // klog.Errorf("Failed to detach disk: %v", err) - // } - // }() - - // // Stage Disk - // stageDir := filepath.Join("/tmp/", volName, "stage") - // err = client.NodeStageExt4Volume(volID, stageDir) - // Expect(err).To(BeNil(), "failed to stage volume: %v", err) - - // // Create private bind mount - // boundMountStageDir := filepath.Join("/tmp/bindmount", volName, "bindmount") - // boundMountStageMkdirOutput, err := instance.SSH("mkdir", "-p", boundMountStageDir) - // Expect(err).To(BeNil(), "mkdir failed on instance %v: output: %v: %v", instance.GetNodeID(), boundMountStageMkdirOutput, err) - // bindMountOutput, err := instance.SSH("mount", "--rbind", "--make-private", stageDir, boundMountStageDir) - // Expect(err).To(BeNil(), "Bind mount failed on instance %v: output: %v: %v", instance.GetNodeID(), bindMountOutput, err) - - // privateBindMountRemoved := false - // unmountAndRmPrivateBindMount := func() { - // if !privateBindMountRemoved { - // // Umount and delete private mount staging directory - // 
bindUmountOutput, err := instance.SSH("umount", boundMountStageDir) - // Expect(err).To(BeNil(), "Bind mount failed on instance %v: output: %v: %v", instance.GetNodeID(), bindUmountOutput, err) - // err = testutils.RmAll(instance, boundMountStageDir) - // Expect(err).To(BeNil(), "Failed to rm mount stage dir %s: %v", boundMountStageDir, err) - // } - // privateBindMountRemoved = true - // } + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") + + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() - // defer func() { - // unmountAndRmPrivateBindMount() - // }() + // Attach Disk + err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) + Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID(), err) - // // Unstage Disk - // err = client.NodeUnstageVolume(volID, stageDir) - // Expect(err).ToNot(BeNil(), "Expected failure during unstage") - // Expect(err).To(MatchError(ContainSubstring(("is still in use")))) + defer func() { + // Detach Disk + err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) + if err != nil { + klog.Errorf("Failed to detach disk: %v", err) + } - // // Unmount private bind mount and try again - // unmountAndRmPrivateBindMount() + }() - // // Unstage Disk - // err = client.NodeUnstageVolume(volID, stageDir) - // Expect(err).To(BeNil(), "Failed to unstage volume: %v", err) - // fp := filepath.Join("/tmp/", volName) - // err = testutils.RmAll(instance, fp) - // Expect(err).To(BeNil(), "Failed to rm file path %s: %v", fp, err) - // }) + // Stage Disk + stageDir := filepath.Join("/tmp/", volName, "stage") + expectedReadAheadKB := "4096" + volCap := &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{ + MountFlags: []string{fmt.Sprintf("read_ahead_kb=%s", expectedReadAheadKB)}, + }, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + } + err = client.NodeStageVolume(volID, stageDir, volCap) + Expect(err).To(BeNil(), "failed to stage volume: %v", err) + + // Validate that the link is correct + var validated bool + var devName string + devicePaths := deviceutils.NewDeviceUtils().GetDiskByIdPaths(volName, "") + for _, devicePath := range devicePaths { + validated, err = testutils.ValidateLogicalLinkIsDisk(instance, devicePath, volName) + Expect(err).To(BeNil(), "failed to validate link %s is disk %s: %v", stageDir, volName, err) + if validated { + devFsPath, err := instance.SSH("find", devicePath, "-printf", "'%l'") + Expect(err).To(BeNil(), "Failed to symlink devicePath") + devFsPathPieces := strings.Split(devFsPath, "/") + devName = devFsPathPieces[len(devFsPathPieces)-1] + break + } + } + Expect(validated).To(BeTrue(), "could not find device in %v that links to volume %s", devicePaths, volName) + actualReadAheadKBStr, err := instance.SSH("cat", fmt.Sprintf("/sys/block/%s/queue/read_ahead_kb", devName)) + actualReadAheadKB := strings.TrimSpace(actualReadAheadKBStr) + Expect(err).To(BeNil(), "Failed to read read_ahead_kb: %v", err) + Expect(actualReadAheadKB).To(Equal(expectedReadAheadKB), "unexpected read_ahead_kb") + + defer func() { + // Unstage Disk + err = client.NodeUnstageVolume(volID, stageDir) + if err != nil { + klog.Errorf("Failed to 
unstage volume: %v", err) + } + fp := filepath.Join("/tmp/", volName) + err = testutils.RmAll(instance, fp) + if err != nil { + klog.Errorf("Failed to rm file path %s: %v", fp, err) + } + }() + }) + + It("Should block unstage if filesystem mounted", func() { + testContext := getRandomTestContext() + + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + instance := testContext.Instance + + // Create Disk + volName, volID := createAndValidateUniqueZonalDisk(client, p, z, standardDiskType) + + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") + + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + + // Attach Disk + err := client.ControllerPublishVolumeReadWrite(volID, instance.GetNodeID(), false /* forceAttach */) + Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volID, instance.GetNodeID(), err) + + defer func() { + // Detach Disk + err = client.ControllerUnpublishVolume(volID, instance.GetNodeID()) + if err != nil { + klog.Errorf("Failed to detach disk: %v", err) + } + }() + + // Stage Disk + stageDir := filepath.Join("/tmp/", volName, "stage") + err = client.NodeStageExt4Volume(volID, stageDir) + Expect(err).To(BeNil(), "failed to stage volume: %v", err) + + // Create private bind mount + boundMountStageDir := filepath.Join("/tmp/bindmount", volName, "bindmount") + boundMountStageMkdirOutput, err := instance.SSH("mkdir", "-p", boundMountStageDir) + Expect(err).To(BeNil(), "mkdir failed on instance %v: output: %v: %v", instance.GetNodeID(), boundMountStageMkdirOutput, err) + bindMountOutput, err := instance.SSH("mount", "--rbind", "--make-private", stageDir, boundMountStageDir) + Expect(err).To(BeNil(), "Bind mount failed on instance %v: output: %v: %v", instance.GetNodeID(), bindMountOutput, err) + + privateBindMountRemoved := false + unmountAndRmPrivateBindMount := func() { + if !privateBindMountRemoved { + // Umount and delete private mount staging directory + bindUmountOutput, err := instance.SSH("umount", boundMountStageDir) + Expect(err).To(BeNil(), "Bind mount failed on instance %v: output: %v: %v", instance.GetNodeID(), bindUmountOutput, err) + err = testutils.RmAll(instance, boundMountStageDir) + Expect(err).To(BeNil(), "Failed to rm mount stage dir %s: %v", boundMountStageDir, err) + } + privateBindMountRemoved = true + } + + defer func() { + unmountAndRmPrivateBindMount() + }() + + // Unstage Disk + err = client.NodeUnstageVolume(volID, stageDir) + Expect(err).ToNot(BeNil(), "Expected failure during unstage") + Expect(err).To(MatchError(ContainSubstring(("is still in use")))) + + // Unmount private bind mount and try again + unmountAndRmPrivateBindMount() + + // Unstage Disk + err = client.NodeUnstageVolume(volID, stageDir) + Expect(err).To(BeNil(), "Failed to unstage volume: %v", err) + fp := filepath.Join("/tmp/", volName) + err = testutils.RmAll(instance, fp) + Expect(err).To(BeNil(), "Failed to rm file path %s: %v", fp, err) + }) type multiZoneTestConfig struct { diskType string From c22597ecc30c2f7ddd1fb9efb980c9573b2eecdc Mon Sep 17 00:00:00 2001 From: Sam Serdlow Date: Mon, 13 Jan 2025 19:54:35 +0000 Subject: [PATCH 08/10] Fixing some formatting. 
--- test/e2e/tests/setup_e2e_test.go | 19 ++++++++++--------- test/run-e2e-local.sh | 2 -- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/test/e2e/tests/setup_e2e_test.go b/test/e2e/tests/setup_e2e_test.go index e6af641b8..54a776187 100644 --- a/test/e2e/tests/setup_e2e_test.go +++ b/test/e2e/tests/setup_e2e_test.go @@ -35,16 +35,17 @@ import ( remote "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/remote" ) -// Multi-writer is only supported on M3, C3, and N4 https://cloud.google.com/compute/docs/disks/sharing-disks-between-vms#hd-multi-writer var ( - project = flag.String("project", "", "Project to run tests in") - serviceAccount = flag.String("service-account", "", "Service account to bring up instance with") - vmNamePrefix = flag.String("vm-name-prefix", "gce-pd-csi-e2e", "VM name prefix") - architecture = flag.String("arch", "amd64", "Architecture pd csi driver build on") - minCpuPlatform = flag.String("min-cpu-platform", "rome", "Minimum CPU architecture") - mwMinCpuPlatform = flag.String("min-cpu-platform-mw", "sapphirerapids", "Minimum CPU architecture for multiwriter tests") - zones = flag.String("zones", "us-east4-a,us-east4-c", "Zones to run tests in. If there are multiple zones, separate each by comma") - machineType = flag.String("machine-type", "n2d-standard-4", "Type of machine to provision instance on") + project = flag.String("project", "", "Project to run tests in") + serviceAccount = flag.String("service-account", "", "Service account to bring up instance with") + vmNamePrefix = flag.String("vm-name-prefix", "gce-pd-csi-e2e", "VM name prefix") + architecture = flag.String("arch", "amd64", "Architecture pd csi driver build on") + minCpuPlatform = flag.String("min-cpu-platform", "rome", "Minimum CPU architecture") + mwMinCpuPlatform = flag.String("min-cpu-platform-mw", "sapphirerapids", "Minimum CPU architecture for multiwriter tests") + zones = flag.String("zones", "us-east4-a,us-east4-c", "Zones to run tests in. If there are multiple zones, separate each by comma") + machineType = flag.String("machine-type", "n2d-standard-4", "Type of machine to provision instance on") + // Multi-writer is only supported on M3, C3, and N4 + // https://cloud.google.com/compute/docs/disks/sharing-disks-between-vms#hd-multi-writer mwMachineType = flag.String("mw-machine-type", "c3-standard-4", "Type of machine to provision instance for multiwriter tests") imageURL = flag.String("image-url", "projects/ubuntu-os-cloud/global/images/family/ubuntu-minimal-2404-lts-amd64", "OS image url to get image from") runInProw = flag.Bool("run-in-prow", false, "If true, use a Boskos loaned project and special CI service accounts and ssh keys") diff --git a/test/run-e2e-local.sh b/test/run-e2e-local.sh index 9a86240e4..835ef10b2 100755 --- a/test/run-e2e-local.sh +++ b/test/run-e2e-local.sh @@ -16,6 +16,4 @@ if hostname | grep -q c.googlers.com ; then CLOUDTOP_HOST=--cloudtop-host fi - - ginkgo --v --progress "test/e2e/tests" -- --project "${PROJECT}" --service-account "${IAM_NAME}" "${CLOUDTOP_HOST}" --v=6 --logtostderr $@ From 0f0b9b1f7a8cf57b3efb783d53d69cfc763f576c Mon Sep 17 00:00:00 2001 From: Sam Serdlow Date: Mon, 13 Jan 2025 19:57:06 +0000 Subject: [PATCH 09/10] More formatting fixes. 
--- test/e2e/tests/setup_e2e_test.go | 2 -- test/e2e/tests/single_zone_e2e_test.go | 1 - test/run-e2e-local.sh | 2 +- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/test/e2e/tests/setup_e2e_test.go b/test/e2e/tests/setup_e2e_test.go index 54a776187..f7a271a42 100644 --- a/test/e2e/tests/setup_e2e_test.go +++ b/test/e2e/tests/setup_e2e_test.go @@ -150,8 +150,6 @@ func getDriverConfig() testutils.DriverConfig { } } -// Could do a multi writer optional variable here. That'd force the mincpu platfor and machinetype to a specific thing -// Create a new context, and have the multi writer contexts run on only that? func NewTestContext(zone string, machineType string, minCpuPlatform string) *remote.TestContext { nodeID := fmt.Sprintf("%s-%s-%s", *vmNamePrefix, zone, machineType) klog.Infof("Setting up node %s", nodeID) diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go index c5dbcef20..80450a3d9 100644 --- a/test/e2e/tests/single_zone_e2e_test.go +++ b/test/e2e/tests/single_zone_e2e_test.go @@ -1706,7 +1706,6 @@ func createAndValidateUniqueZonalMultiWriterDisk(client *remote.CsiClient, proje disk := typeToDisk[diskType] disk.params[common.ParameterAccessMode] = "READ_WRITE_MANY" - // .AccessMode volName := testNamePrefix + string(uuid.NewUUID()) volume, err := client.CreateVolumeWithCaps(volName, disk.params, defaultMwSizeGb, &csi.TopologyRequirement{ diff --git a/test/run-e2e-local.sh b/test/run-e2e-local.sh index 835ef10b2..b6b9d5554 100755 --- a/test/run-e2e-local.sh +++ b/test/run-e2e-local.sh @@ -16,4 +16,4 @@ if hostname | grep -q c.googlers.com ; then CLOUDTOP_HOST=--cloudtop-host fi -ginkgo --v --progress "test/e2e/tests" -- --project "${PROJECT}" --service-account "${IAM_NAME}" "${CLOUDTOP_HOST}" --v=6 --logtostderr $@ +ginkgo --v "test/e2e/tests" -- --project "${PROJECT}" --service-account "${IAM_NAME}" "${CLOUDTOP_HOST}" --v=6 --logtostderr $@ From 34d560270e8946598b344787a3bd00ef54131c41 Mon Sep 17 00:00:00 2001 From: Sam Serdlow Date: Mon, 13 Jan 2025 19:58:47 +0000 Subject: [PATCH 10/10] Hopefully last changes for formatting. --- test/run-e2e-local.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/run-e2e-local.sh b/test/run-e2e-local.sh index b6b9d5554..aaf9a65d5 100755 --- a/test/run-e2e-local.sh +++ b/test/run-e2e-local.sh @@ -16,4 +16,4 @@ if hostname | grep -q c.googlers.com ; then CLOUDTOP_HOST=--cloudtop-host fi -ginkgo --v "test/e2e/tests" -- --project "${PROJECT}" --service-account "${IAM_NAME}" "${CLOUDTOP_HOST}" --v=6 --logtostderr $@ +ginkgo --v "test/e2e/tests" -- --project "${PROJECT}" --service-account "${IAM_NAME}" "${CLOUDTOP_HOST}" --v=6 --logtostderr $@
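
Note on running the suite locally: run-e2e-local.sh appends any arguments passed to the script after the "--" separator in the ginkgo command, so they reach the e2e test binary and can override the flags declared in setup_e2e_test.go (machine-type, zones, min-cpu-platform, mw-machine-type, and so on). A minimal invocation sketch, assuming PROJECT and IAM_NAME are supplied by the environment (their assignment is outside the hunks shown here) and using placeholder project and service-account values:

    # placeholder project and service account; override zone and machine type via pass-through flags
    PROJECT=my-gcp-project \
    IAM_NAME=pd-csi-e2e@my-gcp-project.iam.gserviceaccount.com \
    ./test/run-e2e-local.sh --zones=us-east4-a --machine-type=n2d-standard-4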