diff --git a/pkg/common/parameters.go b/pkg/common/parameters.go
index 18cca76fb..cd973b319 100644
--- a/pkg/common/parameters.go
+++ b/pkg/common/parameters.go
@@ -80,6 +80,7 @@ const (
 	DiskTypeHdHA = "hyperdisk-balanced-high-availability"
 	DiskTypeHdT  = "hyperdisk-throughput"
 	DiskTypeHdE  = "hyperdisk-extreme"
+	DiskTypeHdML = "hyperdisk-ml"
 )
 
 type DataCacheParameters struct {
diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go
index fcb1ddb60..dcb0e3c45 100644
--- a/pkg/gce-pd-csi-driver/controller.go
+++ b/pkg/gce-pd-csi-driver/controller.go
@@ -232,7 +232,7 @@ var (
 		"nextPageToken",
 	}
 	listDisksFieldsWithUsers      = append(listDisksFieldsWithoutUsers, "items/users")
-	disksWithModifiableAccessMode = []string{"hyperdisk-ml"}
+	disksWithModifiableAccessMode = []string{common.DiskTypeHdML}
 	disksWithUnsettableAccessMode = map[string]bool{
 		common.DiskTypeHdE: true,
 		common.DiskTypeHdT: true,
@@ -374,7 +374,8 @@ func (gceCS *GCEControllerServer) createVolumeInternal(ctx context.Context, req
 
 	// Validate VolumeContentSource is set when access mode is read only
 	readonly, _ := getReadOnlyFromCapabilities(volumeCapabilities)
-	if readonly && req.GetVolumeContentSource() == nil {
+
+	if readonly && req.GetVolumeContentSource() == nil && params.DiskType != common.DiskTypeHdML {
 		return nil, status.Error(codes.InvalidArgument, "VolumeContentSource must be provided when AccessMode is set to read only")
 	}
 
@@ -485,12 +486,19 @@ func (gceCS *GCEControllerServer) createMultiZoneDisk(ctx context.Context, req *
 	}
 	defer gceCS.volumeLocks.Release(volumeID)
 
+	// If creating an empty disk (content source nil), always create RWO disks (when supported)
+	// This allows disks to be created as underlying RWO disks, so they can be hydrated.
+	accessMode := common.GCEReadWriteOnceAccessMode
+	if req.GetVolumeContentSource() != nil {
+		accessMode = common.GCEReadOnlyManyAccessMode
+	}
+
 	createDiskErrs := []error{}
 	createdDisks := make([]*gce.CloudDisk, 0, len(zones))
 	for _, zone := range zones {
 		volKey := meta.ZonalKey(req.GetName(), zone)
 		klog.V(4).Infof("Creating single zone disk for zone %q and volume: %v", zone, volKey)
-		disk, err := gceCS.createSingleDisk(ctx, req, params, volKey, []string{zone})
+		disk, err := gceCS.createSingleDisk(ctx, req, params, volKey, []string{zone}, accessMode)
 		if err != nil {
 			createDiskErrs = append(createDiskErrs, err)
 			continue
@@ -593,11 +601,23 @@ func (gceCS *GCEControllerServer) createSingleDeviceDisk(ctx context.Context, re
 	if err != nil {
 		return nil, common.LoggedError("Failed to convert volume key to volume ID: ", err)
 	}
+	accessMode, err := getAccessMode(req, params)
+	if err != nil {
+		return nil, common.LoggedError("Failed to get access mode: ", err)
+	}
+
+	// If creating an empty disk (content source nil), always create RWO disks (when supported)
+	// This allows disks to be created as underlying RWO disks, so they can be hydrated.
+	readonly, _ := getReadOnlyFromCapabilities(req.GetVolumeCapabilities())
+	if readonly && req.GetVolumeContentSource() == nil && params.DiskType == common.DiskTypeHdML {
+		accessMode = common.GCEReadWriteOnceAccessMode
+	}
+
 	if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired {
 		return nil, status.Errorf(codes.Aborted, common.VolumeOperationAlreadyExistsFmt, volumeID)
 	}
 	defer gceCS.volumeLocks.Release(volumeID)
-	disk, err := gceCS.createSingleDisk(ctx, req, params, volKey, zones)
+	disk, err := gceCS.createSingleDisk(ctx, req, params, volKey, zones, accessMode)
 	if err != nil {
 		return nil, common.LoggedError("CreateVolume failed: %v", err)
@@ -606,30 +626,36 @@ func (gceCS *GCEControllerServer) createSingleDeviceDisk(ctx context.Context, re
 	return generateCreateVolumeResponseWithVolumeId(disk, zones, params, dataCacheParams, enableDataCache, volumeID), err
 }
 
-func (gceCS *GCEControllerServer) createSingleDisk(ctx context.Context, req *csi.CreateVolumeRequest, params common.DiskParameters, volKey *meta.Key, zones []string) (*gce.CloudDisk, error) {
-	capacityRange := req.GetCapacityRange()
-	capBytes, _ := getRequestCapacity(capacityRange)
+func getAccessMode(req *csi.CreateVolumeRequest, params common.DiskParameters) (string, error) {
 	readonly, _ := getReadOnlyFromCapabilities(req.GetVolumeCapabilities())
-	accessMode := ""
-	multiWriter := false
 	if common.IsHyperdisk(params.DiskType) {
 		if am, err := getHyperdiskAccessModeFromCapabilities(req.GetVolumeCapabilities()); err != nil {
-			return nil, err
+			return "", err
 		} else if disksWithUnsettableAccessMode[params.DiskType] {
 			// Disallow multi-attach for HdT and HdE. These checks were done in `createVolumeInternal`,
 			// but repeating them here future-proves us from possible refactors.
 			if am != common.GCEReadWriteOnceAccessMode {
-				return nil, status.Errorf(codes.Internal, "")
+				return "", status.Errorf(codes.Internal, "")
 			}
 		} else {
-			accessMode = am
+			return am, nil
 		}
-	} else {
-		multiWriter, _ = getMultiWriterFromCapabilities(req.GetVolumeCapabilities())
 	}
 	if readonly && slices.Contains(disksWithModifiableAccessMode, params.DiskType) {
-		accessMode = common.GCEReadOnlyManyAccessMode
+		return common.GCEReadOnlyManyAccessMode, nil
+	}
+
+	return "", nil
+}
+
+func (gceCS *GCEControllerServer) createSingleDisk(ctx context.Context, req *csi.CreateVolumeRequest, params common.DiskParameters, volKey *meta.Key, zones []string, accessMode string) (*gce.CloudDisk, error) {
+	capacityRange := req.GetCapacityRange()
+	capBytes, _ := getRequestCapacity(capacityRange)
+
+	multiWriter := false
+	if !common.IsHyperdisk(params.DiskType) {
+		multiWriter, _ = getMultiWriterFromCapabilities(req.GetVolumeCapabilities())
 	}
 
 	// Validate if disk already exists
diff --git a/pkg/gce-pd-csi-driver/controller_test.go b/pkg/gce-pd-csi-driver/controller_test.go
index 30a508ae8..f55d33529 100644
--- a/pkg/gce-pd-csi-driver/controller_test.go
+++ b/pkg/gce-pd-csi-driver/controller_test.go
@@ -1298,6 +1298,54 @@ func TestCreateVolumeArguments(t *testing.T) {
 			},
 			expErrCode: codes.InvalidArgument,
 		},
+		{
+			name: "err empty HdB ROX single-zone disk no content source",
+			req: &csi.CreateVolumeRequest{
+				Name:          name,
+				CapacityRange: stdCapRange,
+				VolumeCapabilities: []*csi.VolumeCapability{
+					{
+						AccessType: &csi.VolumeCapability_Mount{
+							Mount: &csi.VolumeCapability_MountVolume{},
+						},
+						AccessMode: &csi.VolumeCapability_AccessMode{
+							Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
+						},
+					},
+				},
+				Parameters: map[string]string{
common.ParameterKeyType: "hyperdisk-balanced", + }, + }, + expErrCode: codes.InvalidArgument, + }, + { + name: "success empty HdML ROX single-zone disk no content source", + req: &csi.CreateVolumeRequest{ + Name: name, + CapacityRange: stdCapRange, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY, + }, + }, + }, + Parameters: map[string]string{ + common.ParameterKeyType: "hyperdisk-ml", + }, + }, + expVol: &csi.Volume{ + CapacityBytes: common.GbToBytes(20), + VolumeId: testVolumeID, + VolumeContext: nil, + AccessibleTopology: stdTopology, + }, + expErrCode: codes.OK, + }, } // Run test cases @@ -1318,6 +1366,7 @@ func TestCreateVolumeArguments(t *testing.T) { } continue } + t.Logf("ErroCode: %v", err) if tc.expErrCode != codes.OK { t.Fatalf("Expected error: %v, got no error", tc.expErrCode) } @@ -1504,7 +1553,7 @@ func TestMultiZoneVolumeCreation(t *testing.T) { expZones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, }, { - name: "err single ROX multi-zone no topology", + name: "success empty HdML ROX multi-zone disk no content source", req: &csi.CreateVolumeRequest{ Name: "test-name", CapacityRange: stdCapRange, @@ -1522,18 +1571,26 @@ func TestMultiZoneVolumeCreation(t *testing.T) { common.ParameterKeyType: "hyperdisk-ml", common.ParameterKeyEnableMultiZoneProvisioning: "true", }, - VolumeContentSource: &csi.VolumeContentSource{ - Type: &csi.VolumeContentSource_Snapshot{ - Snapshot: &csi.VolumeContentSource_SnapshotSource{ - SnapshotId: testSnapshotID, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, }, }, }, }, - expErrCode: codes.InvalidArgument, + expZones: []string{"us-central1-a", "us-central1-b"}, }, { - name: "err rwo access mode", + name: "error empty HdB ROX multi-zone disk no content source", req: &csi.CreateVolumeRequest{ Name: "test-name", CapacityRange: stdCapRange, @@ -1543,21 +1600,14 @@ func TestMultiZoneVolumeCreation(t *testing.T) { Mount: &csi.VolumeCapability_MountVolume{}, }, AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY, }, }, }, Parameters: map[string]string{ - common.ParameterKeyType: "hyperdisk-ml", + common.ParameterKeyType: "hyperdisk-balanced", common.ParameterKeyEnableMultiZoneProvisioning: "true", }, - VolumeContentSource: &csi.VolumeContentSource{ - Type: &csi.VolumeContentSource_Snapshot{ - Snapshot: &csi.VolumeContentSource_SnapshotSource{ - SnapshotId: testSnapshotID, - }, - }, - }, AccessibilityRequirements: &csi.TopologyRequirement{ Requisite: []*csi.Topology{ { @@ -1568,13 +1618,16 @@ func TestMultiZoneVolumeCreation(t *testing.T) { { Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, }, }, }, expErrCode: codes.InvalidArgument, }, { - name: "err no content source", + name: "err single ROX multi-zone no topology", req: &csi.CreateVolumeRequest{ Name: "test-name", CapacityRange: stdCapRange, @@ -1592,6 +1645,42 
@@ -1592,6 +1645,42 @@ func TestMultiZoneVolumeCreation(t *testing.T) {
 					common.ParameterKeyType:                        "hyperdisk-ml",
 					common.ParameterKeyEnableMultiZoneProvisioning: "true",
 				},
+				VolumeContentSource: &csi.VolumeContentSource{
+					Type: &csi.VolumeContentSource_Snapshot{
+						Snapshot: &csi.VolumeContentSource_SnapshotSource{
+							SnapshotId: testSnapshotID,
+						},
+					},
+				},
+			},
+			expErrCode: codes.InvalidArgument,
+		},
+		{
+			name: "err rwo access mode",
+			req: &csi.CreateVolumeRequest{
+				Name:          "test-name",
+				CapacityRange: stdCapRange,
+				VolumeCapabilities: []*csi.VolumeCapability{
+					{
+						AccessType: &csi.VolumeCapability_Mount{
+							Mount: &csi.VolumeCapability_MountVolume{},
+						},
+						AccessMode: &csi.VolumeCapability_AccessMode{
+							Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
+						},
+					},
+				},
+				Parameters: map[string]string{
+					common.ParameterKeyType:                        "hyperdisk-ml",
+					common.ParameterKeyEnableMultiZoneProvisioning: "true",
+				},
+				VolumeContentSource: &csi.VolumeContentSource{
+					Type: &csi.VolumeContentSource_Snapshot{
+						Snapshot: &csi.VolumeContentSource_SnapshotSource{
+							SnapshotId: testSnapshotID,
+						},
+					},
+				},
 				AccessibilityRequirements: &csi.TopologyRequirement{
 					Requisite: []*csi.Topology{
 						{
diff --git a/test/e2e/tests/multi_zone_e2e_test.go b/test/e2e/tests/multi_zone_e2e_test.go
index 4a20b756d..c9b7fa6bf 100644
--- a/test/e2e/tests/multi_zone_e2e_test.go
+++ b/test/e2e/tests/multi_zone_e2e_test.go
@@ -95,14 +95,12 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() {
 	It("Should attach ROX 'multi-zone' PV instances to two separate VMs", func() {
 		checkSkipMultiZoneTests()
 
-		// Create new driver and client
-
-		Expect(testContexts).NotTo(BeEmpty())
+		Expect(hyperdiskTestContexts).NotTo(BeEmpty())
 
 		zoneToContext := map[string]*remote.TestContext{}
 		zones := []string{}
 
-		for _, tc := range testContexts {
+		for _, tc := range hyperdiskTestContexts {
 			_, z, _ := tc.Instance.GetIdentity()
 			// Zone hasn't been seen before
 			if _, ok := zoneToContext[z]; !ok {
@@ -189,14 +187,12 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() {
 	It("Should create RWO 'multi-zone' PV instances from a previously created disk", func() {
 		checkSkipMultiZoneTests()
 
-		// Create new driver and client
-
-		Expect(testContexts).NotTo(BeEmpty())
+		Expect(hyperdiskTestContexts).NotTo(BeEmpty())
 
 		zoneToContext := map[string]*remote.TestContext{}
 		zones := []string{}
 
-		for _, tc := range testContexts {
+		for _, tc := range hyperdiskTestContexts {
 			_, z, _ := tc.Instance.GetIdentity()
 			// Zone hasn't been seen before
 			if _, ok := zoneToContext[z]; !ok {
@@ -289,12 +285,13 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() {
 	It("Should create ROX 'multi-zone' PV from existing snapshot", func() {
 		checkSkipMultiZoneTests()
 
-		Expect(testContexts).NotTo(BeEmpty())
+
+		Expect(hyperdiskTestContexts).NotTo(BeEmpty())
 
 		zoneToContext := map[string]*remote.TestContext{}
 		zones := []string{}
 
-		for _, tc := range testContexts {
+		for _, tc := range hyperdiskTestContexts {
 			_, z, _ := tc.Instance.GetIdentity()
 			// Zone hasn't been seen before
 			if _, ok := zoneToContext[z]; !ok {
@@ -317,7 +314,7 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() {
 		tc0 := zoneToContext[zones[0]]
 		tc1 := zoneToContext[zones[1]]
 
-		snapshotVolName, snapshotVolID := createAndValidateUniqueZonalDisk(controllerClient, p, zones[0], standardDiskType)
+		snapshotVolName, snapshotVolID := createAndValidateUniqueZonalDisk(controllerClient, p, zones[0], ssdDiskType)
 
 		underSpecifiedID := common.GenerateUnderspecifiedVolumeID(snapshotVolName, true /* isZonal */)
 
@@ -436,12 +433,13 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() {
var _ = Describe("GCE PD CSI Driver Multi-Zone", func() { It("Should create ROX 'multi-zone' PV from existing snapshot with no topology", func() { checkSkipMultiZoneTests() - Expect(testContexts).NotTo(BeEmpty()) + + Expect(hyperdiskTestContexts).NotTo(BeEmpty()) zoneToContext := map[string]*remote.TestContext{} zones := []string{} - for _, tc := range testContexts { + for _, tc := range hyperdiskTestContexts { _, z, _ := tc.Instance.GetIdentity() // Zone hasn't been seen before if _, ok := zoneToContext[z]; !ok { @@ -464,7 +462,7 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() { tc0 := zoneToContext[zones[0]] tc1 := zoneToContext[zones[1]] - snapshotVolName, snapshotVolID := createAndValidateUniqueZonalDisk(controllerClient, p, zones[0], standardDiskType) + snapshotVolName, snapshotVolID := createAndValidateUniqueZonalDisk(controllerClient, p, zones[0], ssdDiskType) underSpecifiedID := common.GenerateUnderspecifiedVolumeID(snapshotVolName, true /* isZonal */) @@ -574,12 +572,13 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() { It("Should create ROX 'multi-zone' PV from existing disk image", func() { checkSkipMultiZoneTests() - Expect(testContexts).NotTo(BeEmpty()) + + Expect(hyperdiskTestContexts).NotTo(BeEmpty()) zoneToContext := map[string]*remote.TestContext{} zones := []string{} - for _, tc := range testContexts { + for _, tc := range hyperdiskTestContexts { _, z, _ := tc.Instance.GetIdentity() // Zone hasn't been seen before if _, ok := zoneToContext[z]; !ok { @@ -602,7 +601,7 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() { tc0 := zoneToContext[zones[0]] tc1 := zoneToContext[zones[1]] - snapshotVolName, snapshotVolID := createAndValidateUniqueZonalDisk(controllerClient, p, zones[0], standardDiskType) + snapshotVolName, snapshotVolID := createAndValidateUniqueZonalDisk(controllerClient, p, zones[0], ssdDiskType) underSpecifiedID := common.GenerateUnderspecifiedVolumeID(snapshotVolName, true /* isZonal */) @@ -717,13 +716,14 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() { It("Should create RWO 'multi-zone' PV that has empty disks", func() { checkSkipMultiZoneTests() + // Create new driver and client - Expect(testContexts).NotTo(BeEmpty()) + Expect(hyperdiskTestContexts).NotTo(BeEmpty()) zoneToContext := map[string]*remote.TestContext{} zones := []string{} - for _, tc := range testContexts { + for _, tc := range hyperdiskTestContexts { _, z, _ := tc.Instance.GetIdentity() // Zone hasn't been seen before if _, ok := zoneToContext[z]; !ok { @@ -831,6 +831,207 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() { }) + It("Should create ROX 'multi-zone' PV that has empty disks in RWO mode", func() { + checkSkipMultiZoneTests() + + Expect(hyperdiskTestContexts).NotTo(BeEmpty()) + + zoneToContext := map[string]*remote.TestContext{} + zones := []string{} + + for _, tc := range hyperdiskTestContexts { + _, z, _ := tc.Instance.GetIdentity() + // Zone hasn't been seen before + if _, ok := zoneToContext[z]; !ok { + zoneToContext[z] = tc + zones = append(zones, z) + } + if len(zoneToContext) == 2 { + break + } + } + + Expect(len(zoneToContext)).To(Equal(2), "Must have instances in 2 zones") + + controllerContext := zoneToContext[zones[0]] + controllerClient := controllerContext.Client + controllerInstance := controllerContext.Instance + + p, _, _ := controllerInstance.GetIdentity() + + // Attach disk to instance in the first zone. 
+		tc0 := zoneToContext[zones[0]]
+		tc1 := zoneToContext[zones[1]]
+
+		// Create Disk
+		volName := testNamePrefix + string(uuid.NewUUID())
+		_, err := controllerClient.CreateVolumeWithCaps(volName, map[string]string{
+			common.ParameterKeyEnableMultiZoneProvisioning: "true",
+			common.ParameterKeyType:                        "hyperdisk-ml",
+		}, defaultHdmlSizeGb,
+			&csi.TopologyRequirement{
+				Requisite: []*csi.Topology{
+					{
+						Segments: map[string]string{common.TopologyKeyZone: zones[0]},
+					},
+					{
+						Segments: map[string]string{common.TopologyKeyZone: zones[1]},
+					},
+				},
+			},
+			[]*csi.VolumeCapability{
+				{
+					AccessType: &csi.VolumeCapability_Mount{
+						Mount: &csi.VolumeCapability_MountVolume{},
+					},
+					AccessMode: &csi.VolumeCapability_AccessMode{
+						Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
+					},
+				},
+			},
+			nil)
+		Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err)
+
+		volID := fmt.Sprintf("projects/%s/zones/multi-zone/disks/%s", p, volName)
+		defer func() {
+			// Delete Disk
+			err := controllerClient.DeleteVolume(volID)
+			Expect(err).To(BeNil(), "DeleteVolume failed")
+
+			// Validate Disk Deleted
+			_, err = computeService.Disks.Get(p, zones[0], volName).Do()
+			Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found. Err: %v", err)
+			_, err = computeService.Disks.Get(p, zones[1], volName).Do()
+			Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found. Err: %v", err)
+		}()
+
+		disk1, err := computeService.Disks.Get(p, zones[0], volName).Do()
+		Expect(err).To(BeNil(), "Failed to get disk %v/%v", zones[0], volName)
+		disk2, err := computeService.Disks.Get(p, zones[1], volName).Do()
+		Expect(err).To(BeNil(), "Failed to get disk %v/%v", zones[1], volName)
+
+		// Validate disks have multi-zone labels
+		Expect(disk1.Labels[common.MultiZoneLabel]).To(Equal("true"))
+		Expect(disk2.Labels[common.MultiZoneLabel]).To(Equal("true"))
+
+		// Validate disks are RWO
+		Expect(disk1.AccessMode).To(Equal("READ_WRITE_SINGLE"))
+		Expect(disk2.AccessMode).To(Equal("READ_WRITE_SINGLE"))
+
+		// Validate underlying disks can be used
+		volID0 := fmt.Sprintf("projects/%s/zones/%s/disks/%s", p, zones[0], volName)
+		volID1 := fmt.Sprintf("projects/%s/zones/%s/disks/%s", p, zones[1], volName)
+
+		err = testAttachWriteReadDetach(volID0, volName, tc0.Instance, tc0.Client, false /* readonly */, false /* detachAndReattach */, false /* setupDataCache */)
+		Expect(err).To(BeNil(), "Failed to attach/write/read/detach on vol1")
+
+		err = testAttachWriteReadDetach(volID1, volName, tc1.Instance, tc1.Client, false /* readonly */, false /* detachAndReattach */, false /* setupDataCache */)
+		Expect(err).To(BeNil(), "Failed to attach/write/read/detach on vol2")
+
+		// Validate disks can be used in multi-zone mode on both nodes
+		volIDMultiZone := fmt.Sprintf("projects/%s/zones/multi-zone/disks/%s", p, volName)
+		err = testAttachWriteReadDetach(volIDMultiZone, volName, tc0.Instance, tc0.Client, true /* readonly */, false /* detachAndReattach */, false /* setupDataCache */)
+		Expect(err).To(BeNil(), "Failed to attach/read/detach on vol1")
+
+		err = testAttachWriteReadDetach(volIDMultiZone, volName, tc1.Instance, tc1.Client, true /* readonly */, false /* detachAndReattach */, false /* setupDataCache */)
+		Expect(err).To(BeNil(), "Failed to attach/read/detach on vol2")
+
+		// Validate disks are ROX now
+		disk1, err = computeService.Disks.Get(p, zones[0], volName).Do()
+		Expect(err).To(BeNil(), "Failed to get disk %v/%v", zones[0], volName)
+		disk2, err = computeService.Disks.Get(p, zones[1], volName).Do()
+		Expect(err).To(BeNil(), "Failed to get disk %v/%v", zones[1], volName)
+
+		Expect(disk1.AccessMode).To(Equal("READ_ONLY_MANY"))
+		Expect(disk2.AccessMode).To(Equal("READ_ONLY_MANY"))
+	})
+
+	It("Should create ROX 'single-zone' PV that has empty disks in RWO mode", func() {
+		Expect(hyperdiskTestContexts).NotTo(BeEmpty())
+
+		zoneToContext := map[string]*remote.TestContext{}
+		zones := []string{}
+
+		for _, tc := range hyperdiskTestContexts {
+			_, z, _ := tc.Instance.GetIdentity()
+			// Zone hasn't been seen before
+			if _, ok := zoneToContext[z]; !ok {
+				zoneToContext[z] = tc
+				zones = append(zones, z)
+			}
+			if len(zoneToContext) == 2 {
+				break
+			}
+		}
+
+		controllerContext := zoneToContext[zones[0]]
+		controllerClient := controllerContext.Client
+		controllerInstance := controllerContext.Instance
+
+		p, _, _ := controllerInstance.GetIdentity()
+
+		// Attach disk to instance in the first zone.
+		tc0 := zoneToContext[zones[0]]
+
+		// Create Disk
+		volName := testNamePrefix + string(uuid.NewUUID())
+		_, err := controllerClient.CreateVolumeWithCaps(volName, map[string]string{
+			common.ParameterKeyType: "hyperdisk-ml",
+		}, defaultHdmlSizeGb,
+			&csi.TopologyRequirement{
+				Requisite: []*csi.Topology{
+					{
+						Segments: map[string]string{common.TopologyKeyZone: zones[0]},
+					},
+				},
+			},
+			[]*csi.VolumeCapability{
+				{
+					AccessType: &csi.VolumeCapability_Mount{
+						Mount: &csi.VolumeCapability_MountVolume{},
+					},
+					AccessMode: &csi.VolumeCapability_AccessMode{
+						Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
+					},
+				},
+			},
+			nil)
+		Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err)
+
+		volID := fmt.Sprintf("projects/%s/zones/%s/disks/%s", p, zones[0], volName)
+		defer func() {
+			// Delete Disk
+			err := controllerClient.DeleteVolume(volID)
+			Expect(err).To(BeNil(), "DeleteVolume failed")
+
+			// Validate Disk Deleted
+			_, err = computeService.Disks.Get(p, zones[0], volName).Do()
+			Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found. Err: %v", err)
+		}()
+
+		disk1, err := computeService.Disks.Get(p, zones[0], volName).Do()
+		Expect(err).To(BeNil(), "Failed to get disk %v/%v", zones[0], volName)
+
+		// Validate disk is RWO
+		Expect(disk1.AccessMode).To(Equal("READ_WRITE_SINGLE"))
+
+		// Validate underlying disk can be used
+		volID1 := fmt.Sprintf("projects/%s/zones/%s/disks/%s", p, zones[0], volName)
+
+		err = testAttachWriteReadDetach(volID1, volName, tc0.Instance, tc0.Client, false /* readonly */, false /* detachAndReattach */, false /* setupDataCache */)
+		Expect(err).To(BeNil(), "Failed to attach/write/read/detach on vol1")
+
+		// Validate disk can be used in read-only mode
+		err = testAttachWriteReadDetach(volID1, volName, tc0.Instance, tc0.Client, true /* readonly */, false /* detachAndReattach */, false /* setupDataCache */)
+		Expect(err).To(BeNil(), "Failed to attach/read/detach on vol1")
+
+		// Validate disk is ROX now
+		disk1, err = computeService.Disks.Get(p, zones[0], volName).Do()
+		Expect(err).To(BeNil(), "Failed to get disk %v/%v", zones[0], volName)
+
+		Expect(disk1.AccessMode).To(Equal("READ_ONLY_MANY"))
+	})
+
 	It("Should successfully run through entire lifecycle of an RePD volume on instances in 2 zones", func() {
 		// Create new driver and client