From 5f2882623dbc09681806e3c0a9dc92bb3e3ec55c Mon Sep 17 00:00:00 2001 From: David Zhu Date: Mon, 24 Jun 2019 11:14:42 -0700 Subject: [PATCH 1/3] Bump CSI Test (csi-sanity) dependency to v2.0.1 --- Gopkg.lock | 6 +- Gopkg.toml | 2 +- .../kubernetes-csi/csi-test/.travis.yml | 1 + .../csi-test/pkg/sanity/cleanup.go | 4 +- .../csi-test/pkg/sanity/controller.go | 582 ++++++++++++++++-- .../csi-test/pkg/sanity/identity.go | 20 +- .../csi-test/pkg/sanity/node.go | 362 ++++++++++- .../csi-test/pkg/sanity/sanity.go | 242 ++++++-- .../csi-test/pkg/sanity/tests.go | 4 +- .../csi-test/release-tools/LICENSE | 201 ++++++ .../kubernetes-csi/csi-test/utils/grpcutil.go | 6 + 11 files changed, 1293 insertions(+), 137 deletions(-) create mode 120000 vendor/github.com/kubernetes-csi/csi-test/.travis.yml create mode 100644 vendor/github.com/kubernetes-csi/csi-test/release-tools/LICENSE diff --git a/Gopkg.lock b/Gopkg.lock index 0b82e7550..1d808553a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -118,15 +118,15 @@ version = "v0.6.1" [[projects]] - digest = "1:5b7d11a9941a975a126f7d79e3871f9201ed715c62af86e9dca1e231cc184192" + digest = "1:2af9c947430dba1b6895a8c552d5b915d95dc401ab78c1e017fae86905236fd3" name = "github.com/kubernetes-csi/csi-test" packages = [ "pkg/sanity", "utils", ] pruneopts = "NUT" - revision = "5b1e3786b7c8f7ca514b40e882a0b5dc36e4c842" - version = "v1.1.0" + revision = "6738ab2206eac88874f0a3ede59b40f680f59f43" + version = "v2.0.1" [[projects]] digest = "1:3c46171ee5eee66086897e1efca67b84bf552b1f80039d421068c90684868194" diff --git a/Gopkg.toml b/Gopkg.toml index 6cb1e0479..ddc43f8fd 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -47,7 +47,7 @@ [[constraint]] name = "github.com/kubernetes-csi/csi-test" - version = "v1.0.0" + version = "v2.0.1" [[constraint]] branch = "master" diff --git a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml new file mode 120000 index 000000000..a554dfc76 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml @@ -0,0 +1 @@ +release-tools/travis.yml \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go index 65a30334f..73d7ccc10 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go @@ -88,7 +88,7 @@ func (cl *Cleanup) DeleteVolumes() { ctx, &csi.NodeUnpublishVolumeRequest{ VolumeId: info.VolumeID, - TargetPath: cl.Context.Config.TargetPath, + TargetPath: cl.Context.TargetPath, }, ); err != nil { logger.Printf("warning: NodeUnpublishVolume: %s", err) @@ -99,7 +99,7 @@ func (cl *Cleanup) DeleteVolumes() { ctx, &csi.NodeUnstageVolumeRequest{ VolumeId: info.VolumeID, - StagingTargetPath: cl.Context.Config.StagingPath, + StagingTargetPath: cl.Context.StagingPath, }, ); err != nil { logger.Printf("warning: NodeUnstageVolume: %s", err) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go index 8ab1bea3b..8fe27c546 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go @@ -19,14 +19,13 @@ package sanity import ( "context" "fmt" + "strconv" 
"google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/container-storage-interface/spec/lib/go/csi" - "strconv" - . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) @@ -89,7 +88,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { ) BeforeEach(func() { - c = csi.NewControllerClient(sc.Conn) + c = csi.NewControllerClient(sc.ControllerConn) n = csi.NewNodeClient(sc.Conn) cl = &Cleanup{ @@ -126,6 +125,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { case csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS: case csi.ControllerServiceCapability_RPC_PUBLISH_READONLY: case csi.ControllerServiceCapability_RPC_CLONE_VOLUME: + case csi.ControllerServiceCapability_RPC_EXPAND_VOLUME: default: Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) } @@ -170,10 +170,192 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { } }) - // TODO: Add test to test for tokens + It("should fail when an invalid starting_token is passed", func() { + vols, err := c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{ + StartingToken: "invalid-token", + }, + ) + Expect(err).To(HaveOccurred()) + Expect(vols).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.Aborted)) + }) + + It("should fail when the starting_token is greater than total number of vols", func() { + // Get total number of volumes. + vols, err := c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(vols).NotTo(BeNil()) + + totalVols := len(vols.GetEntries()) + + // Send starting_token that is greater than the total number of volumes. + vols, err = c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{ + StartingToken: strconv.Itoa(totalVols + 5), + }, + ) + Expect(err).To(HaveOccurred()) + Expect(vols).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.Aborted)) + }) + + It("check the presence of new volumes in the volume list", func() { + // List Volumes before creating new volume. + vols, err := c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(vols).NotTo(BeNil()) + + totalVols := len(vols.GetEntries()) + + By("creating a volume") + name := "sanity" + + // Create a new volume. + req := &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + } + + vol, err := c.CreateVolume(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + + // List volumes and check for the newly created volume. 
+ vols, err = c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(vols).NotTo(BeNil()) + Expect(len(vols.GetEntries())).To(Equal(totalVols + 1)) + + By("cleaning up deleting the volume") + + delReq := &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + } + + _, err = c.DeleteVolume(context.Background(), delReq) + Expect(err).NotTo(HaveOccurred()) + + // List volumes and check if the deleted volume exists in the volume list. + vols, err = c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(vols).NotTo(BeNil()) + Expect(len(vols.GetEntries())).To(Equal(totalVols)) + }) + + It("should return next token when a limited number of entries are requested", func() { + // minVolCount is the minimum number of volumes expected to exist, + // based on which paginated volume listing is performed. + minVolCount := 5 + // maxEntries is the maximum number of entries in the list volumes request. + maxEntries := 2 + // currentTotalVols is the total number of volumes at a given time. It + // is used to verify that all the volumes have been listed. + currentTotalVols := 0 + // newVols keeps a record of the newly created volume names and IDs. + newVols := map[string]string{} + + // Get the number of existing volumes. + vols, err := c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(vols).NotTo(BeNil()) + + initialTotalVols := len(vols.GetEntries()) + currentTotalVols = initialTotalVols + + // Ensure that at least minVolCount volumes exist. + if initialTotalVols < minVolCount { + + By("creating required new volumes") + requiredVols := minVolCount - initialTotalVols + for i := 1; i <= requiredVols; i++ { + name := "sanity" + strconv.Itoa(i) + req := &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + } + + vol, err := c.CreateVolume(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.Volume.VolumeId}) + newVols[name] = vol.Volume.VolumeId + } + + // Update the current total vols count. + currentTotalVols += requiredVols + } + + // List volumes, requesting at most maxEntries entries. + vols, err = c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{ + MaxEntries: int32(maxEntries), + }) + Expect(err).NotTo(HaveOccurred()) + Expect(vols).NotTo(BeNil()) + + nextToken := vols.GetNextToken() + + Expect(nextToken).To(Equal(strconv.Itoa(maxEntries))) + Expect(len(vols.GetEntries())).To(Equal(maxEntries)) + + if initialTotalVols < minVolCount { - // TODO: Add test which checks list of volume is there when created, - and not there when deleted.
+ By("cleaning up deleting the volumes") + for name, volID := range newVols { + delReq := &csi.DeleteVolumeRequest{ + VolumeId: volID, + Secrets: sc.Secrets.DeleteVolumeSecret, + } + + _, err := c.DeleteVolume(context.Background(), delReq) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + } + } + }) }) Describe("CreateVolume", func() { @@ -200,7 +382,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { }) It("should fail when no volume capabilities are provided", func() { - name := uniqueString("sanity-controller-create-no-volume-capabilities") + name := UniqueString("sanity-controller-create-no-volume-capabilities") vol, err := c.CreateVolume( context.Background(), &csi.CreateVolumeRequest{ @@ -220,7 +402,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { It("should return appropriate values SingleNodeWriter NoCapacity Type:Mount", func() { By("creating a volume") - name := uniqueString("sanity-controller-create-single-no-capacity") + name := UniqueString("sanity-controller-create-single-no-capacity") vol, err := c.CreateVolume( context.Background(), @@ -262,7 +444,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { It("should return appropriate values SingleNodeWriter WithCapacity 1Gi Type:Mount", func() { By("creating a volume") - name := uniqueString("sanity-controller-create-single-with-capacity") + name := UniqueString("sanity-controller-create-single-with-capacity") vol, err := c.CreateVolume( context.Background(), @@ -311,7 +493,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { It("should not fail when requesting to create a volume with already existing name and same capacity.", func() { By("creating a volume") - name := uniqueString("sanity-controller-create-twice") + name := UniqueString("sanity-controller-create-twice") size := TestVolumeSize(sc) vol1, err := c.CreateVolume( @@ -385,7 +567,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { It("should fail when requesting to create a volume with already existing name and different capacity.", func() { By("creating a volume") - name := uniqueString("sanity-controller-create-twice-different") + name := UniqueString("sanity-controller-create-twice-different") size1 := TestVolumeSize(sc) vol1, err := c.CreateVolume( @@ -507,6 +689,133 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { Expect(err).NotTo(HaveOccurred()) cl.UnregisterVolume(name) }) + + It("should create volume from an existing source snapshot", func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("Snapshot not supported") + } + + By("creating a volume") + vol1Name := UniqueString("sanity-controller-source-vol") + vol1Req := MakeCreateVolumeReq(sc, vol1Name) + volume1, err := c.CreateVolume(context.Background(), vol1Req) + Expect(err).NotTo(HaveOccurred()) + + By("creating a snapshot") + snapName := UniqueString("sanity-controller-snap-from-vol") + snapReq := MakeCreateSnapshotReq(sc, snapName, volume1.GetVolume().GetVolumeId(), nil) + snap, err := c.CreateSnapshot(context.Background(), snapReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snap).NotTo(BeNil()) + verifySnapshotInfo(snap.GetSnapshot()) + + By("creating a volume from source snapshot") + vol2Name := UniqueString("sanity-controller-vol-from-snap") + vol2Req := MakeCreateVolumeReq(sc, vol2Name) + vol2Req.VolumeContentSource = &csi.VolumeContentSource{ + Type: 
&csi.VolumeContentSource_Snapshot{ + Snapshot: &csi.VolumeContentSource_SnapshotSource{ + SnapshotId: snap.GetSnapshot().GetSnapshotId(), + }, + }, + } + volume2, err := c.CreateVolume(context.Background(), vol2Req) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume created from snapshot") + delVol2Req := MakeDeleteVolumeReq(sc, volume2.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVol2Req) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the source volume") + delVol1Req := MakeDeleteVolumeReq(sc, volume1.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVol1Req) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should fail when the volume source snapshot is not found", func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("Snapshot not supported") + } + + By("creating a volume from source snapshot") + volName := UniqueString("sanity-controller-vol-from-snap") + volReq := MakeCreateVolumeReq(sc, volName) + volReq.VolumeContentSource = &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Snapshot{ + Snapshot: &csi.VolumeContentSource_SnapshotSource{ + SnapshotId: "non-existing-snapshot-id", + }, + }, + } + _, err := c.CreateVolume(context.Background(), volReq) + Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) + + It("should create volume from an existing source volume", func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CLONE_VOLUME) { + Skip("Volume Cloning not supported") + } + + By("creating a volume") + vol1Name := UniqueString("sanity-controller-source-vol") + vol1Req := MakeCreateVolumeReq(sc, vol1Name) + volume1, err := c.CreateVolume(context.Background(), vol1Req) + Expect(err).NotTo(HaveOccurred()) + + By("creating a volume from source volume") + vol2Name := UniqueString("sanity-controller-vol-from-vol") + vol2Req := MakeCreateVolumeReq(sc, vol2Name) + vol2Req.VolumeContentSource = &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Volume{ + Volume: &csi.VolumeContentSource_VolumeSource{ + VolumeId: volume1.GetVolume().GetVolumeId(), + }, + }, + } + volume2, err := c.CreateVolume(context.Background(), vol2Req) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume created from source volume") + delVol2Req := MakeDeleteVolumeReq(sc, volume2.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVol2Req) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the source volume") + delVol1Req := MakeDeleteVolumeReq(sc, volume1.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVol1Req) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should fail when the volume source volume is not found", func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CLONE_VOLUME) { + Skip("Volume Cloning not supported") + } + + By("creating a volume from source volume") + volName := UniqueString("sanity-controller-vol-from-vol") + volReq := MakeCreateVolumeReq(sc, volName) + volReq.VolumeContentSource = &csi.VolumeContentSource{ + Type:
&csi.VolumeContentSource_Volume{ + Volume: &csi.VolumeContentSource_VolumeSource{ + VolumeId: "non-existing-volume-id", + }, + }, + } + _, err := c.CreateVolume(context.Background(), volReq) + Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) }) Describe("DeleteVolume", func() { @@ -547,7 +856,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { // Create Volume First By("creating a volume") - name := uniqueString("sanity-controller-create-appropriate") + name := UniqueString("sanity-controller-create-appropriate") vol, err := c.CreateVolume( context.Background(), @@ -593,7 +902,9 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { _, err := c.ValidateVolumeCapabilities( context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{}) + &csi.ValidateVolumeCapabilitiesRequest{ + Secrets: sc.Secrets.ControllerValidateVolumeCapabilitiesSecret, + }) Expect(err).To(HaveOccurred()) serverError, ok := status.FromError(err) @@ -603,23 +914,64 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { It("should fail when no volume capabilities are provided", func() { - _, err := c.ValidateVolumeCapabilities( + // Create Volume First + By("creating a single node writer volume") + name := UniqueString("sanity-controller-validate-nocaps") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + _, err = c.ValidateVolumeCapabilities( context.Background(), &csi.ValidateVolumeCapabilitiesRequest{ - VolumeId: "id", + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.ControllerValidateVolumeCapabilitiesSecret, }) Expect(err).To(HaveOccurred()) serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) }) It("should return appropriate values (no optional values added)", func() { // Create Volume First By("creating a single node writer volume") - name := uniqueString("sanity-controller-validate") + name := UniqueString("sanity-controller-validate") vol, err := c.CreateVolume( context.Background(), @@ -661,6 +1013,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { }, }, }, + Secrets: sc.Secrets.ControllerValidateVolumeCapabilitiesSecret, }) Expect(err).NotTo(HaveOccurred()) Expect(valivolcap).NotTo(BeNil()) @@ -700,6 +1053,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { }, }, }, + Secrets: sc.Secrets.ControllerValidateVolumeCapabilitiesSecret, }, ) Expect(err).To(HaveOccurred()) @@ -765,11 
+1119,29 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) + // CSI spec poses no specific requirements for the cluster/storage setups that a SP MUST support. To perform + // meaningful checks the following test assumes that topology-aware provisioning on a single node setup is supported It("should return appropriate values (no optional values added)", func() { + By("getting node information") + ni, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(ni).NotTo(BeNil()) + Expect(ni.GetNodeId()).NotTo(BeEmpty()) + + var accReqs *csi.TopologyRequirement + if ni.AccessibleTopology != nil { + // Topology requirements are honored if provided by the driver + accReqs = &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ni.AccessibleTopology}, + } + } + // Create Volume First By("creating a single node writer volume") - name := uniqueString("sanity-controller-publish") + name := UniqueString("sanity-controller-publish") vol, err := c.CreateVolume( context.Background(), @@ -785,8 +1157,9 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { }, }, }, - Secrets: sc.Secrets.CreateVolumeSecret, - Parameters: sc.Config.TestVolumeParameters, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + AccessibilityRequirements: accReqs, }, ) Expect(err).NotTo(HaveOccurred()) @@ -795,14 +1168,6 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - By("getting a node id") - nid, err := n.NodeGetInfo( - context.Background(), - &csi.NodeGetInfoRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - // ControllerPublishVolume By("calling controllerpublish on that volume") @@ -810,7 +1175,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { context.Background(), &csi.ControllerPublishVolumeRequest{ VolumeId: vol.GetVolume().GetVolumeId(), - NodeId: nid.GetNodeId(), + NodeId: ni.GetNodeId(), VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -824,7 +1189,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { }, ) Expect(err).NotTo(HaveOccurred()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: ni.GetNodeId()}) Expect(conpubvol).NotTo(BeNil()) By("cleaning up unpublishing the volume") @@ -834,7 +1199,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { &csi.ControllerUnpublishVolumeRequest{ VolumeId: vol.GetVolume().GetVolumeId(), // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), + NodeId: ni.GetNodeId(), Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, }, ) @@ -854,6 +1219,48 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { cl.UnregisterVolume(name) }) + It("should fail when publishing more volumes than the node max attach limit", func() { + if !sc.Config.TestNodeVolumeAttachLimit { + Skip("testnodevolumeattachlimit not enabled") + } + + By("getting node info") + nodeInfo, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + 
Expect(err).NotTo(HaveOccurred()) + Expect(nodeInfo).NotTo(BeNil()) + + if nodeInfo.MaxVolumesPerNode <= 0 { + Skip("No MaxVolumesPerNode") + } + + nid := nodeInfo.GetNodeId() + Expect(nid).NotTo(BeEmpty()) + + // Store the volume name and volume ID for later cleanup. + createdVols := map[string]string{} + By("creating volumes") + for i := int64(0); i < nodeInfo.MaxVolumesPerNode; i++ { + name := UniqueString(fmt.Sprintf("sanity-max-attach-limit-vol-%d", i)) + volID, err := CreateAndControllerPublishVolume(sc, c, name, nid) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: volID, NodeID: nid}) + createdVols[name] = volID + } + + extraVolName := UniqueString("sanity-max-attach-limit-vol+1") + _, err = CreateAndControllerPublishVolume(sc, c, extraVolName, nid) + Expect(err).To(HaveOccurred()) + + By("cleaning up") + for volName, volID := range createdVols { + err = ControllerUnpublishAndDeleteVolume(sc, c, volID, nid) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(volName) + } + }) + It("should fail when the volume does not exist", func() { By("calling controller publish on a non-existent volume") @@ -887,7 +1294,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { // Create Volume First By("creating a single node writer volume") - name := uniqueString("sanity-controller-wrong-node") + name := UniqueString("sanity-controller-wrong-node") vol, err := c.CreateVolume( context.Background(), @@ -960,7 +1367,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { // Create Volume First By("creating a single node writer volume") - name := uniqueString("sanity-controller-published-incompatible") + name := UniqueString("sanity-controller-published-incompatible") vol, err := c.CreateVolume( context.Background(), @@ -1078,11 +1485,29 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) + // CSI spec poses no specific requirements for the cluster/storage setups that a SP MUST support. 
To perform + // meaningful checks the following test assumes that topology-aware provisioning on a single node setup is supported It("should return appropriate values (no optional values added)", func() { // Create Volume First By("creating a single node writer volume") - name := uniqueString("sanity-controller-unpublish") + name := UniqueString("sanity-controller-unpublish") + + By("getting node information") + ni, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(ni).NotTo(BeNil()) + Expect(ni.GetNodeId()).NotTo(BeEmpty()) + + var accReqs *csi.TopologyRequirement + if ni.AccessibleTopology != nil { + // Topology requirements are honored if provided by the driver + accReqs = &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ni.AccessibleTopology}, + } + } vol, err := c.CreateVolume( context.Background(), @@ -1098,8 +1523,9 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { }, }, }, - Secrets: sc.Secrets.CreateVolumeSecret, - Parameters: sc.Config.TestVolumeParameters, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + AccessibilityRequirements: accReqs, }, ) Expect(err).NotTo(HaveOccurred()) @@ -1108,14 +1534,6 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - By("getting a node id") - nid, err := n.NodeGetInfo( - context.Background(), - &csi.NodeGetInfoRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - // ControllerPublishVolume By("calling controllerpublish on that volume") @@ -1123,7 +1541,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { context.Background(), &csi.ControllerPublishVolumeRequest{ VolumeId: vol.GetVolume().GetVolumeId(), - NodeId: nid.GetNodeId(), + NodeId: ni.GetNodeId(), VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -1137,7 +1555,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { }, ) Expect(err).NotTo(HaveOccurred()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: ni.GetNodeId()}) Expect(conpubvol).NotTo(BeNil()) // ControllerUnpublishVolume @@ -1148,7 +1566,7 @@ var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { &csi.ControllerUnpublishVolumeRequest{ VolumeId: vol.GetVolume().GetVolumeId(), // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), + NodeId: ni.GetNodeId(), Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, }, ) @@ -1176,7 +1594,7 @@ var _ = DescribeSanity("ListSnapshots [Controller Server]", func(sc *SanityConte ) BeforeEach(func() { - c = csi.NewControllerClient(sc.Conn) + c = csi.NewControllerClient(sc.ControllerConn) if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS) { Skip("ListSnapshots not supported") @@ -1195,7 +1613,7 @@ var _ = DescribeSanity("ListSnapshots [Controller Server]", func(sc *SanityConte } }) - It("should return snapshots that match the specify snapshot id", func() { + It("should return snapshots that match the specified snapshot id", func() { By("creating a volume") volReq := MakeCreateVolumeReq(sc, 
"listSnapshots-volume-1") @@ -1227,7 +1645,7 @@ var _ = DescribeSanity("ListSnapshots [Controller Server]", func(sc *SanityConte Expect(err).NotTo(HaveOccurred()) }) - It("should return empty when the specify snapshot id is not exist", func() { + It("should return empty when the specified snapshot id does not exist", func() { snapshots, err := c.ListSnapshots( context.Background(), @@ -1237,7 +1655,7 @@ var _ = DescribeSanity("ListSnapshots [Controller Server]", func(sc *SanityConte Expect(snapshots.GetEntries()).To(BeEmpty()) }) - It("should return snapshots that match the specify source volume id)", func() { + It("should return snapshots that match the specified source volume id)", func() { By("creating a volume") volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-2") @@ -1270,7 +1688,7 @@ var _ = DescribeSanity("ListSnapshots [Controller Server]", func(sc *SanityConte Expect(err).NotTo(HaveOccurred()) }) - It("should return empty when the specify source volume id is not exist", func() { + It("should return empty when the specified source volume id does not exist", func() { snapshots, err := c.ListSnapshots( context.Background(), @@ -1429,7 +1847,7 @@ var _ = DescribeSanity("DeleteSnapshot [Controller Server]", func(sc *SanityCont ) BeforeEach(func() { - c = csi.NewControllerClient(sc.Conn) + c = csi.NewControllerClient(sc.ControllerConn) if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { Skip("DeleteSnapshot not supported") @@ -1492,7 +1910,7 @@ var _ = DescribeSanity("CreateSnapshot [Controller Server]", func(sc *SanityCont ) BeforeEach(func() { - c = csi.NewControllerClient(sc.Conn) + c = csi.NewControllerClient(sc.ControllerConn) if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { Skip("CreateSnapshot not supported") @@ -1701,3 +2119,61 @@ func MakeDeleteVolumeReq(sc *SanityContext, id string) *csi.DeleteVolumeRequest return delVolReq } + +// MakeControllerPublishVolumeReq creates and returns a ControllerPublishVolumeRequest. +func MakeControllerPublishVolumeReq(sc *SanityContext, volID, nodeID string) *csi.ControllerPublishVolumeRequest { + return &csi.ControllerPublishVolumeRequest{ + VolumeId: volID, + NodeId: nodeID, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + } +} + +// MakeControllerUnpublishVolumeReq creates and returns a ControllerUnpublishVolumeRequest. +func MakeControllerUnpublishVolumeReq(sc *SanityContext, volID, nodeID string) *csi.ControllerUnpublishVolumeRequest { + return &csi.ControllerUnpublishVolumeRequest{ + VolumeId: volID, + NodeId: nodeID, + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + } +} + +// CreateAndControllerPublishVolume creates and controller publishes a volume given a volume name and node ID. 
+func CreateAndControllerPublishVolume(sc *SanityContext, c csi.ControllerClient, volName, nodeID string) (volID string, err error) { + vol, err := c.CreateVolume(context.Background(), MakeCreateVolumeReq(sc, volName)) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + + _, err = c.ControllerPublishVolume( + context.Background(), + MakeControllerPublishVolumeReq(sc, vol.GetVolume().GetVolumeId(), nodeID), + ) + return vol.GetVolume().GetVolumeId(), err +} + +// ControllerUnpublishAndDeleteVolume controller unpublishes and deletes a volume, given volume ID and node ID. +func ControllerUnpublishAndDeleteVolume(sc *SanityContext, c csi.ControllerClient, volID, nodeID string) error { + _, err := c.ControllerUnpublishVolume( + context.Background(), + MakeControllerUnpublishVolumeReq(sc, volID, nodeID), + ) + Expect(err).NotTo(HaveOccurred()) + + _, err = c.DeleteVolume( + context.Background(), + MakeDeleteVolumeReq(sc, volID), + ) + Expect(err).NotTo(HaveOccurred()) + return err +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go index c1a5eb7ef..0cefcd9f1 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go @@ -49,11 +49,23 @@ var _ = DescribeSanity("Identity Service", func(sc *SanityContext) { By("checking successful response") Expect(res.GetCapabilities()).NotTo(BeNil()) for _, cap := range res.GetCapabilities() { - switch cap.GetService().GetType() { - case csi.PluginCapability_Service_CONTROLLER_SERVICE: - case csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS: + switch cap.GetType().(type) { + case *csi.PluginCapability_Service_: + switch cap.GetService().GetType() { + case csi.PluginCapability_Service_CONTROLLER_SERVICE: + case csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS: + default: + Fail(fmt.Sprintf("Unknown service: %v\n", cap.GetService().GetType())) + } + case *csi.PluginCapability_VolumeExpansion_: + switch cap.GetVolumeExpansion().GetType() { + case csi.PluginCapability_VolumeExpansion_ONLINE: + case csi.PluginCapability_VolumeExpansion_OFFLINE: + default: + Fail(fmt.Sprintf("Unknown volume expansion mode: %v\n", cap.GetVolumeExpansion().GetType())) + } default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetService().GetType())) + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetType())) } } diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go index 9bd9194b0..c0d69d8b4 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go @@ -60,8 +60,7 @@ func isPluginCapabilitySupported(c csi.IdentityClient, Expect(caps.GetCapabilities()).NotTo(BeNil()) for _, cap := range caps.GetCapabilities() { - Expect(cap.GetService()).NotTo(BeNil()) - if cap.GetService().GetType() == capType { + if cap.GetService() != nil && cap.GetService().GetType() == capType { return true } } @@ -76,20 +75,18 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { controllerPublishSupported bool nodeStageSupported bool + nodeVolumeStatsSupported bool ) BeforeEach(func() { c = csi.NewNodeClient(sc.Conn) - s = 
csi.NewControllerClient(sc.Conn) + s = csi.NewControllerClient(sc.ControllerConn) controllerPublishSupported = isControllerCapabilitySupported( s, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) - if nodeStageSupported { - err := createMountTargetLocation(sc.Config.StagingPath) - Expect(err).NotTo(HaveOccurred()) - } + nodeVolumeStatsSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_GET_VOLUME_STATS) cl = &Cleanup{ Context: sc, NodeClient: c, @@ -120,6 +117,7 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { case csi.NodeServiceCapability_RPC_UNKNOWN: case csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME: case csi.NodeServiceCapability_RPC_GET_VOLUME_STATS: + case csi.NodeServiceCapability_RPC_EXPAND_VOLUME: default: Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) } @@ -138,7 +136,7 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { accessibilityConstraintSupported = isPluginCapabilitySupported(i, csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS) }) - It("should return approproate values", func() { + It("should return appropriate values", func() { ninfo, err := c.NodeGetInfo( context.Background(), &csi.NodeGetInfoRequest{}) @@ -189,7 +187,7 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { context.Background(), &csi.NodePublishVolumeRequest{ VolumeId: "id", - TargetPath: sc.Config.TargetPath, + TargetPath: sc.TargetPath + "/target", Secrets: sc.Secrets.NodePublishVolumeSecret, }, ) @@ -246,7 +244,7 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { _, err := c.NodeStageVolume( context.Background(), &csi.NodeStageVolumeRequest{ - StagingTargetPath: sc.Config.StagingPath, + StagingTargetPath: sc.StagingPath, VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -295,11 +293,40 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { }) It("should fail when no volume capability is provided", func() { - _, err := c.NodeStageVolume( + + // Create Volume First + By("creating a single node writer volume") + name := UniqueString("sanity-node-stage-nocaps") + + vol, err := s.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + _, err = c.NodeStageVolume( context.Background(), &csi.NodeStageVolumeRequest{ - VolumeId: "id", - StagingTargetPath: sc.Config.StagingPath, + VolumeId: vol.GetVolume().GetVolumeId(), + StagingTargetPath: sc.StagingPath, PublishContext: map[string]string{ "device": device, }, @@ -311,6 +338,18 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + + By("cleaning up deleting the volume") + 
+ _, err = s.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) }) }) @@ -326,7 +365,7 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { _, err := c.NodeUnstageVolume( context.Background(), &csi.NodeUnstageVolumeRequest{ - StagingTargetPath: sc.Config.StagingPath, + StagingTargetPath: sc.StagingPath, }) Expect(err).To(HaveOccurred()) @@ -350,8 +389,259 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { }) }) + Describe("NodeGetVolumeStats", func() { + BeforeEach(func() { + if !nodeVolumeStatsSupported { + Skip("NodeGetVolume not supported") + } + }) + + It("should fail when no volume id is provided", func() { + _, err := c.NodeGetVolumeStats( + context.Background(), + &csi.NodeGetVolumeStatsRequest{ + VolumePath: "some/path", + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume path is provided", func() { + _, err := c.NodeGetVolumeStats( + context.Background(), + &csi.NodeGetVolumeStatsRequest{ + VolumeId: "id", + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when volume is not found", func() { + _, err := c.NodeGetVolumeStats( + context.Background(), + &csi.NodeGetVolumeStatsRequest{ + VolumeId: "id", + VolumePath: "some/path", + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) + + It("should fail when volume does not exist on the specified path", func() { + name := UniqueString("sanity-node-get-volume-stats") + + By("creating a single node writer volume") + vol, err := s.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + By("getting a node id") + nid, err := c.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + var conpubvol *csi.ControllerPublishVolumeResponse + if controllerPublishSupported { + By("controller publishing volume") + + conpubvol, err = s.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeContext: 
vol.GetVolume().GetVolumeContext(), + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + Expect(conpubvol).NotTo(BeNil()) + } + // NodeStageVolume + if nodeStageSupported { + By("node staging volume") + nodestagevol, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + StagingTargetPath: sc.StagingPath, + VolumeContext: vol.GetVolume().GetVolumeContext(), + PublishContext: conpubvol.GetPublishContext(), + Secrets: sc.Secrets.NodeStageVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(nodestagevol).NotTo(BeNil()) + } + // NodePublishVolume + By("publishing the volume on a node") + var stagingPath string + if nodeStageSupported { + stagingPath = sc.StagingPath + } + nodepubvol, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + TargetPath: sc.TargetPath + "/target", + StagingTargetPath: stagingPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeContext: vol.GetVolume().GetVolumeContext(), + PublishContext: conpubvol.GetPublishContext(), + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(nodepubvol).NotTo(BeNil()) + + // NodeGetVolumeStats + By("Get node volume stats") + _, err = c.NodeGetVolumeStats( + context.Background(), + &csi.NodeGetVolumeStatsRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + VolumePath: "some/path", + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + + // NodeUnpublishVolume + By("cleaning up calling nodeunpublish") + nodeunpubvol, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + TargetPath: sc.TargetPath + "/target", + }) + Expect(err).NotTo(HaveOccurred()) + Expect(nodeunpubvol).NotTo(BeNil()) + + if nodeStageSupported { + By("cleaning up calling nodeunstage") + nodeunstagevol, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + StagingTargetPath: sc.StagingPath, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(nodeunstagevol).NotTo(BeNil()) + } + + if controllerPublishSupported { + By("cleaning up calling controllerunpublishing") + + controllerunpubvol, err := s.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(controllerunpubvol).NotTo(BeNil()) + } + + By("cleaning up deleting the volume") + + _, err = s.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + 
Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + + }) + + }) + + // CSI spec poses no specific requirements for the cluster/storage setups that a SP MUST support. To perform + // meaningful checks the following test assumes that topology-aware provisioning on a single node setup is supported It("should work", func() { - name := uniqueString("sanity-node-full") + name := UniqueString("sanity-node-full") + + By("getting node information") + ni, err := c.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(ni).NotTo(BeNil()) + Expect(ni.GetNodeId()).NotTo(BeEmpty()) + + var accReqs *csi.TopologyRequirement + if ni.AccessibleTopology != nil { + // Topology requirements are honored if provided by the driver + accReqs = &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ni.AccessibleTopology}, + } + } // Create Volume First By("creating a single node writer volume") @@ -369,7 +659,9 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { }, }, }, - Secrets: sc.Secrets.CreateVolumeSecret, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + AccessibilityRequirements: accReqs, }, ) Expect(err).NotTo(HaveOccurred()) @@ -378,14 +670,6 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) - By("getting a node id") - nid, err := c.NodeGetInfo( - context.Background(), - &csi.NodeGetInfoRequest{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - var conpubvol *csi.ControllerPublishVolumeResponse if controllerPublishSupported { By("controller publishing volume") @@ -394,7 +678,7 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { context.Background(), &csi.ControllerPublishVolumeRequest{ VolumeId: vol.GetVolume().GetVolumeId(), - NodeId: nid.GetNodeId(), + NodeId: ni.GetNodeId(), VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -409,7 +693,7 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { }, ) Expect(err).NotTo(HaveOccurred()) - cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: ni.GetNodeId()}) Expect(conpubvol).NotTo(BeNil()) } // NodeStageVolume @@ -427,7 +711,7 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - StagingTargetPath: sc.Config.StagingPath, + StagingTargetPath: sc.StagingPath, VolumeContext: vol.GetVolume().GetVolumeContext(), PublishContext: conpubvol.GetPublishContext(), Secrets: sc.Secrets.NodeStageVolumeSecret, @@ -440,13 +724,13 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { By("publishing the volume on a node") var stagingPath string if nodeStageSupported { - stagingPath = sc.Config.StagingPath + stagingPath = sc.StagingPath } nodepubvol, err := c.NodePublishVolume( context.Background(), &csi.NodePublishVolumeRequest{ VolumeId: vol.GetVolume().GetVolumeId(), - TargetPath: sc.Config.TargetPath, + TargetPath: sc.TargetPath + "/target", StagingTargetPath: stagingPath, VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ @@ -464,13 +748,27 @@ var _ = 
DescribeSanity("Node Service", func(sc *SanityContext) { Expect(err).NotTo(HaveOccurred()) Expect(nodepubvol).NotTo(BeNil()) + // NodeGetVolumeStats + if nodeVolumeStatsSupported { + By("Get node volume stats") + statsResp, err := c.NodeGetVolumeStats( + context.Background(), + &csi.NodeGetVolumeStatsRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + VolumePath: sc.TargetPath + "/target", + }, + ) + Expect(err).ToNot(HaveOccurred()) + Expect(statsResp.GetUsage()).ToNot(BeNil()) + } + // NodeUnpublishVolume By("cleaning up calling nodeunpublish") nodeunpubvol, err := c.NodeUnpublishVolume( context.Background(), &csi.NodeUnpublishVolumeRequest{ VolumeId: vol.GetVolume().GetVolumeId(), - TargetPath: sc.Config.TargetPath, + TargetPath: sc.TargetPath + "/target", }) Expect(err).NotTo(HaveOccurred()) Expect(nodeunpubvol).NotTo(BeNil()) @@ -481,7 +779,7 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { context.Background(), &csi.NodeUnstageVolumeRequest{ VolumeId: vol.GetVolume().GetVolumeId(), - StagingTargetPath: sc.Config.StagingPath, + StagingTargetPath: sc.StagingPath, }, ) Expect(err).NotTo(HaveOccurred()) @@ -495,7 +793,7 @@ var _ = DescribeSanity("Node Service", func(sc *SanityContext) { context.Background(), &csi.ControllerUnpublishVolumeRequest{ VolumeId: vol.GetVolume().GetVolumeId(), - NodeId: nid.GetNodeId(), + NodeId: ni.GetNodeId(), Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, }, ) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go index e3c1684ed..73e60aece 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go @@ -17,11 +17,15 @@ limitations under the License. package sanity import ( + "context" "crypto/rand" "fmt" "io/ioutil" "os" + "os/exec" + "strings" "testing" + "time" "github.com/kubernetes-csi/csi-test/utils" yaml "gopkg.in/yaml.v2" @@ -29,42 +33,106 @@ import ( "google.golang.org/grpc" . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" ) // CSISecrets consists of secrets used in CSI credentials. 
type CSISecrets struct { - CreateVolumeSecret map[string]string `yaml:"CreateVolumeSecret"` - DeleteVolumeSecret map[string]string `yaml:"DeleteVolumeSecret"` - ControllerPublishVolumeSecret map[string]string `yaml:"ControllerPublishVolumeSecret"` - ControllerUnpublishVolumeSecret map[string]string `yaml:"ControllerUnpublishVolumeSecret"` - NodeStageVolumeSecret map[string]string `yaml:"NodeStageVolumeSecret"` - NodePublishVolumeSecret map[string]string `yaml:"NodePublishVolumeSecret"` - CreateSnapshotSecret map[string]string `yaml:"CreateSnapshotSecret"` - DeleteSnapshotSecret map[string]string `yaml:"DeleteSnapshotSecret"` + CreateVolumeSecret map[string]string `yaml:"CreateVolumeSecret"` + DeleteVolumeSecret map[string]string `yaml:"DeleteVolumeSecret"` + ControllerPublishVolumeSecret map[string]string `yaml:"ControllerPublishVolumeSecret"` + ControllerUnpublishVolumeSecret map[string]string `yaml:"ControllerUnpublishVolumeSecret"` + ControllerValidateVolumeCapabilitiesSecret map[string]string `yaml:"ControllerValidateVolumeCapabilitiesSecret"` + NodeStageVolumeSecret map[string]string `yaml:"NodeStageVolumeSecret"` + NodePublishVolumeSecret map[string]string `yaml:"NodePublishVolumeSecret"` + CreateSnapshotSecret map[string]string `yaml:"CreateSnapshotSecret"` + DeleteSnapshotSecret map[string]string `yaml:"DeleteSnapshotSecret"` } // Config provides the configuration for the sanity tests. It // needs to be initialized by the user of the sanity package. type Config struct { - TargetPath string + // TargetPath is the *parent* directory for NodePublishVolumeRequest.target_path. + // It gets created and removed by csi-sanity. + TargetPath string + + // StagingPath is the NodeStageVolumeRequest.staging_target_path. + // It gets created and removed by csi-sanity. StagingPath string - Address string - SecretsFile string - TestVolumeSize int64 - TestVolumeParametersFile string - TestVolumeParameters map[string]string + Address string + ControllerAddress string + SecretsFile string + + TestVolumeSize int64 + TestVolumeParametersFile string + TestVolumeParameters map[string]string + TestNodeVolumeAttachLimit bool + + JUnitFile string + + // Callback functions to customize the creation of target and staging + // directories. Returns the new paths for mount and staging. + // If not defined, directories are created in the default way at TargetPath + // and StagingPath on the host. + // + // Both functions can replace the suggested path. What the test then uses + // is the path returned by them. + // + // Note that target and staging directory have different + // semantics in the CSI spec: for NodeStageVolume, + // CreateStagingDir must create the directory and return the + // full path to it. For NodePublishVolume, CreateTargetDir + // must create the *parent* directory of `path` (or some other + // directory) and return the full path for an entry inside + // that created directory. + CreateTargetDir func(path string) (string, error) + CreateStagingDir func(path string) (string, error) + + // Callback functions to customize the removal of the target and staging + // directories. + // If not defined, directories are removed in the default way at TargetPath + // and StagingPath on the host. + // + // Both functions are passed the actual paths as used during the test. + // + // Note that RemoveTargetPath only needs to remove the *parent* of the + // given path. The CSI driver should have removed the entry at that path + // already.
+ RemoveTargetPath func(path string) error + RemoveStagingPath func(path string) error + + // Commands to be executed for customized creation of the target and staging + // paths. These commands must be available on the host where sanity runs. The + // stdout of the commands provides the paths for mount and staging. + CreateTargetPathCmd string + CreateStagingPathCmd string + // Timeout for the executed commands for path creation. + CreatePathCmdTimeout int + + // Commands to be executed for customized removal of the target and staging + // paths. These commands must be available on the host where sanity runs. + RemoveTargetPathCmd string + RemoveStagingPathCmd string + // Timeout for the executed commands for path removal. + RemovePathCmdTimeout int } // SanityContext holds the variables that each test can depend on. It // gets initialized before each test block runs. type SanityContext struct { - Config *Config - Conn *grpc.ClientConn - Secrets *CSISecrets + Config *Config + Conn *grpc.ClientConn + ControllerConn *grpc.ClientConn + Secrets *CSISecrets + + connAddress string + controllerConnAddress string - connAddress string + // Target and staging paths derived from the sanity config. + TargetPath string + StagingPath string } // Test will test the CSI driver at the specified address by @@ -88,7 +156,16 @@ func Test(t *testing.T, reqConfig *Config) { registerTestsInGinkgo(sc) RegisterFailHandler(Fail) - RunSpecs(t, "CSI Driver Test Suite") + + var specReporters []Reporter + if reqConfig.JUnitFile != "" { + junitReporter := reporters.NewJUnitReporter(reqConfig.JUnitFile) + specReporters = append(specReporters, junitReporter) + } + RunSpecsWithDefaultAndCustomReporters(t, "CSI Driver Test Suite", specReporters) + if sc.Conn != nil { + sc.Conn.Close() + } } func GinkgoTest(reqConfig *Config) { @@ -99,7 +176,7 @@ func GinkgoTest(reqConfig *Config) { registerTestsInGinkgo(sc) } -func (sc *SanityContext) setup() { +func (sc *SanityContext) Setup() { var err error if len(sc.Config.SecretsFile) > 0 { @@ -113,6 +190,9 @@ func (sc *SanityContext) setup() { // dynamically (and differently!) in a BeforeEach, so only // reuse the connection if the address is still the same. if sc.Conn == nil || sc.connAddress != sc.Config.Address { + if sc.Conn != nil { + sc.Conn.Close() + } By("connecting to CSI driver") sc.Conn, err = utils.Connect(sc.Config.Address) Expect(err).NotTo(HaveOccurred()) @@ -121,16 +201,38 @@ func (sc *SanityContext) setup() { By(fmt.Sprintf("reusing connection to CSI driver at %s", sc.connAddress)) } - By("creating mount and staging directories") - err = createMountTargetLocation(sc.Config.TargetPath) - Expect(err).NotTo(HaveOccurred()) - if len(sc.Config.StagingPath) > 0 { - err = createMountTargetLocation(sc.Config.StagingPath) - Expect(err).NotTo(HaveOccurred()) + if sc.ControllerConn == nil || sc.controllerConnAddress != sc.Config.ControllerAddress { + // If controller address is empty, use the common connection. + if sc.Config.ControllerAddress == "" { + sc.ControllerConn = sc.Conn + sc.controllerConnAddress = sc.Config.Address + } else { + sc.ControllerConn, err = utils.Connect(sc.Config.ControllerAddress) + Expect(err).NotTo(HaveOccurred()) + sc.controllerConnAddress = sc.Config.ControllerAddress + } + } else { + By(fmt.Sprintf("reusing connection to CSI driver controller at %s", sc.controllerConnAddress)) } + + By("creating mount and staging directories") + + // If callback function for creating target dir is specified, use it.
+	targetPath, err := createMountTargetLocation(sc.Config.TargetPath, sc.Config.CreateTargetPathCmd, sc.Config.CreateTargetDir, sc.Config.CreatePathCmdTimeout)
+	Expect(err).NotTo(HaveOccurred(), "failed to create target directory %s", targetPath)
+	sc.TargetPath = targetPath
+
+	// Create the staging directory in the same way.
+	stagingPath, err := createMountTargetLocation(sc.Config.StagingPath, sc.Config.CreateStagingPathCmd, sc.Config.CreateStagingDir, sc.Config.CreatePathCmdTimeout)
+	Expect(err).NotTo(HaveOccurred(), "failed to create staging directory %s", stagingPath)
+	sc.StagingPath = stagingPath
 }
 
-func (sc *SanityContext) teardown() {
+func (sc *SanityContext) Teardown() {
+	// Delete the created paths if any.
+	removeMountTargetLocation(sc.TargetPath, sc.Config.RemoveTargetPathCmd, sc.Config.RemoveTargetPath, sc.Config.RemovePathCmdTimeout)
+	removeMountTargetLocation(sc.StagingPath, sc.Config.RemoveStagingPathCmd, sc.Config.RemoveStagingPath, sc.Config.RemovePathCmdTimeout)
+
 	// We intentionally do not close the connection to the CSI
 	// driver here because the large amount of connection attempts
 	// caused test failures
@@ -143,17 +245,77 @@
 	// (https://github.com/kubernetes-csi/csi-test/pull/98).
 }
 
-func createMountTargetLocation(targetPath string) error {
-	fileInfo, err := os.Stat(targetPath)
-	if err != nil && os.IsNotExist(err) {
-		return os.MkdirAll(targetPath, 0755)
-	} else if err != nil {
-		return err
+// createMountTargetLocation takes a target path and creates it, preferring a
+// custom command, then a custom function, and falling back to plain mkdir.
+// It returns the new target path.
+func createMountTargetLocation(targetPath string, createPathCmd string, customCreateDir func(string) (string, error), timeout int) (string, error) {
+
+	// Nothing to create for an empty target path.
+	if targetPath == "" {
+		return targetPath, nil
 	}
-	if !fileInfo.IsDir() {
-		return fmt.Errorf("Target location %s is not a directory", targetPath)
+
+	var newTargetPath string
+
+	if createPathCmd != "" {
+		// Create the target path using the create path command.
+		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
+		defer cancel()
+
+		cmd := exec.CommandContext(ctx, createPathCmd, targetPath)
+		cmd.Stderr = os.Stderr
+		out, err := cmd.Output()
+		if err != nil {
+			return "", fmt.Errorf("target path creation command %s failed: %v", createPathCmd, err)
+		}
+		// Set the command's stdout, trimmed, as the new target path.
+		newTargetPath = strings.TrimSpace(string(out))
+	} else if customCreateDir != nil {
+		// Create the target path using the custom create dir function.
+		newpath, err := customCreateDir(targetPath)
+		if err != nil {
+			return "", err
+		}
+		newTargetPath = newpath
+	} else {
+		// Create the target path. Only the directory itself
+		// and not its parents get created, and it is an error
+		// if the directory already exists.
+		if err := os.Mkdir(targetPath, 0755); err != nil {
+			return "", err
+		}
+		newTargetPath = targetPath
 	}
+	return newTargetPath, nil
+}
+
+// removeMountTargetLocation takes a target path and removes it, preferring a
+// custom command, then a custom function, and falling back to deleting the
+// path on the host.
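+// For example (hypothetical, not part of this patch), a RemoveTargetPathCmd
+// of "delete-path.sh" is invoked as
+//
+//	delete-path.sh <path>
+//
+// and must exit with status 0 once the path has been removed.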
+func removeMountTargetLocation(targetPath string, removePathCmd string, customRemovePath func(string) error, timeout int) error { + if targetPath == "" { + return nil + } + + if removePathCmd != "" { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, removePathCmd, targetPath) + cmd.Stderr = os.Stderr + _, err := cmd.Output() + if err != nil { + return fmt.Errorf("target path removal command %s failed: %v", removePathCmd, err) + } + } else if customRemovePath != nil { + if err := customRemovePath(targetPath); err != nil { + return err + } + } else { + // It's an error if the directory is not empty by now. + return os.Remove(targetPath) + } return nil } @@ -173,11 +335,11 @@ func loadSecrets(path string) (*CSISecrets, error) { return &creds, nil } -var uniqueSuffix = "-" + pseudoUUID() +var uniqueSuffix = "-" + PseudoUUID() -// pseudoUUID returns a unique string generated from random +// PseudoUUID returns a unique string generated from random // bytes, empty string in case of error. -func pseudoUUID() string { +func PseudoUUID() string { b := make([]byte, 8) if _, err := rand.Read(b); err != nil { // Shouldn't happen?! @@ -186,9 +348,9 @@ func pseudoUUID() string { return fmt.Sprintf("%08X-%08X", b[0:4], b[4:8]) } -// uniqueString returns a unique string by appending a random +// UniqueString returns a unique string by appending a random // number. In case of an error, just the prefix is returned, so it // alone should already be fairly unique. -func uniqueString(prefix string) string { +func UniqueString(prefix string) string { return prefix + uniqueSuffix } diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go index 47763b752..5ddc061ce 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go @@ -43,13 +43,13 @@ func registerTestsInGinkgo(sc *SanityContext) { for _, test := range tests { Describe(test.text, func() { BeforeEach(func() { - sc.setup() + sc.Setup() }) test.body(sc) AfterEach(func() { - sc.teardown() + sc.Teardown() }) }) } diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/LICENSE b/vendor/github.com/kubernetes-csi/csi-test/release-tools/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
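Before the remaining hunks, a short aside: taken together, the sanity.go changes above give callers a separate controller endpoint, optional JUnit output, and hooks for the mount and staging directories. A minimal sketch of how a driver's test might consume the new surface (the socket paths, file names, and test name below are hypothetical, not taken from this patch):

    package sanitytest

    import (
    	"testing"

    	sanity "github.com/kubernetes-csi/csi-test/pkg/sanity"
    )

    func TestDriverSanity(t *testing.T) {
    	config := &sanity.Config{
    		// Hypothetical endpoints; if ControllerAddress is empty,
    		// Setup falls back to the common connection.
    		Address:           "unix:/tmp/csi.sock",
    		ControllerAddress: "unix:/tmp/csi-controller.sock",
    		// Parent dir for target_path, and the staging path itself.
    		TargetPath:  "/tmp/csi-mount",
    		StagingPath: "/tmp/csi-staging",
    		// Optional JUnit report, wired into a Ginkgo reporter by Test().
    		JUnitFile: "/tmp/csi-sanity-junit.xml",
    	}
    	sanity.Test(t, config)
    }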
diff --git a/vendor/github.com/kubernetes-csi/csi-test/utils/grpcutil.go b/vendor/github.com/kubernetes-csi/csi-test/utils/grpcutil.go index 20eebbc89..ff0587f74 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/utils/grpcutil.go +++ b/vendor/github.com/kubernetes-csi/csi-test/utils/grpcutil.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/keepalive" ) // Connect address by grpc @@ -40,6 +41,11 @@ func Connect(address string) (*grpc.ClientConn, error) { return net.DialTimeout("unix", u.Path, timeout) })) } + // This is necessary when connecting via TCP and does not hurt + // when using Unix domain sockets. It ensures that gRPC detects a dead connection + // in a timely manner. + dialOptions = append(dialOptions, + grpc.WithKeepaliveParams(keepalive.ClientParameters{PermitWithoutStream: true})) conn, err := grpc.Dial(address, dialOptions...) if err != nil { From 79ade286d4ea8db7d88335bc1e52bcc453774203 Mon Sep 17 00:00:00 2001 From: David Zhu Date: Mon, 24 Jun 2019 11:15:21 -0700 Subject: [PATCH 2/3] Update csi sanity caller to fix small errors, check for snapshot existence before creating volume --- pkg/gce-pd-csi-driver/controller.go | 11 ++++++++++- test/sanity/sanity_test.go | 22 +++++++++++++++++++--- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go index a31fe2915..898c20097 100644 --- a/pkg/gce-pd-csi-driver/controller.go +++ b/pkg/gce-pd-csi-driver/controller.go @@ -24,6 +24,7 @@ import ( "github.com/golang/protobuf/ptypes" "context" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" csi "github.com/container-storage-interface/spec/lib/go/csi" compute "google.golang.org/api/compute/v1" @@ -163,6 +164,14 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre if content.GetSnapshot() != nil { // TODO(#161): Add support for Volume Source (cloning) introduced in CSI v1.0.0 snapshotId = content.GetSnapshot().GetSnapshotId() + + // Verify that snapshot exists + sl, err := gceCS.getSnapshotById(ctx, snapshotId) + if err != nil { + return nil, status.Errorf(codes.Internal, "CreateVolume failed to get snapshot %s: %v", snapshotId, err) + } else if len(sl.Entries) == 0 { + return nil, status.Errorf(codes.NotFound, "CreateVolume source snapshot %s does not exist", snapshotId) + } } } @@ -171,7 +180,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre switch replicationType { case replicationTypeNone: if len(zones) != 1 { - return nil, status.Errorf(codes.Internal, fmt.Sprintf("CreateVolume failed to get a single zone for creating zonal disk, instead got: %v", zones)) + return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume failed to get a single zone for creating zonal disk, instead got: %v", zones)) } disk, err = createSingleZoneDisk(ctx, gceCS.CloudProvider, name, zones, diskType, capacityRange, capBytes, snapshotId, diskEncryptionKmsKey) if err != nil { diff --git a/test/sanity/sanity_test.go b/test/sanity/sanity_test.go index 329e3c69e..4529bd0a5 100644 --- a/test/sanity/sanity_test.go +++ b/test/sanity/sanity_test.go @@ -15,6 +15,9 @@ limitations under the License. 
 package sanitytest
 
 import (
+	"fmt"
+	"os"
+	"path"
 	"testing"
 
 	sanity "github.com/kubernetes-csi/csi-test/pkg/sanity"
@@ -31,9 +34,10 @@ func TestSanity(t *testing.T) {
 	project := "test-project"
 	zone := "test-zone"
 	vendorVersion := "test-version"
-	endpoint := "unix:/tmp/csi.sock"
-	mountPath := "/tmp/csi/mount"
-	stagePath := "/tmp/csi/stage"
+	tmpDir := "/tmp/csi"
+	endpoint := fmt.Sprintf("unix:%s/csi.sock", tmpDir)
+	mountPath := path.Join(tmpDir, "mount")
+	stagePath := path.Join(tmpDir, "stage")
 
 	// Set up driver and env
 	gceDriver := driver.GetGCEDriver()
@@ -57,6 +61,18 @@ func TestSanity(t *testing.T) {
 	}
 	cloudProvider.InsertInstance(instance, "test-location", "test-name")
 
+	err = os.MkdirAll(tmpDir, 0755)
+	if err != nil {
+		t.Fatalf("Failed to create sanity temp working dir %s: %v", tmpDir, err)
+	}
+
+	defer func() {
+		// Clean up tmp dir
+		if err = os.RemoveAll(tmpDir); err != nil {
+			t.Fatalf("Failed to clean up sanity temp working dir %s: %v", tmpDir, err)
+		}
+	}()
+
 	go func() {
 		gceDriver.Run(endpoint)
 	}()

From 7b74b9d025093e05f1fac24f3a98107da405120c Mon Sep 17 00:00:00 2001
From: David Zhu
Date: Mon, 24 Jun 2019 13:07:19 -0700
Subject: [PATCH 3/3] Fix unit tests around create volume to deal with
 non-existent snapshots

---
 pkg/gce-pd-csi-driver/controller_test.go | 103 +++++++++++++++++------
 1 file changed, 75 insertions(+), 28 deletions(-)

diff --git a/pkg/gce-pd-csi-driver/controller_test.go b/pkg/gce-pd-csi-driver/controller_test.go
index 8b5c81f80..1562ae572 100644
--- a/pkg/gce-pd-csi-driver/controller_test.go
+++ b/pkg/gce-pd-csi-driver/controller_test.go
@@ -22,9 +22,11 @@ import (
 	"testing"
 	"time"
 
+	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
 	"github.com/golang/protobuf/ptypes"
 
 	"context"
+
 	compute "google.golang.org/api/compute/v1"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -588,34 +590,6 @@ func TestCreateVolumeArguments(t *testing.T) {
 				},
 			},
 		},
-		{
-			name: "success with data source of snapshot type",
-			req: &csi.CreateVolumeRequest{
-				Name:               "test-name",
-				CapacityRange:      stdCapRange,
-				VolumeCapabilities: stdVolCaps,
-				VolumeContentSource: &csi.VolumeContentSource{
-					Type: &csi.VolumeContentSource_Snapshot{
-						Snapshot: &csi.VolumeContentSource_SnapshotSource{
-							SnapshotId: "snapshot-source",
-						},
-					},
-				},
-			},
-			expVol: &csi.Volume{
-				CapacityBytes:      common.GbToBytes(20),
-				VolumeId:           testVolumeId,
-				VolumeContext:      nil,
-				AccessibleTopology: stdTopology,
-				ContentSource: &csi.VolumeContentSource{
-					Type: &csi.VolumeContentSource_Snapshot{
-						Snapshot: &csi.VolumeContentSource_SnapshotSource{
-							SnapshotId: "snapshot-source",
-						},
-					},
-				},
-			},
-		},
 		{
 			name: "success with block volume capability",
 			req: &csi.CreateVolumeRequest{
@@ -690,6 +664,8 @@ func TestCreateVolumeArguments(t *testing.T) {
 		// Setup new driver each time so no interference
 		gceDriver := initGCEDriver(t, nil)
 
+
 		// Start Test
 		resp, err := gceDriver.cs.CreateVolume(context.Background(), tc.req)
 		// check response
@@ -728,6 +704,77 @@ func TestCreateVolumeArguments(t *testing.T) {
 	}
 }
 
+func TestCreateVolumeWithVolumeSource(t *testing.T) {
+	// Define test cases
+	testCases := []struct {
+		name            string
+		volKey          *meta.Key
+		snapshotOnCloud bool
+		expErrCode      codes.Code
+	}{
+		{
+			name:            "success with data source of snapshot type",
+			volKey:          meta.ZonalKey("my-disk", zone),
+			snapshotOnCloud: true,
+		},
+		{
+			name: "fail with data source of snapshot type that doesn't exist",
exist", + volKey: meta.ZonalKey("my-disk", zone), + snapshotOnCloud: false, + expErrCode: codes.NotFound, + }, + } + + // Run test cases + for _, tc := range testCases { + t.Logf("test case: %s", tc.name) + // Setup new driver each time so no interference + gceDriver := initGCEDriver(t, nil) + + //gceDriver.cs.CloudProvider.CreateSnapshot(context.Background, ) + + // Start Test + req := &csi.CreateVolumeRequest{ + Name: "test-name", + CapacityRange: stdCapRange, + VolumeCapabilities: stdVolCaps, + VolumeContentSource: &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Snapshot{ + Snapshot: &csi.VolumeContentSource_SnapshotSource{ + SnapshotId: testSnapshotId, + }, + }, + }, + } + + if tc.snapshotOnCloud { + gceDriver.cs.CloudProvider.CreateSnapshot(context.Background(), tc.volKey, name) + } + resp, err := gceDriver.cs.CreateVolume(context.Background(), req) + //check response + if err != nil { + serverError, ok := status.FromError(err) + if !ok { + t.Fatalf("Could not get error status code from err: %v", serverError) + } + if serverError.Code() != tc.expErrCode { + t.Fatalf("Expected error code: %v, got: %v. err : %v", tc.expErrCode, serverError.Code(), err) + } + continue + } + if tc.expErrCode != codes.OK { + t.Fatalf("Expected error: %v, got no error", tc.expErrCode) + } + + // Make sure response has snapshot + vol := resp.GetVolume() + if vol.ContentSource == nil || vol.ContentSource.Type == nil || vol.ContentSource.GetSnapshot() == nil || vol.ContentSource.GetSnapshot().SnapshotId == "" { + t.Fatalf("Expected volume content source to have snapshot ID, got none") + } + + } +} + func TestCreateVolumeRandomRequisiteTopology(t *testing.T) { req := &csi.CreateVolumeRequest{ Name: "test-name",