From d6f0ad8af6b72810dc9d81af6a90888a86e7c57a Mon Sep 17 00:00:00 2001 From: David Zhu Date: Thu, 31 Oct 2019 17:04:58 -0700 Subject: [PATCH 1/2] Generated: dep ensure - vendor in csi-test v2.4.0-rc1 --- Gopkg.lock | 6 +-- Gopkg.toml | 2 +- .../csi-test/pkg/sanity/controller.go | 17 +++++++- .../csi-test/pkg/sanity/sanity.go | 40 ++++++++++++------- 4 files changed, 44 insertions(+), 21 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index c40dd756f..9093fb849 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -118,15 +118,15 @@ version = "v0.6.1" [[projects]] - digest = "1:a56295e19bdcc1777d448e95df77ccd4378a963ccb0aac2c93c0b03d9d4f024a" + digest = "1:b0e8085f074794a143a6548cb8a4576107584f66c56ab8d07ca8aafd60c795c1" name = "github.com/kubernetes-csi/csi-test" packages = [ "pkg/sanity", "utils", ] pruneopts = "NUT" - revision = "82b05190c167c52bb6d5aaf2e1d7c833fa539783" - version = "v2.2.0" + revision = "ec1441c6838578b38bf361ed3224155e30ac4cfb" + version = "v2.4.0-rc1" [[projects]] digest = "1:3c46171ee5eee66086897e1efca67b84bf552b1f80039d421068c90684868194" diff --git a/Gopkg.toml b/Gopkg.toml index e5df1ecf2..40157b4c0 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -47,7 +47,7 @@ [[constraint]] name = "github.com/kubernetes-csi/csi-test" - version = "2.2.0" + version = "2.4.0-rc1" [[constraint]] branch = "master" diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go index a40879e10..084bc660d 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go @@ -139,6 +139,7 @@ var _ = DescribeSanity("Controller Service [Controller Server]", func(sc *Sanity case csi.ControllerServiceCapability_RPC_PUBLISH_READONLY: case csi.ControllerServiceCapability_RPC_CLONE_VOLUME: case csi.ControllerServiceCapability_RPC_EXPAND_VOLUME: + case csi.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES: default: Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) } @@ -259,11 +260,18 @@ var _ = DescribeSanity("Controller Service [Controller Server]", func(sc *Sanity Expect(len(vols.GetEntries())).To(Equal(totalVols)) }) - It("pagination should detect volumes added between pages and accept tokens when the last volume from a page is deleted", func() { + // Disabling this below case as it is fragile and results are inconsistent + // when no of volumes are different. The test might fail on a driver + // which implements the pagination based on index just by altering + // minVolCount := 4 and maxEntries := 3 + // Related discussion links: + // https://github.com/intel/pmem-csi/pull/424#issuecomment-540499938 + // https://github.com/kubernetes-csi/csi-test/issues/223 + XIt("pagination should detect volumes added between pages and accept tokens when the last volume from a page is deleted", func() { // minVolCount is the minimum number of volumes expected to exist, // based on which paginated volume listing is performed. minVolCount := 3 - // maxEntried is the maximum entries in list volume request. + // maxEntries is the maximum entries in list volume request. 
maxEntries := 2 // existing_vols to keep a record of the volumes that should exist existing_vols := map[string]bool{} @@ -358,6 +366,8 @@ var _ = DescribeSanity("Controller Service [Controller Server]", func(sc *Sanity Expect(err).NotTo(HaveOccurred()) Expect(vol).NotTo(BeNil()) Expect(vol.Volume).NotTo(BeNil()) + // Register the volume so it's automatically cleaned + cl.RegisterVolume(vol.Volume.VolumeId, VolumeInfo{VolumeID: vol.Volume.VolumeId}) existing_vols[vol.Volume.VolumeId] = true vols, err = c.ListVolumes( @@ -2097,6 +2107,7 @@ var _ = DescribeSanity("ExpandVolume [Controller Server]", func(sc *SanityContex CapacityRange: &csi.CapacityRange{ RequiredBytes: TestVolumeExpandSize(sc), }, + Secrets: sc.Secrets.ControllerExpandVolumeSecret, } rsp, err := c.ControllerExpandVolume(context.Background(), expReq) Expect(err).To(HaveOccurred()) @@ -2110,6 +2121,7 @@ var _ = DescribeSanity("ExpandVolume [Controller Server]", func(sc *SanityContex It("should fail if no capacity range is given", func() { expReq := &csi.ControllerExpandVolumeRequest{ VolumeId: "", + Secrets: sc.Secrets.ControllerExpandVolumeSecret, } rsp, err := c.ControllerExpandVolume(context.Background(), expReq) Expect(err).To(HaveOccurred()) @@ -2156,6 +2168,7 @@ var _ = DescribeSanity("ExpandVolume [Controller Server]", func(sc *SanityContex CapacityRange: &csi.CapacityRange{ RequiredBytes: TestVolumeExpandSize(sc), }, + Secrets: sc.Secrets.ControllerExpandVolumeSecret, } rsp, err := c.ControllerExpandVolume(context.Background(), expReq) Expect(err).NotTo(HaveOccurred()) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go index c42fb5f8b..49daa7f76 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go @@ -48,6 +48,7 @@ type CSISecrets struct { NodePublishVolumeSecret map[string]string `yaml:"NodePublishVolumeSecret"` CreateSnapshotSecret map[string]string `yaml:"CreateSnapshotSecret"` DeleteSnapshotSecret map[string]string `yaml:"DeleteSnapshotSecret"` + ControllerExpandVolumeSecret map[string]string `yaml:"ControllerExpandVolumeSecret"` } // Config provides the configuration for the sanity tests. It @@ -121,9 +122,10 @@ type Config struct { // Timeout for the executed commands for path removal. RemovePathCmdTimeout int - // IDGen is an optional interface for callers to provide a generator for - // valid Volume and Node IDs. Defaults to DefaultIDGenerator which generates - // generic string IDs + // IDGen is an optional interface for callers to provide a + // generator for valid Volume and Node IDs. If unset, + // it will be set to a DefaultIDGenerator instance when + // passing the config to Test or GinkgoTest. IDGen IDGenerator } @@ -143,6 +145,21 @@ type SanityContext struct { StagingPath string } +// newContext sets up sanity testing with a config supplied by the +// user of the sanity package. Ownership of that config is shared +// between the sanity package and the caller. +func newContext(reqConfig *Config) *SanityContext { + // To avoid runtime if checks when using IDGen, a default + // is set here. + if reqConfig.IDGen == nil { + reqConfig.IDGen = &DefaultIDGenerator{} + } + + return &SanityContext{ + Config: reqConfig, + } +} + // Test will test the CSI driver at the specified address by // setting up a Ginkgo suite and running it. 
func Test(t *testing.T, reqConfig *Config) { @@ -158,14 +175,7 @@ func Test(t *testing.T, reqConfig *Config) { } } - if reqConfig.IDGen == nil { - reqConfig.IDGen = &DefaultIDGenerator{} - } - - sc := &SanityContext{ - Config: reqConfig, - } - + sc := newContext(reqConfig) registerTestsInGinkgo(sc) RegisterFailHandler(Fail) @@ -180,11 +190,11 @@ func Test(t *testing.T, reqConfig *Config) { } } +// GinkoTest is another entry point for sanity testing: instead of directly +// running tests like Test does, it merely registers the tests. This can +// be used to embed sanity testing in a custom Ginkgo test suite. func GinkgoTest(reqConfig *Config) { - sc := &SanityContext{ - Config: reqConfig, - } - + sc := newContext(reqConfig) registerTestsInGinkgo(sc) } From 5f48ffdf75adebddc8b878cc2341d7a3217d9670 Mon Sep 17 00:00:00 2001 From: David Zhu Date: Thu, 31 Oct 2019 17:05:40 -0700 Subject: [PATCH 2/2] Add ListVolumes with PublishedNodes --- .../overlays/dev/kustomization.yaml | 1 + pkg/gce-cloud-provider/compute/fake-gce.go | 60 ++++++++++++++++--- pkg/gce-cloud-provider/compute/gce-compute.go | 18 ++++++ pkg/gce-cloud-provider/compute/gce.go | 6 ++ pkg/gce-pd-csi-driver/controller.go | 38 +++++++++++- pkg/gce-pd-csi-driver/controller_test.go | 58 ++++++++++++++++++ pkg/gce-pd-csi-driver/gce-pd-driver.go | 2 + test/e2e/tests/single_zone_e2e_test.go | 52 ++++++++++++++-- test/remote/client-wrappers.go | 12 ++++ test/remote/instance.go | 5 +- 10 files changed, 235 insertions(+), 17 deletions(-) diff --git a/deploy/kubernetes/overlays/dev/kustomization.yaml b/deploy/kubernetes/overlays/dev/kustomization.yaml index 4e7b4cb1a..64eb081d5 100644 --- a/deploy/kubernetes/overlays/dev/kustomization.yaml +++ b/deploy/kubernetes/overlays/dev/kustomization.yaml @@ -10,3 +10,4 @@ images: - name: gke.gcr.io/gcp-compute-persistent-disk-csi-driver newName: gcr.io/dyzz-csi-staging/csi/gce-pd-driver newTag: "latest" + \ No newline at end of file diff --git a/pkg/gce-cloud-provider/compute/fake-gce.go b/pkg/gce-cloud-provider/compute/fake-gce.go index 896795518..d737f16d0 100644 --- a/pkg/gce-cloud-provider/compute/fake-gce.go +++ b/pkg/gce-cloud-provider/compute/fake-gce.go @@ -28,6 +28,9 @@ import ( "google.golang.org/grpc/status" "k8s.io/klog" "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/uuid" ) const ( @@ -41,20 +44,22 @@ type FakeCloudProvider struct { project string zone string - disks map[string]*CloudDisk - instances map[string]*computev1.Instance - snapshots map[string]*computev1.Snapshot + disks map[string]*CloudDisk + pageTokens map[string]sets.String + instances map[string]*computev1.Instance + snapshots map[string]*computev1.Snapshot } var _ GCECompute = &FakeCloudProvider{} func CreateFakeCloudProvider(project, zone string, cloudDisks []*CloudDisk) (*FakeCloudProvider, error) { fcp := &FakeCloudProvider{ - project: project, - zone: zone, - disks: map[string]*CloudDisk{}, - instances: map[string]*computev1.Instance{}, - snapshots: map[string]*computev1.Snapshot{}, + project: project, + zone: zone, + disks: map[string]*CloudDisk{}, + instances: map[string]*computev1.Instance{}, + snapshots: map[string]*computev1.Snapshot{}, + pageTokens: map[string]sets.String{}, } for _, d := range cloudDisks { fcp.disks[d.GetName()] = d @@ -94,6 +99,45 @@ func (cloud *FakeCloudProvider) ListZones(ctx context.Context, region string) ([ return []string{cloud.zone, "country-region-fakesecondzone"}, nil } +func (cloud 
*FakeCloudProvider) ListDisks(ctx context.Context, maxEntries int64, pageToken string) ([]*computev1.Disk, string, error) { + // Ignore page tokens for now + var seen sets.String + var ok bool + var count int64 = 0 + var newToken string + d := []*computev1.Disk{} + + if pageToken != "" { + seen, ok = cloud.pageTokens[pageToken] + if !ok { + return nil, "", invalidError() + } + } else { + seen = sets.NewString() + } + + if maxEntries == 0 { + maxEntries = 500 + } + + for name, cd := range cloud.disks { + // Only return zonal disks for simplicity + if !seen.Has(name) { + d = append(d, cd.ZonalDisk) + seen.Insert(name) + count++ + } + + if count >= maxEntries { + newToken = string(uuid.NewUUID()) + cloud.pageTokens[newToken] = seen + break + } + } + + return d, newToken, nil +} + func (cloud *FakeCloudProvider) ListSnapshots(ctx context.Context, filter string, maxEntries int64, pageToken string) ([]*computev1.Snapshot, string, error) { var sourceDisk string snapshots := []*computev1.Snapshot{} diff --git a/pkg/gce-cloud-provider/compute/gce-compute.go b/pkg/gce-cloud-provider/compute/gce-compute.go index e3962ccf9..1986827a5 100644 --- a/pkg/gce-cloud-provider/compute/gce-compute.go +++ b/pkg/gce-cloud-provider/compute/gce-compute.go @@ -50,6 +50,7 @@ type GCECompute interface { GetDiskTypeURI(volKey *meta.Key, diskType string) string WaitForAttach(ctx context.Context, volKey *meta.Key, instanceZone, instanceName string) error ResizeDisk(ctx context.Context, volKey *meta.Key, requestBytes int64) (int64, error) + ListDisks(ctx context.Context, maxEntries int64, pageToken string) ([]*computev1.Disk, string, error) // Regional Disk Methods GetReplicaZoneURI(zone string) string // Instance Methods @@ -62,6 +63,23 @@ type GCECompute interface { DeleteSnapshot(ctx context.Context, snapshotName string) error } +// ListDisks lists disks based on maxEntries and pageToken only in the project +// and zone that the driver is running in. 
+func (cloud *CloudProvider) ListDisks(ctx context.Context, maxEntries int64, pageToken string) ([]*computev1.Disk, string, error) { + lCall := cloud.service.Disks.List(cloud.project, cloud.zone) + if maxEntries != 0 { + lCall = lCall.MaxResults(maxEntries) + } + if len(pageToken) != 0 { + lCall = lCall.PageToken(pageToken) + } + diskList, err := lCall.Do() + if err != nil { + return nil, "", err + } + return diskList.Items, diskList.NextPageToken, nil +} + // RepairUnderspecifiedVolumeKey will query the cloud provider and check each zone for the disk specified // by the volume key and return a volume key with a correct zone func (cloud *CloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, volumeKey *meta.Key) (*meta.Key, error) { diff --git a/pkg/gce-cloud-provider/compute/gce.go b/pkg/gce-cloud-provider/compute/gce.go index 9432da86d..5c0a81460 100644 --- a/pkg/gce-cloud-provider/compute/gce.go +++ b/pkg/gce-cloud-provider/compute/gce.go @@ -228,3 +228,9 @@ func IsGCEError(err error, reason string) bool { func IsGCENotFoundError(err error) bool { return IsGCEError(err, "notFound") } + +// IsInvalidError returns true if the error is a googleapi.Error with +// invalid reason +func IsGCEInvalidError(err error) bool { + return IsGCEError(err, "invalid") +} diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go index 44dab8ee9..3b1051ca9 100644 --- a/pkg/gce-pd-csi-driver/controller.go +++ b/pkg/gce-pd-csi-driver/controller.go @@ -510,8 +510,42 @@ func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context func (gceCS *GCEControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { // https://cloud.google.com/compute/docs/reference/beta/disks/list - // List volumes in the whole region? In only the zone that this controller is running? - return nil, status.Error(codes.Unimplemented, "") + if req.MaxEntries < 0 { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf( + "ListVolumes got max entries request %v. 
GCE only supports values between 0-500", req.MaxEntries)) + } + var maxEntries int64 = int64(req.MaxEntries) + if maxEntries > 500 { + klog.Warningf("ListVolumes requested max entries of %v, GCE only supports values <=500 so defaulting value back to 500", maxEntries) + maxEntries = 500 + } + diskList, nextToken, err := gceCS.CloudProvider.ListDisks(ctx, maxEntries, req.StartingToken) + if err != nil { + if gce.IsGCEInvalidError(err) { + return nil, status.Error(codes.Aborted, fmt.Sprintf("ListVolumes error with invalid request: %v", err)) + } + return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown list disk error: %v", err)) + } + entries := []*csi.ListVolumesResponse_Entry{} + for _, d := range diskList { + users := []string{} + for _, u := range d.Users { + users = append(users, cleanSelfLink(u)) + } + entries = append(entries, &csi.ListVolumesResponse_Entry{ + Volume: &csi.Volume{ + VolumeId: cleanSelfLink(d.SelfLink), + }, + Status: &csi.ListVolumesResponse_VolumeStatus{ + PublishedNodeIds: users, + }, + }) + } + + return &csi.ListVolumesResponse{ + Entries: entries, + NextToken: nextToken, + }, nil } func (gceCS *GCEControllerServer) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) { diff --git a/pkg/gce-pd-csi-driver/controller_test.go b/pkg/gce-pd-csi-driver/controller_test.go index 53e6cb9e0..e0e41d889 100644 --- a/pkg/gce-pd-csi-driver/controller_test.go +++ b/pkg/gce-pd-csi-driver/controller_test.go @@ -714,6 +714,64 @@ func TestCreateVolumeArguments(t *testing.T) { } } +func TestListVolumeArgs(t *testing.T) { + testCases := []struct { + name string + maxEntries int32 + expectedEntries int + expectedErr bool + }{ + { + name: "normal", + expectedEntries: 500, + }, + { + name: "fine amount of entries", + maxEntries: 420, + expectedEntries: 420, + }, + { + name: "too many entries, but defaults to 500", + maxEntries: 501, + expectedEntries: 500, + }, + { + name: "negative entries", + maxEntries: -1, + expectedErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Setup new driver each time so no interference + d := []*gce.CloudDisk{} + for i := 0; i < 600; i++ { + // Create 600 dummy disks + d = append(d, &gce.CloudDisk{ZonalDisk: &compute.Disk{Name: fmt.Sprintf("%v", i)}}) + } + gceDriver := initGCEDriver(t, d) + lvr := &csi.ListVolumesRequest{ + MaxEntries: tc.maxEntries, + } + resp, err := gceDriver.cs.ListVolumes(context.TODO(), lvr) + if tc.expectedErr && err == nil { + t.Fatalf("Got no error when expecting an error") + } + if err != nil { + if !tc.expectedErr { + t.Fatalf("Got error %v, expecting none", err) + } + return + } + + if len(resp.Entries) != tc.expectedEntries { + t.Fatalf("Got %v entries, expected %v", len(resp.Entries), tc.expectedEntries) + } + }) + } +} + func TestCreateVolumeWithVolumeSource(t *testing.T) { // Define test cases testCases := []struct { diff --git a/pkg/gce-pd-csi-driver/gce-pd-driver.go b/pkg/gce-pd-csi-driver/gce-pd-driver.go index f49ef2261..a43b36f8f 100644 --- a/pkg/gce-pd-csi-driver/gce-pd-driver.go +++ b/pkg/gce-pd-csi-driver/gce-pd-driver.go @@ -67,6 +67,8 @@ func (gceDriver *GCEDriver) SetupGCEDriver(cloudProvider gce.GCECompute, mounter csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, csi.ControllerServiceCapability_RPC_PUBLISH_READONLY, csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, + csi.ControllerServiceCapability_RPC_LIST_VOLUMES, + csi.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES, } 
gceDriver.AddControllerServiceCapabilities(csc) ns := []csi.NodeServiceCapability_RPC_Type{ diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go index 5f18fe947..b38846a78 100644 --- a/test/e2e/tests/single_zone_e2e_test.go +++ b/test/e2e/tests/single_zone_e2e_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common" gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute" - remote "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/remote" + "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/remote" csi "github.com/container-storage-interface/spec/lib/go/csi" . "github.com/onsi/ginkgo" @@ -423,6 +423,36 @@ var _ = Describe("GCE PD CSI Driver", func() { Expect(err).To(BeNil(), "Failed to go through volume lifecycle after restoring CMEK key") }) + It("Should create disks, attach them places, and verify List returns correct results", func() { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() + + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + + nodeID := testContext.Instance.GetNodeID() + + _, volID := createAndValidateUniqueZonalDisk(client, p, z) + defer deleteVolumeOrError(client, volID, p) + + _, secondVolID := createAndValidateUniqueZonalDisk(client, p, z) + defer deleteVolumeOrError(client, secondVolID, p) + + // Attach volID to current instance + err := client.ControllerPublishVolume(volID, nodeID) + Expect(err).To(BeNil(), "Failed ControllerPublishVolume") + defer client.ControllerUnpublishVolume(volID, nodeID) + + // List Volumes + volsToNodes, err := client.ListVolumes() + Expect(err).To(BeNil(), "Failed ListVolumes") + + // Verify + Expect(volsToNodes[volID]).ToNot(BeNil(), "Couldn't find attached nodes for vol") + Expect(volsToNodes[volID]).To(ContainElement(nodeID), "Couldn't find node in attached nodes for vol") + Expect(volsToNodes[secondVolID]).To(BeNil(), "Second vol ID attached nodes not nil") + }) + It("Should create and delete snapshot for RePD in two zones ", func() { Expect(testContexts).ToNot(BeEmpty()) testContext := getRandomTestContext() @@ -580,10 +610,11 @@ func equalWithinEpsilon(a, b, epsiolon int64) bool { return b-a < epsiolon } -func createAndValidateUniqueZonalDisk(client *remote.CsiClient, project, zone string) (string, string) { +func createAndValidateUniqueZonalDisk(client *remote.CsiClient, project, zone string) (volName, volID string) { // Create Disk - volName := testNamePrefix + string(uuid.NewUUID()) - volID, err := client.CreateVolume(volName, nil, defaultSizeGb, + var err error + volName = testNamePrefix + string(uuid.NewUUID()) + volID, err = client.CreateVolume(volName, nil, defaultSizeGb, &csi.TopologyRequirement{ Requisite: []*csi.Topology{ { @@ -600,6 +631,17 @@ func createAndValidateUniqueZonalDisk(client *remote.CsiClient, project, zone st Expect(cloudDisk.Status).To(Equal(readyState)) Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb)) Expect(cloudDisk.Name).To(Equal(volName)) + return +} + +func deleteVolumeOrError(client *remote.CsiClient, volID, project string) { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") - return volName, volID + // Validate Disk Deleted + key, err := common.VolumeIDToKey(volID) + Expect(err).To(BeNil(), "Failed to conver volume ID To key") + _, err = computeService.Disks.Get(project, key.Zone, key.Name).Do() + 
Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") } diff --git a/test/remote/client-wrappers.go b/test/remote/client-wrappers.go index a340ed91d..fb4366dc5 100644 --- a/test/remote/client-wrappers.go +++ b/test/remote/client-wrappers.go @@ -138,6 +138,18 @@ func (c *CsiClient) ControllerPublishVolume(volId, nodeId string) error { return err } +func (c *CsiClient) ListVolumes() (map[string]([]string), error) { + resp, err := c.ctrlClient.ListVolumes(context.Background(), &csipb.ListVolumesRequest{}) + if err != nil { + return nil, err + } + vols := map[string]([]string){} + for _, e := range resp.Entries { + vols[e.Volume.VolumeId] = e.Status.PublishedNodeIds + } + return vols, nil +} + func (c *CsiClient) ControllerUnpublishVolume(volId, nodeId string) error { cupreq := &csipb.ControllerUnpublishVolumeRequest{ VolumeId: volId, diff --git a/test/remote/instance.go b/test/remote/instance.go index 7244f4cf3..a078c6728 100644 --- a/test/remote/instance.go +++ b/test/remote/instance.go @@ -18,6 +18,7 @@ package remote import ( "context" + "errors" "fmt" "io/ioutil" "net/http" @@ -136,9 +137,9 @@ func (i *InstanceInfo) CreateOrGetInstance(serviceAccount string) error { if err != nil { ret := fmt.Sprintf("could not create instance %s: API error: %v", i.name, err) if op != nil { - ret = fmt.Sprintf("%s: %v", ret, op.Error) + ret = fmt.Sprintf("%s. op error: %v", ret, op.Error) } - return fmt.Errorf(ret) + return errors.New(ret) } else if op.Error != nil { return fmt.Errorf("could not create instance %s: %+v", i.name, op.Error) }
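
A small, illustrative Go sketch (not part of the patch) of how a caller could page through the ListVolumes implementation added in controller.go. It uses only request/response fields already exercised above (MaxEntries, StartingToken, NextToken, Volume.VolumeId, Status.PublishedNodeIds); the dial target and page size are placeholders, and it assumes a reachable controller gRPC endpoint.

// listvolumes_example.go - illustrative only; assumes a reachable
// controller gRPC endpoint for the driver (dial target is a placeholder).
package main

import (
	"context"
	"fmt"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
)

// listAllVolumes walks every page of ListVolumes and returns a map of
// volume ID -> node IDs the volume is currently published to, mirroring
// what the e2e test above asserts on.
func listAllVolumes(conn *grpc.ClientConn) (map[string][]string, error) {
	client := csi.NewControllerClient(conn)
	published := map[string][]string{}
	token := ""
	for {
		resp, err := client.ListVolumes(context.Background(), &csi.ListVolumesRequest{
			MaxEntries:    100, // the driver caps any request at 500 entries per page
			StartingToken: token,
		})
		if err != nil {
			return nil, err
		}
		for _, e := range resp.Entries {
			if e.Status != nil {
				published[e.Volume.VolumeId] = e.Status.PublishedNodeIds
			} else {
				published[e.Volume.VolumeId] = nil
			}
		}
		token = resp.NextToken
		if token == "" { // an empty NextToken means the listing is complete
			return published, nil
		}
	}
}

func main() {
	// Placeholder endpoint; the real driver listens on a CSI socket.
	conn, err := grpc.Dial("127.0.0.1:10000", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	vols, err := listAllVolumes(conn)
	if err != nil {
		panic(err)
	}
	fmt.Println(vols)
}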