diff --git a/Gopkg.lock b/Gopkg.lock
index 7d79de926..35819c5f8 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -51,13 +51,13 @@
   revision = "b7773ae218740a7be65057fc60b366a49b538a44"
 
 [[projects]]
-  branch = "master"
+  branch = "v0.3.0"
   name = "github.com/kubernetes-csi/csi-test"
   packages = [
     "pkg/sanity",
     "utils"
   ]
-  revision = "1bf94ed5c3afa2db7d3117f206f1b00249764790"
+  revision = "53045fadb43cf34da55260ee767f7e45645388e9"
 
 [[projects]]
   name = "github.com/onsi/ginkgo"
@@ -306,6 +306,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "4ca1df328310c5afd5b74eb573280af9e650de8dc4a6573c1aa15de82d72edd9"
+  inputs-digest = "52dfe28773229d8cbf1622d274e5f1bf9b9b263f3edb684802cc87095162a5c4"
   solver-name = "gps-cdcl"
   solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index c34f8fd8f..7d541779a 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -76,7 +76,7 @@
   non-go = true
 
 [[constraint]]
-  branch = "master"
+  branch = "v0.3.0"
   name = "github.com/kubernetes-csi/csi-test"
 
 [[constraint]]
diff --git a/cmd/main.go b/cmd/main.go
index 2c7e3e5d1..b9bbbbc4c 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -16,11 +16,14 @@ package main
 
 import (
 	"flag"
+	"math/rand"
 	"os"
+	"time"
 
 	"github.com/golang/glog"
-	gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider"
+	gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute"
+	metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
 	driver "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-pd-csi-driver"
 	mountmanager "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/mount-manager"
 )
@@ -38,6 +41,7 @@ var (
 
 func main() {
 	flag.Parse()
+	rand.Seed(time.Now().UnixNano())
 	handle()
 	os.Exit(0)
 }
@@ -59,7 +63,12 @@ func handle() {
 	mounter := mountmanager.NewSafeMounter()
 	deviceUtils := mountmanager.NewDeviceUtils()
 
-	err = gceDriver.SetupGCEDriver(cloudProvider, mounter, deviceUtils, *driverName, *nodeID, vendorVersion)
+	ms, err := metadataservice.NewMetadataService()
+	if err != nil {
+		glog.Fatalf("Failed to set up metadata service: %v", err)
+	}
+
+	err = gceDriver.SetupGCEDriver(cloudProvider, mounter, deviceUtils, ms, *driverName, *nodeID, vendorVersion)
 	if err != nil {
 		glog.Fatalf("Failed to initialize GCE CSI Driver: %v", err)
 	}
diff --git a/pkg/common/constants.go b/pkg/common/constants.go
new file mode 100644
index 000000000..529099f9e
--- /dev/null
+++ b/pkg/common/constants.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+const (
+	// Keys for Storage Class Parameters
+	ParameterKeyZone = "zone"
+	ParameterKeyType = "type"
+
+	// Keys for Topology. This key will be shared amongst drivers from GCP
+	TopologyKeyZone = "com.google.topology/zone"
+)
diff --git a/pkg/utils/utils.go b/pkg/common/utils.go
similarity index 98%
rename from pkg/utils/utils.go
rename to pkg/common/utils.go
index 7fe7edd05..980931071 100644
--- a/pkg/utils/utils.go
+++ b/pkg/common/utils.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package utils
+package common
 
 import (
 	"fmt"
diff --git a/pkg/gce-cloud-provider/fake-gce.go b/pkg/gce-cloud-provider/compute/fake-gce.go
similarity index 91%
rename from pkg/gce-cloud-provider/fake-gce.go
rename to pkg/gce-cloud-provider/compute/fake-gce.go
index 2a24c8be8..3164f975a 100644
--- a/pkg/gce-cloud-provider/fake-gce.go
+++ b/pkg/gce-cloud-provider/compute/fake-gce.go
@@ -21,9 +21,10 @@ import (
 	"github.com/golang/glog"
 	"golang.org/x/net/context"
 	compute "google.golang.org/api/compute/v1"
+	"google.golang.org/api/googleapi"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
-	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/utils"
+	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
 )
 
 type FakeCloudProvider struct {
@@ -58,7 +59,7 @@ func (cloud *FakeCloudProvider) GetZone() string {
 func (cloud *FakeCloudProvider) GetDiskOrError(ctx context.Context, volumeZone, volumeName string) (*compute.Disk, error) {
 	disk, ok := cloud.disks[volumeName]
 	if !ok {
-		return nil, fmt.Errorf("Disk %v not found", volumeName)
+		return nil, notFoundError()
 	}
 	return disk, nil
 }
@@ -71,12 +72,12 @@ func (cloud *FakeCloudProvider) GetAndValidateExistingDisk(ctx context.Context,
 	}
 	if disk != nil {
 		// Check that disk is the same
-		requestValid := utils.GbToBytes(disk.SizeGb) >= reqBytes || reqBytes == 0
-		responseValid := utils.GbToBytes(disk.SizeGb) <= limBytes || limBytes == 0
+		requestValid := common.GbToBytes(disk.SizeGb) >= reqBytes || reqBytes == 0
+		responseValid := common.GbToBytes(disk.SizeGb) <= limBytes || limBytes == 0
 		if !requestValid || !responseValid {
 			return true, status.Error(codes.AlreadyExists, fmt.Sprintf(
 				"Disk already exists with incompatible capacity. Need %v (Required) < %v (Existing) < %v (Limit)",
-				reqBytes, utils.GbToBytes(disk.SizeGb), limBytes))
+				reqBytes, common.GbToBytes(disk.SizeGb), limBytes))
 		}
 
 		respType := strings.Split(disk.Type, "/")
@@ -171,7 +172,7 @@ func (cloud *FakeCloudProvider) InsertInstance(instance *compute.Instance, insta
 func (cloud *FakeCloudProvider) GetInstanceOrError(ctx context.Context, instanceZone, instanceName string) (*compute.Instance, error) {
 	instance, ok := cloud.instances[instanceName]
 	if !ok {
-		return nil, fmt.Errorf("Could not find instance %v", instanceName)
+		return nil, notFoundError()
 	}
 	return instance, nil
 }
@@ -180,3 +181,13 @@ func (cloud *FakeCloudProvider) GetInstanceOrError(ctx context.Context, instance
 func (cloud *FakeCloudProvider) WaitForOp(ctx context.Context, op *compute.Operation, zone string) error {
 	return nil
 }
+
+func notFoundError() *googleapi.Error {
+	return &googleapi.Error{
+		Errors: []googleapi.ErrorItem{
+			{
+				Reason: "notFound",
+			},
+		},
+	}
+}
diff --git a/pkg/gce-cloud-provider/gce-compute.go b/pkg/gce-cloud-provider/compute/gce-compute.go
similarity index 90%
rename from pkg/gce-cloud-provider/gce-compute.go
rename to pkg/gce-cloud-provider/compute/gce-compute.go
index 4d74c3eda..54c006206 100644
--- a/pkg/gce-cloud-provider/gce-compute.go
+++ b/pkg/gce-cloud-provider/compute/gce-compute.go
@@ -25,7 +25,7 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/utils"
+	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
 )
 
 type GCECompute interface {
@@ -62,11 +62,7 @@ func (cloud *CloudProvider) GetDiskOrError(ctx context.Context, volumeZone, volu
 	glog.Infof("Getting disk %v from zone %v", volumeName, volumeZone)
 	disk, err := svc.Disks.Get(project, volumeZone, volumeName).Context(ctx).Do()
 	if err != nil {
-		if IsGCEError(err, "notFound") {
-			return nil, status.Error(codes.NotFound, fmt.Sprintf("disk %v does not exist", volumeName))
-		}
-
-		return nil, status.Error(codes.Internal, fmt.Sprintf("unknown disk GET error: %v", err))
+		return nil, err
 	}
 	glog.Infof("Got disk %v from zone %v", volumeName, volumeZone)
 	return disk, nil
@@ -86,12 +82,12 @@ func (cloud *CloudProvider) GetAndValidateExistingDisk(ctx context.Context, conf
 
 	if resp != nil {
 		// Disk already exists
-		requestValid := utils.GbToBytes(resp.SizeGb) >= reqBytes && reqBytes != 0
-		responseValid := utils.GbToBytes(resp.SizeGb) <= limBytes && limBytes != 0
+		requestValid := common.GbToBytes(resp.SizeGb) >= reqBytes && reqBytes != 0
+		responseValid := common.GbToBytes(resp.SizeGb) <= limBytes && limBytes != 0
 		if !requestValid || !responseValid {
 			return true, status.Error(codes.AlreadyExists, fmt.Sprintf(
 				"Disk already exists with incompatible capacity. Need %v (Required) < %v (Existing) < %v (Limit)",
-				reqBytes, utils.GbToBytes(resp.SizeGb), limBytes))
+				reqBytes, common.GbToBytes(resp.SizeGb), limBytes))
 		}
 
 		respType := strings.Split(resp.Type, "/")
@@ -190,11 +186,7 @@ func (cloud *CloudProvider) GetInstanceOrError(ctx context.Context, instanceZone
 	glog.Infof("Getting instance %v from zone %v", instanceName, instanceZone)
 	instance, err := svc.Instances.Get(project, instanceZone, instanceName).Do()
 	if err != nil {
-		if IsGCEError(err, "notFound") {
-			return nil, status.Error(codes.NotFound, fmt.Sprintf("instance %v does not exist", instanceName))
-		}
-
-		return nil, status.Error(codes.Internal, fmt.Sprintf("unknown instance GET error: %v", err))
+		return nil, err
 	}
 	glog.Infof("Got instance %v from zone %v", instanceName, instanceZone)
 	return instance, nil
diff --git a/pkg/gce-cloud-provider/gce.go b/pkg/gce-cloud-provider/compute/gce.go
similarity index 97%
rename from pkg/gce-cloud-provider/gce.go
rename to pkg/gce-cloud-provider/compute/gce.go
index fdffc53a0..50dd067b4 100644
--- a/pkg/gce-cloud-provider/gce.go
+++ b/pkg/gce-cloud-provider/compute/gce.go
@@ -51,7 +51,6 @@ func CreateCloudProvider(vendorVersion string) (*CloudProvider, error) {
 	if err != nil {
 		return nil, err
 	}
-	// TODO: Use metadata server or flags to retrieve project and zone. Fallback on flag if necessary
 	project, zone, err := getProjectAndZoneFromMetadata()
 	if err != nil {
diff --git a/pkg/gce-cloud-provider/metadata/fake.go b/pkg/gce-cloud-provider/metadata/fake.go
new file mode 100644
index 000000000..13d5950ac
--- /dev/null
+++ b/pkg/gce-cloud-provider/metadata/fake.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metadata
+
+type fakeServiceManager struct{}
+
+var _ MetadataService = &fakeServiceManager{}
+
+func NewFakeService() MetadataService {
+	return &fakeServiceManager{}
+}
+
+func (manager *fakeServiceManager) GetZone() string {
+	return "test-location"
+}
+
+func (manager *fakeServiceManager) GetProject() string {
+	return "test-project"
+}
diff --git a/pkg/gce-cloud-provider/metadata/metadata.go b/pkg/gce-cloud-provider/metadata/metadata.go
new file mode 100644
index 000000000..d1a36738a
--- /dev/null
+++ b/pkg/gce-cloud-provider/metadata/metadata.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metadata
+
+import (
+	"fmt"
+
+	"cloud.google.com/go/compute/metadata"
+)
+
+// MetadataService is a fakeable interface exposing necessary data
+// from the GCE Metadata service
+type MetadataService interface {
+	GetZone() string
+	GetProject() string
+}
+
+type metadataServiceManager struct {
+	// Current zone the driver is running in
+	zone    string
+	project string
+}
+
+var _ MetadataService = &metadataServiceManager{}
+
+func NewMetadataService() (MetadataService, error) {
+	zone, err := metadata.Zone()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get current zone: %v", err)
+	}
+	projectID, err := metadata.ProjectID()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get project: %v", err)
+	}
+
+	return &metadataServiceManager{
+		project: projectID,
+		zone:    zone,
+	}, nil
+}
+
+func (manager *metadataServiceManager) GetZone() string {
+	return manager.zone
+}
+
+func (manager *metadataServiceManager) GetProject() string {
+	return manager.project
+}
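The interface above is deliberately tiny so that any component needing zone or project information can be handed a fake in tests. A minimal sketch of a consumer, assuming the repo packages above; the describeLocation helper is illustrative and not part of this patch:

package main

import (
	"fmt"

	metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
)

// describeLocation is a hypothetical consumer: it only sees the
// MetadataService interface, so tests can pass NewFakeService()
// while cmd/main.go passes the metadata-server-backed implementation.
func describeLocation(ms metadataservice.MetadataService) string {
	return fmt.Sprintf("project %s, zone %s", ms.GetProject(), ms.GetZone())
}

func main() {
	// Fake here; on a GCE VM, NewMetadataService() would return real values.
	fmt.Println(describeLocation(metadataservice.NewFakeService()))
}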
diskType := "pd-standard" - configuredZone := gceCS.CloudProvider.GetZone() + var configuredZone string + for k, v := range req.GetParameters() { if k == "csiProvisionerSecretName" || k == "csiProvisionerSecretNamespace" { // These are hardcoded secrets keys required to function but not needed by GCE PD continue } switch strings.ToLower(k) { - case "type": + case common.ParameterKeyType: glog.Infof("Setting type: %v", v) diskType = v - case "zone": + case common.ParameterKeyZone: configuredZone = v default: return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("invalid option %q", k)) } } + if req.GetAccessibilityRequirements() != nil { + if len(configuredZone) != 0 { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume only one of parameter zone or topology zone may be specified")) + } + configuredZone, err = pickTopology(req.GetAccessibilityRequirements()) + if err != nil { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume failed to pick topology: %v", err)) + } + } + + if len(configuredZone) == 0 { + // Default to zone that the driver is in + configuredZone = gceCS.CloudProvider.GetZone() + } + createResp := &csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: capBytes, - Id: utils.CombineVolumeId(configuredZone, name), + Id: common.CombineVolumeId(configuredZone, name), // TODO: Are there any attributes we need to add. These get sent to ControllerPublishVolume Attributes: nil, + AccessibleTopology: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: configuredZone}, + }, + }, }, } @@ -117,7 +138,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre return createResp, nil } - sizeGb := utils.BytesToGb(capBytes) + sizeGb := common.BytesToGb(capBytes) if sizeGb < MinimumDiskSizeInGb { sizeGb = MinimumDiskSizeInGb } @@ -177,7 +198,7 @@ func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, req *csi.Del return nil, status.Error(codes.InvalidArgument, "DeleteVolume Volume ID must be provided") } - zone, name, err := utils.SplitZoneNameId(volumeID) + zone, name, err := common.SplitZoneNameId(volumeID) if err != nil { // Cannot find volume associated with this ID because can't even get the name or zone // This is a success according to the spec @@ -222,9 +243,9 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r return nil, status.Error(codes.InvalidArgument, "ControllerPublishVolume Volume capability must be provided") } - volumeZone, volumeName, err := utils.SplitZoneNameId(volumeID) + volumeZone, volumeName, err := common.SplitZoneNameId(volumeID) if err != nil { - return nil, err + return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volumeID, err)) } // TODO: Check volume capability matches @@ -240,7 +261,10 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r } instance, err := gceCS.CloudProvider.GetInstanceOrError(ctx, volumeZone, nodeID) if err != nil { - return nil, err + if gce.IsGCEError(err, "notFound") { + return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find instance %v: %v", nodeID, err)) + } + return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get instance error: %v", err)) } readWrite := "READ_WRITE" @@ -302,7 +326,7 @@ func (gceCS *GCEControllerServer) ControllerUnpublishVolume(ctx context.Context, return nil, status.Error(codes.InvalidArgument, "ControllerUnpublishVolume Node ID must be provided") } - volumeZone, 
-	volumeZone, volumeName, err := utils.SplitZoneNameId(volumeID)
+	volumeZone, volumeName, err := common.SplitZoneNameId(volumeID)
 	if err != nil {
 		return nil, err
 	}
@@ -341,12 +365,23 @@ func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context
 	// TODO: Factor out the volume capability functionality and use as validation in all other functions as well
 	glog.V(5).Infof("Using default ValidateVolumeCapabilities")
 	// Validate Arguments
+	if req.GetVolumeCapabilities() == nil || len(req.GetVolumeCapabilities()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "ValidateVolumeCapabilities Volume Capabilities must be provided")
+	}
 	volumeID := req.GetVolumeId()
 	if len(volumeID) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "ControllerUnpublishVolume Volume ID must be provided")
+		return nil, status.Error(codes.InvalidArgument, "ValidateVolumeCapabilities Volume ID must be provided")
 	}
-	if req.GetVolumeCapabilities() == nil || len(req.GetVolumeCapabilities()) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "ControllerUnpublishVolume Volume Capabilities must be provided")
+	z, n, err := common.SplitZoneNameId(volumeID)
+	if err != nil {
+		return nil, status.Error(codes.NotFound, fmt.Sprintf("Volume ID is of improper format, got %v", volumeID))
+	}
+	_, err = gceCS.CloudProvider.GetDiskOrError(ctx, z, n)
+	if err != nil {
+		if gce.IsGCEError(err, "notFound") {
+			return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find disk %v: %v", n, err))
+		}
+		return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get disk error: %v", err))
 	}
 
 	for _, c := range req.GetVolumeCapabilities() {
@@ -365,6 +400,29 @@ func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context
 		// TODO: Ignoring mount & block types for now.
 	}
 
+	for _, top := range req.GetAccessibleTopology() {
+		for k, v := range top.GetSegments() {
+			switch k {
+			case common.TopologyKeyZone:
+				// take the zone from v and see if it matches with zone
+				if v == z {
+					// Accessible zone matches with storage zone
+					return &csi.ValidateVolumeCapabilitiesResponse{
+						Supported: true,
+					}, nil
+				} else {
+					// Accessible zone does not match
+					return &csi.ValidateVolumeCapabilitiesResponse{
+						Supported: false,
+						Message:   fmt.Sprintf("Volume %s is not accessible from topology %s:%s", volumeID, k, v),
+					}, nil
+				}
+			default:
+				return nil, status.Error(codes.InvalidArgument, "ValidateVolumeCapabilities unknown topology segment key")
+			}
+		}
+	}
+
 	return &csi.ValidateVolumeCapabilitiesResponse{
 		Supported: true,
 	}, nil
@@ -458,3 +516,52 @@ func diskIsAttachedAndCompatible(volume *compute.Disk, instance *compute.Instanc
 	}
 	return false, nil
 }
+
+func pickTopology(top *csi.TopologyRequirement) (string, error) {
+	reqTop := top.GetRequisite()
+	prefTop := top.GetPreferred()
+
+	// Pick the preferred topology in order
+	if len(prefTop) != 0 {
+		if prefTop[0].GetSegments() == nil {
+			return "", fmt.Errorf("preferred topologies specified but no segments")
+		}
+
+		// GCE PD cloud provider Create has no restrictions so just create in top preferred zone
+		zone, err := getZoneFromSegment(prefTop[0].GetSegments())
+		if err != nil {
+			return "", fmt.Errorf("could not get zone from preferred topology: %v", err)
+		}
+		return zone, nil
+	} else if len(reqTop) != 0 {
+		r := rand.Intn(len(reqTop))
+		if reqTop[r].GetSegments() == nil {
+			return "", fmt.Errorf("requisite topologies specified but no segments in requisite topology %v", r)
+		}
+
+		zone, err := getZoneFromSegment(reqTop[r].GetSegments())
+		if err != nil {
+			return "", fmt.Errorf("could not get zone from requisite topology: %v", err)
+		}
+		return zone, nil
+	} else {
+		return "", fmt.Errorf("accessibility requirements specified but no requisite or preferred topologies")
+	}
+
+}
+
+func getZoneFromSegment(seg map[string]string) (string, error) {
+	var zone string
+	for k, v := range seg {
+		switch k {
+		case common.TopologyKeyZone:
+			zone = v
+		default:
+			return "", fmt.Errorf("topology segment has unknown key %v", k)
+		}
+	}
+	if len(zone) == 0 {
+		return "", fmt.Errorf("topology specified but could not find zone in segment: %v", seg)
+	}
+	return zone, nil
+}
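The selection order in pickTopology is the behavioral core of this change: the first preferred topology always wins, and only when no preferred entries exist does the driver fall back to a random requisite zone (which is why cmd/main.go now seeds math/rand). A minimal in-package sketch of that contract, with illustrative zone names:

func TestPickTopologyPrefersPreferred(t *testing.T) {
	top := &csi.TopologyRequirement{
		Requisite: []*csi.Topology{
			{Segments: map[string]string{common.TopologyKeyZone: "zone-a"}},
		},
		Preferred: []*csi.Topology{
			{Segments: map[string]string{common.TopologyKeyZone: "zone-b"}},
		},
	}
	zone, err := pickTopology(top)
	if err != nil {
		t.Fatalf("pickTopology failed: %v", err)
	}
	// Preferred entries win over requisite ones, in order.
	if zone != "zone-b" {
		t.Fatalf("expected zone-b, got %v", zone)
	}
}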
diff --git a/pkg/gce-pd-csi-driver/controller_test.go b/pkg/gce-pd-csi-driver/controller_test.go
index d9c103ab0..b7c1fa41e 100644
--- a/pkg/gce-pd-csi-driver/controller_test.go
+++ b/pkg/gce-pd-csi-driver/controller_test.go
@@ -15,6 +15,7 @@ limitations under the License.
 package gceGCEDriver
 
 import (
+	"reflect"
 	"testing"
 
 	"golang.org/x/net/context"
@@ -23,8 +24,9 @@ import (
 	"google.golang.org/grpc/status"
 
 	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
-	gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider"
-	utils "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/utils"
+	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
+	gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute"
+	metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
 )
 
 const (
@@ -35,9 +37,9 @@ const (
 	testVolumeId = zone + "/" + "test-vol"
 )
 
-func TestCreateVolumeArguments(t *testing.T) {
+var (
 	// Define "normal" parameters
-	stdVolCap := []*csi.VolumeCapability{
+	stdVolCap = []*csi.VolumeCapability{
 		{
 			AccessType: &csi.VolumeCapability_Mount{
 				Mount: &csi.VolumeCapability_MountVolume{},
@@ -47,13 +49,21 @@ func TestCreateVolumeArguments(t *testing.T) {
 			},
 		},
 	}
-	stdCapRange := &csi.CapacityRange{
-		RequiredBytes: utils.GbToBytes(20),
+	stdCapRange = &csi.CapacityRange{
+		RequiredBytes: common.GbToBytes(20),
+	}
+	stdParams = map[string]string{
+		common.ParameterKeyZone: zone,
+		common.ParameterKeyType: "test-type",
 	}
-	stdParams := map[string]string{
-		"zone": zone,
-		"type": "test-type",
+	stdTopology = []*csi.Topology{
+		{
+			Segments: map[string]string{common.TopologyKeyZone: zone},
+		},
 	}
+)
+
+func TestCreateVolumeArguments(t *testing.T) {
 
 	// Define test cases
 	testCases := []struct {
@@ -71,9 +81,10 @@ func TestCreateVolumeArguments(t *testing.T) {
 				Parameters:         stdParams,
 			},
 			expVol: &csi.Volume{
-				CapacityBytes: utils.GbToBytes(20),
-				Id:            testVolumeId,
-				Attributes:    nil,
+				CapacityBytes:      common.GbToBytes(20),
+				Id:                 testVolumeId,
+				Attributes:         nil,
+				AccessibleTopology: stdTopology,
 			},
 		},
 		{
@@ -94,9 +105,10 @@ func TestCreateVolumeArguments(t *testing.T) {
 				Parameters:         stdParams,
 			},
 			expVol: &csi.Volume{
-				CapacityBytes: MinimumVolumeSizeInBytes,
-				Id:            testVolumeId,
-				Attributes:    nil,
+				CapacityBytes:      MinimumVolumeSizeInBytes,
+				Id:                 testVolumeId,
+				Attributes:         nil,
+				AccessibleTopology: stdTopology,
 			},
 		},
 		{
@@ -116,9 +128,10 @@ func TestCreateVolumeArguments(t *testing.T) {
 				VolumeCapabilities: stdVolCap,
 			},
 			expVol: &csi.Volume{
-				CapacityBytes: utils.GbToBytes(20),
-				Id:            testVolumeId,
-				Attributes:    nil,
+				CapacityBytes:      common.GbToBytes(20),
+				Id:                 testVolumeId,
+				Attributes:         nil,
+				AccessibleTopology: stdTopology,
 			},
 		},
 		{
@@ -131,11 +144,132 @@ func TestCreateVolumeArguments(t *testing.T) {
 				ControllerCreateSecrets: map[string]string{"key1": "this is a random", "crypto": "secret"},
 			},
 			expVol: &csi.Volume{
-				CapacityBytes: utils.GbToBytes(20),
-				Id:            testVolumeId,
+				CapacityBytes:      common.GbToBytes(20),
+				Id:                 testVolumeId,
+				Attributes:         nil,
+				AccessibleTopology: stdTopology,
+			},
+		},
+		{
+			name: "success with topology",
+			req: &csi.CreateVolumeRequest{
+				Name:               "test-vol",
+				CapacityRange:      stdCapRange,
+				VolumeCapabilities: stdVolCap,
+				Parameters:         map[string]string{"type": "test-type"},
+				AccessibilityRequirements: &csi.TopologyRequirement{
+					Requisite: []*csi.Topology{
+						{
+							Segments: map[string]string{common.TopologyKeyZone: "topology-zone"},
+						},
+					},
+				},
+			},
+			expVol: &csi.Volume{
+				CapacityBytes: common.GbToBytes(20),
+				Id:            "topology-zone/test-vol",
 				Attributes:    nil,
+				AccessibleTopology: []*csi.Topology{
+					{
+						Segments: map[string]string{common.TopologyKeyZone: "topology-zone"},
+					},
+				},
+			},
+		},
+		{
first preferred topology", + req: &csi.CreateVolumeRequest{ + Name: "test-vol", + CapacityRange: stdCapRange, + VolumeCapabilities: stdVolCap, + Parameters: map[string]string{"type": "test-type"}, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "topology-zone3"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "topology-zone1"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "topology-zone2"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "topology-zone2"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "topology-zone3"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "topology-zone1"}, + }, + }, + }, + }, + expVol: &csi.Volume{ + CapacityBytes: common.GbToBytes(20), + Id: "topology-zone2/test-vol", + Attributes: nil, + AccessibleTopology: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "topology-zone2"}, + }, + }, }, }, + { + name: "fail with zone specified in both topology and params", + req: &csi.CreateVolumeRequest{ + Name: "test-vol", + CapacityRange: stdCapRange, + VolumeCapabilities: stdVolCap, + Parameters: stdParams, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "topology-zone"}, + }, + }, + }, + }, + expErrCode: codes.InvalidArgument, + }, + { + name: "fail with extra topology", + req: &csi.CreateVolumeRequest{ + Name: "test-vol", + CapacityRange: stdCapRange, + VolumeCapabilities: stdVolCap, + Parameters: stdParams, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{"ooblezoners": "topology-zone", common.TopologyKeyZone: "top-zone"}, + }, + }, + }, + }, + expErrCode: codes.InvalidArgument, + }, + { + name: "fail with missing topology zone", + req: &csi.CreateVolumeRequest{ + Name: "test-vol", + CapacityRange: stdCapRange, + VolumeCapabilities: stdVolCap, + Parameters: stdParams, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{}, + }, + }, + }, + }, + expErrCode: codes.InvalidArgument, + }, } // Run test cases @@ -147,7 +281,7 @@ func TestCreateVolumeArguments(t *testing.T) { if err != nil { t.Fatalf("Failed to create fake cloud provider: %v", err) } - err = gceDriver.SetupGCEDriver(fakeCloudProvider, nil, nil, driver, node, "vendor-version") + err = gceDriver.SetupGCEDriver(fakeCloudProvider, nil, nil, metadataservice.NewFakeService(), driver, node, "vendor-version") if err != nil { t.Fatalf("Failed to setup GCE Driver: %v", err) } @@ -176,23 +310,60 @@ func TestCreateVolumeArguments(t *testing.T) { t.Fatalf("Expected volume %v, got nil volume", tc.expVol) } - if vol.GetCapacityBytes() != tc.expVol.GetCapacityBytes() { - t.Fatalf("Expected volume capacity bytes: %v, got: %v", vol.GetCapacityBytes(), tc.expVol.GetCapacityBytes()) + if !reflect.DeepEqual(vol, tc.expVol) { + t.Fatalf("Expected volume: %#v\nTopology %#v\n\n to equal volume: %#v\nTopology %#v\n\n", + vol, vol.GetAccessibleTopology()[0], tc.expVol, tc.expVol.GetAccessibleTopology()[0]) } + } +} - if vol.GetId() != tc.expVol.GetId() { - t.Fatalf("Expected volume id: %v, got: %v", vol.GetId(), tc.expVol.GetId()) - } +func TestCreateVolumeRandomRequisiteTopology(t *testing.T) { + req := &csi.CreateVolumeRequest{ + Name: "test-vol", + CapacityRange: 
+		CapacityRange:      stdCapRange,
+		VolumeCapabilities: stdVolCap,
+		Parameters:         map[string]string{"type": "test-type"},
+		AccessibilityRequirements: &csi.TopologyRequirement{
+			Requisite: []*csi.Topology{
+				{
+					Segments: map[string]string{common.TopologyKeyZone: "topology-zone3"},
+				},
+				{
+					Segments: map[string]string{common.TopologyKeyZone: "topology-zone1"},
+				},
+				{
+					Segments: map[string]string{common.TopologyKeyZone: "topology-zone2"},
+				},
+			},
+		},
+	}
 
-		for akey, aval := range tc.expVol.GetAttributes() {
-			if gotVal, ok := vol.GetAttributes()[akey]; !ok || gotVal != aval {
-				t.Fatalf("Expected volume attribute for key %v: %v, got: %v", akey, aval, gotVal)
-			}
+	gceDriver := GetGCEDriver()
+	fakeCloudProvider, err := gce.FakeCreateCloudProvider(project, zone)
+	if err != nil {
+		t.Fatalf("Failed to create fake cloud provider: %v", err)
+	}
+	err = gceDriver.SetupGCEDriver(fakeCloudProvider, nil, nil, metadataservice.NewFakeService(), driver, node, "vendor-version")
+	if err != nil {
+		t.Fatalf("Failed to setup GCE Driver: %v", err)
+	}
+
+	tZones := map[string]bool{}
+	// Start Test
+	for i := 0; i < 50; i++ {
+		resp, err := gceDriver.cs.CreateVolume(context.TODO(), req)
+		if err != nil {
+			t.Fatalf("CreateVolume did not expect error, but got %v", err)
 		}
-		if tc.expVol.GetAttributes() == nil && vol.GetAttributes() != nil {
-			t.Fatalf("Expected volume attributes to be nil, got: %#v", vol.GetAttributes())
+		tZone, ok := resp.GetVolume().GetAccessibleTopology()[0].GetSegments()[common.TopologyKeyZone]
+		if !ok {
+			t.Fatalf("Could not find topology zone in response")
 		}
-
+		tZones[tZone] = true
+	}
+	// We expect that we should have picked all 3 topology zones here
+	if len(tZones) != 3 {
+		t.Fatalf("Expected all 3 topology zones to be rotated through, got only: %v", tZones)
 	}
 }
 
@@ -293,16 +464,16 @@ func TestGetRequestCapacity(t *testing.T) {
 		{
 			name: "success: fully specified both above min",
 			capRange: &csi.CapacityRange{
-				RequiredBytes: utils.GbToBytes(20),
-				LimitBytes:    utils.GbToBytes(50),
+				RequiredBytes: common.GbToBytes(20),
+				LimitBytes:    common.GbToBytes(50),
 			},
-			expCap: utils.GbToBytes(20),
+			expCap: common.GbToBytes(20),
 		},
 		{
 			name: "success: fully specified required below min",
 			capRange: &csi.CapacityRange{
 				RequiredBytes: MinimumVolumeSizeInBytes - 1,
-				LimitBytes:    utils.GbToBytes(50),
+				LimitBytes:    common.GbToBytes(50),
 			},
 			expCap: MinimumVolumeSizeInBytes,
 		},
@@ -317,8 +488,8 @@ func TestGetRequestCapacity(t *testing.T) {
 		{
 			name: "fail: limit less than required",
 			capRange: &csi.CapacityRange{
-				RequiredBytes: utils.GbToBytes(50),
-				LimitBytes:    utils.GbToBytes(20),
+				RequiredBytes: common.GbToBytes(50),
+				LimitBytes:    common.GbToBytes(20),
 			},
 			expErr: true,
 		},
diff --git a/pkg/gce-pd-csi-driver/gce-pd-driver.go b/pkg/gce-pd-csi-driver/gce-pd-driver.go
index 6c96d05a7..cebc70a3f 100644
--- a/pkg/gce-pd-csi-driver/gce-pd-driver.go
+++ b/pkg/gce-pd-csi-driver/gce-pd-driver.go
@@ -22,7 +22,8 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"k8s.io/kubernetes/pkg/util/mount"
-	gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider"
+	gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute"
+	metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
 	mountmanager "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/mount-manager"
 )
 
@@ -45,7 +46,7 @@ func GetGCEDriver() *GCEDriver {
 }
 
 func (gceDriver *GCEDriver) SetupGCEDriver(cloudProvider gce.GCECompute, mounter *mount.SafeFormatAndMount,
-	deviceUtils mountmanager.DeviceUtils, name, nodeID, vendorVersion string) error {
+	deviceUtils mountmanager.DeviceUtils, meta metadataservice.MetadataService, name, nodeID, vendorVersion string) error {
 	if name == "" {
 		return fmt.Errorf("Driver name missing")
 	}
@@ -75,7 +76,7 @@ func (gceDriver *GCEDriver) SetupGCEDriver(cloudProvider gce.GCECompute, mounter
 
 	// Set up RPC Servers
 	gceDriver.ids = NewIdentityServer(gceDriver)
-	gceDriver.ns = NewNodeServer(gceDriver, mounter, deviceUtils)
+	gceDriver.ns = NewNodeServer(gceDriver, mounter, deviceUtils, meta)
 	gceDriver.cs = NewControllerServer(gceDriver, cloudProvider)
 
 	return nil
@@ -131,11 +132,12 @@ func NewIdentityServer(gceDriver *GCEDriver) *GCEIdentityServer {
 	}
 }
 
-func NewNodeServer(gceDriver *GCEDriver, mounter *mount.SafeFormatAndMount, deviceUtils mountmanager.DeviceUtils) *GCENodeServer {
+func NewNodeServer(gceDriver *GCEDriver, mounter *mount.SafeFormatAndMount, deviceUtils mountmanager.DeviceUtils, meta metadataservice.MetadataService) *GCENodeServer {
 	return &GCENodeServer{
-		Driver:      gceDriver,
-		Mounter:     mounter,
-		DeviceUtils: deviceUtils,
+		Driver:          gceDriver,
+		Mounter:         mounter,
+		DeviceUtils:     deviceUtils,
+		MetadataService: meta,
 	}
 }
 
diff --git a/pkg/gce-pd-csi-driver/gce-pd-driver_test.go b/pkg/gce-pd-csi-driver/gce-pd-driver_test.go
index 6a8be294b..e4d5e562d 100644
--- a/pkg/gce-pd-csi-driver/gce-pd-driver_test.go
+++ b/pkg/gce-pd-csi-driver/gce-pd-driver_test.go
@@ -17,7 +17,8 @@ package gceGCEDriver
 import (
 	"testing"
 
-	gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider"
+	gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute"
+	metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
 )
 
 func initGCEDriver(t *testing.T) *GCEDriver {
@@ -27,7 +28,7 @@ func initGCEDriver(t *testing.T) *GCEDriver {
 	if err != nil {
 		t.Fatalf("Failed to create fake cloud provider: %v", err)
 	}
-	err = gceDriver.SetupGCEDriver(fakeCloudProvider, nil, nil, driver, node, vendorVersion)
+	err = gceDriver.SetupGCEDriver(fakeCloudProvider, nil, nil, metadataservice.NewFakeService(), driver, node, vendorVersion)
 	if err != nil {
 		t.Fatalf("Failed to setup GCE Driver: %v", err)
 	}
diff --git a/pkg/gce-pd-csi-driver/identity.go b/pkg/gce-pd-csi-driver/identity.go
index 0288818ad..34cd841f0 100644
--- a/pkg/gce-pd-csi-driver/identity.go
+++ b/pkg/gce-pd-csi-driver/identity.go
@@ -51,6 +51,13 @@ func (gceIdentity *GCEIdentityServer) GetPluginCapabilities(ctx context.Context,
 					},
 				},
 			},
+			{
+				Type: &csi.PluginCapability_Service_{
+					Service: &csi.PluginCapability_Service{
+						Type: csi.PluginCapability_Service_ACCESSIBILITY_CONSTRAINTS,
+					},
+				},
+			},
 		},
 	}, nil
 }
diff --git a/pkg/gce-pd-csi-driver/identity_test.go b/pkg/gce-pd-csi-driver/identity_test.go
index 4773a661a..2c60c4c87 100644
--- a/pkg/gce-pd-csi-driver/identity_test.go
+++ b/pkg/gce-pd-csi-driver/identity_test.go
@@ -19,12 +19,13 @@ import (
 
 	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
 	"golang.org/x/net/context"
+	metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
 )
 
 func TestGetPluginInfo(t *testing.T) {
 	vendorVersion := "test-vendor"
 	gceDriver := GetGCEDriver()
-	err := gceDriver.SetupGCEDriver(nil, nil, nil, driver, node, vendorVersion)
+	err := gceDriver.SetupGCEDriver(nil, nil, nil, metadataservice.NewFakeService(), driver, node, vendorVersion)
 	if err != nil {
GCE Driver: %v", err) } @@ -46,7 +47,7 @@ func TestGetPluginInfo(t *testing.T) { func TestGetPluginCapabilities(t *testing.T) { gceDriver := GetGCEDriver() - err := gceDriver.SetupGCEDriver(nil, nil, nil, driver, node, "test-vendor") + err := gceDriver.SetupGCEDriver(nil, nil, nil, metadataservice.NewFakeService(), driver, node, "test-vendor") if err != nil { t.Fatalf("Failed to setup GCE Driver: %v", err) } @@ -59,6 +60,7 @@ func TestGetPluginCapabilities(t *testing.T) { for _, capability := range resp.GetCapabilities() { switch capability.GetService().GetType() { case csi.PluginCapability_Service_CONTROLLER_SERVICE: + case csi.PluginCapability_Service_ACCESSIBILITY_CONSTRAINTS: default: t.Fatalf("Unknown capability: %v", capability.GetService().GetType()) } @@ -67,7 +69,7 @@ func TestGetPluginCapabilities(t *testing.T) { func TestProbe(t *testing.T) { gceDriver := GetGCEDriver() - err := gceDriver.SetupGCEDriver(nil, nil, nil, driver, node, "test-vendor") + err := gceDriver.SetupGCEDriver(nil, nil, nil, metadataservice.NewFakeService(), driver, node, "test-vendor") if err != nil { t.Fatalf("Failed to setup GCE Driver: %v", err) } diff --git a/pkg/gce-pd-csi-driver/node.go b/pkg/gce-pd-csi-driver/node.go index bde6338ef..da8c4ab00 100644 --- a/pkg/gce-pd-csi-driver/node.go +++ b/pkg/gce-pd-csi-driver/node.go @@ -25,14 +25,16 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "k8s.io/kubernetes/pkg/util/mount" + "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common" + metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata" mountmanager "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/mount-manager" - utils "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/utils" ) type GCENodeServer struct { - Driver *GCEDriver - Mounter *mount.SafeFormatAndMount - DeviceUtils mountmanager.DeviceUtils + Driver *GCEDriver + Mounter *mount.SafeFormatAndMount + DeviceUtils mountmanager.DeviceUtils + MetadataService metadataservice.MetadataService // TODO: Only lock mutually exclusive calls and make locking more fine grained mux sync.Mutex } @@ -159,7 +161,7 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage return nil, status.Error(codes.InvalidArgument, "NodeStageVolume Volume Capability must be provided") } - _, volumeName, err := utils.SplitZoneNameId(volumeID) + _, volumeName, err := common.SplitZoneNameId(volumeID) if err != nil { return nil, err } @@ -270,12 +272,17 @@ func (ns *GCENodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeG func (ns *GCENodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { glog.Infof("NodeGetInfo called with req: %#v", req) + top := &csi.Topology{ + Segments: map[string]string{common.TopologyKeyZone: ns.MetadataService.GetZone()}, + } + resp := &csi.NodeGetInfoResponse{ NodeId: ns.Driver.nodeID, // TODO: Set MaxVolumesPerNode based on Node Type // Default of 0 means that CO Decides how many nodes can be published + // Can get from metadata server "machine-type" MaxVolumesPerNode: 0, - AccessibleTopology: nil, + AccessibleTopology: top, } return resp, nil } diff --git a/test/binremote/instance.go b/test/binremote/instance.go index 679e4acb6..95e301711 100644 --- a/test/binremote/instance.go +++ b/test/binremote/instance.go @@ -46,19 +46,25 @@ type InstanceInfo struct { zone string name string + // External IP is filled in after instance creation + // TODO: Maybe combine create instance 
+	// TODO: Maybe combine create instance and create instance info so all fields are set up at the same time...
 	externalIP string
 
 	computeService *compute.Service
 }
 
-func CreateInstanceInfo(project, zone, name string) (*InstanceInfo, error) {
-	cs, err := getComputeClient()
-	if err != nil {
-		return nil, err
-	}
+func (i *InstanceInfo) GetIdentity() (string, string, string) {
+	return i.project, i.zone, i.name
+}
+
+func (i *InstanceInfo) GetName() string {
+	return i.name
+}
+
+func CreateInstanceInfo(project, instanceZone, name string, cs *compute.Service) (*InstanceInfo, error) {
 	return &InstanceInfo{
 		project: project,
-		zone:    zone,
+		zone:    instanceZone,
 		name:    name,
 
 		computeService: cs,
@@ -66,7 +72,7 @@ func CreateInstanceInfo(project, zone, name string) (*InstanceInfo, error) {
 }
 
 // Provision a gce instance using image
-func (i *InstanceInfo) CreateInstance(serviceAccount string) error {
+func (i *InstanceInfo) CreateOrGetInstance(serviceAccount string) error {
 	var err error
 	var instance *compute.Instance
 	glog.V(4).Infof("Creating instance: %v", i.name)
@@ -236,7 +242,7 @@ func (i *InstanceInfo) createDefaultFirewallRule() error {
 	return nil
 }
 
-func getComputeClient() (*compute.Service, error) {
+func GetComputeClient() (*compute.Service, error) {
 	const retries = 10
 	const backoff = time.Second * 6
diff --git a/test/e2e/tests/multi_zone_e2e_test.go b/test/e2e/tests/multi_zone_e2e_test.go
new file mode 100644
index 000000000..7bd41dc0d
--- /dev/null
+++ b/test/e2e/tests/multi_zone_e2e_test.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tests
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
+	testutils "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/e2e/utils"
+)
+
+var _ = Describe("GCE PD CSI Driver Multi-Zone", func() {
+	BeforeEach(func() {
+		Expect(len(testInstances)).To(BeNumerically(">", 1))
+		// TODO: Check whether the instances are in different zones???
+		// I think there should be a better way of guaranteeing this. Like a map from zone to instance for testInstances (?)
+	})
+
+	It("Should get reasonable topology from nodes with NodeGetInfo", func() {
+		for _, instance := range testInstances {
+			testContext, err := testutils.SetupNewDriverAndClient(instance)
+			Expect(err).To(BeNil(), "Set up new Driver and Client failed with error")
+
+			resp, err := testContext.Client.NodeGetInfo()
+			Expect(err).To(BeNil())
+
+			// Get Cloud Instance
+			p, z, n := testContext.Instance.GetIdentity()
+			cloudInstance, err := computeService.Instances.Get(p, z, n).Do()
+			Expect(err).To(BeNil())
+			Expect(cloudInstance).ToNot(BeNil())
+
+			// Check topology matches
+			segments := resp.GetAccessibleTopology().GetSegments()
+			Expect(segments).ToNot(BeNil())
+
+			Expect(segments[common.TopologyKeyZone]).To(Equal(z))
+			Expect(len(segments)).To(Equal(1))
+		}
+
+	})
+
+})
diff --git a/test/e2e/tests/setup_e2e_test.go b/test/e2e/tests/setup_e2e_test.go
index 53bc7441b..b9440c268 100644
--- a/test/e2e/tests/setup_e2e_test.go
+++ b/test/e2e/tests/setup_e2e_test.go
@@ -16,16 +16,26 @@ package tests
 
 import (
 	"flag"
+	"fmt"
+	"math/rand"
 	"testing"
+	"time"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+	compute "google.golang.org/api/compute/v1"
+	remote "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/binremote"
+	testutils "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/e2e/utils"
 )
 
 var (
-	project        = flag.String("project", "", "Project to run tests in")
-	serviceAccount = flag.String("service-account", "", "Service account to bring up instance with")
-	runInProw      = flag.Bool("run-in-prow", false, "If true, use a Boskos loaned project and special CI service accounts and ssh keys")
+	project         = flag.String("project", "", "Project to run tests in")
+	serviceAccount  = flag.String("service-account", "", "Service account to bring up instance with")
+	runInProw       = flag.Bool("run-in-prow", false, "If true, use a Boskos loaned project and special CI service accounts and ssh keys")
+	deleteInstances = flag.Bool("delete-instances", false, "Delete the instances after tests run")
+
+	testInstances  = []*remote.InstanceInfo{}
+	computeService *compute.Service
 )
 
 func TestE2E(t *testing.T) {
@@ -33,3 +43,45 @@ func TestE2E(t *testing.T) {
 	RegisterFailHandler(Fail)
 	RunSpecs(t, "Google Compute Engine Persistent Disk Container Storage Interface Driver Tests")
 }
+
+var _ = BeforeSuite(func() {
+	var err error
+	zones := []string{"us-central1-c", "us-central1-b"}
+
+	rand.Seed(time.Now().UnixNano())
+
+	computeService, err = remote.GetComputeClient()
+	Expect(err).To(BeNil())
+
+	for _, zone := range zones {
+		nodeID := fmt.Sprintf("gce-pd-csi-e2e-%s", zone)
+
+		if *runInProw {
+			*project, *serviceAccount = testutils.SetupProwConfig()
+		}
+
+		Expect(*project).ToNot(BeEmpty(), "Project should not be empty")
+		Expect(*serviceAccount).ToNot(BeEmpty(), "Service account should not be empty")
+
+		i, err := testutils.SetupInstance(*project, zone, nodeID, *serviceAccount, computeService)
+		Expect(err).To(BeNil())
+
+		testInstances = append(testInstances, i)
+	}
+
+})
+
+var _ = AfterSuite(func() {
+	/*
+		err := node.client.CloseConn()
+		if err != nil {
+			Logf("Failed to close the client")
+		} else {
+			Logf("Closed the client")
+	*/
+	for _, i := range testInstances {
+		if *deleteInstances {
+			i.DeleteInstance()
+		}
+	}
+})
diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go
index b46ecffaf..e9cf1ecfe 100644
--- a/test/e2e/tests/single_zone_e2e_test.go
+++ b/test/e2e/tests/single_zone_e2e_test.go
@@ -20,15 +20,16 @@ import (
 	"strings"
"k8s.io/apimachinery/pkg/util/uuid" - remote "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/binremote" - "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/e2e/utils" + "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common" + gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute" + testutils "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/e2e/utils" + csi "github.com/container-storage-interface/spec/lib/go/csi/v0" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) const ( - network = "unix" testNamePrefix = "gcepd-csi-e2e-" defaultSizeGb int64 = 5 @@ -37,63 +38,30 @@ const ( ssdDiskType = "pd-ssd" ) -var ( - client *utils.CsiClient - instance *remote.InstanceInfo - //gceCloud *gce.CloudProvider - nodeID string -) - -var _ = BeforeSuite(func() { - var err error - // TODO(dyzz): better defaults - nodeID = "gce-pd-csi-e2e-us-central1-c" - port := "2000" - if *runInProw { - *project, *serviceAccount = utils.SetupProwConfig() - } - - Expect(*project).ToNot(BeEmpty(), "Project should not be empty") - Expect(*serviceAccount).ToNot(BeEmpty(), "Service account should not be empty") - - instance, err = utils.SetupInstanceAndDriver(*project, "us-central1-c", nodeID, port, *serviceAccount) - Expect(err).To(BeNil()) - - client = utils.CreateCSIClient(fmt.Sprintf("localhost:%s", port)) -}) - -var _ = AfterSuite(func() { - // Close the client - err := client.CloseConn() - if err != nil { - Logf("Failed to close the client") - } else { - Logf("Closed the client") - } - - // instance.DeleteInstance() -}) - var _ = Describe("GCE PD CSI Driver", func() { - BeforeEach(func() { - err := client.AssertCSIConnection() - Expect(err).To(BeNil(), "Failed to assert csi client connection: %v", err) - }) - It("Should create->attach->stage->mount volume and check if it is writable, then unmount->unstage->detach->delete and check disk is deleted", func() { + // Create new driver and client + // TODO: Should probably actual have some object that includes both client and instance so we can relate the two?? 
+		Expect(testInstances).NotTo(BeEmpty())
+		testContext, err := testutils.SetupNewDriverAndClient(testInstances[0])
+		Expect(err).To(BeNil(), "Set up new Driver and Client failed with error")
+		p, z, _ := testContext.Instance.GetIdentity()
+		client := testContext.Client
+		instance := testContext.Instance
+
 		// Create Disk
 		volName := testNamePrefix + string(uuid.NewUUID())
-		volId, err := client.CreateVolume(volName)
+		volId, err := client.CreateVolume(volName, nil)
 		Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err)
 
 		// TODO: Validate Disk Created
-		/*cloudDisk, err := gceCloud.GetDiskOrError(context.Background(), gceCloud.GetZone(), volName)
+		cloudDisk, err := computeService.Disks.Get(p, z, volName).Do()
 		Expect(err).To(BeNil(), "Could not get disk from cloud directly")
 		Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType))
 		Expect(cloudDisk.Status).To(Equal(readyState))
 		Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb))
-		Expect(cloudDisk.Name).To(Equal(volName))*/
+		Expect(cloudDisk.Name).To(Equal(volName))
 
 		defer func() {
 			// Delete Disk
@@ -101,19 +69,17 @@ var _ = Describe("GCE PD CSI Driver", func() {
 			Expect(err).To(BeNil(), "DeleteVolume failed")
 
 			// TODO: Validate Disk Deleted
-			/*_, err = gceCloud.GetDiskOrError(context.Background(), gceCloud.GetZone(), volName)
-			serverError, ok := status.FromError(err)
-			Expect(ok).To(BeTrue())
-			Expect(serverError.Code()).To(Equal(codes.NotFound))*/
+			_, err = computeService.Disks.Get(p, z, volName).Do()
+			Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
 		}()
 
 		// Attach Disk
-		err = client.ControllerPublishVolume(volId, nodeID)
+		err = client.ControllerPublishVolume(volId, instance.GetName())
 		Expect(err).To(BeNil(), "ControllerPublishVolume failed with error")
 
 		defer func() {
 			// Detach Disk
-			err := client.ControllerUnpublishVolume(volId, nodeID)
+			err := client.ControllerUnpublishVolume(volId, instance.GetName())
 			Expect(err).To(BeNil(), "ControllerUnpublishVolume failed with error")
 		}()
 
@@ -126,7 +92,7 @@ var _ = Describe("GCE PD CSI Driver", func() {
 			// Unstage Disk
 			err := client.NodeUnstageVolume(volId, stageDir)
 			Expect(err).To(BeNil(), "NodeUnstageVolume failed with error")
-			err = utils.RmAll(instance, filepath.Join("/tmp/", volName))
+			err = testutils.RmAll(instance, filepath.Join("/tmp/", volName))
 			Expect(err).To(BeNil(), "Failed to remove temp directory")
 		}()
 
@@ -134,13 +100,13 @@ var _ = Describe("GCE PD CSI Driver", func() {
 		publishDir := filepath.Join("/tmp/", volName, "mount")
 		err = client.NodePublishVolume(volId, stageDir, publishDir)
 		Expect(err).To(BeNil(), "NodePublishVolume failed with error")
-		err = utils.ForceChmod(instance, filepath.Join("/tmp/", volName), "777")
+		err = testutils.ForceChmod(instance, filepath.Join("/tmp/", volName), "777")
 		Expect(err).To(BeNil(), "Chmod failed with error")
 
 		// Write a file
 		testFileContents := "test"
 		testFile := filepath.Join(publishDir, "testfile")
-		err = utils.WriteFile(instance, testFile, testFileContents)
+		err = testutils.WriteFile(instance, testFile, testFileContents)
 		Expect(err).To(BeNil(), "Failed to write file")
 
 		// Unmount Disk
@@ -151,12 +117,12 @@ var _ = Describe("GCE PD CSI Driver", func() {
 		secondPublishDir := filepath.Join("/tmp/", volName, "secondmount")
 		err = client.NodePublishVolume(volId, stageDir, secondPublishDir)
 		Expect(err).To(BeNil(), "NodePublishVolume failed with error")
-		err = utils.ForceChmod(instance, filepath.Join("/tmp/", volName), "777")
"777") Expect(err).To(BeNil(), "Chmod failed with error") // Read File secondTestFile := filepath.Join(secondPublishDir, "testfile") - readContents, err := utils.ReadFile(instance, secondTestFile) + readContents, err := testutils.ReadFile(instance, secondTestFile) Expect(err).To(BeNil(), "ReadFile failed with error") Expect(strings.TrimSpace(string(readContents))).To(Equal(testFileContents)) @@ -166,6 +132,37 @@ var _ = Describe("GCE PD CSI Driver", func() { }) + It("Should create disks in correct zones when topology is specified", func() { + /// + Expect(testInstances).NotTo(BeEmpty()) + testContext, err := testutils.SetupNewDriverAndClient(testInstances[0]) + Expect(err).To(BeNil(), "Failed to set up new driver and client") + p, _, _ := testContext.Instance.GetIdentity() + + zones := []string{"us-central1-c", "us-central1-b", "us-central1-a"} + + for _, zone := range zones { + volName := testNamePrefix + string(uuid.NewUUID()) + topReq := &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: zone}, + }, + }, + } + volID, err := testContext.Client.CreateVolume(volName, topReq) + Expect(err).To(BeNil(), "Failed to create volume") + defer func() { + err = testContext.Client.DeleteVolume(volID) + Expect(err).To(BeNil(), "Failed to delete volume") + }() + + _, err = computeService.Disks.Get(p, zone, volName).Do() + Expect(err).To(BeNil(), "Could not find disk in correct zone") + } + + }) + // Test volume already exists // Test volume with op pending diff --git a/test/e2e/utils/client_wrappers.go b/test/e2e/utils/client_wrappers.go index e7c81a175..25daeb8f7 100644 --- a/test/e2e/utils/client_wrappers.go +++ b/test/e2e/utils/client_wrappers.go @@ -87,11 +87,14 @@ func (c *CsiClient) CloseConn() error { return c.conn.Close() } -func (c *CsiClient) CreateVolume(volName string) (string, error) { +func (c *CsiClient) CreateVolume(volName string, topReq *csipb.TopologyRequirement) (string, error) { cvr := &csipb.CreateVolumeRequest{ Name: volName, VolumeCapabilities: stdVolCaps, } + if topReq != nil { + cvr.AccessibilityRequirements = topReq + } cresp, err := c.ctrlClient.CreateVolume(context.Background(), cvr) if err != nil { return "", err @@ -166,3 +169,8 @@ func (c *CsiClient) NodePublishVolume(volumeId, stageDir, publishDir string) err _, err := c.nodeClient.NodePublishVolume(context.Background(), nodePublishReq) return err } + +func (c *CsiClient) NodeGetInfo() (*csipb.NodeGetInfoResponse, error) { + resp, err := c.nodeClient.NodeGetInfo(context.Background(), &csipb.NodeGetInfoRequest{}) + return resp, err +} diff --git a/test/e2e/utils/utils.go b/test/e2e/utils/utils.go index edc539b8c..55a9c5945 100644 --- a/test/e2e/utils/utils.go +++ b/test/e2e/utils/utils.go @@ -17,6 +17,7 @@ package utils import ( "context" "fmt" + "math/rand" "os" "path" "time" @@ -24,6 +25,7 @@ import ( "github.com/golang/glog" "golang.org/x/oauth2/google" cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + compute "google.golang.org/api/compute/v1" boskosclient "k8s.io/test-infra/boskos/client" remote "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/binremote" ) @@ -36,21 +38,28 @@ const ( archiveName = "e2e_gce_pd_test.tar.gz" ) -func SetupInstanceAndDriver(instanceProject, instanceZone, instanceName, port, instanceServiceAccount string) (*remote.InstanceInfo, error) { +type TestContext struct { + Instance *remote.InstanceInfo + Client *CsiClient +} + +func SetupInstance(instanceProject, instanceZone, instanceName, 
diff --git a/test/e2e/utils/utils.go b/test/e2e/utils/utils.go
index edc539b8c..55a9c5945 100644
--- a/test/e2e/utils/utils.go
+++ b/test/e2e/utils/utils.go
@@ -17,6 +17,7 @@ package utils
 import (
 	"context"
 	"fmt"
+	"math/rand"
 	"os"
 	"path"
 	"time"
@@ -24,6 +25,7 @@ import (
 	"github.com/golang/glog"
 	"golang.org/x/oauth2/google"
 	cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
+	compute "google.golang.org/api/compute/v1"
 	boskosclient "k8s.io/test-infra/boskos/client"
 	remote "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/binremote"
 )
@@ -36,21 +38,28 @@ const (
 	archiveName = "e2e_gce_pd_test.tar.gz"
 )
 
-func SetupInstanceAndDriver(instanceProject, instanceZone, instanceName, port, instanceServiceAccount string) (*remote.InstanceInfo, error) {
+type TestContext struct {
+	Instance *remote.InstanceInfo
+	Client   *CsiClient
+}
+
+func SetupInstance(instanceProject, instanceZone, instanceName, instanceServiceAccount string, cs *compute.Service) (*remote.InstanceInfo, error) {
 	// Create the instance in the requisite zone
-	instance, err := remote.CreateInstanceInfo(instanceProject, instanceZone, instanceName)
+	instance, err := remote.CreateInstanceInfo(instanceProject, instanceZone, instanceName, cs)
 	if err != nil {
 		return nil, err
 	}
 
-	err = instance.CreateInstance(instanceServiceAccount)
-
+	err = instance.CreateOrGetInstance(instanceServiceAccount)
 	if err != nil {
 		return nil, err
 	}
+	return instance, nil
+}
 
-	// Create Driver Archive
-
+// TODO: Need a function to clean up this driver from the instance
+func SetupNewDriverAndClient(instance *remote.InstanceInfo) (*TestContext, error) {
+	port := fmt.Sprintf("%v", 1024+rand.Intn(10000))
 	goPath, ok := os.LookupEnv("GOPATH")
 	if !ok {
 		return nil, fmt.Errorf("Could not find environment variable GOPATH")
@@ -72,7 +81,7 @@ func SetupInstanceAndDriver(instanceProject, instanceZone, instanceName, port, i
 	endpoint := fmt.Sprintf("tcp://localhost:%s", port)
 	workspace := remote.NewWorkspaceDir("gce-pd-e2e-")
 	driverRunCmd := fmt.Sprintf("sh -c '/usr/bin/nohup %s/gce-pd-csi-driver --endpoint=%s --nodeid=%s > %s/prog.out 2> %s/prog.err < /dev/null &'",
-		workspace, endpoint, instanceName, workspace, workspace)
+		workspace, endpoint, instance.GetName(), workspace, workspace)
 	err = instance.UploadAndRun(archivePath, workspace, driverRunCmd)
 	if err != nil {
 		return nil, err
@@ -84,7 +93,13 @@ func SetupInstanceAndDriver(instanceProject, instanceZone, instanceName, port, i
 		return nil, fmt.Errorf("SSH Tunnel pid %v encountered error: %v", res, err)
 	}
 
-	return instance, nil
+	client := CreateCSIClient(fmt.Sprintf("localhost:%s", port))
+	err = client.AssertCSIConnection()
+	if err != nil {
+		return nil, fmt.Errorf("asserting csi connection failed with: %v", err)
+	}
+
+	return &TestContext{Instance: instance, Client: client}, nil
 }
 
 func SetupProwConfig() (project, serviceAccount string) {
diff --git a/test/run-e2e-local.sh b/test/run-e2e-local.sh
index 58cfbef09..b9ef10e0a 100755
--- a/test/run-e2e-local.sh
+++ b/test/run-e2e-local.sh
@@ -5,4 +5,4 @@
 set -o errexit
 
 readonly PKGDIR=sigs.k8s.io/gcp-compute-persistent-disk-csi-driver
-go test --v=true "${PKGDIR}/test/e2e/tests" --logtostderr --project ${PROJECT} --service-account ${IAM_NAME}
\ No newline at end of file
+ginkgo -v "test/e2e/tests" --logtostderr -- --project ${PROJECT} --service-account ${IAM_NAME}
\ No newline at end of file
diff --git a/test/run-e2e.sh b/test/run-e2e.sh
index 1c7bf6854..692b604b6 100755
--- a/test/run-e2e.sh
+++ b/test/run-e2e.sh
@@ -5,4 +5,4 @@
 set -x
 
 readonly PKGDIR=sigs.k8s.io/gcp-compute-persistent-disk-csi-driver
-go test --v=true "${PKGDIR}/test/e2e/tests" --logtostderr --run-in-prow=true
\ No newline at end of file
+go test --v=true "${PKGDIR}/test/e2e/tests" --logtostderr --run-in-prow=true --delete-instances=true
diff --git a/test/run-unit.sh b/test/run-unit.sh
index f3485a5ca..6e34d1da9 100755
--- a/test/run-unit.sh
+++ b/test/run-unit.sh
@@ -8,4 +8,4 @@
 go test -timeout 30s "${PKGDIR}/pkg/gce-pd-csi-driver"
 # The following have no unit tests yet
 #go test -timeout 30s "${PKGDIR}/pkg/mount-manager"
-#go test -timeout 30s "${PKGDIR}/pkg/gce-cloud-provider"
\ No newline at end of file
+#go test -timeout 30s "${PKGDIR}/pkg/gce-cloud-provider/compute"
\ No newline at end of file
diff --git a/test/sanity/sanity_test.go b/test/sanity/sanity_test.go
index 1fc20029a..ae766889e 100644
a/test/sanity/sanity_test.go +++ b/test/sanity/sanity_test.go @@ -19,7 +19,8 @@ import ( sanity "github.com/kubernetes-csi/csi-test/pkg/sanity" compute "google.golang.org/api/compute/v1" - gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider" + gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute" + metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata" driver "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-pd-csi-driver" mountmanager "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/mount-manager" ) @@ -46,7 +47,7 @@ func TestSanity(t *testing.T) { deviceUtils := mountmanager.NewFakeDeviceUtils() //Initialize GCE Driver - err = gceDriver.SetupGCEDriver(cloudProvider, mounter, deviceUtils, driverName, nodeID, vendorVersion) + err = gceDriver.SetupGCEDriver(cloudProvider, mounter, deviceUtils, metadataservice.NewFakeService(), driverName, nodeID, vendorVersion) if err != nil { t.Fatalf("Failed to initialize GCE CSI Driver: %v", err) } diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go index 0fb22392c..568d78b80 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go @@ -22,11 +22,12 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - context "golang.org/x/net/context" + "github.com/container-storage-interface/spec/lib/go/csi/v0" + "golang.org/x/net/context" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "strconv" ) const ( @@ -36,9 +37,9 @@ const ( DefTestVolumeSize int64 = 10 * 1024 * 1024 * 1024 ) -func TestVolumeSize() int64 { - if config.TestVolumeSize > 0 { - return config.TestVolumeSize +func TestVolumeSize(sc *SanityContext) int64 { + if sc.Config.TestVolumeSize > 0 { + return sc.Config.TestVolumeSize } return DefTestVolumeSize } @@ -48,6 +49,13 @@ func verifyVolumeInfo(v *csi.Volume) { Expect(v.GetId()).NotTo(BeEmpty()) } +func verifySnapshotInfo(snapshot *csi.Snapshot) { + Expect(snapshot).NotTo(BeNil()) + Expect(snapshot.GetId()).NotTo(BeEmpty()) + Expect(snapshot.GetSourceVolumeId()).NotTo(BeEmpty()) + Expect(snapshot.GetCreatedAt()).NotTo(BeZero()) +} + func isControllerCapabilitySupported( c csi.ControllerClient, capType csi.ControllerServiceCapability_RPC_Type, @@ -69,13 +77,13 @@ func isControllerCapabilitySupported( return false } -var _ = Describe("ControllerGetCapabilities [Controller Server]", func() { +var _ = DescribeSanity("ControllerGetCapabilities [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) }) It("should return appropriate capabilities", func() { @@ -96,6 +104,8 @@ var _ = Describe("ControllerGetCapabilities [Controller Server]", func() { case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: case csi.ControllerServiceCapability_RPC_LIST_VOLUMES: case csi.ControllerServiceCapability_RPC_GET_CAPACITY: + case csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT: + case csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS: default: Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) } @@ -103,13 +113,13 @@ var _ = Describe("ControllerGetCapabilities [Controller Server]", func() { }) }) -var _ = Describe("GetCapacity [Controller Server]", func() { +var _ = DescribeSanity("GetCapacity [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_GET_CAPACITY) { Skip("GetCapacity not supported") @@ -127,13 +137,13 @@ var _ = Describe("GetCapacity [Controller Server]", func() { }) }) -var _ = Describe("ListVolumes [Controller Server]", func() { +var _ = DescribeSanity("ListVolumes [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) { Skip("ListVolumes not supported") @@ -158,13 +168,13 @@ var _ = Describe("ListVolumes [Controller Server]", func() { // and not there when deleted. 
}) -var _ = Describe("CreateVolume [Controller Server]", func() { +var _ = DescribeSanity("CreateVolume [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { Skip("CreateVolume not supported") @@ -175,8 +185,8 @@ var _ = Describe("CreateVolume [Controller Server]", func() { req := &csi.CreateVolumeRequest{} - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } _, err := c.CreateVolume(context.Background(), req) @@ -193,8 +203,8 @@ var _ = Describe("CreateVolume [Controller Server]", func() { Name: "name", } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } _, err := c.CreateVolume(context.Background(), req) @@ -224,8 +234,8 @@ var _ = Describe("CreateVolume [Controller Server]", func() { }, } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } vol, err := c.CreateVolume(context.Background(), req) @@ -240,8 +250,8 @@ var _ = Describe("CreateVolume [Controller Server]", func() { VolumeId: vol.GetVolume().GetId(), } - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + if sc.Secrets != nil { + delReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret } _, err = c.DeleteVolume(context.Background(), delReq) @@ -266,12 +276,12 @@ var _ = Describe("CreateVolume [Controller Server]", func() { }, }, CapacityRange: &csi.CapacityRange{ - RequiredBytes: TestVolumeSize(), + RequiredBytes: TestVolumeSize(sc), }, } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } vol, err := c.CreateVolume(context.Background(), req) @@ -287,7 +297,7 @@ var _ = Describe("CreateVolume [Controller Server]", func() { Expect(vol).NotTo(BeNil()) Expect(vol.GetVolume()).NotTo(BeNil()) Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) - Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", TestVolumeSize())) + Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", TestVolumeSize(sc))) } By("cleaning up deleting the volume") @@ -295,8 +305,8 @@ var _ = Describe("CreateVolume [Controller Server]", func() { VolumeId: vol.GetVolume().GetId(), } - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + if sc.Secrets != nil { + delReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret } _, err = c.DeleteVolume(context.Background(), delReq) @@ -306,7 +316,7 @@ var _ = Describe("CreateVolume [Controller Server]", func() { By("creating a volume") name := "sanity" - size := TestVolumeSize() + size := TestVolumeSize(sc) req := &csi.CreateVolumeRequest{ Name: name, @@ -325,8 +335,8 @@ var _ = Describe("CreateVolume [Controller Server]", func() { }, } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } vol1, err := c.CreateVolume(context.Background(), req) @@ -353,8 +363,8 @@ var _ = Describe("CreateVolume [Controller Server]", func() { }, } - if secrets 
!= nil { - req2.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req2.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } vol2, err := c.CreateVolume(context.Background(), req2) @@ -371,8 +381,8 @@ var _ = Describe("CreateVolume [Controller Server]", func() { VolumeId: vol1.GetVolume().GetId(), } - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + if sc.Secrets != nil { + delReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret } _, err = c.DeleteVolume(context.Background(), delReq) @@ -382,7 +392,7 @@ var _ = Describe("CreateVolume [Controller Server]", func() { By("creating a volume") name := "sanity" - size1 := TestVolumeSize() + size1 := TestVolumeSize(sc) req := &csi.CreateVolumeRequest{ Name: name, @@ -402,8 +412,8 @@ var _ = Describe("CreateVolume [Controller Server]", func() { }, } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } vol1, err := c.CreateVolume(context.Background(), req) @@ -411,7 +421,7 @@ var _ = Describe("CreateVolume [Controller Server]", func() { Expect(vol1).NotTo(BeNil()) Expect(vol1.GetVolume()).NotTo(BeNil()) Expect(vol1.GetVolume().GetId()).NotTo(BeEmpty()) - size2 := 2 * TestVolumeSize() + size2 := 2 * TestVolumeSize(sc) req2 := &csi.CreateVolumeRequest{ Name: name, @@ -431,8 +441,8 @@ var _ = Describe("CreateVolume [Controller Server]", func() { }, } - if secrets != nil { - req2.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req2.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } _, err = c.CreateVolume(context.Background(), req2) @@ -447,8 +457,8 @@ var _ = Describe("CreateVolume [Controller Server]", func() { VolumeId: vol1.GetVolume().GetId(), } - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + if sc.Secrets != nil { + delReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret } _, err = c.DeleteVolume(context.Background(), delReq) @@ -456,13 +466,13 @@ var _ = Describe("CreateVolume [Controller Server]", func() { }) }) -var _ = Describe("DeleteVolume [Controller Server]", func() { +var _ = DescribeSanity("DeleteVolume [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { Skip("DeleteVolume not supported") @@ -473,8 +483,8 @@ var _ = Describe("DeleteVolume [Controller Server]", func() { req := &csi.DeleteVolumeRequest{} - if secrets != nil { - req.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + if sc.Secrets != nil { + req.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret } _, err := c.DeleteVolume(context.Background(), req) @@ -491,8 +501,8 @@ var _ = Describe("DeleteVolume [Controller Server]", func() { VolumeId: "reallyfakevolumeid", } - if secrets != nil { - req.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + if sc.Secrets != nil { + req.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret } _, err := c.DeleteVolume(context.Background(), req) @@ -519,8 +529,8 @@ var _ = Describe("DeleteVolume [Controller Server]", func() { }, } - if secrets != nil { - createReq.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + createReq.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } vol, err := 
c.CreateVolume(context.Background(), createReq) @@ -537,8 +547,8 @@ var _ = Describe("DeleteVolume [Controller Server]", func() { VolumeId: vol.GetVolume().GetId(), } - if secrets != nil { - req.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + if sc.Secrets != nil { + req.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret } _, err = c.DeleteVolume(context.Background(), req) @@ -546,13 +556,13 @@ var _ = Describe("DeleteVolume [Controller Server]", func() { }) }) -var _ = Describe("ValidateVolumeCapabilities [Controller Server]", func() { +var _ = DescribeSanity("ValidateVolumeCapabilities [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) }) It("should fail when no volume id is provided", func() { @@ -601,8 +611,8 @@ var _ = Describe("ValidateVolumeCapabilities [Controller Server]", func() { }, } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } vol, err := c.CreateVolume(context.Background(), req) @@ -638,24 +648,49 @@ var _ = Describe("ValidateVolumeCapabilities [Controller Server]", func() { VolumeId: vol.GetVolume().GetId(), } - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + if sc.Secrets != nil { + delReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret } _, err = c.DeleteVolume(context.Background(), delReq) Expect(err).NotTo(HaveOccurred()) }) + + It("should fail when the requested volume does not exist", func() { + + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: "some-vol-id", + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) }) -var _ = Describe("ControllerPublishVolume [Controller Server]", func() { +var _ = DescribeSanity("ControllerPublishVolume [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient n csi.NodeClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) - n = csi.NewNodeClient(conn) + c = csi.NewControllerClient(sc.Conn) + n = csi.NewNodeClient(sc.Conn) if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { Skip("ControllerPublishVolume not supported") @@ -666,8 +701,8 @@ var _ = Describe("ControllerPublishVolume [Controller Server]", func() { req := &csi.ControllerPublishVolumeRequest{} - if secrets != nil { - req.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + req.ControllerPublishSecrets = sc.Secrets.ControllerPublishVolumeSecret } _, err := c.ControllerPublishVolume(context.Background(), req) @@ -684,8 +719,8 @@ var _ = Describe("ControllerPublishVolume [Controller Server]", func() { VolumeId: "id", } - if secrets != nil { - req.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + req.ControllerPublishSecrets = sc.Secrets.ControllerPublishVolumeSecret } _, err := c.ControllerPublishVolume(context.Background(), req) @@ -703,8 +738,8 @@ var _ = 
Describe("ControllerPublishVolume [Controller Server]", func() { NodeId: "fakenode", } - if secrets != nil { - req.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + req.ControllerPublishSecrets = sc.Secrets.ControllerPublishVolumeSecret } _, err := c.ControllerPublishVolume(context.Background(), req) @@ -734,8 +769,8 @@ var _ = Describe("ControllerPublishVolume [Controller Server]", func() { }, } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } vol, err := c.CreateVolume(context.Background(), req) @@ -769,8 +804,8 @@ var _ = Describe("ControllerPublishVolume [Controller Server]", func() { Readonly: false, } - if secrets != nil { - pubReq.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + pubReq.ControllerPublishSecrets = sc.Secrets.ControllerPublishVolumeSecret } conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) @@ -785,8 +820,8 @@ var _ = Describe("ControllerPublishVolume [Controller Server]", func() { NodeId: nid.GetNodeId(), } - if secrets != nil { - unpubReq.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret + if sc.Secrets != nil { + unpubReq.ControllerUnpublishSecrets = sc.Secrets.ControllerUnpublishVolumeSecret } conunpubvol, err := c.ControllerUnpublishVolume(context.Background(), unpubReq) @@ -799,8 +834,214 @@ var _ = Describe("ControllerPublishVolume [Controller Server]", func() { VolumeId: vol.GetVolume().GetId(), } - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + if sc.Secrets != nil { + delReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret + } + + _, err = c.DeleteVolume(context.Background(), delReq) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should fail when the volume does not exist", func() { + + By("calling controller publish on a non-existent volume") + + pubReq := &csi.ControllerPublishVolumeRequest{ + VolumeId: "some-vol-id", + NodeId: "some-node-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + } + + if sc.Secrets != nil { + pubReq.ControllerPublishSecrets = sc.Secrets.ControllerPublishVolumeSecret + } + + conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) + + It("should fail when the node does not exist", func() { + + // Create Volume First + By("creating a single node writer volume") + name := "sanity" + req := &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + } + + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret + } + + vol, err := c.CreateVolume(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) + + // 
ControllerPublishVolume + By("calling controllerpublish on that volume") + + pubReq := &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetId(), + NodeId: "some-fake-node-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + } + + if sc.Secrets != nil { + pubReq.ControllerPublishSecrets = sc.Secrets.ControllerPublishVolumeSecret + } + + conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + + By("cleaning up deleting the volume") + + delReq := &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetId(), + } + + if sc.Secrets != nil { + delReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret + } + + _, err = c.DeleteVolume(context.Background(), delReq) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should fail when the volume is already published but is incompatible", func() { + + // Create Volume First + By("creating a single node writer volume") + name := "sanity" + req := &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + } + + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret + } + + vol, err := c.CreateVolume(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetId()).NotTo(BeEmpty()) + + By("getting a node id") + nid, err := n.NodeGetId( + context.Background(), + &csi.NodeGetIdRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + pubReq := &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + } + + if sc.Secrets != nil { + pubReq.ControllerPublishSecrets = sc.Secrets.ControllerPublishVolumeSecret + } + + conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).NotTo(HaveOccurred()) + Expect(conpubvol).NotTo(BeNil()) + + // Publish again with different attributes. 
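+ // Per the CSI spec, repeating ControllerPublishVolume for the same + // volume/node pair with incompatible arguments (here only Readonly is + // flipped) must fail with ALREADY_EXISTS rather than succeed + // idempotently, which is what the assertions below verify.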
+ pubReq.Readonly = true + + conpubvol, err = c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) + + By("cleaning up unpublishing the volume") + + unpubReq := &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + } + + if sc.Secrets != nil { + unpubReq.ControllerUnpublishSecrets = sc.Secrets.ControllerUnpublishVolumeSecret + } + + conunpubvol, err := c.ControllerUnpublishVolume(context.Background(), unpubReq) + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) + + By("cleaning up deleting the volume") + + delReq := &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetId(), + } + + if sc.Secrets != nil { + delReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret } _, err = c.DeleteVolume(context.Background(), delReq) @@ -808,15 +1049,15 @@ var _ = Describe("ControllerPublishVolume [Controller Server]", func() { }) }) -var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { +var _ = DescribeSanity("ControllerUnpublishVolume [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient n csi.NodeClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) - n = csi.NewNodeClient(conn) + c = csi.NewControllerClient(sc.Conn) + n = csi.NewNodeClient(sc.Conn) if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { Skip("ControllerUnpublishVolume not supported") @@ -827,8 +1068,8 @@ var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { req := &csi.ControllerUnpublishVolumeRequest{} - if secrets != nil { - req.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret + if sc.Secrets != nil { + req.ControllerUnpublishSecrets = sc.Secrets.ControllerUnpublishVolumeSecret } _, err := c.ControllerUnpublishVolume(context.Background(), req) @@ -859,8 +1100,8 @@ var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { }, } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } vol, err := c.CreateVolume(context.Background(), req) @@ -894,8 +1135,8 @@ var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { Readonly: false, } - if secrets != nil { - pubReq.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + pubReq.ControllerPublishSecrets = sc.Secrets.ControllerPublishVolumeSecret } conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) @@ -911,8 +1152,8 @@ var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { NodeId: nid.GetNodeId(), } - if secrets != nil { - unpubReq.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret + if sc.Secrets != nil { + unpubReq.ControllerUnpublishSecrets = sc.Secrets.ControllerUnpublishVolumeSecret } conunpubvol, err := c.ControllerUnpublishVolume(context.Background(), unpubReq) @@ -925,11 +1166,544 @@ var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { VolumeId: vol.GetVolume().GetId(), } - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + if sc.Secrets != nil { + delReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret } _, err 
= c.DeleteVolume(context.Background(), delReq) Expect(err).NotTo(HaveOccurred()) }) }) + +var _ = DescribeSanity("ListSnapshots [Controller Server]", func(sc *SanityContext) { + var ( + c csi.ControllerClient + ) + + BeforeEach(func() { + c = csi.NewControllerClient(sc.Conn) + + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS) { + Skip("ListSnapshots not supported") + } + }) + + It("should return appropriate values (no optional values added)", func() { + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + + for _, snapshot := range snapshots.GetEntries() { + verifySnapshotInfo(snapshot.GetSnapshot()) + } + }) + + It("should return snapshots that match the specified snapshot id", func() { + + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-1", volume.GetVolume().GetId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) + + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SnapshotId: snapshot.GetSnapshot().GetId()}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(BeNumerically("==", 1)) + verifySnapshotInfo(snapshots.GetEntries()[0].GetSnapshot()) + Expect(snapshots.GetEntries()[0].GetSnapshot().GetId()).To(Equal(snapshot.GetSnapshot().GetId())) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return empty when the specified snapshot id does not exist", func() { + + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SnapshotId: "none-exist-id"}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(snapshots.GetEntries()).To(BeEmpty()) + }) + + It("should return snapshots that match the specified source volume id", func() { + + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-2") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-2", volume.GetVolume().GetId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) + + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SourceVolumeId: snapshot.GetSnapshot().GetSourceVolumeId()}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + for _, snap := range snapshots.GetEntries() { + verifySnapshotInfo(snap.GetSnapshot()) + Expect(snap.GetSnapshot().GetSourceVolumeId()).To(Equal(snapshot.GetSnapshot().GetSourceVolumeId())) + } + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetId()) + _, err = 
c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return empty when the specified source volume id does not exist", func() { + + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{SourceVolumeId: "none-exist-volume-id"}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(snapshots.GetEntries()).To(BeEmpty()) + }) + + It("should fail when an invalid starting_token is passed", func() { + vols, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{ + StartingToken: "invalid-token", + }, + ) + Expect(err).To(HaveOccurred()) + Expect(vols).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.Aborted)) + }) + + It("should fail when the starting_token is greater than total number of snapshots", func() { + // Get total number of snapshots. + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + + totalSnapshots := len(snapshots.GetEntries()) + + // Send starting_token that is greater than the total number of snapshots. + snapshots, err = c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{ + StartingToken: strconv.Itoa(totalSnapshots + 5), + }, + ) + Expect(err).To(HaveOccurred()) + Expect(snapshots).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.Aborted)) + }) + + It("check the presence of new snapshots in the snapshot list", func() { + // List Snapshots before creating new snapshots. + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + + totalSnapshots := len(snapshots.GetEntries()) + + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-3") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + By("creating a snapshot") + snapReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-3", volume.GetVolume().GetId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) + + snapshots, err = c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots + 1)) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + + // List snapshots and check if the deleted snapshot exists in the snapshot list.
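+ // The entry count should be back at the pre-test total, i.e. the deleted + // snapshot must no longer be returned.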
+ snapshots, err = c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots)) + }) + + It("should return next token when a limited number of entries are requested", func() { + // minSnapshotCount is the minimum number of snapshots expected to exist, + // based on which paginated snapshot listing is performed. + minSnapshotCount := 5 + // maxEntries is the maximum number of entries in a ListSnapshots request. + maxEntries := 2 + // currentTotalSnapshots is the total number of snapshots at a given time. It + // is used to verify that all the snapshots have been listed. + currentTotalSnapshots := 0 + + // Get the number of existing snapshots. + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + + initialTotalSnapshots := len(snapshots.GetEntries()) + currentTotalSnapshots = initialTotalSnapshots + + createVols := make([]*csi.Volume, 0) + createSnapshots := make([]*csi.Snapshot, 0) + + // Ensure that at least minSnapshotCount snapshots exist. + if initialTotalSnapshots < minSnapshotCount { + + By("creating required new volumes and snapshots") + requiredSnapshots := minSnapshotCount - initialTotalSnapshots + + for i := 1; i <= requiredSnapshots; i++ { + volReq := MakeCreateVolumeReq(sc, "volume"+strconv.Itoa(i)) + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + Expect(volume).NotTo(BeNil()) + createVols = append(createVols, volume.GetVolume()) + + snapReq := MakeCreateSnapshotReq(sc, "snapshot"+strconv.Itoa(i), volume.GetVolume().GetId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) + createSnapshots = append(createSnapshots, snapshot.GetSnapshot()) + } + + // Update the current total snapshots count. + currentTotalSnapshots += requiredSnapshots + } + + // Request list snapshots with max entries maxEntries. + snapshots, err = c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{ + MaxEntries: int32(maxEntries), + }) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + + nextToken := snapshots.GetNextToken() + + Expect(nextToken).To(Equal(strconv.Itoa(maxEntries))) + Expect(len(snapshots.GetEntries())).To(Equal(maxEntries)) + + // Request list snapshots with starting_token and no max entries. + snapshots, err = c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{ + StartingToken: nextToken, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + + // Ensure that all the remaining entries are returned at once.
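+ // (The next-token arithmetic assumes the plugin implements starting_token + // as a numeric offset; nextToken was asserted above to equal + // strconv.Itoa(maxEntries), so the second page should hold every snapshot + // after the first maxEntries entries.)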
+ Expect(len(snapshots.GetEntries())).To(Equal(currentTotalSnapshots - maxEntries)) + + if initialTotalSnapshots < minSnapshotCount { + + By("cleaning up deleting the snapshots") + + for _, snap := range createSnapshots { + delSnapReq := MakeDeleteSnapshotReq(sc, snap.GetId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + } + + By("cleaning up deleting the volumes") + + for _, vol := range createVols { + delVolReq := MakeDeleteVolumeReq(sc, vol.GetId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + } + } + }) + +}) + +var _ = DescribeSanity("DeleteSnapshot [Controller Server]", func(sc *SanityContext) { + var ( + c csi.ControllerClient + ) + + BeforeEach(func() { + c = csi.NewControllerClient(sc.Conn) + + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("DeleteSnapshot not supported") + } + }) + + It("should fail when no snapshot id is provided", func() { + + req := &csi.DeleteSnapshotRequest{} + + if sc.Secrets != nil { + req.DeleteSnapshotSecrets = sc.Secrets.DeleteSnapshotSecret + } + + _, err := c.DeleteSnapshot(context.Background(), req) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should succeed when an invalid snapshot id is used", func() { + + req := MakeDeleteSnapshotReq(sc, "reallyfakesnapshotid") + _, err := c.DeleteSnapshot(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return appropriate values (no optional values added)", func() { + + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "DeleteSnapshot-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + // Create Snapshot First + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "DeleteSnapshot-snapshot-1", volume.GetVolume().GetId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) +}) + +var _ = DescribeSanity("CreateSnapshot [Controller Server]", func(sc *SanityContext) { + var ( + c csi.ControllerClient + ) + + BeforeEach(func() { + c = csi.NewControllerClient(sc.Conn) + + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("CreateSnapshot not supported") + } + }) + + It("should fail when no name is provided", func() { + + req := &csi.CreateSnapshotRequest{ + SourceVolumeId: "testId", + } + + if sc.Secrets != nil { + req.CreateSnapshotSecrets = sc.Secrets.CreateSnapshotSecret + } + + _, err := c.CreateSnapshot(context.Background(), req) + Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no source volume id is provided", func() { + + req := 
&csi.CreateSnapshotRequest{ + Name: "name", + } + + if sc.Secrets != nil { + req.CreateSnapshotSecrets = sc.Secrets.CreateSnapshotSecret + } + + _, err := c.CreateSnapshot(context.Background(), req) + Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should not fail when requesting to create a snapshot with already existing name and same SourceVolumeId.", func() { + + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "CreateSnapshot-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + By("creating a snapshot") + snapReq1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-1", volume.GetVolume().GetId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), snapReq1) + Expect(err).NotTo(HaveOccurred()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) + + snap2, err := c.CreateSnapshot(context.Background(), snapReq1) + Expect(err).NotTo(HaveOccurred()) + Expect(snap2).NotTo(BeNil()) + verifySnapshotInfo(snap2.GetSnapshot()) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should fail when requesting to create a snapshot with already existing name and different SourceVolumeId.", func() { + + By("creating a volume") + volume, err := c.CreateVolume(context.Background(), MakeCreateVolumeReq(sc, "CreateSnapshot-volume-2")) + Expect(err).ToNot(HaveOccurred()) + + By("creating a snapshot with the created volume source id") + req1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", volume.GetVolume().GetId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), req1) + Expect(err).NotTo(HaveOccurred()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) + + By("creating a snapshot with the same name but different volume source id") + req2 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", "test001", nil) + _, err = c.CreateSnapshot(context.Background(), req2) + Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) +}) + +func MakeCreateVolumeReq(sc *SanityContext, name string) *csi.CreateVolumeRequest { + size1 := TestVolumeSize(sc) + + req := &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size1, + LimitBytes: size1, + }, + } + + if 
sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret + } + + return req +} + +func MakeCreateSnapshotReq(sc *SanityContext, name, sourceVolumeId string, parameters map[string]string) *csi.CreateSnapshotRequest { + req := &csi.CreateSnapshotRequest{ + Name: name, + SourceVolumeId: sourceVolumeId, + Parameters: parameters, + } + + if sc.Secrets != nil { + req.CreateSnapshotSecrets = sc.Secrets.CreateSnapshotSecret + } + + return req +} + +func MakeDeleteSnapshotReq(sc *SanityContext, id string) *csi.DeleteSnapshotRequest { + delSnapReq := &csi.DeleteSnapshotRequest{ + SnapshotId: id, + } + + if sc.Secrets != nil { + delSnapReq.DeleteSnapshotSecrets = sc.Secrets.DeleteSnapshotSecret + } + + return delSnapReq +} + +func MakeDeleteVolumeReq(sc *SanityContext, id string) *csi.DeleteVolumeRequest { + delVolReq := &csi.DeleteVolumeRequest{ + VolumeId: id, + } + + if sc.Secrets != nil { + delVolReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret + } + + return delVolReq +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go index cb5aad48a..2d3e1e3ea 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go @@ -30,13 +30,13 @@ import ( . "github.com/onsi/gomega" ) -var _ = Describe("GetPluginCapabilities [Identity Service]", func() { +var _ = DescribeSanity("GetPluginCapabilities [Identity Service]", func(sc *SanityContext) { var ( c csi.IdentityClient ) BeforeEach(func() { - c = csi.NewIdentityClient(conn) + c = csi.NewIdentityClient(sc.Conn) }) It("should return appropriate capabilities", func() { @@ -50,6 +50,7 @@ var _ = Describe("GetPluginCapabilities [Identity Service]", func() { for _, cap := range res.GetCapabilities() { switch cap.GetService().GetType() { case csi.PluginCapability_Service_CONTROLLER_SERVICE: + case csi.PluginCapability_Service_ACCESSIBILITY_CONSTRAINTS: default: Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetService().GetType())) } @@ -59,13 +60,13 @@ var _ = Describe("GetPluginCapabilities [Identity Service]", func() { }) -var _ = Describe("Probe [Identity Service]", func() { +var _ = DescribeSanity("Probe [Identity Service]", func(sc *SanityContext) { var ( c csi.IdentityClient ) BeforeEach(func() { - c = csi.NewIdentityClient(conn) + c = csi.NewIdentityClient(sc.Conn) }) It("should return appropriate information", func() { @@ -79,16 +80,21 @@ var _ = Describe("Probe [Identity Service]", func() { Expect(ok).To(BeTrue()) Expect(serverError.Code() == codes.FailedPrecondition || serverError.Code() == codes.OK).To(BeTrue()) + + if res.GetReady() != nil { + Expect(res.GetReady().GetValue() == true || + res.GetReady().GetValue() == false).To(BeTrue()) + } }) }) -var _ = Describe("GetPluginInfo [Identity Server]", func() { +var _ = DescribeSanity("GetPluginInfo [Identity Server]", func(sc *SanityContext) { var ( c csi.IdentityClient ) BeforeEach(func() { - c = csi.NewIdentityClient(conn) + c = csi.NewIdentityClient(sc.Conn) }) It("should return appropriate information", func() { diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go index d57621dec..117d317a0 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go +++ 
b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go @@ -38,7 +38,6 @@ func isNodeCapabilitySupported(c csi.NodeClient, &csi.NodeGetCapabilitiesRequest{}) Expect(err).NotTo(HaveOccurred()) Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) for _, cap := range caps.GetCapabilities() { Expect(cap.GetRpc()).NotTo(BeNil()) @@ -49,13 +48,33 @@ return false } -var _ = Describe("NodeGetCapabilities [Node Server]", func() { +func isPluginCapabilitySupported(c csi.IdentityClient, + capType csi.PluginCapability_Service_Type, +) bool { + + caps, err := c.GetPluginCapabilities( + context.Background(), + &csi.GetPluginCapabilitiesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + Expect(caps.GetCapabilities()).NotTo(BeNil()) + + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetService()).NotTo(BeNil()) + if cap.GetService().GetType() == capType { + return true + } + } + return false +} + +var _ = DescribeSanity("NodeGetCapabilities [Node Server]", func(sc *SanityContext) { var ( c csi.NodeClient ) BeforeEach(func() { - c = csi.NewNodeClient(conn) + c = csi.NewNodeClient(sc.Conn) }) It("should return appropriate capabilities", func() { @@ -66,7 +85,6 @@ var _ = Describe("NodeGetCapabilities [Node Server]", func() { By("checking successful response") Expect(err).NotTo(HaveOccurred()) Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) for _, cap := range caps.GetCapabilities() { Expect(cap.GetRpc()).NotTo(BeNil()) @@ -81,13 +99,13 @@ var _ = Describe("NodeGetCapabilities [Node Server]", func() { }) }) -var _ = Describe("NodeGetId [Node Server]", func() { +var _ = DescribeSanity("NodeGetId [Node Server]", func(sc *SanityContext) { var ( c csi.NodeClient ) BeforeEach(func() { - c = csi.NewNodeClient(conn) + c = csi.NewNodeClient(sc.Conn) }) It("should return appropriate values", func() { @@ -101,7 +119,36 @@ }) }) -var _ = Describe("NodePublishVolume [Node Server]", func() { +var _ = DescribeSanity("NodeGetInfo [Node Server]", func(sc *SanityContext) { + var ( + c csi.NodeClient + i csi.IdentityClient + accessibilityConstraintSupported bool + ) + + BeforeEach(func() { + c = csi.NewNodeClient(sc.Conn) + i = csi.NewIdentityClient(sc.Conn) + accessibilityConstraintSupported = isPluginCapabilitySupported(i, csi.PluginCapability_Service_ACCESSIBILITY_CONSTRAINTS) + }) + + It("should return appropriate values", func() { + ninfo, err := c.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + + Expect(err).NotTo(HaveOccurred()) + Expect(ninfo).NotTo(BeNil()) + Expect(ninfo.GetNodeId()).NotTo(BeEmpty()) + Expect(ninfo.GetMaxVolumesPerNode()).NotTo(BeNumerically("<", 0)) + + if accessibilityConstraintSupported { + Expect(ninfo.GetAccessibleTopology()).NotTo(BeNil()) + } + }) +}) + +var _ = DescribeSanity("NodePublishVolume [Node Server]", func(sc *SanityContext) { var ( s csi.ControllerClient c csi.NodeClient @@ -110,14 +157,14 @@ ) BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) + s = csi.NewControllerClient(sc.Conn) + c = csi.NewNodeClient(sc.Conn) controllerPublishSupported = isControllerCapabilitySupported( s, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) if nodeStageSupported { 
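// When STAGE_UNSTAGE_VOLUME is advertised, the workflow below will call // NodeStageVolume, so the staging directory must exist up front; its path // now comes from the per-test SanityContext rather than the removed // package-level config.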
- err := createMountTargetLocation(config.StagingPath) + err := createMountTargetLocation(sc.Config.StagingPath) Expect(err).NotTo(HaveOccurred()) } }) @@ -126,8 +173,8 @@ var _ = Describe("NodePublishVolume [Node Server]", func() { req := &csi.NodePublishVolumeRequest{} - if secrets != nil { - req.NodePublishSecrets = secrets.NodePublishVolumeSecret + if sc.Secrets != nil { + req.NodePublishSecrets = sc.Secrets.NodePublishVolumeSecret } _, err := c.NodePublishVolume(context.Background(), req) @@ -144,8 +191,8 @@ var _ = Describe("NodePublishVolume [Node Server]", func() { VolumeId: "id", } - if secrets != nil { - req.NodePublishSecrets = secrets.NodePublishVolumeSecret + if sc.Secrets != nil { + req.NodePublishSecrets = sc.Secrets.NodePublishVolumeSecret } _, err := c.NodePublishVolume(context.Background(), req) @@ -160,11 +207,11 @@ var _ = Describe("NodePublishVolume [Node Server]", func() { req := &csi.NodePublishVolumeRequest{ VolumeId: "id", - TargetPath: config.TargetPath, + TargetPath: sc.Config.TargetPath, } - if secrets != nil { - req.NodePublishSecrets = secrets.NodePublishVolumeSecret + if sc.Secrets != nil { + req.NodePublishSecrets = sc.Secrets.NodePublishVolumeSecret } _, err := c.NodePublishVolume(context.Background(), req) @@ -176,11 +223,11 @@ var _ = Describe("NodePublishVolume [Node Server]", func() { }) It("should return appropriate values (no optional values added)", func() { - testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported) + testFullWorkflowSuccess(sc, s, c, controllerPublishSupported, nodeStageSupported) }) }) -var _ = Describe("NodeUnpublishVolume [Node Server]", func() { +var _ = DescribeSanity("NodeUnpublishVolume [Node Server]", func(sc *SanityContext) { var ( s csi.ControllerClient c csi.NodeClient @@ -189,14 +236,14 @@ var _ = Describe("NodeUnpublishVolume [Node Server]", func() { ) BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) + s = csi.NewControllerClient(sc.Conn) + c = csi.NewNodeClient(sc.Conn) controllerPublishSupported = isControllerCapabilitySupported( s, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) if nodeStageSupported { - err := createMountTargetLocation(config.StagingPath) + err := createMountTargetLocation(sc.Config.StagingPath) Expect(err).NotTo(HaveOccurred()) } }) @@ -228,12 +275,12 @@ var _ = Describe("NodeUnpublishVolume [Node Server]", func() { }) It("should return appropriate values (no optional values added)", func() { - testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported) + testFullWorkflowSuccess(sc, s, c, controllerPublishSupported, nodeStageSupported) }) }) // TODO: Tests for NodeStageVolume/NodeUnstageVolume -func testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controllerPublishSupported, nodeStageSupported bool) { +func testFullWorkflowSuccess(sc *SanityContext, s csi.ControllerClient, c csi.NodeClient, controllerPublishSupported, nodeStageSupported bool) { // Create Volume First By("creating a single node writer volume") name := "sanity" @@ -251,8 +298,8 @@ func testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controlle }, } - if secrets != nil { - req.ControllerCreateSecrets = secrets.CreateVolumeSecret + if sc.Secrets != nil { + req.ControllerCreateSecrets = sc.Secrets.CreateVolumeSecret } vol, err := s.CreateVolume(context.Background(), req) @@ -283,11 +330,12 @@ func 
testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controlle Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - Readonly: false, + VolumeAttributes: vol.GetVolume().GetAttributes(), + Readonly: false, } - if secrets != nil { - pubReq.ControllerPublishSecrets = secrets.ControllerPublishVolumeSecret + if sc.Secrets != nil { + pubReq.ControllerPublishSecrets = sc.Secrets.ControllerPublishVolumeSecret } conpubvol, err = s.ControllerPublishVolume(context.Background(), pubReq) @@ -307,13 +355,14 @@ func testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controlle Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - StagingTargetPath: config.StagingPath, + StagingTargetPath: sc.Config.StagingPath, + VolumeAttributes: vol.GetVolume().GetAttributes(), } if controllerPublishSupported { nodeStageVolReq.PublishInfo = conpubvol.GetPublishInfo() } - if secrets != nil { - nodeStageVolReq.NodeStageSecrets = secrets.NodeStageVolumeSecret + if sc.Secrets != nil { + nodeStageVolReq.NodeStageSecrets = sc.Secrets.NodeStageVolumeSecret } nodestagevol, err := c.NodeStageVolume( context.Background(), nodeStageVolReq) @@ -324,7 +373,7 @@ func testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controlle By("publishing the volume on a node") nodepubvolRequest := &csi.NodePublishVolumeRequest{ VolumeId: vol.GetVolume().GetId(), - TargetPath: config.TargetPath, + TargetPath: sc.Config.TargetPath, VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -333,15 +382,16 @@ func testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controlle Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, + VolumeAttributes: vol.GetVolume().GetAttributes(), } if nodeStageSupported { - nodepubvolRequest.StagingTargetPath = config.StagingPath + nodepubvolRequest.StagingTargetPath = sc.Config.StagingPath } if controllerPublishSupported { nodepubvolRequest.PublishInfo = conpubvol.GetPublishInfo() } - if secrets != nil { - nodepubvolRequest.NodePublishSecrets = secrets.NodePublishVolumeSecret + if sc.Secrets != nil { + nodepubvolRequest.NodePublishSecrets = sc.Secrets.NodePublishVolumeSecret } nodepubvol, err := c.NodePublishVolume(context.Background(), nodepubvolRequest) Expect(err).NotTo(HaveOccurred()) @@ -353,7 +403,7 @@ func testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controlle context.Background(), &csi.NodeUnpublishVolumeRequest{ VolumeId: vol.GetVolume().GetId(), - TargetPath: config.TargetPath, + TargetPath: sc.Config.TargetPath, }) Expect(err).NotTo(HaveOccurred()) Expect(nodeunpubvol).NotTo(BeNil()) @@ -364,7 +414,7 @@ func testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controlle context.Background(), &csi.NodeUnstageVolumeRequest{ VolumeId: vol.GetVolume().GetId(), - StagingTargetPath: config.StagingPath, + StagingTargetPath: sc.Config.StagingPath, }, ) Expect(err).NotTo(HaveOccurred()) @@ -379,8 +429,8 @@ func testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controlle NodeId: nid.GetNodeId(), } - if secrets != nil { - unpubReq.ControllerUnpublishSecrets = secrets.ControllerUnpublishVolumeSecret + if sc.Secrets != nil { + unpubReq.ControllerUnpublishSecrets = sc.Secrets.ControllerUnpublishVolumeSecret } controllerunpubvol, err := s.ControllerUnpublishVolume(context.Background(), unpubReq) @@ -394,15 +444,15 @@ func testFullWorkflowSuccess(s csi.ControllerClient, c csi.NodeClient, controlle VolumeId: 
vol.GetVolume().GetId(), } - if secrets != nil { - delReq.ControllerDeleteSecrets = secrets.DeleteVolumeSecret + if sc.Secrets != nil { + delReq.ControllerDeleteSecrets = sc.Secrets.DeleteVolumeSecret } _, err = s.DeleteVolume(context.Background(), delReq) Expect(err).NotTo(HaveOccurred()) } -var _ = Describe("NodeStageVolume [Node Server]", func() { +var _ = DescribeSanity("NodeStageVolume [Node Server]", func(sc *SanityContext) { var ( s csi.ControllerClient c csi.NodeClient @@ -412,15 +462,15 @@ var _ = Describe("NodeStageVolume [Node Server]", func() { ) BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) + s = csi.NewControllerClient(sc.Conn) + c = csi.NewNodeClient(sc.Conn) device = "/dev/mock" controllerPublishSupported = isControllerCapabilitySupported( s, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) if nodeStageSupported { - err := createMountTargetLocation(config.StagingPath) + err := createMountTargetLocation(sc.Config.StagingPath) Expect(err).NotTo(HaveOccurred()) } else { Skip("NodeStageVolume not supported") @@ -430,7 +480,7 @@ var _ = Describe("NodeStageVolume [Node Server]", func() { It("should fail when no volume id is provided", func() { req := &csi.NodeStageVolumeRequest{ - StagingTargetPath: config.StagingPath, + StagingTargetPath: sc.Config.StagingPath, VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -444,8 +494,8 @@ var _ = Describe("NodeStageVolume [Node Server]", func() { }, } - if secrets != nil { - req.NodeStageSecrets = secrets.NodeStageVolumeSecret + if sc.Secrets != nil { + req.NodeStageSecrets = sc.Secrets.NodeStageVolumeSecret } _, err := c.NodeStageVolume(context.Background(), req) @@ -473,8 +523,8 @@ var _ = Describe("NodeStageVolume [Node Server]", func() { }, } - if secrets != nil { - req.NodeStageSecrets = secrets.NodeStageVolumeSecret + if sc.Secrets != nil { + req.NodeStageSecrets = sc.Secrets.NodeStageVolumeSecret } _, err := c.NodeStageVolume(context.Background(), req) @@ -489,14 +539,14 @@ var _ = Describe("NodeStageVolume [Node Server]", func() { req := &csi.NodeStageVolumeRequest{ VolumeId: "id", - StagingTargetPath: config.StagingPath, + StagingTargetPath: sc.Config.StagingPath, PublishInfo: map[string]string{ "device": device, }, } - if secrets != nil { - req.NodeStageSecrets = secrets.NodeStageVolumeSecret + if sc.Secrets != nil { + req.NodeStageSecrets = sc.Secrets.NodeStageVolumeSecret } _, err := c.NodeStageVolume(context.Background(), req) @@ -508,11 +558,11 @@ var _ = Describe("NodeStageVolume [Node Server]", func() { }) It("should return appropriate values (no optional values added)", func() { - testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported) + testFullWorkflowSuccess(sc, s, c, controllerPublishSupported, nodeStageSupported) }) }) -var _ = Describe("NodeUnstageVolume [Node Server]", func() { +var _ = DescribeSanity("NodeUnstageVolume [Node Server]", func(sc *SanityContext) { var ( s csi.ControllerClient c csi.NodeClient @@ -521,14 +571,14 @@ var _ = Describe("NodeUnstageVolume [Node Server]", func() { ) BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) + s = csi.NewControllerClient(sc.Conn) + c = csi.NewNodeClient(sc.Conn) controllerPublishSupported = isControllerCapabilitySupported( s, 
 			csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME)
 		nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME)
 		if nodeStageSupported {
-			err := createMountTargetLocation(config.StagingPath)
+			err := createMountTargetLocation(sc.Config.StagingPath)
 			Expect(err).NotTo(HaveOccurred())
 		} else {
 			Skip("NodeUnstageVolume not supported")
@@ -540,7 +590,7 @@ var _ = Describe("NodeUnstageVolume [Node Server]", func() {
 		_, err := c.NodeUnstageVolume(
 			context.Background(),
 			&csi.NodeUnstageVolumeRequest{
-				StagingTargetPath: config.StagingPath,
+				StagingTargetPath: sc.Config.StagingPath,
 			})
 		Expect(err).To(HaveOccurred())
@@ -564,6 +614,6 @@ var _ = Describe("NodeUnstageVolume [Node Server]", func() {
 	})

 	It("should return appropriate values (no optional values added)", func() {
-		testFullWorkflowSuccess(s, c, controllerPublishSupported, nodeStageSupported)
+		testFullWorkflowSuccess(sc, s, c, controllerPublishSupported, nodeStageSupported)
 	})
 })
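With node.go reworked as above, a test block never touches package-level state: the connection, configuration, and secrets all arrive through the `*SanityContext` parameter that `DescribeSanity` passes in. For illustration only (this block is not part of the diff, and `NodeGetId` here is simply the CSI v0.3 RPC used as a small example), a new-style block would look roughly like this:

```go
package sanity

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"golang.org/x/net/context"
)

// Registered at package init; the body runs only after sc.setup() has
// populated sc.Conn for the current suite run.
var _ = DescribeSanity("NodeGetId [Node Server]", func(sc *SanityContext) {
	var c csi.NodeClient

	BeforeEach(func() {
		// The shared gRPC connection now lives on the context,
		// not in a package-level `conn` variable.
		c = csi.NewNodeClient(sc.Conn)
	})

	It("should return a non-empty node id", func() {
		nid, err := c.NodeGetId(context.Background(), &csi.NodeGetIdRequest{})
		Expect(err).NotTo(HaveOccurred())
		Expect(nid.GetNodeId()).NotTo(BeEmpty())
	})
})
```

Because nothing is captured from globals, the same registered block can run against different configurations within one process, which is what lets the refactor below drop the old `lock` mutex.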
diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go
index 58d63b702..3e57e611c 100644
--- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go
+++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go
@@ -20,7 +20,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
-	"sync"
 	"testing"

 	"github.com/kubernetes-csi/csi-test/utils"
@@ -40,16 +39,12 @@ type CSISecrets struct {
 	ControllerUnpublishVolumeSecret map[string]string `yaml:"ControllerUnpublishVolumeSecret"`
 	NodeStageVolumeSecret           map[string]string `yaml:"NodeStageVolumeSecret"`
 	NodePublishVolumeSecret         map[string]string `yaml:"NodePublishVolumeSecret"`
+	CreateSnapshotSecret            map[string]string `yaml:"CreateSnapshotSecret"`
+	DeleteSnapshotSecret            map[string]string `yaml:"DeleteSnapshotSecret"`
 }

-var (
-	config  *Config
-	conn    *grpc.ClientConn
-	lock    sync.Mutex
-	secrets *CSISecrets
-)
-
-// Config provides the configuration for the sanity tests
+// Config provides the configuration for the sanity tests. It
+// needs to be initialized by the user of the sanity package.
 type Config struct {
 	TargetPath  string
 	StagingPath string
@@ -58,40 +53,61 @@ type Config struct {
 	TestVolumeSize int64
 }

-// Test will test the CSI driver at the specified address
+// SanityContext holds the variables that each test can depend on. It
+// gets initialized before each test block runs.
+type SanityContext struct {
+	Config  *Config
+	Conn    *grpc.ClientConn
+	Secrets *CSISecrets
+}
+
+// Test will test the CSI driver at the specified address by
+// setting up a Ginkgo suite and running it.
 func Test(t *testing.T, reqConfig *Config) {
-	lock.Lock()
-	defer lock.Unlock()
+	sc := &SanityContext{
+		Config: reqConfig,
+	}

-	config = reqConfig
+	registerTestsInGinkgo(sc)
 	RegisterFailHandler(Fail)
 	RunSpecs(t, "CSI Driver Test Suite")
 }

-var _ = BeforeSuite(func() {
+func GinkgoTest(reqConfig *Config) {
+	sc := &SanityContext{
+		Config: reqConfig,
+	}
+
+	registerTestsInGinkgo(sc)
+}
+
+func (sc *SanityContext) setup() {
 	var err error

-	if len(config.SecretsFile) > 0 {
-		secrets, err = loadSecrets(config.SecretsFile)
+	if len(sc.Config.SecretsFile) > 0 {
+		sc.Secrets, err = loadSecrets(sc.Config.SecretsFile)
 		Expect(err).NotTo(HaveOccurred())
 	}

 	By("connecting to CSI driver")
-	conn, err = utils.Connect(config.Address)
+	sc.Conn, err = utils.Connect(sc.Config.Address)
 	Expect(err).NotTo(HaveOccurred())

 	By("creating mount and staging directories")
-	err = createMountTargetLocation(config.TargetPath)
+	err = createMountTargetLocation(sc.Config.TargetPath)
 	Expect(err).NotTo(HaveOccurred())
-	if len(config.StagingPath) > 0 {
-		err = createMountTargetLocation(config.StagingPath)
+	if len(sc.Config.StagingPath) > 0 {
+		err = createMountTargetLocation(sc.Config.StagingPath)
 		Expect(err).NotTo(HaveOccurred())
 	}
-})
+}

-var _ = AfterSuite(func() {
-	conn.Close()
-})
+func (sc *SanityContext) teardown() {
+	if sc.Conn != nil {
+		sc.Conn.Close()
+		sc.Conn = nil
+	}
+}

 func createMountTargetLocation(targetPath string) error {
 	fileInfo, err := os.Stat(targetPath)
diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go
new file mode 100644
index 000000000..e11f0d761
--- /dev/null
+++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2018 Intel Corporation
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sanity
+
+import (
+	. "github.com/onsi/ginkgo"
+)
+
+type test struct {
+	text string
+	body func(*SanityContext)
+}
+
+var tests []test
+
+// DescribeSanity must be used instead of the usual Ginkgo Describe to
+// register a test block. The difference is that the body function
+// will be called multiple times with the right context (when
+// setting up a Ginkgo suite or a testing.T test, with the right
+// configuration).
+func DescribeSanity(text string, body func(*SanityContext)) bool {
+	tests = append(tests, test{text, body})
+	return true
+}
+
+// registerTestsInGinkgo invokes the actual Ginkgo Describe
+// for the tests registered earlier with DescribeSanity.
+func registerTestsInGinkgo(sc *SanityContext) {
+	for _, test := range tests {
+		Describe(test.text, func() {
+
+			BeforeEach(func() {
+				sc.setup()
+			})
+
+			AfterEach(func() {
+				sc.teardown()
+			})
+
+			test.body(sc)
+		})
+	}
+}
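For completeness, here is a sketch of how a driver consumes the reworked entry points: `sanity.Test` registers every `DescribeSanity` block and runs them as a standard `go test`, while `GinkgoTest` performs only the registration, for drivers that embed the checks in an existing Ginkgo suite. The package name, test name, socket address, and mount paths below are all placeholders, not values taken from this change:

```go
package sanitytest

import (
	"testing"

	"github.com/kubernetes-csi/csi-test/pkg/sanity"
)

func TestMyDriverSanity(t *testing.T) {
	// Placeholder values: Address must match the socket the driver
	// under test is serving on, and the paths must be creatable by
	// the test user.
	config := &sanity.Config{
		Address:     "unix:///tmp/csi.sock",
		TargetPath:  "/tmp/csi-mount",
		StagingPath: "/tmp/csi-staging",
	}
	// Wires all DescribeSanity blocks into a Ginkgo suite and runs it;
	// setup()/teardown() bracket each registered block.
	sanity.Test(t, config)
}
```

Under the old API this call mutated the package globals under `lock`; with the per-run `SanityContext`, the same binary can invoke `Test` with different configs.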