diff --git a/cmd/main.go b/cmd/main.go index 822667c4b..c089c4788 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -74,13 +74,13 @@ func handle() { mounter := mountmanager.NewSafeMounter() deviceUtils := mountmanager.NewDeviceUtils() - + statter := mountmanager.NewStatter() ms, err := metadataservice.NewMetadataService() if err != nil { klog.Fatalf("Failed to set up metadata service: %v", err) } - err = gceDriver.SetupGCEDriver(cloudProvider, mounter, deviceUtils, ms, driverName, vendorVersion) + err = gceDriver.SetupGCEDriver(cloudProvider, mounter, deviceUtils, ms, statter, driverName, vendorVersion) if err != nil { klog.Fatalf("Failed to initialize GCE CSI Driver: %v", err) } diff --git a/pkg/gce-pd-csi-driver/gce-pd-driver.go b/pkg/gce-pd-csi-driver/gce-pd-driver.go index 4dcb227b8..f49ef2261 100644 --- a/pkg/gce-pd-csi-driver/gce-pd-driver.go +++ b/pkg/gce-pd-csi-driver/gce-pd-driver.go @@ -46,7 +46,7 @@ func GetGCEDriver() *GCEDriver { } func (gceDriver *GCEDriver) SetupGCEDriver(cloudProvider gce.GCECompute, mounter *mount.SafeFormatAndMount, - deviceUtils mountmanager.DeviceUtils, meta metadataservice.MetadataService, name, vendorVersion string) error { + deviceUtils mountmanager.DeviceUtils, meta metadataservice.MetadataService, statter mountmanager.Statter, name, vendorVersion string) error { if name == "" { return fmt.Errorf("Driver name missing") } @@ -72,12 +72,13 @@ func (gceDriver *GCEDriver) SetupGCEDriver(cloudProvider gce.GCECompute, mounter ns := []csi.NodeServiceCapability_RPC_Type{ csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, csi.NodeServiceCapability_RPC_EXPAND_VOLUME, + csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, } gceDriver.AddNodeServiceCapabilities(ns) // Set up RPC Servers gceDriver.ids = NewIdentityServer(gceDriver) - gceDriver.ns = NewNodeServer(gceDriver, mounter, deviceUtils, meta) + gceDriver.ns = NewNodeServer(gceDriver, mounter, deviceUtils, meta, statter) gceDriver.cs = NewControllerServer(gceDriver, cloudProvider, meta) 
return nil @@ -133,13 +134,14 @@ func NewIdentityServer(gceDriver *GCEDriver) *GCEIdentityServer { } } -func NewNodeServer(gceDriver *GCEDriver, mounter *mount.SafeFormatAndMount, deviceUtils mountmanager.DeviceUtils, meta metadataservice.MetadataService) *GCENodeServer { +func NewNodeServer(gceDriver *GCEDriver, mounter *mount.SafeFormatAndMount, deviceUtils mountmanager.DeviceUtils, meta metadataservice.MetadataService, statter mountmanager.Statter) *GCENodeServer { return &GCENodeServer{ Driver: gceDriver, Mounter: mounter, DeviceUtils: deviceUtils, MetadataService: meta, volumeLocks: common.NewVolumeLocks(), + VolumeStatter: statter, } } diff --git a/pkg/gce-pd-csi-driver/gce-pd-driver_test.go b/pkg/gce-pd-csi-driver/gce-pd-driver_test.go index d06e7c23b..316d783e3 100644 --- a/pkg/gce-pd-csi-driver/gce-pd-driver_test.go +++ b/pkg/gce-pd-csi-driver/gce-pd-driver_test.go @@ -44,7 +44,7 @@ func initBlockingGCEDriver(t *testing.T, cloudDisks []*gce.CloudDisk, readyToExe func initGCEDriverWithCloudProvider(t *testing.T, cloudProvider gce.GCECompute) *GCEDriver { vendorVersion := "test-vendor" gceDriver := GetGCEDriver() - err := gceDriver.SetupGCEDriver(cloudProvider, nil, nil, metadataservice.NewFakeService(), driver, vendorVersion) + err := gceDriver.SetupGCEDriver(cloudProvider, nil, nil, metadataservice.NewFakeService(), nil, driver, vendorVersion) if err != nil { t.Fatalf("Failed to setup GCE Driver: %v", err) } diff --git a/pkg/gce-pd-csi-driver/identity_test.go b/pkg/gce-pd-csi-driver/identity_test.go index 51bee490d..e7ebfd8b3 100644 --- a/pkg/gce-pd-csi-driver/identity_test.go +++ b/pkg/gce-pd-csi-driver/identity_test.go @@ -26,7 +26,7 @@ import ( func TestGetPluginInfo(t *testing.T) { vendorVersion := "test-vendor" gceDriver := GetGCEDriver() - err := gceDriver.SetupGCEDriver(nil, nil, nil, metadataservice.NewFakeService(), driver, vendorVersion) + err := gceDriver.SetupGCEDriver(nil, nil, nil, metadataservice.NewFakeService(), nil, driver, vendorVersion) 
if err != nil { t.Fatalf("Failed to setup GCE Driver: %v", err) } @@ -48,7 +48,7 @@ func TestGetPluginInfo(t *testing.T) { func TestGetPluginCapabilities(t *testing.T) { gceDriver := GetGCEDriver() - err := gceDriver.SetupGCEDriver(nil, nil, nil, metadataservice.NewFakeService(), driver, "test-vendor") + err := gceDriver.SetupGCEDriver(nil, nil, nil, metadataservice.NewFakeService(), nil, driver, "test-vendor") if err != nil { t.Fatalf("Failed to setup GCE Driver: %v", err) } @@ -80,7 +80,7 @@ func TestGetPluginCapabilities(t *testing.T) { func TestProbe(t *testing.T) { gceDriver := GetGCEDriver() - err := gceDriver.SetupGCEDriver(nil, nil, nil, metadataservice.NewFakeService(), driver, "test-vendor") + err := gceDriver.SetupGCEDriver(nil, nil, nil, metadataservice.NewFakeService(), nil, driver, "test-vendor") if err != nil { t.Fatalf("Failed to setup GCE Driver: %v", err) } diff --git a/pkg/gce-pd-csi-driver/node.go b/pkg/gce-pd-csi-driver/node.go index 0ef57cb46..0f1b4856e 100644 --- a/pkg/gce-pd-csi-driver/node.go +++ b/pkg/gce-pd-csi-driver/node.go @@ -22,12 +22,15 @@ import ( "context" - csi "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + csi "github.com/container-storage-interface/spec/lib/go/csi" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/resizefs" + "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common" metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata" mountmanager "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/mount-manager" @@ -37,6 +40,7 @@ type GCENodeServer struct { Driver *GCEDriver Mounter *mount.SafeFormatAndMount DeviceUtils mountmanager.DeviceUtils + VolumeStatter mountmanager.Statter MetadataService metadataservice.MetadataService // A map storing all volumes with ongoing operations so that additional operations @@ -360,7 +364,61 @@ func (ns 
*GCENodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRe } func (ns *GCENodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { - return nil, status.Error(codes.Unimplemented, fmt.Sprintf("NodeGetVolumeStats is not yet implemented")) + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume ID was empty") + } + if len(req.VolumePath) == 0 { + return nil, status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume path was empty") + } + + exists, err := ns.Mounter.Interface.ExistsPath(req.VolumePath) + if err != nil { + return nil, status.Errorf(codes.Internal, "unknown error when stat on %s: %v", req.VolumePath, err) + } + if !exists { + return nil, status.Errorf(codes.NotFound, "path %s does not exist", req.VolumePath) + } + + isBlock, err := ns.VolumeStatter.IsBlockDevice(req.VolumePath) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to determine whether %s is block device: %v", req.VolumePath, err) + } + if isBlock { + bcap, err := ns.getBlockSizeBytes(req.VolumePath) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to get block capacity on path %s: %v", req.VolumePath, err) + } + return &csi.NodeGetVolumeStatsResponse{ + Usage: []*csi.VolumeUsage{ + { + Unit: csi.VolumeUsage_BYTES, + Total: bcap, + }, + }, + }, nil + } + + available, capacity, used, inodesFree, inodes, inodesUsed, err := ns.VolumeStatter.StatFS(req.VolumePath) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to get fs info on path %s: %v", req.VolumePath, err) + } + + return &csi.NodeGetVolumeStatsResponse{ + Usage: []*csi.VolumeUsage{ + { + Unit: csi.VolumeUsage_BYTES, + Available: available, + Total: capacity, + Used: used, + }, + { + Unit: csi.VolumeUsage_INODES, + Available: inodesFree, + Total: inodes, + Used: inodesUsed, + }, + }, + }, nil } func (ns *GCENodeServer) 
NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) { diff --git a/pkg/gce-pd-csi-driver/node_test.go b/pkg/gce-pd-csi-driver/node_test.go index 8f996f74a..41853742a 100644 --- a/pkg/gce-pd-csi-driver/node_test.go +++ b/pkg/gce-pd-csi-driver/node_test.go @@ -44,7 +44,7 @@ func getTestGCEDriverWithCustomMounter(t *testing.T, mounter *mount.SafeFormatAn func getCustomTestGCEDriver(t *testing.T, mounter *mount.SafeFormatAndMount, deviceUtils mountmanager.DeviceUtils, metaService metadataservice.MetadataService) *GCEDriver { gceDriver := GetGCEDriver() - err := gceDriver.SetupGCEDriver(nil, mounter, deviceUtils, metaService, driver, "test-vendor") + err := gceDriver.SetupGCEDriver(nil, mounter, deviceUtils, metaService, mountmanager.NewFakeStatter(), driver, "test-vendor") if err != nil { t.Fatalf("Failed to setup GCE Driver: %v", err) } @@ -53,13 +53,76 @@ func getCustomTestGCEDriver(t *testing.T, mounter *mount.SafeFormatAndMount, dev func getTestBlockingGCEDriver(t *testing.T, readyToExecute chan chan struct{}) *GCEDriver { gceDriver := GetGCEDriver() - err := gceDriver.SetupGCEDriver(nil, mountmanager.NewFakeSafeBlockingMounter(readyToExecute), mountmanager.NewFakeDeviceUtils(), metadataservice.NewFakeService(), driver, "test-vendor") + err := gceDriver.SetupGCEDriver(nil, mountmanager.NewFakeSafeBlockingMounter(readyToExecute), mountmanager.NewFakeDeviceUtils(), metadataservice.NewFakeService(), nil, driver, "test-vendor") if err != nil { t.Fatalf("Failed to setup GCE Driver: %v", err) } return gceDriver } +func TestNodeGetVolumeStats(t *testing.T) { + gceDriver := getTestGCEDriver(t) + ns := gceDriver.ns + + req := &csi.NodePublishVolumeRequest{ + VolumeId: defaultVolumeID, + TargetPath: defaultTargetPath, + StagingTargetPath: defaultStagingPath, + Readonly: false, + VolumeCapability: stdVolCap, + } + _, err := ns.NodePublishVolume(context.Background(), req) + if err != nil { + t.Fatalf("Failed to set up 
test by publishing default vol: %v", err) + } + + testCases := []struct { + name string + volumeID string + volumePath string + expectErr bool + }{ + { + name: "normal", + volumeID: defaultVolumeID, + volumePath: defaultTargetPath, + }, + { + name: "no vol id", + volumePath: defaultTargetPath, + expectErr: true, + }, + { + name: "no vol path", + volumeID: defaultVolumeID, + expectErr: true, + }, + { + name: "bad vol path", + volumeID: defaultVolumeID, + volumePath: "/mnt/fake", + expectErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + req := &csi.NodeGetVolumeStatsRequest{ + VolumeId: tc.volumeID, + VolumePath: tc.volumePath, + } + _, err := ns.NodeGetVolumeStats(context.Background(), req) + if err != nil && !tc.expectErr { + t.Fatalf("Got unexpected err: %v", err) + } + if err == nil && tc.expectErr { + t.Fatal("Did not get error but expected one") + } + }) + } +} + func TestNodeGetVolumeLimits(t *testing.T) { gceDriver := getTestGCEDriver(t) diff --git a/pkg/gce-pd-csi-driver/node_test.go.orig b/pkg/gce-pd-csi-driver/node_test.go.orig deleted file mode 100644 index c5d2e46e7..000000000 --- a/pkg/gce-pd-csi-driver/node_test.go.orig +++ /dev/null @@ -1,595 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package gceGCEDriver - -import ( -<<<<<<< HEAD - "context" - "testing" - -======= - "errors" - "fmt" - "strconv" - "testing" - - "context" - ->>>>>>> Add implementation and unit tests for Expand capability - csi "github.com/container-storage-interface/spec/lib/go/csi" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "k8s.io/kubernetes/pkg/util/mount" - utilexec "k8s.io/utils/exec" - "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common" - metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata" - mountmanager "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/mount-manager" -) - -const defaultVolumeID = "project/test001/zones/c1/disks/testDisk" -const defaultTargetPath = "/mnt/test" -const defaultStagingPath = "/staging" - -func getTestGCEDriver(t *testing.T) *GCEDriver { - return getCustomTestGCEDriver(t, mountmanager.NewFakeSafeMounter(), mountmanager.NewFakeDeviceUtils(), metadataservice.NewFakeService()) -} - -func getTestGCEDriverWithCustomMounter(t *testing.T, mounter *mount.SafeFormatAndMount) *GCEDriver { - return getCustomTestGCEDriver(t, mounter, mountmanager.NewFakeDeviceUtils(), metadataservice.NewFakeService()) -} - -func getCustomTestGCEDriver(t *testing.T, mounter *mount.SafeFormatAndMount, deviceUtils mountmanager.DeviceUtils, metaService metadataservice.MetadataService) *GCEDriver { - gceDriver := GetGCEDriver() - err := gceDriver.SetupGCEDriver(nil, mounter, deviceUtils, metaService, driver, "test-vendor") - if err != nil { - t.Fatalf("Failed to setup GCE Driver: %v", err) - } - return gceDriver -} - -func getTestBlockingGCEDriver(t *testing.T, readyToExecute chan chan struct{}) *GCEDriver { - gceDriver := GetGCEDriver() - err := gceDriver.SetupGCEDriver(nil, mountmanager.NewFakeSafeBlockingMounter(readyToExecute), mountmanager.NewFakeDeviceUtils(), metadataservice.NewFakeService(), driver, "test-vendor") - if err != nil { - t.Fatalf("Failed to setup GCE 
Driver: %v", err) - } - return gceDriver -} - -func TestNodeGetVolumeLimits(t *testing.T) { - - gceDriver := getTestGCEDriver(t) - ns := gceDriver.ns - req := &csi.NodeGetInfoRequest{} - - testCases := []struct { - name string - machineType string - expVolumeLimit int64 - }{ - { - name: "Predifined standard machine", - machineType: "n1-standard-1", - expVolumeLimit: volumeLimit128, - }, - { - name: "Predifined micro machine", - machineType: "f1-micro", - expVolumeLimit: volumeLimit16, - }, - { - name: "Predifined small machine", - machineType: "g1-small", - expVolumeLimit: volumeLimit16, - }, - { - name: "Custom machine with 1GiB Mem", - machineType: "custom-1-1024", - expVolumeLimit: volumeLimit128, - }, - { - name: "Custom machine with 4GiB Mem", - machineType: "custom-2-4096", - expVolumeLimit: volumeLimit128, - }, - } - - for _, tc := range testCases { - t.Logf("Test case: %s", tc.name) - metadataservice.SetMachineType(tc.machineType) - res, err := ns.NodeGetInfo(context.Background(), req) - if err != nil { - t.Fatalf("Failed to get node info: %v", err) - } else { - volumeLimit := res.GetMaxVolumesPerNode() - if volumeLimit != tc.expVolumeLimit { - t.Fatalf("Expected volume limit: %v, got %v, for machine-type: %v", - tc.expVolumeLimit, volumeLimit, tc.machineType) - } - t.Logf("Get node info: %v", res) - } - } -} - -func TestNodePublishVolume(t *testing.T) { - gceDriver := getTestGCEDriver(t) - ns := gceDriver.ns - testCases := []struct { - name string - req *csi.NodePublishVolumeRequest - expErrCode codes.Code - }{ - { - name: "Valid request", - req: &csi.NodePublishVolumeRequest{ - VolumeId: defaultVolumeID, - TargetPath: defaultTargetPath, - StagingTargetPath: defaultStagingPath, - Readonly: false, - VolumeCapability: stdVolCap, - }, - }, - { - name: "Invalid request (invalid access mode)", - req: &csi.NodePublishVolumeRequest{ - VolumeId: defaultVolumeID, - TargetPath: defaultTargetPath, - StagingTargetPath: defaultStagingPath, - Readonly: false, - 
VolumeCapability: createVolumeCapability(csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER), - }, - expErrCode: codes.InvalidArgument, - }, - { - name: "Invalid request (No VolumeId)", - req: &csi.NodePublishVolumeRequest{ - TargetPath: defaultTargetPath, - StagingTargetPath: defaultStagingPath, - Readonly: false, - VolumeCapability: stdVolCap, - }, - expErrCode: codes.InvalidArgument, - }, - { - name: "Invalid request (No TargetPath)", - req: &csi.NodePublishVolumeRequest{ - VolumeId: defaultVolumeID, - StagingTargetPath: defaultStagingPath, - Readonly: false, - VolumeCapability: stdVolCap, - }, - expErrCode: codes.InvalidArgument, - }, - { - name: "Invalid request (No StagingTargetPath)", - req: &csi.NodePublishVolumeRequest{ - VolumeId: defaultVolumeID, - TargetPath: defaultTargetPath, - Readonly: false, - VolumeCapability: stdVolCap, - }, - expErrCode: codes.InvalidArgument, - }, - { - name: "Invalid request (Nil VolumeCapability)", - req: &csi.NodePublishVolumeRequest{ - VolumeId: defaultVolumeID, - TargetPath: defaultTargetPath, - StagingTargetPath: defaultStagingPath, - Readonly: false, - VolumeCapability: nil, - }, - expErrCode: codes.InvalidArgument, - }, - } - for _, tc := range testCases { - t.Logf("Test case: %s", tc.name) - _, err := ns.NodePublishVolume(context.Background(), tc.req) - if err != nil { - serverError, ok := status.FromError(err) - if !ok { - t.Fatalf("Could not get error status code from err: %v", err) - } - if serverError.Code() != tc.expErrCode { - t.Fatalf("Expected error code: %v, got: %v. 
err : %v", tc.expErrCode, serverError.Code(), err) - } - continue - } - if tc.expErrCode != codes.OK { - t.Fatalf("Expected error: %v, got no error", tc.expErrCode) - } - } -} - -func TestNodeUnpublishVolume(t *testing.T) { - gceDriver := getTestGCEDriver(t) - ns := gceDriver.ns - testCases := []struct { - name string - req *csi.NodeUnpublishVolumeRequest - expErrCode codes.Code - }{ - { - name: "Valid request", - req: &csi.NodeUnpublishVolumeRequest{ - VolumeId: defaultVolumeID, - TargetPath: defaultTargetPath, - }, - }, - { - name: "Invalid request (No VolumeId)", - req: &csi.NodeUnpublishVolumeRequest{ - TargetPath: defaultTargetPath, - }, - expErrCode: codes.InvalidArgument, - }, - { - name: "Invalid request (No TargetPath)", - req: &csi.NodeUnpublishVolumeRequest{ - VolumeId: defaultVolumeID, - }, - expErrCode: codes.InvalidArgument, - }, - } - for _, tc := range testCases { - t.Logf("Test case: %s", tc.name) - _, err := ns.NodeUnpublishVolume(context.Background(), tc.req) - if err != nil { - serverError, ok := status.FromError(err) - if !ok { - t.Fatalf("Could not get error status code from err: %v", err) - } - if serverError.Code() != tc.expErrCode { - t.Fatalf("Expected error code: %v, got: %v. 
err : %v", tc.expErrCode, serverError.Code(), err) - } - continue - } - if tc.expErrCode != codes.OK { - t.Fatalf("Expected error: %v, got no error", tc.expErrCode) - } - } -} - -func TestNodeStageVolume(t *testing.T) { - gceDriver := getTestGCEDriver(t) - ns := gceDriver.ns - volumeID := "project/test001/zones/c1/disks/testDisk" - blockCap := &csi.VolumeCapability_Block{ - Block: &csi.VolumeCapability_BlockVolume{}, - } - cap := &csi.VolumeCapability{ - AccessType: blockCap, - } - - testCases := []struct { - name string - req *csi.NodeStageVolumeRequest - expErrCode codes.Code - }{ - { - name: "Valid request", - req: &csi.NodeStageVolumeRequest{ - VolumeId: volumeID, - StagingTargetPath: defaultStagingPath, - VolumeCapability: stdVolCap, - }, - }, - { - name: "Invalid request (Bad Access Mode)", - req: &csi.NodeStageVolumeRequest{ - VolumeId: volumeID, - StagingTargetPath: defaultStagingPath, - VolumeCapability: createVolumeCapability(csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER), - }, - expErrCode: codes.InvalidArgument, - }, - { - name: "Invalid request (No VolumeId)", - req: &csi.NodeStageVolumeRequest{ - StagingTargetPath: defaultStagingPath, - VolumeCapability: stdVolCap, - }, - expErrCode: codes.InvalidArgument, - }, - { - name: "Invalid request (No StagingTargetPath)", - req: &csi.NodeStageVolumeRequest{ - VolumeId: volumeID, - VolumeCapability: stdVolCap, - }, - expErrCode: codes.InvalidArgument, - }, - { - name: "Invalid request (Nil VolumeCapability)", - req: &csi.NodeStageVolumeRequest{ - VolumeId: volumeID, - StagingTargetPath: defaultStagingPath, - VolumeCapability: nil, - }, - expErrCode: codes.InvalidArgument, - }, - { - name: "Invalid request (No Mount in capability)", - req: &csi.NodeStageVolumeRequest{ - VolumeId: volumeID, - StagingTargetPath: defaultStagingPath, - VolumeCapability: cap, - }, - expErrCode: codes.InvalidArgument, - }, - } - for _, tc := range testCases { - t.Logf("Test case: %s", tc.name) - _, err := 
ns.NodeStageVolume(context.Background(), tc.req) - if err != nil { - serverError, ok := status.FromError(err) - if !ok { - t.Fatalf("Could not get error status code from err: %v", err) - } - if serverError.Code() != tc.expErrCode { - t.Fatalf("Expected error code: %v, got: %v. err : %v", tc.expErrCode, serverError.Code(), err) - } - continue - } - if tc.expErrCode != codes.OK { - t.Fatalf("Expected error: %v, got no error", tc.expErrCode) - } - } -} - -func TestNodeExpandVolume(t *testing.T) { - // TODO: Add tests/functionality for non-existant volume - var resizedBytes int64 = 2000000000 - volumeID := "project/test001/zones/c1/disks/testDisk" - testCases := []struct { - name string - req *csi.NodeExpandVolumeRequest - fsOrBlock string - expRespBytes int64 - expErrCode codes.Code - }{ - { - name: "ext4 fs expand", - req: &csi.NodeExpandVolumeRequest{ - VolumeId: volumeID, - VolumePath: "some-path", - CapacityRange: &csi.CapacityRange{ - RequiredBytes: resizedBytes, - }, - }, - fsOrBlock: "ext4", - expRespBytes: resizedBytes, - }, - { - name: "block device expand", - req: &csi.NodeExpandVolumeRequest{ - VolumeId: volumeID, - VolumePath: "some-path", - CapacityRange: &csi.CapacityRange{ - RequiredBytes: resizedBytes, - }, - }, - fsOrBlock: "block", - expRespBytes: resizedBytes, - }, - { - name: "xfs fs expand", - req: &csi.NodeExpandVolumeRequest{ - VolumeId: volumeID, - VolumePath: "some-path", - CapacityRange: &csi.CapacityRange{ - RequiredBytes: resizedBytes, - }, - }, - fsOrBlock: "xfs", - expRespBytes: resizedBytes, - }, - } - for _, tc := range testCases { - t.Logf("Test case: %s", tc.name) - - execCallback := func(cmd string, args ...string) ([]byte, error) { - switch cmd { - case "blkid": - if tc.fsOrBlock == "block" { - // blkid returns exit code 2 when run on unformatted device - return nil, utilexec.CodeExitError{ - Err: errors.New("this is an exit error"), - Code: 2, - } - } - return []byte(fmt.Sprintf("DEVNAME=/dev/sdb\nTYPE=%s", tc.fsOrBlock)), nil - 
case "resize2fs": - if tc.fsOrBlock == "ext4" { - return nil, nil - } - t.Fatalf("resize fs called on device with %s", tc.fsOrBlock) - case "xfs_growfs": - if tc.fsOrBlock != "xfs" { - t.Fatalf("xfs_growfs called on device with %s", tc.fsOrBlock) - } - for _, arg := range args { - if arg == tc.req.VolumePath { - return nil, nil - } - } - t.Errorf("xfs_growfs args did not contain volume path %s", tc.req.VolumePath) - case "df": - return []byte(fmt.Sprintf("FOO\n%v", common.BytesToGb(resizedBytes))), nil - case "blockdev": - return []byte(strconv.Itoa(int(resizedBytes))), nil - } - - return nil, fmt.Errorf("fake exec got unknown call to %v %v", cmd, args) - } - mounter := mountmanager.NewFakeSafeMounterWithCustomExec(mount.NewFakeExec(execCallback)) - gceDriver := getTestGCEDriverWithCustomMounter(t, mounter) - - resp, err := gceDriver.ns.NodeExpandVolume(context.Background(), tc.req) - if err != nil { - serverError, ok := status.FromError(err) - if !ok { - t.Fatalf("Could not get error status code from err: %v", err) - } - if serverError.Code() != tc.expErrCode { - t.Fatalf("Expected error code: %v, got: %v. 
err : %v", tc.expErrCode, serverError.Code(), err) - } - continue - } - if tc.expErrCode != codes.OK { - t.Fatalf("Expected error: %v, got no error", tc.expErrCode) - } - - if resp.CapacityBytes != tc.expRespBytes { - t.Fatalf("Expected bytes: %v, got: %v", tc.expRespBytes, resp.CapacityBytes) - } - } -} - -func TestNodeUnstageVolume(t *testing.T) { - gceDriver := getTestGCEDriver(t) - ns := gceDriver.ns - testCases := []struct { - name string - req *csi.NodeUnstageVolumeRequest - expErrCode codes.Code - }{ - { - name: "Valid request", - req: &csi.NodeUnstageVolumeRequest{ - VolumeId: defaultVolumeID, - StagingTargetPath: defaultStagingPath, - }, - }, - { - name: "Invalid request (No VolumeId)", - req: &csi.NodeUnstageVolumeRequest{ - StagingTargetPath: defaultStagingPath, - }, - expErrCode: codes.InvalidArgument, - }, - { - name: "Invalid request (No StagingTargetPath)", - req: &csi.NodeUnstageVolumeRequest{ - VolumeId: defaultVolumeID, - }, - expErrCode: codes.InvalidArgument, - }, - } - for _, tc := range testCases { - t.Logf("Test case: %s", tc.name) - _, err := ns.NodeUnstageVolume(context.Background(), tc.req) - if err != nil { - serverError, ok := status.FromError(err) - if !ok { - t.Fatalf("Could not get error status code from err: %v", err) - } - if serverError.Code() != tc.expErrCode { - t.Fatalf("Expected error code: %v, got: %v. 
err : %v", tc.expErrCode, serverError.Code(), err) - } - continue - } - if tc.expErrCode != codes.OK { - t.Fatalf("Expected error: %v, got no error", tc.expErrCode) - } - } -} - -func TestNodeGetCapabilities(t *testing.T) { - gceDriver := getTestGCEDriver(t) - ns := gceDriver.ns - req := &csi.NodeGetCapabilitiesRequest{} - - _, err := ns.NodeGetCapabilities(context.Background(), req) - if err != nil { - t.Fatalf("Unexpedted error: %v", err) - } -} - -func TestConcurrentNodeOperations(t *testing.T) { - readyToExecute := make(chan chan struct{}, 1) - gceDriver := getTestBlockingGCEDriver(t, readyToExecute) - ns := gceDriver.ns - - vol1PublishTargetAReq := &csi.NodePublishVolumeRequest{ - VolumeId: defaultVolumeID + "1", - TargetPath: defaultTargetPath + "a", - StagingTargetPath: defaultStagingPath + "1", - Readonly: false, - VolumeCapability: stdVolCap, - } - vol1PublishTargetBReq := &csi.NodePublishVolumeRequest{ - VolumeId: defaultVolumeID + "1", - TargetPath: defaultTargetPath + "b", - StagingTargetPath: defaultStagingPath + "1", - Readonly: false, - VolumeCapability: stdVolCap, - } - vol2PublishTargetCReq := &csi.NodePublishVolumeRequest{ - VolumeId: defaultVolumeID + "2", - TargetPath: defaultTargetPath + "c", - StagingTargetPath: defaultStagingPath + "2", - Readonly: false, - VolumeCapability: stdVolCap, - } - - runRequest := func(req *csi.NodePublishVolumeRequest) chan error { - response := make(chan error) - go func() { - _, err := ns.NodePublishVolume(context.Background(), req) - response <- err - }() - return response - } - - // Start first valid request vol1PublishTargetA and block until it reaches the Mount - vol1PublishTargetAResp := runRequest(vol1PublishTargetAReq) - execVol1PublishTargetA := <-readyToExecute - - // Start vol1PublishTargetB and allow it to execute to completion. Then check for Aborted error. - // If a non Abort error is received or if the operation was started, then there is a problem - // with volume locking. 
- vol1PublishTargetBResp := runRequest(vol1PublishTargetBReq) - select { - case err := <-vol1PublishTargetBResp: - if err != nil { - serverError, ok := status.FromError(err) - if !ok { - t.Fatalf("Could not get error status code from err: %v", err) - } - if serverError.Code() != codes.Aborted { - t.Errorf("Expected error code: %v, got: %v. err : %v", codes.Aborted, serverError.Code(), err) - } - } else { - t.Errorf("Expected error: %v, got no error", codes.Aborted) - } - case <-readyToExecute: - t.Errorf("The operation for vol1PublishTargetB should have been aborted, but was started") - } - - // Start vol2PublishTargetC and allow it to execute to completion. Then check for success. - vol2PublishTargetCResp := runRequest(vol2PublishTargetCReq) - execVol2PublishTargetC := <-readyToExecute - execVol2PublishTargetC <- struct{}{} - if err := <-vol2PublishTargetCResp; err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // To clean up, allow the vol1PublishTargetA to complete - execVol1PublishTargetA <- struct{}{} - if err := <-vol1PublishTargetAResp; err != nil { - t.Errorf("Unexpected error: %v", err) - } -} diff --git a/pkg/mount-manager/fake-safe-mounter.go b/pkg/mount-manager/fake-safe-mounter.go index 661998baa..6d2ff61e8 100644 --- a/pkg/mount-manager/fake-safe-mounter.go +++ b/pkg/mount-manager/fake-safe-mounter.go @@ -17,7 +17,7 @@ package mountmanager import "k8s.io/kubernetes/pkg/util/mount" var ( - fakeMounter = &mount.FakeMounter{MountPoints: []mount.MountPoint{}, Log: []mount.FakeAction{}} + fakeMounter = &mount.FakeMounter{MountPoints: []mount.MountPoint{}, Log: []mount.FakeAction{}, Filesystem: map[string]mount.FileType{}} fakeExec = mount.NewFakeExec(execCallback) ) diff --git a/pkg/mount-manager/statter.go b/pkg/mount-manager/statter.go new file mode 100644 index 000000000..b4a4ad900 --- /dev/null +++ b/pkg/mount-manager/statter.go @@ -0,0 +1,81 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mountmanager + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +type Statter interface { + StatFS(path string) (int64, int64, int64, int64, int64, int64, error) + IsBlockDevice(string) (bool, error) +} + +var _ Statter = realStatter{} + +type realStatter struct { +} + +func NewStatter() realStatter { + return realStatter{} +} + +// IsBlock checks if the given path is a block device +func (realStatter) IsBlockDevice(fullPath string) (bool, error) { + var st unix.Stat_t + err := unix.Stat(fullPath, &st) + if err != nil { + return false, err + } + + return (st.Mode & unix.S_IFMT) == unix.S_IFBLK, nil +} + +func (realStatter) StatFS(path string) (available, capacity, used, inodesFree, inodes, inodesUsed int64, err error) { + statfs := &unix.Statfs_t{} + err = unix.Statfs(path, statfs) + if err != nil { + err = fmt.Errorf("failed to get fs info on path %s: %v", path, err) + return + } + + // Available is blocks available * fragment size + available = int64(statfs.Bavail) * int64(statfs.Bsize) + // Capacity is total block count * fragment size + capacity = int64(statfs.Blocks) * int64(statfs.Bsize) + // Usage is block being used * fragment size (aka block size). 
+ used = (int64(statfs.Blocks) - int64(statfs.Bfree)) * int64(statfs.Bsize) + inodes = int64(statfs.Files) + inodesFree = int64(statfs.Ffree) + inodesUsed = inodes - inodesFree + return +} + +type fakeStatter struct{} + +func NewFakeStatter() Statter { + return fakeStatter{} +} + +func (fakeStatter) StatFS(path string) (available, capacity, used, inodesFree, inodes, inodesUsed int64, err error) { + // Assume the file exists and give some dummy values back + return 1, 1, 1, 1, 1, 1, nil +} + +func (fakeStatter) IsBlockDevice(fullPath string) (bool, error) { + return false, nil +} diff --git a/test/e2e/tests/multi_zone_e2e_test.go b/test/e2e/tests/multi_zone_e2e_test.go index 5e166ccdb..0df823f45 100644 --- a/test/e2e/tests/multi_zone_e2e_test.go +++ b/test/e2e/tests/multi_zone_e2e_test.go @@ -139,14 +139,46 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() { testAttachWriteReadDetach(volID, volName, testContext.Instance, testContext.Client, readOnly) i = i + 1 } - }) - }) +type verifyArgs struct { + publishDir string +} + +type verifyFunc func(verifyArgs) error + func testAttachWriteReadDetach(volID string, volName string, instance *remote.InstanceInfo, client *remote.CsiClient, readOnly bool) error { - var err error + var testFileContents = "test" + writeFile := func(a verifyArgs) error { + if !readOnly { + // Write a file + testFile := filepath.Join(a.publishDir, "testfile") + err := testutils.WriteFile(instance, testFile, testFileContents) + if err != nil { + return fmt.Errorf("Failed to write file: %v", err) + } + } + return nil + } + verifyReadFile := func(a verifyArgs) error { + // Read File + secondTestFile := filepath.Join(a.publishDir, "testfile") + readContents, err := testutils.ReadFile(instance, secondTestFile) + if err != nil { + return fmt.Errorf("ReadFile failed with error: %v", err) + } + if strings.TrimSpace(string(readContents)) != testFileContents { + return fmt.Errorf("wanted test file content: %s, got content: %s", testFileContents,
readContents) + } + return nil + } + return testLifecycleWithVerify(volID, volName, instance, client, readOnly, false /* fs */, writeFile, verifyReadFile) +} + +func testLifecycleWithVerify(volID string, volName string, instance *remote.InstanceInfo, client *remote.CsiClient, readOnly, useBlock bool, firstMountVerify, secondMountVerify verifyFunc) error { + var err error klog.Infof("Starting testAttachWriteReadDetach with volume %v node %v with readonly %v\n", volID, instance.GetNodeID(), readOnly) // Attach Disk err = client.ControllerPublishVolume(volID, instance.GetNodeID()) @@ -165,7 +197,12 @@ func testAttachWriteReadDetach(volID string, volName string, instance *remote.In // Stage Disk stageDir := filepath.Join("/tmp/", volName, "stage") - err = client.NodeStageExt4Volume(volID, stageDir) + if useBlock { + err = client.NodeStageBlockVolume(volID, stageDir) + } else { + err = client.NodeStageExt4Volume(volID, stageDir) + } + if err != nil { return fmt.Errorf("NodeStageExt4Volume failed with error: %v", err) } @@ -185,33 +223,13 @@ func testAttachWriteReadDetach(volID string, volName string, instance *remote.In // Mount Disk publishDir := filepath.Join("/tmp/", volName, "mount") - err = client.NodePublishVolume(volID, stageDir, publishDir) - if err != nil { - return fmt.Errorf("NodePublishVolume failed with error: %v", err) - } - err = testutils.ForceChmod(instance, filepath.Join("/tmp/", volName), "777") - if err != nil { - return fmt.Errorf("Chmod failed with error: %v", err) - } - testFileContents := "test" - if !readOnly { - // Write a file - testFile := filepath.Join(publishDir, "testfile") - err = testutils.WriteFile(instance, testFile, testFileContents) - if err != nil { - return fmt.Errorf("Failed to write file: %v", err) - } - } - // Unmount Disk - err = client.NodeUnpublishVolume(volID, publishDir) - if err != nil { - return fmt.Errorf("NodeUnpublishVolume failed with error: %v", err) + if useBlock 
{ + err = client.NodePublishBlockVolume(volID, stageDir, publishDir) + } else { + err = client.NodePublishVolume(volID, stageDir, publishDir) } - // Mount disk somewhere else - secondPublishDir := filepath.Join("/tmp/", volName, "secondmount") - err = client.NodePublishVolume(volID, stageDir, secondPublishDir) if err != nil { return fmt.Errorf("NodePublishVolume failed with error: %v", err) } @@ -220,20 +238,47 @@ return fmt.Errorf("Chmod failed with error: %v", err) } - // Read File - secondTestFile := filepath.Join(secondPublishDir, "testfile") - readContents, err := testutils.ReadFile(instance, secondTestFile) + a := verifyArgs{ + publishDir: publishDir, + } + + err = firstMountVerify(a) if err != nil { - return fmt.Errorf("ReadFile failed with error: %v", err) + return fmt.Errorf("failed to verify after first mount to %s: %v", publishDir, err) } - Expect(strings.TrimSpace(string(readContents))).To(Equal(testFileContents)) // Unmount Disk - err = client.NodeUnpublishVolume(volID, secondPublishDir) + err = client.NodeUnpublishVolume(volID, publishDir) if err != nil { return fmt.Errorf("NodeUnpublishVolume failed with error: %v", err) } + if secondMountVerify != nil { + // Mount disk somewhere else + secondPublishDir := filepath.Join("/tmp/", volName, "secondmount") + err = client.NodePublishVolume(volID, stageDir, secondPublishDir) + if err != nil { + return fmt.Errorf("NodePublishVolume failed with error: %v", err) + } + err = testutils.ForceChmod(instance, filepath.Join("/tmp/", volName), "777") + if err != nil { + return fmt.Errorf("Chmod failed with error: %v", err) + } + + b := verifyArgs{ + publishDir: secondPublishDir, + } + if err = secondMountVerify(b); err != nil { + return fmt.Errorf("failed to verify after second mount to %s: %v", secondPublishDir, err) + } + + // Unmount Disk + err = client.NodeUnpublishVolume(volID, secondPublishDir) + if err != nil { + return fmt.Errorf("NodeUnpublishVolume failed with error: %v", err) + } + } + klog.Infof("Completed testAttachWriteReadDetach with 
volume %v node %v\n", volID, instance.GetNodeID()) return nil } diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go index 5b6a97043..5f18fe947 100644 --- a/test/e2e/tests/single_zone_e2e_test.go +++ b/test/e2e/tests/single_zone_e2e_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common" gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute" + remote "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/remote" csi "github.com/container-storage-interface/spec/lib/go/csi" . "github.com/onsi/ginkgo" @@ -43,6 +44,8 @@ const ( standardDiskType = "pd-standard" ssdDiskType = "pd-ssd" defaultVolumeLimit int64 = 127 + + defaultEpsilon = 500000000 // 500M ) var _ = Describe("GCE PD CSI Driver", func() { @@ -63,28 +66,11 @@ var _ = Describe("GCE PD CSI Driver", func() { instance := testContext.Instance // Create Disk - volName := testNamePrefix + string(uuid.NewUUID()) - volID, err := client.CreateVolume(volName, nil, defaultSizeGb, - &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ - { - Segments: map[string]string{common.TopologyKeyZone: z}, - }, - }, - }) - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // Validate Disk Created - cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb)) - Expect(cloudDisk.Name).To(Equal(volName)) + volName, volID := createAndValidateUniqueZonalDisk(client, p, z) defer func() { // Delete Disk - client.DeleteVolume(volID) + err := client.DeleteVolume(volID) Expect(err).To(BeNil(), "DeleteVolume failed") // Validate Disk Deleted @@ -93,7 +79,7 @@ var _ = Describe("GCE PD CSI Driver", func() { }() // Attach 
Disk - err = testAttachWriteReadDetach(volID, volName, instance, client, false /* readOnly */) + err := testAttachWriteReadDetach(volID, volName, instance, client, false /* readOnly */) Expect(err).To(BeNil(), "Failed to go through volume lifecycle") }) @@ -135,31 +121,13 @@ var _ = Describe("GCE PD CSI Driver", func() { client := testContext.Client instance := testContext.Instance - // Create Disk - volName := testNamePrefix + string(uuid.NewUUID()) - _, err := client.CreateVolume(volName, nil, defaultSizeGb, - &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ - { - Segments: map[string]string{common.TopologyKeyZone: z}, - }, - }, - }) - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // Validate Disk Created - cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb)) - Expect(cloudDisk.Name).To(Equal(volName)) + volName, _ := createAndValidateUniqueZonalDisk(client, p, z) underSpecifiedID := common.GenerateUnderspecifiedVolumeID(volName, true /* isZonal */) defer func() { // Delete Disk - client.DeleteVolume(underSpecifiedID) + err := client.DeleteVolume(underSpecifiedID) Expect(err).To(BeNil(), "DeleteVolume failed") // Validate Disk Deleted @@ -168,7 +136,7 @@ var _ = Describe("GCE PD CSI Driver", func() { }() // Attach Disk - err = testAttachWriteReadDetach(underSpecifiedID, volName, instance, client, false /* readOnly */) + err := testAttachWriteReadDetach(underSpecifiedID, volName, instance, client, false /* readOnly */) Expect(err).To(BeNil(), "Failed to go through volume lifecycle") }) @@ -260,22 +228,11 @@ var _ = Describe("GCE PD CSI Driver", func() { p, z, _ := testContext.Instance.GetIdentity() client := testContext.Client - // Create Disk - volName := testNamePrefix + 
string(uuid.NewUUID()) - volId, err := client.CreateVolume(volName, nil, defaultSizeGb, nil) - Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) - - // Validate Disk Created - cloudDisk, err := computeService.Disks.Get(p, z, volName).Do() - Expect(err).To(BeNil(), "Could not get disk from cloud directly") - Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) - Expect(cloudDisk.Status).To(Equal(readyState)) - Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb)) - Expect(cloudDisk.Name).To(Equal(volName)) + volName, volID := createAndValidateUniqueZonalDisk(client, p, z) // Create Snapshot snapshotName := testNamePrefix + string(uuid.NewUUID()) - snapshotID, err := client.CreateSnapshot(snapshotName, volId, nil) + snapshotID, err := client.CreateSnapshot(snapshotName, volID, nil) Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) // Validate Snapshot Created @@ -295,7 +252,7 @@ var _ = Describe("GCE PD CSI Driver", func() { defer func() { // Delete Disk - err := client.DeleteVolume(volId) + err := client.DeleteVolume(volID) Expect(err).To(BeNil(), "DeleteVolume failed") // Validate Disk Deleted @@ -480,7 +437,7 @@ var _ = Describe("GCE PD CSI Driver", func() { // Create Disk volName := testNamePrefix + string(uuid.NewUUID()) - volId, err := controllerClient.CreateVolume(volName, map[string]string{ + volID, err := controllerClient.CreateVolume(volName, map[string]string{ common.ParameterKeyReplicationType: "regional-pd", }, defaultRepdSizeGb, nil) Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) @@ -503,7 +460,7 @@ var _ = Describe("GCE PD CSI Driver", func() { // Create Snapshot snapshotName := testNamePrefix + string(uuid.NewUUID()) - snapshotID, err := controllerClient.CreateSnapshot(snapshotName, volId, nil) + snapshotID, err := controllerClient.CreateSnapshot(snapshotName, volID, nil) Expect(err).To(BeNil(), "CreateSnapshot failed with error: %v", err) // Validate Snapshot Created @@ -523,7 +480,7 @@ var 
_ = Describe("GCE PD CSI Driver", func() { defer func() { // Delete Disk - err := controllerClient.DeleteVolume(volId) + err := controllerClient.DeleteVolume(volID) Expect(err).To(BeNil(), "DeleteVolume failed") // Validate Disk Deleted @@ -539,4 +496,110 @@ var _ = Describe("GCE PD CSI Driver", func() { Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected snapshot to not be found") }() }) + + It("Should get correct VolumeStats for Block", func() { + testContext := getRandomTestContext() + + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + instance := testContext.Instance + + volName, volID := createAndValidateUniqueZonalDisk(client, p, z) + + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") + + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + + verifyVolumeStats := func(a verifyArgs) error { + available, capacity, used, inodesFree, inodes, inodesUsed, err := client.NodeGetVolumeStats(volID, a.publishDir) + if err != nil { + return fmt.Errorf("failed to get node volume stats: %v", err) + } + if available != 0 || capacity != common.GbToBytes(defaultSizeGb) || used != 0 || + inodesFree != 0 || inodes != 0 || inodesUsed != 0 { + return fmt.Errorf("got: available %v, capacity %v, used %v, inodesFree %v, inodes %v, inodesUsed %v -- expected: capacity = %v, available = 0, used = 0, inodesFree = 0, inodes = 0 , inodesUsed = 0", + available, capacity, used, inodesFree, inodes, inodesUsed, common.GbToBytes(defaultSizeGb)) + } + return nil + } + + // Attach Disk + err := testLifecycleWithVerify(volID, volName, instance, client, false /* readOnly */, true /* block */, verifyVolumeStats, nil) + Expect(err).To(BeNil(), "Failed to go through volume lifecycle") + }) + + It("Should get correct VolumeStats", func() { + testContext := 
getRandomTestContext() + + p, z, _ := testContext.Instance.GetIdentity() + client := testContext.Client + instance := testContext.Instance + + volName, volID := createAndValidateUniqueZonalDisk(client, p, z) + + defer func() { + // Delete Disk + err := client.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") + + // Validate Disk Deleted + _, err = computeService.Disks.Get(p, z, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + + verifyVolumeStats := func(a verifyArgs) error { + available, capacity, used, inodesFree, inodes, inodesUsed, err := client.NodeGetVolumeStats(volID, a.publishDir) + if err != nil { + return fmt.Errorf("failed to get node volume stats: %v", err) + } + if !equalWithinEpsilon(available, common.GbToBytes(defaultSizeGb), defaultEpsilon) || !equalWithinEpsilon(capacity, common.GbToBytes(defaultSizeGb), defaultEpsilon) || !equalWithinEpsilon(used, 0, defaultEpsilon) || + inodesFree == 0 || inodes == 0 || inodesUsed == 0 { + return fmt.Errorf("got: available %v, capacity %v, used %v, inodesFree %v, inodes %v, inodesUsed %v -- expected: available ~= %v, capacity ~= %v, used = 0, inodesFree != 0, inodes != 0 , inodesUsed != 0", + available, capacity, used, inodesFree, inodes, inodesUsed, common.GbToBytes(defaultSizeGb), common.GbToBytes(defaultSizeGb)) + } + return nil + } + + // Attach Disk + err := testLifecycleWithVerify(volID, volName, instance, client, false /* readOnly */, false /* fs */, verifyVolumeStats, nil) + Expect(err).To(BeNil(), "Failed to go through volume lifecycle") + }) + }) + +func equalWithinEpsilon(a, b, epsilon int64) bool { + if a > b { + return a-b < epsilon + } + return b-a < epsilon +} + +func createAndValidateUniqueZonalDisk(client *remote.CsiClient, project, zone string) (string, string) { + // Create Disk + volName := testNamePrefix + string(uuid.NewUUID()) + volID, err := client.CreateVolume(volName, nil, defaultSizeGb, + 
&csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: zone}, + }, + }, + }) + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // Validate Disk Created + cloudDisk, err := computeService.Disks.Get(project, zone, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb)) + Expect(cloudDisk.Name).To(Equal(volName)) + + return volName, volID +} diff --git a/test/remote/client-wrappers.go b/test/remote/client-wrappers.go index 39c29938e..a340ed91d 100644 --- a/test/remote/client-wrappers.go +++ b/test/remote/client-wrappers.go @@ -234,6 +234,36 @@ func (c *CsiClient) NodeGetInfo() (*csipb.NodeGetInfoResponse, error) { return resp, err } +func (c *CsiClient) NodeGetVolumeStats(volumeID, volumePath string) (available, capacity, used, inodesFree, inodes, inodesUsed int64, err error) { + resp, err := c.nodeClient.NodeGetVolumeStats(context.Background(), &csipb.NodeGetVolumeStatsRequest{ + VolumeId: volumeID, + VolumePath: volumePath, + }) + if err != nil { + return + } + for _, usage := range resp.Usage { + if usage == nil { + continue + } + unit := usage.GetUnit() + switch unit { + case csipb.VolumeUsage_BYTES: + available = usage.GetAvailable() + capacity = usage.GetTotal() + used = usage.GetUsed() + case csipb.VolumeUsage_INODES: + inodesFree = usage.GetAvailable() + inodes = usage.GetTotal() + inodesUsed = usage.GetUsed() + default: + err = fmt.Errorf("unknown key %s in usage", unit.String()) + return + } + } + return +} + func (c *CsiClient) CreateSnapshot(snapshotName, sourceVolumeId string, params map[string]string) (string, error) { csr := &csipb.CreateSnapshotRequest{ diff --git a/test/sanity/sanity_test.go b/test/sanity/sanity_test.go index 177af85ab..b8e143508 100644 --- 
a/test/sanity/sanity_test.go +++ b/test/sanity/sanity_test.go @@ -53,7 +53,7 @@ func TestSanity(t *testing.T) { deviceUtils := mountmanager.NewFakeDeviceUtils() //Initialize GCE Driver - err = gceDriver.SetupGCEDriver(cloudProvider, mounter, deviceUtils, metadataservice.NewFakeService(), driverName, vendorVersion) + err = gceDriver.SetupGCEDriver(cloudProvider, mounter, deviceUtils, metadataservice.NewFakeService(), mountmanager.NewFakeStatter(), driverName, vendorVersion) if err != nil { t.Fatalf("Failed to initialize GCE CSI Driver: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go index 22b46d95c..3a8db1179 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go @@ -94,6 +94,7 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options if err != nil { absTarget = target } + f.Filesystem[absTarget] = FileTypeDirectory f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: absTarget, Type: fstype, Opts: opts}) klog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget) f.Log = append(f.Log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype}) @@ -120,6 +121,7 @@ func (f *FakeMounter) Unmount(target string) error { newMountpoints = append(newMountpoints, MountPoint{Device: mp.Device, Path: mp.Path, Type: mp.Type}) } f.MountPoints = newMountpoints + delete(f.Filesystem, absTarget) f.Log = append(f.Log, FakeAction{Action: FakeActionUnmount, Target: absTarget}) delete(f.MountCheckErrors, target) return nil