diff --git a/Gopkg.lock b/Gopkg.lock index 1d808553a..d70e2f4f1 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -487,9 +487,12 @@ version = "v0.3.3" [[projects]] - digest = "1:8097e2ccae8b2f6fe627c94831a08e2e11ac4ddddf6994139076bac826b39464" + digest = "1:9e805f5365b94782315c8904fd99545b0c751b9afd70da64c4abb72574898341" name = "k8s.io/kubernetes" - packages = ["pkg/util/mount"] + packages = [ + "pkg/util/mount", + "pkg/util/resizefs", + ] pruneopts = "NUT" revision = "e8462b5b5dc2584fdcd18e6bcfe9f1e4d970a529" version = "v1.15.0" @@ -560,6 +563,7 @@ "k8s.io/client-go/util/flowcontrol", "k8s.io/klog", "k8s.io/kubernetes/pkg/util/mount", + "k8s.io/kubernetes/pkg/util/resizefs", "k8s.io/test-infra/boskos/client", "k8s.io/test-infra/boskos/common", ] diff --git a/README.md b/README.md index 384c0e629..b4d91878b 100644 --- a/README.md +++ b/README.md @@ -34,14 +34,14 @@ This plugin is compatible with CSI versions [v1.0.0](https://github.com/containe ### Kubernetes Compatibility -| GCE PD CSI Driver\Kubernetes Version | 1.10.5 - 1.11 | 1.12 | 1.13 | 1.14+ -|--------------------------------------|---------------|------|------|------| -| v0.1.x.alpha | yes | no | no | no | -| v0.2.x (alpha) | no | yes | no | no | -| v0.3.x (beta) | no | no | yes | yes | -| v0.4.x (beta) | no | no | yes | yes | -| v0.5.x (beta) | no | no | no | yes | -| dev | no | no | no | yes | +| GCE PD CSI Driver\Kubernetes Version | 1.10.5 - 1.11 | 1.12 | 1.13 | 1.14 | 1.15+| +|--------------------------------------|---------------|------|------|------|------| +| v0.1.x.alpha | yes | no | no | no | no | +| v0.2.x (alpha) | no | yes | no | no | no | +| v0.3.x (beta) | no | no | yes | yes | yes | +| v0.4.x (beta) | no | no | yes | yes | yes | +| v0.5.x (beta) | no | no | no | yes | yes | +| dev | no | no | no | no | yes | ### Known Issues @@ -62,14 +62,13 @@ This driver supports only one topology key: `topology.gke.io/zone` that represents availability by zone. 
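+
+For example, a StorageClass can pin provisioned volumes to a zone with an
+`allowedTopologies` stanza like the following (an illustrative sketch; the
+zone value is a placeholder):
+
+```
+allowedTopologies:
+- matchLabelExpressions:
+  - key: topology.gke.io/zone
+    values:
+    - us-central1-a
+```
+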
-### Kubernetes Beta Features
+### Features in Development
 
-* Topology: Requires K8s 1.14+ on Master and Nodes and PD driver v0.5.0+
-
-### Kubernetes Alpha Features
-
-* Snapshots: Requires K8s 1.13+ on Master and PD driver v0.3.0+ with the alpha
-  overlay
+| Feature         | Stage | Min Kubernetes Master Version | Min Kubernetes Nodes Version | Min Driver Version | Deployment Overlay |
+|-----------------|-------|-------------------------------|------------------------------|--------------------|--------------------|
+| Topology        | Beta  | 1.14                          | 1.14                         | v0.5.0             | Stable             |
+| Snapshots       | Alpha | 1.13                          | Any                          | v0.3.0             | Alpha              |
+| Resize (Expand) | Alpha | 1.14                          | 1.14                         | dev                | Alpha              |
 
 ### Future Features
diff --git a/deploy/kubernetes/overlays/alpha/controller_add_resizer.yaml b/deploy/kubernetes/overlays/alpha/controller_add_resizer.yaml
new file mode 100644
index 000000000..c336f01ae
--- /dev/null
+++ b/deploy/kubernetes/overlays/alpha/controller_add_resizer.yaml
@@ -0,0 +1,17 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+  name: csi-gce-pd-controller
+spec:
+  template:
+    spec:
+      containers:
+      - name: csi-resizer
+        imagePullPolicy: Always
+        image: quay.io/k8scsi/csi-resizer:canary
+        args:
+        - "--v=5"
+        - "--csi-address=/csi/csi.sock"
+        volumeMounts:
+        - name: socket-dir
+          mountPath: /csi
\ No newline at end of file
diff --git a/deploy/kubernetes/overlays/alpha/kustomization.yaml b/deploy/kubernetes/overlays/alpha/kustomization.yaml
index 0830b91e1..49c51ff4a 100644
--- a/deploy/kubernetes/overlays/alpha/kustomization.yaml
+++ b/deploy/kubernetes/overlays/alpha/kustomization.yaml
@@ -4,6 +4,7 @@ bases:
 - ../stable
 patches:
 - controller_add_snapshotter.yaml
+- controller_add_resizer.yaml
 patchesJson6902:
 - target:
     group: rbac.authorization.k8s.io
@@ -13,3 +14,4 @@
     path: rbac_add_snapshots_to_provisioner.yaml
 resources:
 - rbac_add_snapshotter.yaml
+- rbac_add_resizer.yaml
diff --git a/deploy/kubernetes/overlays/alpha/rbac_add_resizer.yaml b/deploy/kubernetes/overlays/alpha/rbac_add_resizer.yaml
new file mode 100644
index 000000000..93ae01a15
--- /dev/null
+++ b/deploy/kubernetes/overlays/alpha/rbac_add_resizer.yaml
@@ -0,0 +1,32 @@
+# Resizer must be able to work with PVCs, PVs, SCs.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: external-resizer-role
+rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims/status"]
+    verbs: ["update", "patch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-resizer-role
+subjects:
+  - kind: ServiceAccount
+    name: csi-controller-sa
+    namespace: default
+roleRef:
+  kind: ClusterRole
+  name: external-resizer-role
+  apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
diff --git a/docs/kubernetes/user-guides/resize.md b/docs/kubernetes/user-guides/resize.md
new file mode 100644
index 000000000..30dd068bb
--- /dev/null
+++ b/docs/kubernetes/user-guides/resize.md
@@ -0,0 +1,107 @@
+# Kubernetes Resize User Guide (Alpha)
+
+>**Attention:** Volume Resize is an alpha feature. Make sure it is enabled in the Kubernetes API server via the `--feature-gates=ExpandCSIVolumes=true` flag.
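+
+>**Note:** The alpha overlay also adds a `csi-resizer` sidecar container to the `csi-gce-pd-controller` StatefulSet. One way to confirm it is present after deployment (a quick sketch; assumes the driver runs in the `default` namespace, as in this repo's overlays):
+```
+$ kubectl get statefulset csi-gce-pd-controller -o jsonpath='{.spec.template.spec.containers[*].name}'
+```
+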
+>**Attention:** Volume Resize is only available in driver version v0.6.0+
+
+### Install Driver with alpha resize feature
+
+1. [One-time per project] Create a GCP service account for the CSI driver and set the required roles
+
+```
+$ PROJECT=your-project-here                       # GCP project
+$ GCE_PD_SA_NAME=my-gce-pd-csi-sa                 # Name of the service account to create
+$ GCE_PD_SA_DIR=/my/safe/credentials/directory    # Directory to save the service account key
+$ ./deploy/setup-project.sh
+```
+
+2. Deploy the driver to the Kubernetes cluster
+
+```
+$ GCE_PD_SA_DIR=/my/safe/credentials/directory    # Directory to get the service account key
+$ GCE_PD_DRIVER_VERSION=alpha                     # Driver version to deploy
+$ ./deploy/kubernetes/deploy-driver.sh
+```
+
+### Resize Example
+
+This example provisions and resizes a zonal PD; it works in both single-zone and regional clusters.
+
+1. Add the resize field to the example Zonal Storage Class
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: csi-gce-pd
+provisioner: pd.csi.storage.gke.io
+parameters:
+  type: pd-standard
+volumeBindingMode: WaitForFirstConsumer
+allowVolumeExpansion: true
+```
+
+2. Create the example Zonal Storage Class with resize enabled
+```
+$ kubectl apply -f ./examples/kubernetes/demo-zonal-sc.yaml
+```
+
+3. Create the example PVC and Pod
+```
+$ kubectl apply -f ./examples/kubernetes/demo-pod.yaml
+```
+
+4. Verify the PV is created and bound to the PVC
+```
+$ kubectl get pvc
+NAME     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+podpvc   Bound    pvc-e36abf50-84f3-11e8-8538-42010a800002   6Gi        RWO            csi-gce-pd     9s
+```
+
+5. Verify the pod is created and in the `RUNNING` state (it may take a few minutes)
+```
+$ kubectl get pods
+NAME         READY   STATUS    RESTARTS   AGE
+web-server   1/1     Running   0          1m
+```
+
+6. Check the current filesystem size on the running pod
+```
+$ kubectl exec web-server -- df -h /var/lib/www/html
+Filesystem      Size  Used Avail Use% Mounted on
+/dev/sdb        5.9G   24M  5.9G   1% /var/lib/www/html
+```
+
+7. Resize the volume by modifying the field `spec -> resources -> requests -> storage`
+```
+$ kubectl edit pvc podpvc
+apiVersion: v1
+kind: PersistentVolumeClaim
+...
+spec:
+  resources:
+    requests:
+      storage: 9Gi
+  ...
+...
+```
+
+8. Verify the disk was resized in the cloud
+```
+$ gcloud compute disks describe ${disk_name} --zone=${zone}
+description: Disk created by GCE-PD CSI Driver
+name: pvc-10ea155f-e5a4-4a82-a171-21481742c80c
+...
+sizeGb: '9'
+```
+
+9. Verify the filesystem was resized on the running pod
+```
+$ kubectl exec web-server -- df -h /var/lib/www/html
+Filesystem      Size  Used Avail Use% Mounted on
+/dev/sdb        8.8G   27M  8.8G   1% /var/lib/www/html
+```
\ No newline at end of file
diff --git a/pkg/gce-cloud-provider/compute/cloud-disk.go b/pkg/gce-cloud-provider/compute/cloud-disk.go
index 7d35670d9..2eb4ac4ef 100644
--- a/pkg/gce-cloud-provider/compute/cloud-disk.go
+++ b/pkg/gce-cloud-provider/compute/cloud-disk.go
@@ -124,6 +124,18 @@ func (d *CloudDisk) GetSizeGb() int64 {
 	}
 }
 
+// setSizeGb sets the size of the disk, and is used ONLY
+// for testing purposes.
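+// Disks of unknown type are left unchanged.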
+func (d *CloudDisk) setSizeGb(size int64) {
+	switch d.Type() {
+	case Zonal:
+		d.ZonalDisk.SizeGb = size
+	case Regional:
+		d.RegionalDisk.SizeGb = size
+	}
+}
+
 func (d *CloudDisk) GetZone() string {
 	switch d.Type() {
 	case Zonal:
diff --git a/pkg/gce-cloud-provider/compute/fake-gce.go b/pkg/gce-cloud-provider/compute/fake-gce.go
index 83ccfa3bd..80353c5c0 100644
--- a/pkg/gce-cloud-provider/compute/fake-gce.go
+++ b/pkg/gce-cloud-provider/compute/fake-gce.go
@@ -367,6 +367,17 @@ func (cloud *FakeCloudProvider) CreateSnapshot(ctx context.Context, volKey *meta
 	return snapshotToCreate, nil
 }
 
+func (cloud *FakeCloudProvider) ResizeDisk(ctx context.Context, volKey *meta.Key, requestBytes int64) (int64, error) {
+	disk, ok := cloud.disks[volKey.Name]
+	if !ok {
+		return -1, notFoundError()
+	}
+
+	disk.setSizeGb(common.BytesToGb(requestBytes))
+
+	return requestBytes, nil
+}
+
 // Snapshot Methods
 func (cloud *FakeCloudProvider) DeleteSnapshot(ctx context.Context, snapshotName string) error {
 	delete(cloud.snapshots, snapshotName)
diff --git a/pkg/gce-cloud-provider/compute/gce-compute.go b/pkg/gce-cloud-provider/compute/gce-compute.go
index 9e56dbd81..578b69681 100644
--- a/pkg/gce-cloud-provider/compute/gce-compute.go
+++ b/pkg/gce-cloud-provider/compute/gce-compute.go
@@ -50,6 +50,7 @@ type GCECompute interface {
 	GetDiskSourceURI(volKey *meta.Key) string
 	GetDiskTypeURI(volKey *meta.Key, diskType string) string
 	WaitForAttach(ctx context.Context, volKey *meta.Key, instanceZone, instanceName string) error
+	ResizeDisk(ctx context.Context, volKey *meta.Key, requestBytes int64) (int64, error)
 	// Regional Disk Methods
 	GetReplicaZoneURI(zone string) string
 	// Instance Methods
@@ -593,6 +594,65 @@ func (cloud *CloudProvider) CreateSnapshot(ctx context.Context, volKey *meta.Key
 	}
 }
 
+func (cloud *CloudProvider) ResizeDisk(ctx context.Context, volKey *meta.Key, requestBytes int64) (int64, error) {
+	cloudDisk, err := cloud.GetDisk(ctx, volKey)
+	if err != nil {
+		return -1, fmt.Errorf("failed to get disk: %v", err)
+	}
+
+	sizeGb := cloudDisk.GetSizeGb()
+	requestGb := common.BytesToGb(requestBytes)
+
+	// If the disk is already at or above the requested size, simply return success
+	if sizeGb >= requestGb {
+		return requestBytes, nil
+	}
+
+	switch volKey.Type() {
+	case meta.Zonal:
+		return cloud.resizeZonalDisk(ctx, volKey, requestGb)
+	case meta.Regional:
+		return cloud.resizeRegionalDisk(ctx, volKey, requestGb)
+	default:
+		return -1, fmt.Errorf("could not resize disk, key was neither zonal nor regional, instead got: %v", volKey.String())
+	}
+}
+
+func (cloud *CloudProvider) resizeZonalDisk(ctx context.Context, volKey *meta.Key, requestGb int64) (int64, error) {
+	resizeReq := &compute.DisksResizeRequest{
+		SizeGb: requestGb,
+	}
+	op, err := cloud.service.Disks.Resize(cloud.project, volKey.Zone, volKey.Name, resizeReq).Context(ctx).Do()
+	if err != nil {
+		return -1, fmt.Errorf("failed to resize zonal volume %v: %v", volKey.String(), err)
+	}
+
+	err = cloud.waitForZonalOp(ctx, op, volKey.Zone)
+	if err != nil {
+		return -1, fmt.Errorf("failed waiting for op for zonal resize for %s: %v", volKey.String(), err)
+	}
+
+	return requestGb, nil
+}
+
+func (cloud *CloudProvider) resizeRegionalDisk(ctx context.Context, volKey *meta.Key, requestGb int64) (int64, error) {
+	resizeReq := &computebeta.RegionDisksResizeRequest{
+		SizeGb: requestGb,
+	}
+
+	op, err := cloud.betaService.RegionDisks.Resize(cloud.project, volKey.Region, volKey.Name, resizeReq).Context(ctx).Do()
+	if err != nil {
+		return 
-1, fmt.Errorf("failed to resize regional volume %v: %v", volKey.String(), err) + } + + err = cloud.waitForRegionalOp(ctx, op, volKey.Region) + if err != nil { + return -1, fmt.Errorf("failed waiting for op for regional resize for %s: %v", volKey.String(), err) + } + + return requestGb, nil +} + func (cloud *CloudProvider) createZonalDiskSnapshot(ctx context.Context, volKey *meta.Key, snapshotName string) (*compute.Snapshot, error) { snapshotToCreate := &compute.Snapshot{ Name: snapshotName, diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go index f3228ac2c..c7bd04e6b 100644 --- a/pkg/gce-pd-csi-driver/controller.go +++ b/pkg/gce-pd-csi-driver/controller.go @@ -659,7 +659,30 @@ func (gceCS *GCEControllerServer) ListSnapshots(ctx context.Context, req *csi.Li } func (gceCS *GCEControllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "ControllerExpandVolume is not yet implemented") + volumeID := req.GetVolumeId() + if len(volumeID) == 0 { + return nil, status.Error(codes.InvalidArgument, "ControllerExpandVolume volume ID must be provided") + } + capacityRange := req.GetCapacityRange() + reqBytes, err := getRequestCapacity(capacityRange) + if err != nil { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerExpandVolume capacity range is invalid: %v", err)) + } + + volKey, err := common.VolumeIDToKey(volumeID) + if err != nil { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerExpandVolume volume ID is invalid: %v", err)) + } + + resizedGb, err := gceCS.CloudProvider.ResizeDisk(ctx, volKey, reqBytes) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("ControllerExpandVolume failed to resize disk: %v", err)) + } + + return &csi.ControllerExpandVolumeResponse{ + CapacityBytes: common.GbToBytes(resizedGb), + NodeExpansionRequired: true, + }, nil } func (gceCS *GCEControllerServer) getSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { diff --git a/pkg/gce-pd-csi-driver/gce-pd-driver.go b/pkg/gce-pd-csi-driver/gce-pd-driver.go index c2c9196a9..4dcb227b8 100644 --- a/pkg/gce-pd-csi-driver/gce-pd-driver.go +++ b/pkg/gce-pd-csi-driver/gce-pd-driver.go @@ -66,10 +66,12 @@ func (gceDriver *GCEDriver) SetupGCEDriver(cloudProvider gce.GCECompute, mounter csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, csi.ControllerServiceCapability_RPC_PUBLISH_READONLY, + csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, } gceDriver.AddControllerServiceCapabilities(csc) ns := []csi.NodeServiceCapability_RPC_Type{ csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, + csi.NodeServiceCapability_RPC_EXPAND_VOLUME, } gceDriver.AddNodeServiceCapabilities(ns) diff --git a/pkg/gce-pd-csi-driver/identity.go b/pkg/gce-pd-csi-driver/identity.go index 71b9df7e4..e879c3750 100644 --- a/pkg/gce-pd-csi-driver/identity.go +++ b/pkg/gce-pd-csi-driver/identity.go @@ -16,6 +16,7 @@ package gceGCEDriver import ( "context" + csi "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -58,6 +59,20 @@ func (gceIdentity *GCEIdentityServer) GetPluginCapabilities(ctx context.Context, }, }, }, + { + Type: &csi.PluginCapability_VolumeExpansion_{ + VolumeExpansion: &csi.PluginCapability_VolumeExpansion{ + Type: 
csi.PluginCapability_VolumeExpansion_ONLINE,
+				},
+			},
+		},
+		{
+			Type: &csi.PluginCapability_VolumeExpansion_{
+				VolumeExpansion: &csi.PluginCapability_VolumeExpansion{
+					Type: csi.PluginCapability_VolumeExpansion_OFFLINE,
+				},
+			},
+		},
 		},
 	}, nil
 }
diff --git a/pkg/gce-pd-csi-driver/identity_test.go b/pkg/gce-pd-csi-driver/identity_test.go
index 7b2a4f493..51bee490d 100644
--- a/pkg/gce-pd-csi-driver/identity_test.go
+++ b/pkg/gce-pd-csi-driver/identity_test.go
@@ -18,6 +18,7 @@ import (
 	"testing"
 
 	"context"
+
 	csi "github.com/container-storage-interface/spec/lib/go/csi"
 	metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
 )
@@ -58,11 +59,21 @@ func TestGetPluginCapabilities(t *testing.T) {
 	}
 
 	for _, capability := range resp.GetCapabilities() {
-		switch capability.GetService().GetType() {
-		case csi.PluginCapability_Service_CONTROLLER_SERVICE:
-		case csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS:
-		default:
-			t.Fatalf("Unknown capability: %v", capability.GetService().GetType())
+		if capability.GetVolumeExpansion() != nil {
+			switch capability.GetVolumeExpansion().GetType() {
+			case csi.PluginCapability_VolumeExpansion_ONLINE:
+			case csi.PluginCapability_VolumeExpansion_OFFLINE:
+			default:
+				t.Fatalf("Unknown capability: %v", capability.GetVolumeExpansion().GetType())
+			}
+		}
+		if capability.GetService() != nil {
+			switch capability.GetService().GetType() {
+			case csi.PluginCapability_Service_CONTROLLER_SERVICE:
+			case csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS:
+			default:
+				t.Fatalf("Unknown capability: %v", capability.GetService().GetType())
+			}
 		}
 	}
 }
diff --git a/pkg/gce-pd-csi-driver/node.go b/pkg/gce-pd-csi-driver/node.go
index 8e4769663..ecf1e7a4b 100644
--- a/pkg/gce-pd-csi-driver/node.go
+++ b/pkg/gce-pd-csi-driver/node.go
@@ -17,14 +17,17 @@ package gceGCEDriver
 import (
 	"fmt"
 	"os"
+	"strconv"
 	"strings"
 
 	"context"
+
 	csi "github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/util/mount"
+	"k8s.io/kubernetes/pkg/util/resizefs"
 	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
 	metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
 	mountmanager "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/mount-manager"
@@ -364,7 +367,72 @@ func (ns *GCENodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGe
 }
 
 func (ns *GCENodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
-	return nil, status.Error(codes.Unimplemented, fmt.Sprintf("NodeExpandVolume is not yet implemented"))
+	volumeID := req.GetVolumeId()
+	if len(volumeID) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "NodeExpandVolume volume ID must be provided")
+	}
+	capacityRange := req.GetCapacityRange()
+	reqBytes, err := getRequestCapacity(capacityRange)
+	if err != nil {
+		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("NodeExpandVolume capacity range is invalid: %v", err))
+	}
+
+	volumePath := req.GetVolumePath()
+	if len(volumePath) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "NodeExpandVolume volume path must be provided")
+	}
+
+	volKey, err := common.VolumeIDToKey(volumeID)
+	if err != nil {
+		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("NodeExpandVolume volume ID is invalid: %v", err))
+	}
+
+	devicePath, err := ns.getDevicePath(volumeID, "")
+	if err != nil {
+		return nil, status.Error(codes.Internal, fmt.Sprintf("NodeExpandVolume error when getting device path for %s: %v", volumeID, err))
+	}
+
+	// TODO(#328): Use requested size in resize if provided
+	resizer := resizefs.NewResizeFs(ns.Mounter)
+	_, err = resizer.Resize(devicePath, volumePath)
+	if err != nil {
+		return nil, status.Error(codes.Internal, fmt.Sprintf("NodeExpandVolume error when resizing volume %s: %v", volKey.String(), err))
+	}
+
+	// Check the block size
+	gotBlockSizeBytes, err := ns.getBlockSizeBytes(devicePath)
+	if err != nil {
+		return nil, status.Error(codes.Internal, fmt.Sprintf("NodeExpandVolume error when getting size of block volume at path %s: %v", devicePath, err))
+	}
+	if gotBlockSizeBytes < reqBytes {
+		// It's possible that the volume size was rounded up somewhere; getting more size than requested is a success :)
+		return nil, status.Errorf(codes.Internal, "NodeExpandVolume resize requested for %v but after resize volume was size %v", reqBytes, gotBlockSizeBytes)
+	}
+
+	// TODO(dyzz) Some sort of formatted volume could also check the fs size.
+	// The issue is that we don't know how to account for filesystem overhead: it
+	// could be proportional to the fs size, it differs between xfs and ext4, and
+	// we don't know the proportions.
+
+	/*
+		format, err := ns.Mounter.GetDiskFormat(devicePath)
+		if err != nil {
+			return nil, status.Error(codes.Internal, fmt.Sprintf("NodeExpandVolume error checking format for device %s: %v", devicePath, err))
+		}
+		gotSizeBytes, err = ns.getFSSizeBytes(devicePath)
+
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "NodeExpandVolume resize could not get fs size of %s: %v", volumePath, err)
+		}
+		if gotSizeBytes != reqBytes {
+			return nil, status.Errorf(codes.Internal, "NodeExpandVolume resize requested for size %v but after resize volume was size %v", reqBytes, gotSizeBytes)
+		}
+	*/
+
+	// Respond
+	return &csi.NodeExpandVolumeResponse{
+		CapacityBytes: reqBytes,
+	}, nil
 }
 
 func (ns *GCENodeServer) GetVolumeLimits() (int64, error) {
@@ -401,3 +469,16 @@ func (ns *GCENodeServer) getDevicePath(volumeID string, partition string) (strin
 	}
 	return devicePath, nil
 }
+
+func (ns *GCENodeServer) getBlockSizeBytes(devicePath string) (int64, error) {
+	output, err := ns.Mounter.Exec.Run("blockdev", "--getsize64", devicePath)
+	if err != nil {
+		return -1, fmt.Errorf("error when getting size of block volume at path %s: output: %s, err: %v", devicePath, string(output), err)
+	}
+	strOut := strings.TrimSpace(string(output))
+	gotSizeBytes, err := strconv.ParseInt(strOut, 10, 64)
+	if err != nil {
+		return -1, fmt.Errorf("failed to parse size %s into an int64", strOut)
+	}
+	return gotSizeBytes, nil
+}
diff --git a/pkg/gce-pd-csi-driver/node_test.go b/pkg/gce-pd-csi-driver/node_test.go
index cb486ee9b..f90724e3c 100644
--- a/pkg/gce-pd-csi-driver/node_test.go
+++ b/pkg/gce-pd-csi-driver/node_test.go
@@ -16,11 +16,16 @@ package gceGCEDriver
 
 import (
 	"context"
+	"errors"
+	"fmt"
+	"strconv"
 	"testing"
 
 	csi "github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
+	"k8s.io/kubernetes/pkg/util/mount"
+	utilexec "k8s.io/utils/exec"
 	metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
 	mountmanager "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/mount-manager"
 )
@@ -30,8 +35,16 @@
 const defaultStagingPath = "/staging"
 
 func getTestGCEDriver(t *testing.T) *GCEDriver {
+	return getCustomTestGCEDriver(t, mountmanager.NewFakeSafeMounter(), mountmanager.NewFakeDeviceUtils(), metadataservice.NewFakeService())
+}
+
+func getTestGCEDriverWithCustomMounter(t *testing.T, mounter *mount.SafeFormatAndMount) *GCEDriver {
+	return getCustomTestGCEDriver(t, mounter, mountmanager.NewFakeDeviceUtils(), metadataservice.NewFakeService())
+}
+
+func getCustomTestGCEDriver(t *testing.T, mounter *mount.SafeFormatAndMount, deviceUtils mountmanager.DeviceUtils, metaService metadataservice.MetadataService) *GCEDriver {
 	gceDriver := GetGCEDriver()
-	err := gceDriver.SetupGCEDriver(nil, mountmanager.NewFakeSafeMounter(), mountmanager.NewFakeDeviceUtils(), metadataservice.NewFakeService(), driver, "test-vendor")
+	err := gceDriver.SetupGCEDriver(nil, mounter, deviceUtils, metaService, driver, "test-vendor")
 	if err != nil {
 		t.Fatalf("Failed to setup GCE Driver: %v", err)
 	}
@@ -328,6 +341,113 @@ func TestNodeStageVolume(t *testing.T) {
 	}
 }
 
+func TestNodeExpandVolume(t *testing.T) {
+	// TODO: Add tests/functionality for non-existent volumes
+	var resizedBytes int64 = 2000000000
+	volumeID := "project/test001/zones/c1/disks/testDisk"
+	testCases := []struct {
+		name         string
+		req          *csi.NodeExpandVolumeRequest
+		fsOrBlock    string
+		expRespBytes int64
+		expErrCode   codes.Code
+	}{
+		{
+			name: "ext4 fs expand",
+			req: &csi.NodeExpandVolumeRequest{
+				VolumeId:   volumeID,
+				VolumePath: "some-path",
+				CapacityRange: &csi.CapacityRange{
+					RequiredBytes: resizedBytes,
+				},
+			},
+			fsOrBlock:    "ext4",
+			expRespBytes: resizedBytes,
+		},
+		{
+			name: "block device expand",
+			req: &csi.NodeExpandVolumeRequest{
+				VolumeId:   volumeID,
+				VolumePath: "some-path",
+				CapacityRange: &csi.CapacityRange{
+					RequiredBytes: resizedBytes,
+				},
+			},
+			fsOrBlock:    "block",
+			expRespBytes: resizedBytes,
+		},
+		{
+			name: "xfs fs expand",
+			req: &csi.NodeExpandVolumeRequest{
+				VolumeId:   volumeID,
+				VolumePath: "some-path",
+				CapacityRange: &csi.CapacityRange{
+					RequiredBytes: resizedBytes,
+				},
+			},
+			fsOrBlock:    "xfs",
+			expRespBytes: resizedBytes,
+		},
+	}
+	for _, tc := range testCases {
+		t.Logf("Test case: %s", tc.name)
+
+		execCallback := func(cmd string, args ...string) ([]byte, error) {
+			switch cmd {
+			case "blkid":
+				if tc.fsOrBlock == "block" {
+					// blkid returns exit code 2 when run on an unformatted device
+					return nil, utilexec.CodeExitError{
+						Err:  errors.New("this is an exit error"),
+						Code: 2,
+					}
+				}
+				return []byte(fmt.Sprintf("DEVNAME=/dev/sdb\nTYPE=%s", tc.fsOrBlock)), nil
+			case "resize2fs":
+				if tc.fsOrBlock == "ext4" {
+					return nil, nil
+				}
+				t.Fatalf("resize2fs called on device with %s", tc.fsOrBlock)
+			case "xfs_growfs":
+				if tc.fsOrBlock != "xfs" {
+					t.Fatalf("xfs_growfs called on device with %s", tc.fsOrBlock)
+				}
+				for _, arg := range args {
+					if arg == tc.req.VolumePath {
+						return nil, nil
+					}
+				}
+				t.Errorf("xfs_growfs args did not contain volume path %s", tc.req.VolumePath)
+			case "blockdev":
+				return []byte(strconv.Itoa(int(resizedBytes))), nil
+			}
+
+			return nil, fmt.Errorf("fake exec got unknown call to %v %v", cmd, args)
+		}
+		mounter := mountmanager.NewFakeSafeMounterWithCustomExec(mount.NewFakeExec(execCallback))
+		gceDriver := getTestGCEDriverWithCustomMounter(t, mounter)
+
+		resp, err := gceDriver.ns.NodeExpandVolume(context.Background(), tc.req)
+		if err != nil {
+			serverError, ok := status.FromError(err)
+			if !ok {
+				t.Fatalf("Could not get error status code from err: %v", err)
+			}
+			if 
serverError.Code() != tc.expErrCode {
+				t.Fatalf("Expected error code: %v, got: %v. err : %v", tc.expErrCode, serverError.Code(), err)
+			}
+			continue
+		}
+		if tc.expErrCode != codes.OK {
+			t.Fatalf("Expected error: %v, got no error", tc.expErrCode)
+		}
+
+		if resp.CapacityBytes != tc.expRespBytes {
+			t.Fatalf("Expected bytes: %v, got: %v", tc.expRespBytes, resp.CapacityBytes)
+		}
+	}
+}
+
 func TestNodeUnstageVolume(t *testing.T) {
 	gceDriver := getTestGCEDriver(t)
 	ns := gceDriver.ns
diff --git a/pkg/mount-manager/fake-safe-mounter.go b/pkg/mount-manager/fake-safe-mounter.go
index af8216d3e..661998baa 100644
--- a/pkg/mount-manager/fake-safe-mounter.go
+++ b/pkg/mount-manager/fake-safe-mounter.go
@@ -44,9 +44,18 @@ func execCallback(cmd string, args ...string) ([]byte, error) {
 }
 
 func NewFakeSafeMounter() *mount.SafeFormatAndMount {
+	return NewCustomFakeSafeMounter(fakeMounter, fakeExec)
+}
+
+func NewFakeSafeMounterWithCustomExec(exec mount.Exec) *mount.SafeFormatAndMount {
+	fakeMounter := &mount.FakeMounter{MountPoints: []mount.MountPoint{}, Log: []mount.FakeAction{}}
+	return NewCustomFakeSafeMounter(fakeMounter, exec)
+}
+
+func NewCustomFakeSafeMounter(mounter mount.Interface, exec mount.Exec) *mount.SafeFormatAndMount {
 	return &mount.SafeFormatAndMount{
-		Interface: fakeMounter,
-		Exec:      fakeExec,
+		Interface: mounter,
+		Exec:      exec,
 	}
 }
 
@@ -73,6 +82,5 @@ func NewFakeSafeBlockingMounter(readyToExecute chan chan struct{}) *mount.SafeFo
 	}
 	return &mount.SafeFormatAndMount{
 		Interface: fakeBlockingMounter,
-		Exec:      fakeExec,
 	}
 }
diff --git a/test/e2e/tests/multi_zone_e2e_test.go b/test/e2e/tests/multi_zone_e2e_test.go
index 0e8774cc9..65a51c105 100644
--- a/test/e2e/tests/multi_zone_e2e_test.go
+++ b/test/e2e/tests/multi_zone_e2e_test.go
@@ -165,9 +165,9 @@ func testAttachWriteReadDetach(volID string, volName string, instance *remote.In
 
 	// Stage Disk
 	stageDir := filepath.Join("/tmp/", volName, "stage")
-	err = client.NodeStageVolume(volID, stageDir)
+	err = client.NodeStageExt4Volume(volID, stageDir)
 	if err != nil {
-		return fmt.Errorf("NodeStageVolume failed with error: %v", err)
+		return fmt.Errorf("NodeStageExt4Volume failed with error: %v", err)
 	}
 
 	defer func() {
diff --git a/test/e2e/tests/resize_e2e_test.go b/test/e2e/tests/resize_e2e_test.go
new file mode 100644
index 000000000..00ab12ed8
--- /dev/null
+++ b/test/e2e/tests/resize_e2e_test.go
@@ -0,0 +1,359 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tests
+
+import (
+	"path/filepath"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	csi "github.com/container-storage-interface/spec/lib/go/csi"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/klog"
+	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
+	gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute"
+	testutils "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/e2e/utils"
+)
+
+var _ = Describe("GCE PD CSI Driver", func() {
+	It("Should online resize controller and node for an ext4 volume", func() {
+		testContext := getRandomTestContext()
+
+		p, z, _ := testContext.Instance.GetIdentity()
+		client := testContext.Client
+		instance := testContext.Instance
+
+		// Create Disk
+		volName := testNamePrefix + string(uuid.NewUUID())
+		volID, err := client.CreateVolume(volName, nil, defaultSizeGb,
+			&csi.TopologyRequirement{
+				Requisite: []*csi.Topology{
+					{
+						Segments: map[string]string{common.TopologyKeyZone: z},
+					},
+				},
+			})
+		Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err)
+
+		// Validate Disk Created
+		cloudDisk, err := computeService.Disks.Get(p, z, volName).Do()
+		Expect(err).To(BeNil(), "Could not get disk from cloud directly")
+		Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType))
+		Expect(cloudDisk.Status).To(Equal(readyState))
+		Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb))
+		Expect(cloudDisk.Name).To(Equal(volName))
+
+		defer func() {
+			// Delete Disk
+			err := client.DeleteVolume(volID)
+			Expect(err).To(BeNil(), "DeleteVolume failed")
+
+			// Validate Disk Deleted
+			_, err = computeService.Disks.Get(p, z, volName).Do()
+			Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
+		}()
+
+		// Attach Disk
+		err = client.ControllerPublishVolume(volID, instance.GetNodeID())
+		Expect(err).To(BeNil(), "Controller publish volume failed")
+
+		defer func() {
+			// Detach Disk
+			err = client.ControllerUnpublishVolume(volID, instance.GetNodeID())
+			if err != nil {
+				klog.Errorf("Failed to detach disk: %v", err)
+			}
+		}()
+
+		// Stage Disk
+		stageDir := filepath.Join("/tmp/", volName, "stage")
+		err = client.NodeStageExt4Volume(volID, stageDir)
+		Expect(err).To(BeNil(), "Node Stage volume failed")
+
+		defer func() {
+			// Unstage Disk
+			err = client.NodeUnstageVolume(volID, stageDir)
+			if err != nil {
+				klog.Errorf("Failed to unstage volume: %v", err)
+			}
+			fp := filepath.Join("/tmp/", volName)
+			err = testutils.RmAll(instance, fp)
+			if err != nil {
+				klog.Errorf("Failed to rm file path %s: %v", fp, err)
+			}
+		}()
+
+		// Mount Disk
+		publishDir := filepath.Join("/tmp/", volName, "mount")
+		err = client.NodePublishVolume(volID, stageDir, publishDir)
+		Expect(err).To(BeNil(), "Node publish volume failed")
+
+		defer func() {
+			// Unmount Disk
+			err = client.NodeUnpublishVolume(volID, publishDir)
+			if err != nil {
+				klog.Errorf("NodeUnpublishVolume failed with error: %v", err)
+			}
+		}()
+
+		// Verify pre-resize fs size
+		sizeGb, err := testutils.GetFSSizeInGb(instance, publishDir)
+		Expect(err).To(BeNil(), "Failed to get FSSize in GB")
+		Expect(sizeGb).To(Equal(defaultSizeGb))
+
+		// Resize controller
+		var newSizeGb int64 = 10
+		err = client.ControllerExpandVolume(volID, newSizeGb)
+
+		Expect(err).To(BeNil(), "Controller expand volume failed")
+
+		// Verify cloud size
+		cloudDisk, err = computeService.Disks.Get(p, z, volName).Do()
+		Expect(err).To(BeNil(), "Get cloud disk failed")
+		Expect(cloudDisk.SizeGb).To(Equal(newSizeGb))
+
+		// Resize node
+		_, err = client.NodeExpandVolume(volID, publishDir, newSizeGb)
+		Expect(err).To(BeNil(), "Node expand volume failed")
+
+		// Verify disk size
+		sizeGb, err = testutils.GetFSSizeInGb(instance, publishDir)
+		Expect(err).To(BeNil(), "Failed to get FSSize in GB")
+		Expect(sizeGb).To(Equal(newSizeGb))
+
+	})
+
+	It("Should offline resize controller and node for an ext4 volume", func() {
+		testContext := getRandomTestContext()
+
+		p, z, _ := testContext.Instance.GetIdentity()
+		client := testContext.Client
+		instance := testContext.Instance
+
+		// Create Disk
+		volName := testNamePrefix + string(uuid.NewUUID())
+		volID, err := client.CreateVolume(volName, nil, defaultSizeGb,
+			&csi.TopologyRequirement{
+				Requisite: []*csi.Topology{
+					{
+						Segments: map[string]string{common.TopologyKeyZone: z},
+					},
+				},
+			})
+		Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err)
+
+		// Validate Disk Created & size
+		cloudDisk, err := computeService.Disks.Get(p, z, volName).Do()
+		Expect(err).To(BeNil(), "Could not get disk from cloud directly")
+		Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType))
+		Expect(cloudDisk.Status).To(Equal(readyState))
+		Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb))
+		Expect(cloudDisk.Name).To(Equal(volName))
+
+		defer func() {
+			// Delete Disk
+			err := client.DeleteVolume(volID)
+			Expect(err).To(BeNil(), "DeleteVolume failed")
+
+			// Validate Disk Deleted
+			_, err = computeService.Disks.Get(p, z, volName).Do()
+			Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
+		}()
+
+		// Volume should be attached/formatted/mounted/unmounted/detached
+		err = testAttachWriteReadDetach(volID, volName, instance, client, false /* readOnly */)
+		Expect(err).To(BeNil(), "Failed to go through volume lifecycle")
+
+		// Resize controller
+		var newSizeGb int64 = 10
+		err = client.ControllerExpandVolume(volID, newSizeGb)
+
+		Expect(err).To(BeNil(), "Controller expand volume failed")
+
+		// Verify cloud size
+		cloudDisk, err = computeService.Disks.Get(p, z, volName).Do()
+		Expect(err).To(BeNil(), "Get cloud disk failed")
+		Expect(cloudDisk.SizeGb).To(Equal(newSizeGb))
+
+		// Attach and mount again
+		err = client.ControllerPublishVolume(volID, instance.GetNodeID())
+		Expect(err).To(BeNil(), "Controller publish volume failed")
+
+		defer func() {
+			// Detach Disk
+			err = client.ControllerUnpublishVolume(volID, instance.GetNodeID())
+			if err != nil {
+				klog.Errorf("Failed to detach disk: %v", err)
+			}
+
+		}()
+
+		// Stage Disk
+		stageDir := filepath.Join("/tmp/", volName, "stage")
+		err = client.NodeStageExt4Volume(volID, stageDir)
+		Expect(err).To(BeNil(), "Node Stage volume failed")
+
+		defer func() {
+			// Unstage Disk
+			err = client.NodeUnstageVolume(volID, stageDir)
+			if err != nil {
+				klog.Errorf("Failed to unstage volume: %v", err)
+			}
+			fp := filepath.Join("/tmp/", volName)
+			err = testutils.RmAll(instance, fp)
+			if err != nil {
+				klog.Errorf("Failed to rm file path %s: %v", fp, err)
+			}
+		}()
+
+		// Mount Disk
+		publishDir := filepath.Join("/tmp/", volName, "mount")
+		err = client.NodePublishVolume(volID, stageDir, publishDir)
+		Expect(err).To(BeNil(), "Node publish volume failed")
+
+		defer func() {
+			// Unmount Disk
+			err = client.NodeUnpublishVolume(volID, publishDir)
+			if err != nil {
+				klog.Errorf("NodeUnpublishVolume failed with error: %v", err)
+			}
+		}()
+
+		// Verify pre-resize fs size
+		sizeGb, err := testutils.GetFSSizeInGb(instance, publishDir)
+		Expect(err).To(BeNil(), "Failed to get FSSize in GB")
+		Expect(sizeGb).To(Equal(defaultSizeGb))
+
+		// Resize node
+		_, err = client.NodeExpandVolume(volID, publishDir, newSizeGb)
+		Expect(err).To(BeNil(), "Node expand volume failed")
+
+		// Verify disk size
+		sizeGb, err = testutils.GetFSSizeInGb(instance, publishDir)
+		Expect(err).To(BeNil(), "Failed to get FSSize in GB")
+		Expect(sizeGb).To(Equal(newSizeGb))
+
+	})
+
+	It("Should resize controller and node for a block volume", func() {
+		testContext := getRandomTestContext()
+
+		p, z, _ := testContext.Instance.GetIdentity()
+		client := testContext.Client
+		instance := testContext.Instance
+
+		// Create Disk
+		volName := testNamePrefix + string(uuid.NewUUID())
+		volID, err := client.CreateVolume(volName, nil, defaultSizeGb,
+			&csi.TopologyRequirement{
+				Requisite: []*csi.Topology{
+					{
+						Segments: map[string]string{common.TopologyKeyZone: z},
+					},
+				},
+			})
+		Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err)
+
+		// Validate Disk Created
+		cloudDisk, err := computeService.Disks.Get(p, z, volName).Do()
+		Expect(err).To(BeNil(), "Could not get disk from cloud directly")
+		Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType))
+		Expect(cloudDisk.Status).To(Equal(readyState))
+		Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb))
+		Expect(cloudDisk.Name).To(Equal(volName))
+
+		defer func() {
+			// Delete Disk
+			err := client.DeleteVolume(volID)
+			Expect(err).To(BeNil(), "DeleteVolume failed")
+
+			// Validate Disk Deleted
+			_, err = computeService.Disks.Get(p, z, volName).Do()
+			Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
+		}()
+
+		// Attach Disk
+		err = client.ControllerPublishVolume(volID, instance.GetNodeID())
+		Expect(err).To(BeNil(), "Controller publish volume failed")
+
+		defer func() {
+			// Detach Disk
+			err = client.ControllerUnpublishVolume(volID, instance.GetNodeID())
+			if err != nil {
+				klog.Errorf("Failed to detach disk: %v", err)
+			}
+
+		}()
+
+		// Stage Disk
+		stageDir := filepath.Join("/tmp/", volName, "stage")
+		err = client.NodeStageBlockVolume(volID, stageDir)
+		Expect(err).To(BeNil(), "Node Stage volume failed")
+
+		defer func() {
+			// Unstage Disk
+			err = client.NodeUnstageVolume(volID, stageDir)
+			if err != nil {
+				klog.Errorf("Failed to unstage volume: %v", err)
+			}
+			fp := filepath.Join("/tmp/", volName)
+			err = testutils.RmAll(instance, fp)
+			if err != nil {
+				klog.Errorf("Failed to rm file path %s: %v", fp, err)
+			}
+		}()
+
+		// Mount Disk
+		publishDir := filepath.Join("/tmp/", volName, "mount")
+		err = client.NodePublishBlockVolume(volID, stageDir, publishDir)
+		Expect(err).To(BeNil(), "Node publish volume failed")
+
+		defer func() {
+			// Unmount Disk
+			err = client.NodeUnpublishVolume(volID, publishDir)
+			if err != nil {
+				klog.Errorf("NodeUnpublishVolume failed with error: %v", err)
+			}
+		}()
+
+		// Verify pre-resize block device size
+		sizeGb, err := testutils.GetBlockSizeInGb(instance, publishDir)
+		Expect(err).To(BeNil(), "Failed to get block device size in GB")
+		Expect(sizeGb).To(Equal(defaultSizeGb), "Old size should be equal")
+
+		// Resize controller
+		var newSizeGb int64 = 10
+		err = client.ControllerExpandVolume(volID, newSizeGb)
+
+		Expect(err).To(BeNil(), "Controller expand volume failed")
+
+		// Verify cloud size
+		cloudDisk, err = computeService.Disks.Get(p, z, volName).Do()
+		Expect(err).To(BeNil(), "Get cloud disk failed")
+		Expect(cloudDisk.SizeGb).To(Equal(newSizeGb))
+
+		// Resize node
+		resp, err := client.NodeExpandVolume(volID, publishDir, newSizeGb)
+		Expect(err).To(BeNil(), "Node expand volume failed")
+		Expect(resp.CapacityBytes).To(Equal(common.GbToBytes(newSizeGb)), "Node expand should be a no-op for block volumes")
+
+		// Verify disk size
+		sizeGb, err = testutils.GetBlockSizeInGb(instance, publishDir)
+		Expect(err).To(BeNil(), "Failed to get block device size in GB")
+		Expect(sizeGb).To(Equal(newSizeGb), "New size should be equal")
+
+	})
+})
diff --git a/test/e2e/utils/utils.go b/test/e2e/utils/utils.go
index e48e80114..d212ee451 100644
--- a/test/e2e/utils/utils.go
+++ b/test/e2e/utils/utils.go
@@ -20,6 +20,8 @@ import (
 	"math/rand"
 	"os"
 	"path"
+	"strconv"
+	"strings"
 	"time"
 
 	"golang.org/x/oauth2/google"
@@ -27,6 +29,7 @@ import (
 	"k8s.io/klog"
 	boskosclient "k8s.io/test-infra/boskos/client"
 	"k8s.io/test-infra/boskos/common"
+	utilcommon "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
 	remote "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/test/remote"
 )
@@ -167,6 +170,31 @@ func ReadFile(instance *remote.InstanceInfo, filePath string) (string, error) {
 	return output, nil
 }
 
+func GetFSSizeInGb(instance *remote.InstanceInfo, mountPath string) (int64, error) {
+	output, err := instance.SSHNoSudo("df", "--output=size", "-BG", mountPath, "|", "awk", "'NR==2'")
+	if err != nil {
+		return -1, fmt.Errorf("failed to get size of path %s. Output: %v, error: %v", mountPath, output, err)
+	}
+	output = strings.TrimSuffix(strings.TrimSpace(output), "G")
+	n, err := strconv.ParseInt(output, 10, 64)
+	if err != nil {
+		return -1, fmt.Errorf("failed to parse size %s into an int64", output)
+	}
+	return n, nil
+}
+
+func GetBlockSizeInGb(instance *remote.InstanceInfo, devicePath string) (int64, error) {
+	output, err := instance.SSH("blockdev", "--getsize64", devicePath)
+	if err != nil {
+		return -1, fmt.Errorf("failed to get size of path %s. 
diff --git a/test/remote/client-wrappers.go b/test/remote/client-wrappers.go
index c901d0e54..9168d3f30 100644
--- a/test/remote/client-wrappers.go
+++ b/test/remote/client-wrappers.go
@@ -36,6 +36,14 @@ var (
 			Mode: csipb.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
 		},
 	}
+	blockVolCap = &csipb.VolumeCapability{
+		AccessType: &csipb.VolumeCapability_Block{
+			Block: &csipb.VolumeCapability_BlockVolume{},
+		},
+		AccessMode: &csipb.VolumeCapability_AccessMode{
+			Mode: csipb.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
+		},
+	}
 	stdVolCaps = []*csipb.VolumeCapability{
 		stdVolCap,
 	}
@@ -139,11 +147,19 @@ func (c *CsiClient) ControllerUnpublishVolume(volId, nodeId string) error {
 	return err
 }
 
-func (c *CsiClient) NodeStageVolume(volId, stageDir string) error {
+func (c *CsiClient) NodeStageExt4Volume(volId, stageDir string) error {
+	return c.NodeStageVolume(volId, stageDir, stdVolCap)
+}
+
+func (c *CsiClient) NodeStageBlockVolume(volId, stageDir string) error {
+	return c.NodeStageVolume(volId, stageDir, blockVolCap)
+}
+
+func (c *CsiClient) NodeStageVolume(volId, stageDir string, volumeCap *csipb.VolumeCapability) error {
 	nodeStageReq := &csipb.NodeStageVolumeRequest{
 		VolumeId:          volId,
 		StagingTargetPath: stageDir,
-		VolumeCapability:  stdVolCap,
+		VolumeCapability:  volumeCap,
 	}
 	_, err := c.nodeClient.NodeStageVolume(context.Background(), nodeStageReq)
 	return err
@@ -179,6 +195,40 @@ func (c *CsiClient) NodePublishVolume(volumeId, stageDir, publishDir string) err
 	return err
 }
 
+func (c *CsiClient) NodePublishBlockVolume(volumeId, stageDir, publishDir string) error {
+	nodePublishReq := &csipb.NodePublishVolumeRequest{
+		VolumeId:          volumeId,
+		StagingTargetPath: stageDir,
+		TargetPath:        publishDir,
+		VolumeCapability:  blockVolCap,
+		Readonly:          false,
+	}
+	_, err := c.nodeClient.NodePublishVolume(context.Background(), nodePublishReq)
+	return err
+}
+
+func (c *CsiClient) ControllerExpandVolume(volumeId string, sizeGb int64) error {
+	controllerExpandReq := &csipb.ControllerExpandVolumeRequest{
+		VolumeId: volumeId,
+		CapacityRange: &csipb.CapacityRange{
+			RequiredBytes: common.GbToBytes(sizeGb),
+		},
+	}
+	_, err := c.ctrlClient.ControllerExpandVolume(context.Background(), controllerExpandReq)
+	return err
+}
+
+func (c *CsiClient) NodeExpandVolume(volumeId, volumePath string, sizeGb int64) (*csipb.NodeExpandVolumeResponse, error) {
+	nodeExpandReq := &csipb.NodeExpandVolumeRequest{
+		VolumeId:   volumeId,
+		VolumePath: volumePath,
+		CapacityRange: &csipb.CapacityRange{
+			RequiredBytes: common.GbToBytes(sizeGb),
+		},
+	}
+	return c.nodeClient.NodeExpandVolume(context.Background(), nodeExpandReq)
+}
+
 func (c *CsiClient) NodeGetInfo() (*csipb.NodeGetInfoResponse, error) {
 	resp, err := c.nodeClient.NodeGetInfo(context.Background(), &csipb.NodeGetInfoRequest{})
 	return resp, err
diff --git a/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go
new file mode 100644
index 000000000..4eabdb1dd
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go
@@ -0,0 +1,86 @@
+// +build linux
+
+/*
+Copyright 2017 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resizefs + +import ( + "fmt" + + "k8s.io/klog" + "k8s.io/kubernetes/pkg/util/mount" +) + +// ResizeFs Provides support for resizing file systems +type ResizeFs struct { + mounter *mount.SafeFormatAndMount +} + +// NewResizeFs returns new instance of resizer +func NewResizeFs(mounter *mount.SafeFormatAndMount) *ResizeFs { + return &ResizeFs{mounter: mounter} +} + +// Resize perform resize of file system +func (resizefs *ResizeFs) Resize(devicePath string, deviceMountPath string) (bool, error) { + format, err := resizefs.mounter.GetDiskFormat(devicePath) + + if err != nil { + formatErr := fmt.Errorf("ResizeFS.Resize - error checking format for device %s: %v", devicePath, err) + return false, formatErr + } + + // If disk has no format, there is no need to resize the disk because mkfs.* + // by default will use whole disk anyways. + if format == "" { + return false, nil + } + + klog.V(3).Infof("ResizeFS.Resize - Expanding mounted volume %s", devicePath) + switch format { + case "ext3", "ext4": + return resizefs.extResize(devicePath) + case "xfs": + return resizefs.xfsResize(deviceMountPath) + } + return false, fmt.Errorf("ResizeFS.Resize - resize of format %s is not supported for device %s mounted at %s", format, devicePath, deviceMountPath) +} + +func (resizefs *ResizeFs) extResize(devicePath string) (bool, error) { + output, err := resizefs.mounter.Exec.Run("resize2fs", devicePath) + if err == nil { + klog.V(2).Infof("Device %s resized successfully", devicePath) + return true, nil + } + + resizeError := fmt.Errorf("resize of device %s failed: %v. resize2fs output: %s", devicePath, err, string(output)) + return false, resizeError + +} + +func (resizefs *ResizeFs) xfsResize(deviceMountPath string) (bool, error) { + args := []string{"-d", deviceMountPath} + output, err := resizefs.mounter.Exec.Run("xfs_growfs", args...) + + if err == nil { + klog.V(2).Infof("Device %s resized successfully", deviceMountPath) + return true, nil + } + + resizeError := fmt.Errorf("resize of device %s failed: %v. xfs_growfs output: %s", deviceMountPath, err, string(output)) + return false, resizeError +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go new file mode 100644 index 000000000..dd4dd017e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go @@ -0,0 +1,40 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resizefs + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/util/mount" +) + +// ResizeFs Provides support for resizing file systems +type ResizeFs struct { + mounter *mount.SafeFormatAndMount +} + +// NewResizeFs returns new instance of resizer +func NewResizeFs(mounter *mount.SafeFormatAndMount) *ResizeFs { + return &ResizeFs{mounter: mounter} +} + +// Resize perform resize of file system +func (resizefs *ResizeFs) Resize(devicePath string, deviceMountPath string) (bool, error) { + return false, fmt.Errorf("Resize is not supported for this build") +}
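For completeness, this is roughly how the vendored `resizefs` package is intended to be consumed on the node side. A minimal sketch, not the driver's actual `NodeExpandVolume` implementation: the paths are placeholders, and it assumes the `mount.New` and `mount.NewOsExec` constructors from the vendored v1.15 `pkg/util/mount` package.

```go
// +build linux

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/util/resizefs"
)

// expandFilesystem sketches how a node plugin can drive the vendored
// resizefs package: Resize detects the filesystem on devicePath and runs
// resize2fs (ext3/ext4) or xfs_growfs (xfs) so the filesystem fills the
// newly grown disk. An unformatted disk returns (false, nil) untouched.
func expandFilesystem(devicePath, deviceMountPath string) error {
	mounter := &mount.SafeFormatAndMount{
		Interface: mount.New(""),
		Exec:      mount.NewOsExec(),
	}
	resized, err := resizefs.NewResizeFs(mounter).Resize(devicePath, deviceMountPath)
	if err != nil {
		return fmt.Errorf("resize of %s failed: %v", devicePath, err)
	}
	if resized {
		fmt.Printf("filesystem mounted at %s now fills %s\n", deviceMountPath, devicePath)
	}
	return nil
}

func main() {
	// Placeholder paths; in the driver these would come from the staged
	// device and the volume_path field of the NodeExpandVolume request.
	if err := expandFilesystem("/dev/sdb", "/tmp/example-disk/mount"); err != nil {
		fmt.Println(err)
	}
}
```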