
Validate that existing mount point is compatible with volume and capabilities when it already exists for Stage and Publish #479

Closed
wants to merge 1 commit into from
92 changes: 75 additions & 17 deletions pkg/gce-pd-csi-driver/node.go
@@ -15,8 +15,10 @@ limitations under the License.
package gceGCEDriver

import (
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"

@@ -59,6 +61,8 @@ var _ csi.NodeServer = &GCENodeServer{}
const (
volumeLimitSmall int64 = 15
volumeLimitBig int64 = 127

defaultFSType string = "ext4"
)

func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
@@ -95,13 +99,20 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub
return nil, status.Error(codes.Internal, fmt.Sprintf("cannot validate mount point: %s %v", targetPath, err))
}
if !notMnt {
// TODO(#95): check if mount is compatible. Return OK if it is, or appropriate error.
/*
1) Target Path MUST be the vol referenced by vol ID
2) TODO(#253): Check volume capability matches for ALREADY_EXISTS
3) Readonly MUST match
*/
// Validate that the existing volume is compatible
partition := ""
if part, ok := req.GetVolumeContext()[common.VolumeAttributePartition]; ok {
partition = part
}
devicePath, err := ns.getDevicePath(volumeID, partition)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("Error when getting device path: %v", err))
}

if err := ns.volumeCompatible(targetPath, devicePath, volumeCapability); err != nil {
return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Mount point %s already exists but found to be incompatible: %v", targetPath, err))
}

klog.V(4).Infof("NodePublishVolume succeeded on volume %v to %s, mount already exists.", volumeID, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
@@ -118,8 +129,7 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub
if mnt.FsType != "" {
fstype = mnt.FsType
} else {
// Default fstype is ext4
fstype = "ext4"
fstype = defaultFSType
}

klog.V(4).Infof("NodePublishVolume with filesystem %s", fstype)
@@ -286,22 +296,18 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage
}

if !notMnt {
// TODO(#95): Check who is mounted here. No error if its us
/*
1) Target Path MUST be the vol referenced by vol ID
2) VolumeCapability MUST match
3) Readonly MUST match

*/
// Validate that the existing volume is compatible
if err := ns.volumeCompatible(stagingTargetPath, devicePath, volumeCapability); err != nil {
return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Mount point %s already exists but found to be incompatible: %v", stagingTargetPath, err))
}

klog.V(4).Infof("NodeStageVolume succeded on %v to %s, mount already exists.", volumeID, stagingTargetPath)
return &csi.NodeStageVolumeResponse{}, nil

}

// Part 3: Mount device to stagingTargetPath
// Default fstype is ext4
fstype := "ext4"
fstype := defaultFSType
options := []string{}
if mnt := volumeCapability.GetMount(); mnt != nil {
if mnt.FsType != "" {
@@ -327,6 +333,58 @@
return &csi.NodeStageVolumeResponse{}, nil
}

func (ns *GCENodeServer) volumeCompatible(mountedPath, devicePath string, volumeCapability *csi.VolumeCapability) error {
if blk := volumeCapability.GetBlock(); blk != nil {
// If the volume capability request is type "Block" we don't care what
// format the disk is in, because the user could have re-formatted it to a
// different type
// TODO: Need to check whether the loopback device file is the same as the
// device path given; the following mount-path check doesn't work for
// block devices
return nil
}

// Part 1: Check that volume mounted at mountedPath is the same as one at
// devicePath
devicePathDev, err := filepath.EvalSymlinks(devicePath)
if err != nil {
return fmt.Errorf("failed to find backing disk for devicePath %s: %v", devicePath, err)
}
// df -P returns rows containing the backing disk and the location of the
// mount. awk processes the result by selecting the line whose mount
// location matches the one we're looking for and printing the backing
// disk. The resulting output should be a path to a device such as
// "/dev/sda".
mountedPathDevBytes, err := ns.Mounter.Exec.Command("sh", "-c", fmt.Sprintf("df -P | awk '$6==\"%s\"{print $1}'", mountedPath)).CombinedOutput()
Contributor:
I'm not sure if df works for bind mounts. Mounter I believe has GetMountRefs() which I think handles bind mounts
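
A minimal sketch of the alternative the reviewer suggests, assuming ns.Mounter embeds the Kubernetes mount.Interface (as SafeFormatAndMount does) and therefore exposes GetMountRefs; the wiring here is illustrative and not part of this PR:

// Hypothetical alternative to the df/awk pipeline: GetMountRefs returns the
// other mount points backed by the same device as mountedPath, which also
// covers bind mounts.
refs, err := ns.Mounter.GetMountRefs(mountedPath)
if err != nil {
return fmt.Errorf("failed to get mount refs for %s: %v", mountedPath, err)
}
// A compatibility check could then compare these refs (or resolve them to
// devicePathDev) against the staging path instead of parsing df output.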

if err != nil || len(mountedPathDevBytes) == 0 {
return fmt.Errorf("failed to find backing disk for mountedPath %s: %s: %v", mountedPath, string(mountedPathDevBytes), err)
}
mountedPathDev := strings.TrimSpace(string(mountedPathDevBytes))
if devicePathDev != mountedPathDev {
return fmt.Errorf("devices at paths %s and %s were not the same, got %s and %s respectively", devicePath, mountedPath, devicePathDev, mountedPathDev)
}

// Part 2: Check volumeCapability format compatibility
format, err := ns.Mounter.GetDiskFormat(devicePath)
if err != nil {
return fmt.Errorf("failed to get the format of disk %s: %v", devicePath, err)
}

mnt := volumeCapability.GetMount()
if mnt == nil {
return errors.New("block and mount capabilities are nil, invalid volume capability")
}

wantFmt := mnt.FsType
if wantFmt == "" {
wantFmt = defaultFSType
}
if wantFmt != format {
return fmt.Errorf("device at %s has format %s but volume capability requires %s", devicePath, format, wantFmt)
}
return nil
}

func (ns *GCENodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
// Validate arguments
volumeID := req.GetVolumeId()
143 changes: 143 additions & 0 deletions test/e2e/tests/single_zone_e2e_test.go
@@ -37,6 +37,8 @@ import (
"google.golang.org/api/iterator"
kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1"
fieldmask "google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

const (
@@ -159,6 +161,147 @@ var _ = Describe("GCE PD CSI Driver", func() {
}()
})

It("Should fail validation if the same disk with different capabilities is staged/published to the same path", func() {
testContext := getRandomTestContext()

p, z, _ := testContext.Instance.GetIdentity()
client := testContext.Client
instance := testContext.Instance

// Setup: Create Disk A
volNameA, volIDA := createAndValidateUniqueZonalDisk(client, p, z)
defer func() {
// Delete Disk
err := client.DeleteVolume(volIDA)
Expect(err).To(BeNil(), "DeleteVolume failed")

// Validate Disk Deleted
_, err = computeService.Disks.Get(p, z, volNameA).Do()
Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
}()

// Setup: Attach Disk A
err := client.ControllerPublishVolume(volIDA, instance.GetNodeID())
Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volIDA, instance.GetNodeID())
defer func() {
// Detach Disk
err = client.ControllerUnpublishVolume(volIDA, instance.GetNodeID())
if err != nil {
klog.Errorf("Failed to detach disk: %v", err)
}
}()

// Setup: Create Disk B
volNameB, volIDB := createAndValidateUniqueZonalDisk(client, p, z)
defer func() {
// Delete Disk
err := client.DeleteVolume(volIDB)
Expect(err).To(BeNil(), "DeleteVolume failed")

// Validate Disk Deleted
_, err = computeService.Disks.Get(p, z, volNameB).Do()
Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
}()

// Setup: Attach Disk B
err = client.ControllerPublishVolume(volIDB, instance.GetNodeID())
Expect(err).To(BeNil(), "ControllerPublishVolume failed with error for disk %v on node %v: %v", volIDB, instance.GetNodeID())
defer func() {
// Detach Disk
err = client.ControllerUnpublishVolume(volIDB, instance.GetNodeID())
if err != nil {
klog.Errorf("Failed to detach disk: %v", err)
}
}()

// Setup: Stage Disk A
stageDirA := filepath.Join("/tmp/", volNameA, "stage")
err = client.NodeStageExt4Volume(volIDA, stageDirA)
Expect(err).To(BeNil(), "failed to stage volume")

// Assert: Stage to same location with different fstype should fail
err = client.NodeStageVolume(volIDA, stageDirA, &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{
FsType: "xfs",
},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
},
})
e, ok := status.FromError(err)
Expect(ok).To(BeTrue(), "Could not get error type from err", err)
Expect(e.Code()).To(Equal(codes.AlreadyExists), "Volume staged with different fs type should result in already exists error")

// Assert: Stage to same location with same fstype should work
err = client.NodeStageVolume(volIDA, stageDirA, &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{
FsType: "ext4",
},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
},
})
Expect(err).To(BeNil(), "Staged volume to same location with same fs type should work")

// Assert: Staging the volume as block to a location that already has an fs type should always work
err = client.NodeStageBlockVolume(volIDA, stageDirA)
Expect(err).To(BeNil(), "Staged volume of block type should always work")

defer func() {
// Unstage Disk
err = client.NodeUnstageVolume(volIDA, stageDirA)
if err != nil {
klog.Errorf("Failed to unstage volume: %v", err)
}
fp := filepath.Join("/tmp/", volNameA)
err = testutils.RmAll(instance, fp)
if err != nil {
klog.Errorf("Failed to rm file path %s: %v", fp, err)
}
}()

// Assert: Staging Disk B to Disk A's staging path should fail even though both are ext4
Contributor:

Can we run each scenario as a separate test case?

Contributor Author:

There's a lot of boilerplate setup for the later assertions, since asserting things for publish depends on doing an attach and stage first. I actually think this is a lot cleaner, especially since I've documented it quite thoroughly. In the passing case we're happy; in the failing case I'm not really concerned about knock-on errors, since a failure is a failure and you can debug based on the first error seen.

err = client.NodeStageExt4Volume(volIDB, stageDirA)
e, ok = status.FromError(err)
Expect(ok).To(BeTrue(), "Could not get error type from err", err)
Expect(e.Code()).To(Equal(codes.AlreadyExists), "Volume B staged with same fs type to Volume A staging path should result in already exists error")

// Setup: Stage Disk B with EXT3
stageDirB := filepath.Join("/tmp/", volNameB, "stage")
err = client.NodeStageVolume(volIDB, stageDirB, &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{
FsType: "ext3",
},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
},
})
Expect(err).To(BeNil(), "failed to stage volume")

// Setup: Publish A to publishDirA
publishDirA := filepath.Join("/tmp/", volNameA, "mount")
err = client.NodePublishVolume(volIDA, stageDirA, publishDirA)
Expect(err).To(BeNil(), "failed to publish volume")
defer func() {
err = client.NodeUnpublishVolume(volIDA, publishDirA)
Expect(err).To(BeNil(), "failed to unpublish volume")
}()
// Assert: Publishing A as block to publishDirA, which already has an fs, should work
err = client.NodePublishBlockVolume(volIDA, stageDirA, publishDirA)
Expect(err).To(BeNil(), "publishing a block volume to the existing location with an fstype should work")

// Assert: Publish B to publishDirA should fail because the disks are different
err = client.NodePublishBlockVolume(volIDB, stageDirB, publishDirA)
e, ok = status.FromError(err)
Expect(ok).To(BeTrue(), "Could not get error type from err: %v", err)
Expect(e.Code()).To(Equal(codes.AlreadyExists), "Publishing Volume B to Volume A's publish path should result in already exists error")
})

It("Should create disks in correct zones when topology is specified", func() {
Expect(testContexts).ToNot(BeEmpty())
testContext := getRandomTestContext()