
Removed fixed TODOs and tagged issue numbers in remaining ones #98

Merged 1 commit on Aug 17, 2018
4 changes: 2 additions & 2 deletions deploy/kubernetes/dev/node.yaml
@@ -1,4 +1,4 @@
-#TODO: Force DaemonSet to not run on master.
+#TODO(#40): Force DaemonSet to not run on master.
kind: DaemonSet
apiVersion: apps/v1
metadata:
Expand Down Expand Up @@ -52,7 +52,7 @@ spec:
- name: device-dir
mountPath: /host/dev
volumes:
-# TODO(dependency): this will work when kublet registrar functionality exists
+# TODO(#92): this will work when kublet registrar functionality exists
#- name: registrar-socket-dir
# hostPath:
# path: /var/lib/kubelet/device-plugins/
2 changes: 0 additions & 2 deletions pkg/gce-cloud-provider/compute/gce-compute.go
@@ -380,7 +380,6 @@ func (cloud *CloudProvider) getRegionalDiskTypeURI(region, diskType string) stri
func (cloud *CloudProvider) waitForZonalOp(ctx context.Context, op *compute.Operation, zone string) error {
svc := cloud.service
project := cloud.project
-// TODO: Double check that these timeouts are reasonable
return wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) {
pollOp, err := svc.ZoneOperations.Get(project, zone, op.Name).Context(ctx).Do()
if err != nil {
@@ -393,7 +392,6 @@ func (cloud *CloudProvider) waitForZonalOp(ctx context.Context, op *compute.Oper
}

func (cloud *CloudProvider) waitForRegionalOp(ctx context.Context, op *computebeta.Operation, region string) error {
-// TODO: Double check that these timeouts are reasonable
return wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) {
pollOp, err := cloud.betaService.RegionOperations.Get(cloud.project, region, op.Name).Context(ctx).Do()
if err != nil {
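The deleted timeout TODOs leave the 3-second poll interval and 5-minute budget as the settled values. A minimal sketch of the zonal-operation polling pattern these functions implement, assuming the apimachinery wait package and the GCE compute API used above (the standalone function name is illustrative):

```go
package main

import (
	"context"
	"time"

	compute "google.golang.org/api/compute/v1"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForZonalOpSketch polls a zonal GCE operation every 3 seconds until it
// reports DONE, giving up after 5 minutes, mirroring the wait.Poll call above.
func waitForZonalOpSketch(ctx context.Context, svc *compute.Service, project, zone string, op *compute.Operation) error {
	return wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) {
		pollOp, err := svc.ZoneOperations.Get(project, zone, op.Name).Context(ctx).Do()
		if err != nil {
			// Surface API errors immediately; wait.Poll stops on a non-nil error.
			return false, err
		}
		return pollOp != nil && pollOp.Status == "DONE", nil
	})
}
```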
1 change: 0 additions & 1 deletion pkg/gce-cloud-provider/compute/gce.go
@@ -94,7 +94,6 @@ func createBetaCloudService(vendorVersion string) (*beta.Service, error) {
}

func createCloudService(vendorVersion string) (*compute.Service, error) {
-// TODO: support alternate methods of authentication
svc, err := createCloudServiceWithDefaultServiceAccount(vendorVersion)
return svc, err
}
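With the alternate-authentication TODO dropped, the driver continues to rely on the default service account. A rough sketch of what building a compute service from Application Default Credentials typically looks like; the helper name is illustrative and the actual createCloudServiceWithDefaultServiceAccount body is not part of this diff:

```go
package main

import (
	"context"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

// newComputeServiceSketch builds a GCE compute service from Application
// Default Credentials, the "default service account" path referenced above.
func newComputeServiceSketch(ctx context.Context) (*compute.Service, error) {
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		return nil, err
	}
	return compute.New(client)
}
```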
22 changes: 7 additions & 15 deletions pkg/gce-pd-csi-driver/controller.go
@@ -34,10 +34,6 @@ import (
metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
)

-// TODO: Add noisy glog.V(5).Infof() EVERYWHERE
-// TODO: Improve errors to only expose codes at top level
-// TODO: Improve error prefix to explicitly state what function it is in.
-
type GCEControllerServer struct {
Driver *GCEDriver
CloudProvider gce.GCECompute
@@ -82,9 +78,10 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume Request Capacity is invalid: %v", err))
}

-// TODO: Validate volume capabilities
+// TODO(#94): Validate volume capabilities

+// TODO(#93): Support fs type parameter
+
-// TODO: Support fs type. Can vendor in api-machinery stuff for sets etc.
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
diskType := "pd-standard"
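A minimal sketch of the case-insensitive parameter handling described in the comment above, assuming parameter keys such as "type" and "replication-type" (the key names and defaults here are illustrative, not taken from this diff):

```go
package main

import "strings"

// applyParametersSketch lowercases each CreateVolume parameter key and applies
// it, leaving validation of the values themselves to the cloud provider.
func applyParametersSketch(params map[string]string) (diskType, replicationType string) {
	diskType = "pd-standard" // default when no "type" parameter is supplied
	replicationType = "none" // illustrative default
	for k, v := range params {
		switch strings.ToLower(k) {
		case "type":
			diskType = v
		case "replication-type":
			replicationType = v
		}
	}
	return diskType, replicationType
}
```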
@@ -180,8 +177,6 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre
}

func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
-// TODO: Only allow deletion of volumes that were created by the driver
-// Assuming ID is of form {zone}/{id}
glog.V(4).Infof("DeleteVolume called with request %v", *req)

// Validate arguments
@@ -228,10 +223,9 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r
return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volumeID, err))
}

-// TODO: Check volume capability matches
+// TODO(#94): Check volume capability matches

pubVolResp := &csi.ControllerPublishVolumeResponse{
-// TODO: Info gets sent to NodePublishVolume. Send something if necessary.
PublishInfo: nil,
}

@@ -336,7 +330,7 @@ func (gceCS *GCEControllerServer) ControllerUnpublishVolume(ctx context.Context,
}

func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
-// TODO: Factor out the volume capability functionality and use as validation in all other functions as well
+// TODO(#94): Factor out the volume capability functionality and use as validation in all other functions as well
glog.V(5).Infof("Using default ValidateVolumeCapabilities")
// Validate Arguments
if req.GetVolumeCapabilities() == nil || len(req.GetVolumeCapabilities()) == 0 {
@@ -449,7 +443,6 @@ func (gceCS *GCEControllerServer) ListSnapshots(ctx context.Context, req *csi.Li
}

func getRequestCapacity(capRange *csi.CapacityRange) (int64, error) {
-// TODO: Take another look at these casts/caps. Make sure this func is correct
var capBytes int64
// Default case where nothing is set
if capRange == nil {
@@ -499,7 +492,7 @@ func diskIsAttachedAndCompatible(volume *gce.CloudDisk, instance *compute.Instan
if disk.Mode != readWrite {
return true, fmt.Errorf("disk mode does not match. Got %v. Want %v", disk.Mode, readWrite)
}
-// TODO: Check volume_capability.
+// TODO(#94): Check volume_capability.
return true, nil
}
}
@@ -532,7 +525,6 @@ func pickZonesFromTopology(top *csi.TopologyRequirement, numZones int) ([]string
return nil, fmt.Errorf("need %v zones from topology, only got %v unique zones", numZones, reqSet.Union(prefSet).Len())
}
// Add the remaining number of zones into the set
-// TODO: Maybe we should pick a random set to get
nSlice, err := pickRandAndConsecutive(remainingZones.List(), remainingNumZones)
if err != nil {
return nil, err
@@ -600,7 +592,7 @@ func getDefaultZonesInRegion(gceCS *GCEControllerServer, existingZones []string,
return nil, fmt.Errorf("failed to get region from zones: %v", err)
}
needToGet := numZones - len(existingZones)
-totZones, err := gceCS.CloudProvider.ListZones(context.TODO(), region)
+totZones, err := gceCS.CloudProvider.ListZones(context.Background(), region)
if err != nil {
return nil, fmt.Errorf("failed to list zones from cloud provider: %v", err)
}
6 changes: 3 additions & 3 deletions pkg/gce-pd-csi-driver/controller_test.go
@@ -358,7 +358,7 @@ func TestCreateVolumeArguments(t *testing.T) {
}

// Start Test
-resp, err := gceDriver.cs.CreateVolume(context.TODO(), tc.req)
+resp, err := gceDriver.cs.CreateVolume(context.Background(), tc.req)
//check response
if err != nil {
serverError, ok := status.FromError(err)
@@ -429,7 +429,7 @@ func TestCreateVolumeRandomRequisiteTopology(t *testing.T) {
tZones := map[string]bool{}
// Start Test
for i := 0; i < 25; i++ {
-resp, err := gceDriver.cs.CreateVolume(context.TODO(), req)
+resp, err := gceDriver.cs.CreateVolume(context.Background(), req)
if err != nil {
t.Fatalf("CreateVolume did not expect error, but got %v", err)
}
@@ -470,7 +470,7 @@ func TestDeleteVolume(t *testing.T) {
// Setup new driver each time so no interference
gceDriver := initGCEDriver(t)

-_, err := gceDriver.cs.DeleteVolume(context.TODO(), tc.req)
+_, err := gceDriver.cs.DeleteVolume(context.Background(), tc.req)
if err == nil && tc.expErr {
t.Fatalf("Expected error but got none")
}
3 changes: 2 additions & 1 deletion pkg/gce-pd-csi-driver/gce-pd-driver.go
@@ -149,9 +149,10 @@ func (gceDriver *GCEDriver) Run(endpoint string) {

//Start the nonblocking GRPC
s := NewNonBlockingGRPCServer()
-// TODO: Only start specific servers based on a flag.
+// TODO(#34): Only start specific servers based on a flag.
// In the future have this only run specific combinations of servers depending on which version this is.
// The schema for that was in util. basically it was just s.start but with some nil servers.
+
s.Start(endpoint, gceDriver.ids, gceDriver.cs, gceDriver.ns)
s.Wait()
}
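TODO(#34) tracks starting only the servers a given deployment needs; as the comment above notes, that is essentially the same s.Start call with nil for the unused servers. A hedged sketch of the idea, assuming it lives in the same package as Run (the mode flag and its values are hypothetical):

```go
// runWithModeSketch starts only the gRPC services appropriate for the given
// mode by passing nil for the others; "controller" and "node" are placeholder
// mode names, not flags that exist in this PR.
func runWithModeSketch(gceDriver *GCEDriver, endpoint, mode string) {
	s := NewNonBlockingGRPCServer()
	switch mode {
	case "controller":
		s.Start(endpoint, gceDriver.ids, gceDriver.cs, nil)
	case "node":
		s.Start(endpoint, gceDriver.ids, nil, gceDriver.ns)
	default:
		s.Start(endpoint, gceDriver.ids, gceDriver.cs, gceDriver.ns)
	}
	s.Wait()
}
```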
6 changes: 3 additions & 3 deletions pkg/gce-pd-csi-driver/identity_test.go
@@ -30,7 +30,7 @@ func TestGetPluginInfo(t *testing.T) {
t.Fatalf("Failed to setup GCE Driver: %v", err)
}

-resp, err := gceDriver.ids.GetPluginInfo(context.TODO(), &csi.GetPluginInfoRequest{})
+resp, err := gceDriver.ids.GetPluginInfo(context.Background(), &csi.GetPluginInfoRequest{})
if err != nil {
t.Fatalf("GetPluginInfo returned unexpected error: %v", err)
}
@@ -52,7 +52,7 @@ func TestGetPluginCapabilities(t *testing.T) {
t.Fatalf("Failed to setup GCE Driver: %v", err)
}

-resp, err := gceDriver.ids.GetPluginCapabilities(context.TODO(), &csi.GetPluginCapabilitiesRequest{})
+resp, err := gceDriver.ids.GetPluginCapabilities(context.Background(), &csi.GetPluginCapabilitiesRequest{})
if err != nil {
t.Fatalf("GetPluginCapabilities returned unexpected error: %v", err)
}
@@ -74,7 +74,7 @@ func TestProbe(t *testing.T) {
t.Fatalf("Failed to setup GCE Driver: %v", err)
}

-_, err = gceDriver.ids.Probe(context.TODO(), &csi.ProbeRequest{})
+_, err = gceDriver.ids.Probe(context.Background(), &csi.ProbeRequest{})
if err != nil {
t.Fatalf("Probe returned unexpected error: %v", err)
}
29 changes: 12 additions & 17 deletions pkg/gce-pd-csi-driver/node.go
@@ -72,8 +72,8 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub
return nil, err
}
if !notMnt {
-// TODO: check if mount is compatible. Return OK if it is, or appropriate error.
-return nil, nil
+// TODO(#95): check if mount is compatible. Return OK if it is, or appropriate error.
+return nil, status.Error(codes.Unimplemented, "NodePublishVolume Mount point already exists, but cannot determine whether it is compatible or not")
}

if err := ns.Mounter.Interface.MakeDir(targetPath); err != nil {
@@ -92,27 +92,27 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub
notMnt, mntErr := ns.Mounter.Interface.IsLikelyNotMountPoint(targetPath)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
-return nil, status.Error(codes.Internal, fmt.Sprintf("TODO: %v", err))
+return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume failed to check whether target path is a mount point: %v", err))
}
if !notMnt {
if mntErr = ns.Mounter.Interface.Unmount(targetPath); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
-return nil, status.Error(codes.Internal, fmt.Sprintf("TODO: %v", err))
+return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume failed to unmount target path: %v", err))
}
notMnt, mntErr := ns.Mounter.Interface.IsLikelyNotMountPoint(targetPath)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
-return nil, status.Error(codes.Internal, fmt.Sprintf("TODO: %v", err))
+return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume failed to check whether target path is a mount point: %v", err))
}
if !notMnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", targetPath)
-return nil, status.Error(codes.Internal, fmt.Sprintf("TODO: %v", err))
+return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume something is wrong with mounting: %v", err))
}
}
os.Remove(targetPath)
glog.Errorf("Mount of disk %s failed: %v", targetPath, err)
-return nil, status.Error(codes.Internal, fmt.Sprintf("TODO: %v", err))
+return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume mount of disk failed: %v", err))
}

glog.V(4).Infof("Successfully mounted %s", targetPath)
@@ -133,7 +133,7 @@ func (ns *GCENodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeU
return nil, status.Error(codes.InvalidArgument, "NodeUnpublishVolume Target Path must be provided")
}

-// TODO: Check volume still exists
+// TODO(#96): Check volume still exists

err := ns.Mounter.Interface.Unmount(targetPath)
if err != nil {
@@ -167,10 +167,8 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage
return nil, err
}

-// TODO: Check volume still exists?
-
// Part 1: Get device path of attached device
-// TODO: Get real partitions
+// TODO(#83): Get real partitions
partition := ""

devicePaths := ns.DeviceUtils.GetDiskByIdPaths(volumeKey.Name, partition)
@@ -199,9 +197,7 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage
}

if !notMnt {
-// TODO: Validate disk mode
-
-// TODO: Check who is mounted here. No error if its us
+// TODO(#95): Check who is mounted here. No error if its us
return nil, fmt.Errorf("already a mount point")

}
Expand All @@ -215,11 +211,10 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage
fstype = mnt.FsType
}
for _, flag := range mnt.MountFlags {
-// TODO: Not sure if MountFlags == options
options = append(options, flag)
}
} else if blk := volumeCapability.GetBlock(); blk != nil {
-// TODO: Block volume support
+// TODO(#64): Block volume support
return nil, status.Error(codes.Unimplemented, fmt.Sprintf("Block volume support is not yet implemented"))
}

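The fstype and mount flags collected from the VolumeCapability above are what the staging step ultimately hands to the SafeFormatAndMount wrapper. A short sketch of that hand-off, assuming the k8s.io/kubernetes/pkg/util/mount package imported elsewhere in this PR (the ext4 default and function name are illustrative):

```go
package main

import "k8s.io/kubernetes/pkg/util/mount"

// formatAndMountSketch formats the attached device if it has no filesystem yet
// and mounts it at the staging path with the collected fstype and options.
func formatAndMountSketch(mounter *mount.SafeFormatAndMount, devicePath, stagingPath, fstype string, options []string) error {
	if fstype == "" {
		fstype = "ext4" // illustrative default when the capability sets none
	}
	return mounter.FormatAndMount(devicePath, stagingPath, fstype, options)
}
```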
@@ -283,7 +278,7 @@ func (ns *GCENodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRe

resp := &csi.NodeGetInfoResponse{
NodeId: nodeID,
-// TODO: Set MaxVolumesPerNode based on Node Type
+// TODO(#19): Set MaxVolumesPerNode based on Node Type
// Default of 0 means that CO Decides how many nodes can be published
// Can get from metadata server "machine-type"
MaxVolumesPerNode: 0,
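TODO(#19) points at deriving MaxVolumesPerNode from the node's machine type via the metadata server. A hedged sketch of that lookup using the GCE metadata client; the limit values below are placeholders, not real GCE attach limits:

```go
package main

import (
	"strings"

	"cloud.google.com/go/compute/metadata"
)

// maxVolumesPerNodeSketch reads the machine type from the metadata server and
// maps it to a volume limit. The numbers are placeholders only.
func maxVolumesPerNodeSketch() (int64, error) {
	// Returns a value like "projects/123456/machineTypes/n1-standard-2".
	machineType, err := metadata.Get("instance/machine-type")
	if err != nil {
		return 0, err
	}
	if strings.Contains(machineType, "f1-micro") || strings.Contains(machineType, "g1-small") {
		return 15, nil // placeholder for shared-core machine types
	}
	return 127, nil // placeholder default
}
```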
11 changes: 8 additions & 3 deletions pkg/mount-manager/device-utils.go
@@ -49,13 +49,18 @@ const (
defaultMountCommand = "mount"
)

+// DeviceUtils are a collection of methods that act on the devices attached
+// to a GCE Instance
type DeviceUtils interface {
+// GetDiskByIdPaths returns a list of all possible paths for a
+// given Persistent Disk
GetDiskByIdPaths(pdName string, partition string) []string
-// TODO: Info
+
+// VerifyDevicePath returns the first of the list of device paths that
+// exists on the machine, or an empty string if none exists
VerifyDevicePath(devicePaths []string) (string, error)
}

-// TODO: Info
type deviceUtils struct {
}

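A brief usage sketch of the newly documented interface, mirroring how NodeStageVolume elsewhere in this PR resolves a disk name to a device path. It assumes it sits in the same package (fmt import omitted) and the helper name is illustrative:

```go
// findDevicePathSketch resolves a PD name to the first matching device path
// that actually exists on the node.
func findDevicePathSketch(du DeviceUtils, pdName string) (string, error) {
	// An empty partition string means the whole disk, as in NodeStageVolume.
	candidates := du.GetDiskByIdPaths(pdName, "")
	devicePath, err := du.VerifyDevicePath(candidates)
	if err != nil {
		return "", err
	}
	if devicePath == "" {
		return "", fmt.Errorf("no device path found for disk %s", pdName)
	}
	return devicePath, nil
}
```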
@@ -91,7 +96,7 @@ func (m *deviceUtils) VerifyDevicePath(devicePaths []string) (string, error) {
glog.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
}
sdBeforeSet := sets.NewString(sdBefore...)
-// TODO: Remove this udevadm stuff. Not applicable because can't access /dev/sd* from container
+// TODO(#69): Verify udevadm works as intended in driver
if err := udevadmChangeToNewDrives(sdBeforeSet); err != nil {
// udevadm errors should not block disk detachment, log and continue
glog.Errorf("udevadmChangeToNewDrives failed with: %v", err)
2 changes: 1 addition & 1 deletion pkg/mount-manager/fake-safe-mounter.go
@@ -19,7 +19,7 @@ import "k8s.io/kubernetes/pkg/util/mount"
func NewFakeSafeMounter() *mount.SafeFormatAndMount {
execCallback := func(cmd string, args ...string) ([]byte, error) {
return nil, nil
-// TODO: Fill out exec callback for errors
+// TODO(#48): Fill out exec callback for errors
/*
if len(test.execScripts) <= execCallCount {
t.Errorf("Unexpected command: %s %v", cmd, args)
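TODO(#48) is about making the fake exec callback return scripted results so tests can exercise error paths; the commented-out block above hints at the shape. A rough sketch of that idea in the same package (fmt import omitted; type and variable names are hypothetical):

```go
// execScript is one canned response for the fake exec callback.
type execScript struct {
	output []byte
	err    error
}

// newScriptedExecCallback returns an exec callback that replays the given
// scripts in order and fails on any unexpected extra command.
func newScriptedExecCallback(scripts []execScript) func(cmd string, args ...string) ([]byte, error) {
	callCount := 0
	return func(cmd string, args ...string) ([]byte, error) {
		if callCount >= len(scripts) {
			return nil, fmt.Errorf("unexpected command: %s %v", cmd, args)
		}
		s := scripts[callCount]
		callCount++
		return s.output, s.err
	}
}
```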
4 changes: 1 addition & 3 deletions test/e2e/tests/multi_zone_e2e_test.go
@@ -32,8 +32,6 @@ import (
var _ = Describe("GCE PD CSI Driver Multi-Zone", func() {
BeforeEach(func() {
Expect(len(testInstances)).To(BeNumerically(">", 1))
-// TODO: Check whether the instances are in different zones???
-// I Think there should be a better way of guaranteeing this. Like a map from zone to instance for testInstances (?)
})

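The removed note above suggested guaranteeing zone spread with a map from zone to instance. A hedged sketch of that check inside BeforeEach; the GetZone accessor on the test instances is an assumption, since this diff does not show their type:

```go
// Group test instances by zone so the suite can assert that more than one
// zone is actually covered. GetZone() is an assumed accessor.
zones := map[string]bool{}
for _, inst := range testInstances {
	zones[inst.GetZone()] = true
}
Expect(len(zones)).To(BeNumerically(">", 1), "expected test instances in more than one zone")
```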
It("Should get reasonable topology from nodes with NodeGetInfo", func() {
@@ -138,7 +136,7 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() {
controllerClient.DeleteVolume(volId)
Expect(err).To(BeNil(), "DeleteVolume failed")

-// TODO: Validate Disk Deleted
+// Validate Disk Deleted
_, err = betaComputeService.RegionDisks.Get(p, region, volName).Do()
Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
}()