From 76d75b315424da5a4db4fb955633ff30cbb04921 Mon Sep 17 00:00:00 2001
From: David Zhu
Date: Fri, 17 Aug 2018 11:15:45 -0700
Subject: [PATCH] Removed fixed TODOs and tagged issue numbers in remaining
 ones

---
 deploy/kubernetes/dev/node.yaml               |  4 +--
 pkg/gce-cloud-provider/compute/gce-compute.go |  2 --
 pkg/gce-cloud-provider/compute/gce.go         |  1 -
 pkg/gce-pd-csi-driver/controller.go           | 22 +++++---------
 pkg/gce-pd-csi-driver/controller_test.go      |  6 ++--
 pkg/gce-pd-csi-driver/gce-pd-driver.go        |  3 +-
 pkg/gce-pd-csi-driver/identity_test.go        |  6 ++--
 pkg/gce-pd-csi-driver/node.go                 | 29 ++++++++-----------
 pkg/mount-manager/device-utils.go             | 11 +++++--
 pkg/mount-manager/fake-safe-mounter.go        |  2 +-
 test/e2e/tests/multi_zone_e2e_test.go         |  4 +--
 test/e2e/tests/single_zone_e2e_test.go        | 13 ++++-----
 test/e2e/utils/utils.go                       |  2 +-
 test/remote/instance.go                       |  7 ++---
 test/remote/runner.go                         |  2 --
 test/remote/setup-teardown.go                 |  2 --
 test/remote/ssh.go                            |  2 +-
 17 files changed, 50 insertions(+), 68 deletions(-)

diff --git a/deploy/kubernetes/dev/node.yaml b/deploy/kubernetes/dev/node.yaml
index 8413f834c..21316653f 100644
--- a/deploy/kubernetes/dev/node.yaml
+++ b/deploy/kubernetes/dev/node.yaml
@@ -1,4 +1,4 @@
-#TODO: Force DaemonSet to not run on master.
+#TODO(#40): Force DaemonSet to not run on master.
 kind: DaemonSet
 apiVersion: apps/v1
 metadata:
@@ -52,7 +52,7 @@ spec:
         - name: device-dir
           mountPath: /host/dev
       volumes:
-      # TODO(dependency): this will work when kublet registrar functionality exists
+      # TODO(#92): this will work when kubelet registrar functionality exists
       #- name: registrar-socket-dir
       #  hostPath:
       #    path: /var/lib/kubelet/device-plugins/
diff --git a/pkg/gce-cloud-provider/compute/gce-compute.go b/pkg/gce-cloud-provider/compute/gce-compute.go
index f10c950ab..ab795d173 100644
--- a/pkg/gce-cloud-provider/compute/gce-compute.go
+++ b/pkg/gce-cloud-provider/compute/gce-compute.go
@@ -380,7 +380,6 @@ func (cloud *CloudProvider) getRegionalDiskTypeURI(region, diskType string) stri
 func (cloud *CloudProvider) waitForZonalOp(ctx context.Context, op *compute.Operation, zone string) error {
     svc := cloud.service
     project := cloud.project
-    // TODO: Double check that these timeouts are reasonable
     return wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) {
         pollOp, err := svc.ZoneOperations.Get(project, zone, op.Name).Context(ctx).Do()
         if err != nil {
@@ -393,7 +392,6 @@ func (cloud *CloudProvider) waitForZonalOp(ctx context.Context, op *compute.Oper
 }
 
 func (cloud *CloudProvider) waitForRegionalOp(ctx context.Context, op *computebeta.Operation, region string) error {
-    // TODO: Double check that these timeouts are reasonable
     return wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) {
         pollOp, err := cloud.betaService.RegionOperations.Get(cloud.project, region, op.Name).Context(ctx).Do()
         if err != nil {
diff --git a/pkg/gce-cloud-provider/compute/gce.go b/pkg/gce-cloud-provider/compute/gce.go
index d6690d038..898e87702 100644
--- a/pkg/gce-cloud-provider/compute/gce.go
+++ b/pkg/gce-cloud-provider/compute/gce.go
@@ -94,7 +94,6 @@ func createBetaCloudService(vendorVersion string) (*beta.Service, error) {
 }
 
 func createCloudService(vendorVersion string) (*compute.Service, error) {
-    // TODO: support alternate methods of authentication
     svc, err := createCloudServiceWithDefaultServiceAccount(vendorVersion)
     return svc, err
 }
diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go
index 7d34dc9e9..5c39adda2 100644
--- a/pkg/gce-pd-csi-driver/controller.go
+++ b/pkg/gce-pd-csi-driver/controller.go
@@ -34,10 +34,6 @@ import (
     metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
 )
 
-// TODO: Add noisy glog.V(5).Infof() EVERYWHERE
-// TODO: Improve errors to only expose codes at top level
-// TODO: Improve error prefix to explicitly state what function it is in.
-
 type GCEControllerServer struct {
     Driver        *GCEDriver
     CloudProvider gce.GCECompute
@@ -82,9 +78,10 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre
         return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume Request Capacity is invalid: %v", err))
     }
 
-    // TODO: Validate volume capabilities
+    // TODO(#94): Validate volume capabilities
+
+    // TODO(#93): Support fs type parameter
 
-    // TODO: Support fs type. Can vendor in api-machinery stuff for sets etc.
     // Apply Parameters (case-insensitive). We leave validation of
     // the values to the cloud provider.
     diskType := "pd-standard"
@@ -180,8 +177,6 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre
 }
 
 func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
-    // TODO: Only allow deletion of volumes that were created by the driver
-    // Assuming ID is of form {zone}/{id}
     glog.V(4).Infof("DeleteVolume called with request %v", *req)
 
     // Validate arguments
@@ -228,10 +223,9 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r
         return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volumeID, err))
     }
 
-    // TODO: Check volume capability matches
+    // TODO(#94): Check volume capability matches
 
     pubVolResp := &csi.ControllerPublishVolumeResponse{
-        // TODO: Info gets sent to NodePublishVolume. Send something if necessary.
         PublishInfo: nil,
     }
 
@@ -336,7 +330,7 @@ func (gceCS *GCEControllerServer) ControllerUnpublishVolume(ctx context.Context,
 }
 
 func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
-    // TODO: Factor out the volume capability functionality and use as validation in all other functions as well
+    // TODO(#94): Factor out the volume capability functionality and use as validation in all other functions as well
     glog.V(5).Infof("Using default ValidateVolumeCapabilities")
     // Validate Arguments
     if req.GetVolumeCapabilities() == nil || len(req.GetVolumeCapabilities()) == 0 {
@@ -449,7 +443,6 @@ func (gceCS *GCEControllerServer) ListSnapshots(ctx context.Context, req *csi.Li
 }
 
 func getRequestCapacity(capRange *csi.CapacityRange) (int64, error) {
-    // TODO: Take another look at these casts/caps. Make sure this func is correct
     var capBytes int64
     // Default case where nothing is set
     if capRange == nil {
@@ -499,7 +492,7 @@ func diskIsAttachedAndCompatible(volume *gce.CloudDisk, instance *compute.Instan
             if disk.Mode != readWrite {
                 return true, fmt.Errorf("disk mode does not match. Got %v. Want %v", disk.Mode, readWrite)
             }
-            // TODO: Check volume_capability.
+            // TODO(#94): Check volume_capability.
             return true, nil
         }
     }
@@ -532,7 +525,6 @@ func pickZonesFromTopology(top *csi.TopologyRequirement, numZones int) ([]string
         return nil, fmt.Errorf("need %v zones from topology, only got %v unique zones", numZones, reqSet.Union(prefSet).Len())
     }
     // Add the remaining number of zones into the set
-    // TODO: Maybe we should pick a random set to get
     nSlice, err := pickRandAndConsecutive(remainingZones.List(), remainingNumZones)
     if err != nil {
         return nil, err
@@ -600,7 +592,7 @@ func getDefaultZonesInRegion(gceCS *GCEControllerServer, existingZones []string,
         return nil, fmt.Errorf("failed to get region from zones: %v", err)
     }
     needToGet := numZones - len(existingZones)
-    totZones, err := gceCS.CloudProvider.ListZones(context.TODO(), region)
+    totZones, err := gceCS.CloudProvider.ListZones(context.Background(), region)
     if err != nil {
         return nil, fmt.Errorf("failed to list zones from cloud provider: %v", err)
     }
diff --git a/pkg/gce-pd-csi-driver/controller_test.go b/pkg/gce-pd-csi-driver/controller_test.go
index b064e91da..d9d2cdff6 100644
--- a/pkg/gce-pd-csi-driver/controller_test.go
+++ b/pkg/gce-pd-csi-driver/controller_test.go
@@ -358,7 +358,7 @@ func TestCreateVolumeArguments(t *testing.T) {
         }
 
         // Start Test
-        resp, err := gceDriver.cs.CreateVolume(context.TODO(), tc.req)
+        resp, err := gceDriver.cs.CreateVolume(context.Background(), tc.req)
         //check response
         if err != nil {
             serverError, ok := status.FromError(err)
@@ -429,7 +429,7 @@ func TestCreateVolumeRandomRequisiteTopology(t *testing.T) {
     tZones := map[string]bool{}
     // Start Test
     for i := 0; i < 25; i++ {
-        resp, err := gceDriver.cs.CreateVolume(context.TODO(), req)
+        resp, err := gceDriver.cs.CreateVolume(context.Background(), req)
         if err != nil {
             t.Fatalf("CreateVolume did not expect error, but got %v", err)
         }
@@ -470,7 +470,7 @@ func TestDeleteVolume(t *testing.T) {
             // Setup new driver each time so no interference
             gceDriver := initGCEDriver(t)
 
-            _, err := gceDriver.cs.DeleteVolume(context.TODO(), tc.req)
+            _, err := gceDriver.cs.DeleteVolume(context.Background(), tc.req)
             if err == nil && tc.expErr {
                 t.Fatalf("Expected error but got none")
             }
diff --git a/pkg/gce-pd-csi-driver/gce-pd-driver.go b/pkg/gce-pd-csi-driver/gce-pd-driver.go
index 05e6a1ff3..a1ce03419 100644
--- a/pkg/gce-pd-csi-driver/gce-pd-driver.go
+++ b/pkg/gce-pd-csi-driver/gce-pd-driver.go
@@ -149,9 +149,10 @@ func (gceDriver *GCEDriver) Run(endpoint string) {
 
     //Start the nonblocking GRPC
     s := NewNonBlockingGRPCServer()
-    // TODO: Only start specific servers based on a flag.
+    // TODO(#34): Only start specific servers based on a flag.
     // In the future have this only run specific combinations of servers depending on which version this is.
     // The schema for that was in util. basically it was just s.start but with some nil servers.
+
     s.Start(endpoint, gceDriver.ids, gceDriver.cs, gceDriver.ns)
     s.Wait()
 }
diff --git a/pkg/gce-pd-csi-driver/identity_test.go b/pkg/gce-pd-csi-driver/identity_test.go
index ce0e49320..527db6ea0 100644
--- a/pkg/gce-pd-csi-driver/identity_test.go
+++ b/pkg/gce-pd-csi-driver/identity_test.go
@@ -30,7 +30,7 @@ func TestGetPluginInfo(t *testing.T) {
         t.Fatalf("Failed to setup GCE Driver: %v", err)
     }
 
-    resp, err := gceDriver.ids.GetPluginInfo(context.TODO(), &csi.GetPluginInfoRequest{})
+    resp, err := gceDriver.ids.GetPluginInfo(context.Background(), &csi.GetPluginInfoRequest{})
     if err != nil {
         t.Fatalf("GetPluginInfo returned unexpected error: %v", err)
     }
@@ -52,7 +52,7 @@ func TestGetPluginCapabilities(t *testing.T) {
         t.Fatalf("Failed to setup GCE Driver: %v", err)
     }
 
-    resp, err := gceDriver.ids.GetPluginCapabilities(context.TODO(), &csi.GetPluginCapabilitiesRequest{})
+    resp, err := gceDriver.ids.GetPluginCapabilities(context.Background(), &csi.GetPluginCapabilitiesRequest{})
     if err != nil {
         t.Fatalf("GetPluginCapabilities returned unexpected error: %v", err)
     }
@@ -74,7 +74,7 @@ func TestProbe(t *testing.T) {
         t.Fatalf("Failed to setup GCE Driver: %v", err)
     }
 
-    _, err = gceDriver.ids.Probe(context.TODO(), &csi.ProbeRequest{})
+    _, err = gceDriver.ids.Probe(context.Background(), &csi.ProbeRequest{})
     if err != nil {
         t.Fatalf("Probe returned unexpected error: %v", err)
     }
diff --git a/pkg/gce-pd-csi-driver/node.go b/pkg/gce-pd-csi-driver/node.go
index e1902fef0..64df239d4 100644
--- a/pkg/gce-pd-csi-driver/node.go
+++ b/pkg/gce-pd-csi-driver/node.go
@@ -72,8 +72,8 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub
         return nil, err
     }
     if !notMnt {
-        // TODO: check if mount is compatible. Return OK if it is, or appropriate error.
-        return nil, nil
+        // TODO(#95): check if mount is compatible. Return OK if it is, or appropriate error.
+        return nil, status.Error(codes.Unimplemented, "NodePublishVolume Mount point already exists, but cannot determine whether it is compatible or not")
     }
 
     if err := ns.Mounter.Interface.MakeDir(targetPath); err != nil {
@@ -92,27 +92,27 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub
         notMnt, mntErr := ns.Mounter.Interface.IsLikelyNotMountPoint(targetPath)
         if mntErr != nil {
             glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
-            return nil, status.Error(codes.Internal, fmt.Sprintf("TODO: %v", err))
+            return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume failed to check whether target path is a mount point: %v", err))
         }
         if !notMnt {
             if mntErr = ns.Mounter.Interface.Unmount(targetPath); mntErr != nil {
                 glog.Errorf("Failed to unmount: %v", mntErr)
-                return nil, status.Error(codes.Internal, fmt.Sprintf("TODO: %v", err))
+                return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume failed to unmount target path: %v", err))
             }
             notMnt, mntErr := ns.Mounter.Interface.IsLikelyNotMountPoint(targetPath)
             if mntErr != nil {
                 glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
-                return nil, status.Error(codes.Internal, fmt.Sprintf("TODO: %v", err))
+                return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume failed to check whether target path is a mount point: %v", err))
             }
             if !notMnt {
                 // This is very odd, we don't expect it. We'll try again next sync loop.
                 glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", targetPath)
-                return nil, status.Error(codes.Internal, fmt.Sprintf("TODO: %v", err))
+                return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume something is wrong with mounting: %v", err))
             }
         }
         os.Remove(targetPath)
         glog.Errorf("Mount of disk %s failed: %v", targetPath, err)
-        return nil, status.Error(codes.Internal, fmt.Sprintf("TODO: %v", err))
+        return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume mount of disk failed: %v", err))
     }
 
     glog.V(4).Infof("Successfully mounted %s", targetPath)
@@ -133,7 +133,7 @@ func (ns *GCENodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeU
         return nil, status.Error(codes.InvalidArgument, "NodeUnpublishVolume Target Path must be provided")
     }
 
-    // TODO: Check volume still exists
+    // TODO(#96): Check volume still exists
 
     err := ns.Mounter.Interface.Unmount(targetPath)
     if err != nil {
@@ -167,10 +167,8 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage
         return nil, err
     }
 
-    // TODO: Check volume still exists?
-
     // Part 1: Get device path of attached device
-    // TODO: Get real partitions
+    // TODO(#83): Get real partitions
     partition := ""
     devicePaths := ns.DeviceUtils.GetDiskByIdPaths(volumeKey.Name, partition)
@@ -199,9 +197,7 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage
     }
 
     if !notMnt {
-        // TODO: Validate disk mode
-
-        // TODO: Check who is mounted here. No error if its us
+        // TODO(#95): Check who is mounted here. No error if it's us
         return nil, fmt.Errorf("already a mount point")
     }
@@ -215,11 +211,10 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage
             fstype = mnt.FsType
         }
         for _, flag := range mnt.MountFlags {
-            // TODO: Not sure if MountFlags == options
             options = append(options, flag)
         }
     } else if blk := volumeCapability.GetBlock(); blk != nil {
-        // TODO: Block volume support
+        // TODO(#64): Block volume support
         return nil, status.Error(codes.Unimplemented, fmt.Sprintf("Block volume support is not yet implemented"))
     }
@@ -283,7 +278,7 @@ func (ns *GCENodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRe
 
     resp := &csi.NodeGetInfoResponse{
         NodeId: nodeID,
-        // TODO: Set MaxVolumesPerNode based on Node Type
+        // TODO(#19): Set MaxVolumesPerNode based on Node Type
         // Default of 0 means that CO Decides how many nodes can be published
         // Can get from metadata server "machine-type"
         MaxVolumesPerNode: 0,
diff --git a/pkg/mount-manager/device-utils.go b/pkg/mount-manager/device-utils.go
index d3aad5787..fd8f44706 100644
--- a/pkg/mount-manager/device-utils.go
+++ b/pkg/mount-manager/device-utils.go
@@ -49,13 +49,18 @@ const (
     defaultMountCommand = "mount"
 )
 
+// DeviceUtils are a collection of methods that act on the devices attached
+// to a GCE Instance
 type DeviceUtils interface {
+    // GetDiskByIdPaths returns a list of all possible paths for a
+    // given Persistent Disk
     GetDiskByIdPaths(pdName string, partition string) []string
-    // TODO: Info
+
+    // VerifyDevicePath returns the first of the list of device paths that
+    // exists on the machine, or an empty string if none exists
     VerifyDevicePath(devicePaths []string) (string, error)
 }
 
-// TODO: Info
 type deviceUtils struct {
 }
@@ -91,7 +96,7 @@ func (m *deviceUtils) VerifyDevicePath(devicePaths []string) (string, error) {
         glog.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
     }
     sdBeforeSet := sets.NewString(sdBefore...)
-    // TODO: Remove this udevadm stuff. Not applicable because can't access /dev/sd* from container
+    // TODO(#69): Verify udevadm works as intended in driver
     if err := udevadmChangeToNewDrives(sdBeforeSet); err != nil {
         // udevadm errors should not block disk detachment, log and continue
         glog.Errorf("udevadmChangeToNewDrives failed with: %v", err)
diff --git a/pkg/mount-manager/fake-safe-mounter.go b/pkg/mount-manager/fake-safe-mounter.go
index 430d96767..b75ea6eab 100644
--- a/pkg/mount-manager/fake-safe-mounter.go
+++ b/pkg/mount-manager/fake-safe-mounter.go
@@ -19,7 +19,7 @@ import "k8s.io/kubernetes/pkg/util/mount"
 func NewFakeSafeMounter() *mount.SafeFormatAndMount {
     execCallback := func(cmd string, args ...string) ([]byte, error) {
         return nil, nil
-        // TODO: Fill out exec callback for errors
+        // TODO(#48): Fill out exec callback for errors
         /*
             if len(test.execScripts) <= execCallCount {
                 t.Errorf("Unexpected command: %s %v", cmd, args)
diff --git a/test/e2e/tests/multi_zone_e2e_test.go b/test/e2e/tests/multi_zone_e2e_test.go
index b21d0a517..c559af3fc 100644
--- a/test/e2e/tests/multi_zone_e2e_test.go
+++ b/test/e2e/tests/multi_zone_e2e_test.go
@@ -32,8 +32,6 @@ import (
 var _ = Describe("GCE PD CSI Driver Multi-Zone", func() {
     BeforeEach(func() {
         Expect(len(testInstances)).To(BeNumerically(">", 1))
-        // TODO: Check whether the instances are in different zones???
-        // I Think there should be a better way of guaranteeing this. Like a map from zone to instance for testInstances (?)
     })
 
     It("Should get reasonable topology from nodes with NodeGetInfo", func() {
@@ -138,7 +136,7 @@ var _ = Describe("GCE PD CSI Driver Multi-Zone", func() {
             controllerClient.DeleteVolume(volId)
             Expect(err).To(BeNil(), "DeleteVolume failed")
 
-            // TODO: Validate Disk Deleted
+            // Validate Disk Deleted
             _, err = betaComputeService.RegionDisks.Get(p, region, volName).Do()
             Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
         }()
diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go
index 7148fea4b..c3600aee7 100644
--- a/test/e2e/tests/single_zone_e2e_test.go
+++ b/test/e2e/tests/single_zone_e2e_test.go
@@ -42,7 +42,6 @@ var _ = Describe("GCE PD CSI Driver", func() {
 
     It("Should create->attach->stage->mount volume and check if it is writable, then unmount->unstage->detach->delete and check disk is deleted", func() {
         // Create new driver and client
-        // TODO: Should probably actual have some object that includes both client and instance so we can relate the two??
         Expect(testInstances).NotTo(BeEmpty())
         testContext, err := testutils.GCEClientAndDriverSetup(testInstances[0])
         Expect(err).To(BeNil(), "Set up new Driver and Client failed with error")
@@ -67,7 +66,7 @@ var _ = Describe("GCE PD CSI Driver", func() {
         })
         Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err)
 
-        // TODO: Validate Disk Created
+        // Validate Disk Created
         cloudDisk, err := computeService.Disks.Get(p, z, volName).Do()
         Expect(err).To(BeNil(), "Could not get disk from cloud directly")
         Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType))
@@ -80,7 +79,7 @@ var _ = Describe("GCE PD CSI Driver", func() {
             client.DeleteVolume(volId)
             Expect(err).To(BeNil(), "DeleteVolume failed")
 
-            // TODO: Validate Disk Deleted
+            // Validate Disk Deleted
             _, err = computeService.Disks.Get(p, z, volName).Do()
             Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
         }()
@@ -151,7 +150,7 @@ var _ = Describe("GCE PD CSI Driver", func() {
         }, defaultSizeGb, nil)
         Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err)
 
-        // TODO: Validate Disk Created
+        // Validate Disk Created
         cloudDisk, err := betaComputeService.RegionDisks.Get(p, region, volName).Do()
         Expect(err).To(BeNil(), "Could not get disk from cloud directly")
         Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType))
@@ -171,7 +170,7 @@ var _ = Describe("GCE PD CSI Driver", func() {
             controllerClient.DeleteVolume(volId)
             Expect(err).To(BeNil(), "DeleteVolume failed")
 
-            // TODO: Validate Disk Deleted
+            // Validate Disk Deleted
             _, err = betaComputeService.RegionDisks.Get(p, region, volName).Do()
             Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
         }()
@@ -195,7 +194,7 @@ var _ = Describe("GCE PD CSI Driver", func() {
         volId, err := client.CreateVolume(volName, nil, defaultSizeGb, nil)
         Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err)
 
-        // TODO: Validate Disk Created
+        // Validate Disk Created
         cloudDisk, err := computeService.Disks.Get(p, z, volName).Do()
         Expect(err).To(BeNil(), "Could not get disk from cloud directly")
         Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType))
@@ -208,7 +207,7 @@ var _ = Describe("GCE PD CSI Driver", func() {
             client.DeleteVolume(volId)
             Expect(err).To(BeNil(), "DeleteVolume failed")
 
-            // TODO: Validate Disk Deleted
+            // Validate Disk Deleted
             _, err = computeService.Disks.Get(p, z, volName).Do()
             Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
         }()
diff --git a/test/e2e/utils/utils.go b/test/e2e/utils/utils.go
index d4d07e382..fbb749be6 100644
--- a/test/e2e/utils/utils.go
+++ b/test/e2e/utils/utils.go
@@ -86,7 +86,7 @@ func SetupProwConfig() (project, serviceAccount string) {
 
     // If we're on CI overwrite the service account
     glog.V(4).Infof("Fetching the default compute service account")
-    c, err := google.DefaultClient(context.TODO(), cloudresourcemanager.CloudPlatformScope)
+    c, err := google.DefaultClient(context.Background(), cloudresourcemanager.CloudPlatformScope)
     if err != nil {
         glog.Fatalf("Failed to get Google Default Client: %v", err)
     }
diff --git a/test/remote/instance.go b/test/remote/instance.go
index 7e68a9969..607d2c5c0 100644
--- a/test/remote/instance.go
+++ b/test/remote/instance.go
@@ -48,7 +48,6 @@ type InstanceInfo struct {
     name string
 
     // External IP is filled in after instance creation
-    // TODO: Maybe combine create instance and create instance info so all fields are set up at the same time...
     externalIP string
 
     computeService *compute.Service
@@ -89,7 +88,7 @@ func (i *InstanceInfo) CreateOrGetInstance(serviceAccount string) error {
         return fmt.Errorf("Failed to create firewall rule: %v", err)
     }
 
-    // TODO: Pick a better boot disk image
+    // TODO(#97): Pick a better boot disk image
     imageURL := "projects/ml-images/global/images/family/tf-1-9"
     inst := &compute.Instance{
         Name: i.name,
@@ -263,7 +262,7 @@ func GetComputeClient() (*compute.Service, error) {
         }
 
         var client *http.Client
-        client, err = google.DefaultClient(context.TODO(), compute.ComputeScope)
+        client, err = google.DefaultClient(context.Background(), compute.ComputeScope)
         if err != nil {
             continue
         }
@@ -293,7 +292,7 @@ func GetBetaComputeClient() (*computebeta.Service, error) {
         }
 
         var client *http.Client
-        client, err = google.DefaultClient(context.TODO(), computebeta.ComputeScope)
+        client, err = google.DefaultClient(context.Background(), computebeta.ComputeScope)
         if err != nil {
             continue
         }
diff --git a/test/remote/runner.go b/test/remote/runner.go
index f0d81a141..d03de39e5 100644
--- a/test/remote/runner.go
+++ b/test/remote/runner.go
@@ -84,8 +84,6 @@ func (i *InstanceInfo) UploadAndRun(archivePath, remoteWorkspace, driverRunCmd s
     if err != nil {
         return -1, fmt.Errorf("failed to convert driver PID from string %s to int: %v", driverPIDString, err)
     }
-    // TODO: return the PID so that we can kill the driver later
-    // Actually just do a pkill -f my_pattern
     return driverPID, nil
 }
diff --git a/test/remote/setup-teardown.go b/test/remote/setup-teardown.go
index 04f21bbff..575e1715d 100644
--- a/test/remote/setup-teardown.go
+++ b/test/remote/setup-teardown.go
@@ -138,7 +138,5 @@ func TeardownDriverAndClient(context *TestContext) error {
         return fmt.Errorf("failed to kill driver on remote instance, got output %s: %v", output, err)
     }
 
-    // TODO: Cleanup driver workspace on remote
-
     return nil
 }
diff --git a/test/remote/ssh.go b/test/remote/ssh.go
index 01f86f821..5f1d9de8a 100644
--- a/test/remote/ssh.go
+++ b/test/remote/ssh.go
@@ -77,7 +77,7 @@ func (i *InstanceInfo) CreateSSHTunnel(localPort, serverPort string) (int, error
     if err != nil {
         return 0, err
     }
-    // TODO: use this process and kill it at the end of the test as well.
+
     return cmd.Process.Pid, nil
 }