From 1fd4433ed4d5b5c80cce6dbcb527a4f1c8869c4f Mon Sep 17 00:00:00 2001 From: Sunny Song Date: Tue, 22 Nov 2022 14:30:03 -0800 Subject: [PATCH 1/2] Separate user errors from internal errors --- cmd/gce-pd-csi-driver/main.go | 10 +- pkg/gce-cloud-provider/compute/fake-gce.go | 2 +- pkg/gce-cloud-provider/compute/gce-compute.go | 38 ++-- pkg/gce-cloud-provider/compute/gce.go | 8 +- pkg/gce-cloud-provider/metadata/metadata.go | 8 +- pkg/gce-pd-csi-driver/controller.go | 215 +++++++++--------- pkg/gce-pd-csi-driver/node.go | 83 ++++--- pkg/gce-pd-csi-driver/server.go | 4 +- pkg/gce-pd-csi-driver/utils.go | 39 +++- pkg/gce-pd-csi-driver/utils_linux.go | 6 +- pkg/gce-pd-csi-driver/utils_test.go | 38 ++++ pkg/gce-pd-csi-driver/utils_windows.go | 2 +- pkg/metrics/metrics.go | 2 +- pkg/mount-manager/device-utils.go | 30 +-- pkg/mount-manager/safe-mounter_windows.go | 4 +- pkg/mount-manager/statter_linux.go | 2 +- test/k8s-integration/cluster.go | 2 +- test/remote/ssh.go | 2 +- test/sanity/sanity_test.go | 8 +- 19 files changed, 296 insertions(+), 207 deletions(-) diff --git a/cmd/gce-pd-csi-driver/main.go b/cmd/gce-pd-csi-driver/main.go index e9b58bb76..5b6bcc8a5 100644 --- a/cmd/gce-pd-csi-driver/main.go +++ b/cmd/gce-pd-csi-driver/main.go @@ -102,7 +102,7 @@ func handle() { } extraVolumeLabels, err := common.ConvertLabelsStringToMap(*extraVolumeLabelsStr) if err != nil { - klog.Fatalf("Bad extra volume labels: %w", err) + klog.Fatalf("Bad extra volume labels: %v", err.Error()) } gceDriver := driver.GetGCEDriver() @@ -119,7 +119,7 @@ func handle() { if *runControllerService { cloudProvider, err := gce.CreateCloudProvider(ctx, version, *cloudConfigFilePath) if err != nil { - klog.Fatalf("Failed to get cloud provider: %w", err) + klog.Fatalf("Failed to get cloud provider: %v", err.Error()) } controllerServer = driver.NewControllerServer(gceDriver, cloudProvider) } else if *cloudConfigFilePath != "" { @@ -131,20 +131,20 @@ func handle() { if *runNodeService { mounter, err := mountmanager.NewSafeMounter() if err != nil { - klog.Fatalf("Failed to get safe mounter: %w", err) + klog.Fatalf("Failed to get safe mounter: %v", err.Error()) } deviceUtils := mountmanager.NewDeviceUtils() statter := mountmanager.NewStatter(mounter) meta, err := metadataservice.NewMetadataService() if err != nil { - klog.Fatalf("Failed to set up metadata service: %w", err) + klog.Fatalf("Failed to set up metadata service: %v", err.Error()) } nodeServer = driver.NewNodeServer(gceDriver, mounter, deviceUtils, meta, statter) } err = gceDriver.SetupGCEDriver(driverName, version, extraVolumeLabels, identityServer, controllerServer, nodeServer) if err != nil { - klog.Fatalf("Failed to initialize GCE CSI Driver: %w", err) + klog.Fatalf("Failed to initialize GCE CSI Driver: %v", err.Error()) } gce.AttachDiskBackoff.Duration = *attachDiskBackoffDuration diff --git a/pkg/gce-cloud-provider/compute/fake-gce.go b/pkg/gce-cloud-provider/compute/fake-gce.go index 39806b1cb..2bdd8f8ed 100644 --- a/pkg/gce-cloud-provider/compute/fake-gce.go +++ b/pkg/gce-cloud-provider/compute/fake-gce.go @@ -103,7 +103,7 @@ func (cloud *FakeCloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Contex } r, err := common.GetRegionFromZones([]string{cloud.zone}) if err != nil { - return "", nil, fmt.Errorf("failed to get region from zones: %v", err) + return "", nil, fmt.Errorf("failed to get region from zones: %w", err) } volumeKey.Region = r return project, volumeKey, nil diff --git a/pkg/gce-cloud-provider/compute/gce-compute.go 
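A note on the pattern applied throughout this patch: `%w` is only meaningful inside `fmt.Errorf`, where it wraps the error so callers can later inspect it with `errors.Is`/`errors.As`; klog's printf-style functions do not wrap anything, so log and Fatalf sites are normalized to `%v` with `err.Error()`, and `%w` is reserved for returned errors. A minimal illustration of the split (standalone names like `errQuota` and `createDisk` are invented for the example, not from this repo):

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

var errQuota = errors.New("quota exceeded")

func createDisk() error {
	// Returned errors use %w so callers can still reach the cause.
	return fmt.Errorf("failed to insert disk: %w", errQuota)
}

func main() {
	if err := createDisk(); err != nil {
		// errors.Is sees through the %w wrapping.
		if errors.Is(err, errQuota) {
			log.Printf("user-correctable error: %v", err.Error())
			return
		}
		// Log sites just format text; %w has no special meaning here.
		log.Fatalf("internal error: %v", err.Error())
	}
}
```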
b/pkg/gce-cloud-provider/compute/gce-compute.go index b6e5b43aa..30be59599 100644 --- a/pkg/gce-cloud-provider/compute/gce-compute.go +++ b/pkg/gce-cloud-provider/compute/gce-compute.go @@ -115,7 +115,7 @@ func (cloud *CloudProvider) GetDefaultZone() string { func (cloud *CloudProvider) ListDisks(ctx context.Context) ([]*computev1.Disk, string, error) { region, err := common.GetRegionFromZones([]string{cloud.zone}) if err != nil { - return nil, "", fmt.Errorf("failed to get region from zones: %v", err) + return nil, "", fmt.Errorf("failed to get region from zones: %w", err) } zones, err := cloud.ListZones(ctx, region) if err != nil { @@ -162,7 +162,7 @@ func (cloud *CloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, p } region, err := common.GetRegionFromZones([]string{cloud.zone}) if err != nil { - return "", nil, fmt.Errorf("failed to get region from zones: %v", err) + return "", nil, fmt.Errorf("failed to get region from zones: %w", err) } switch volumeKey.Type() { case meta.Zonal: @@ -216,7 +216,7 @@ func (cloud *CloudProvider) ListZones(ctx context.Context, region string) ([]str zones := []string{} zoneList, err := cloud.service.Zones.List(cloud.project).Filter(fmt.Sprintf("region eq .*%s$", region)).Do() if err != nil { - return nil, fmt.Errorf("failed to list zones in region %s: %v", region, err) + return nil, fmt.Errorf("failed to list zones in region %s: %w", region, err) } for _, zone := range zoneList.Items { zones = append(zones, zone.Name) @@ -493,7 +493,7 @@ func (cloud *CloudProvider) insertRegionalDisk( klog.Warningf("GCE PD %s already exists, reusing", volKey.Name) return nil } - return status.Error(codes.Internal, fmt.Sprintf("unknown Insert disk error: %v", err)) + return status.Error(codes.Internal, fmt.Sprintf("unknown Insert disk error: %v", err.Error())) } klog.V(5).Infof("InsertDisk operation %s for disk %s", opName, diskToCreate.Name) @@ -514,7 +514,7 @@ func (cloud *CloudProvider) insertRegionalDisk( klog.Warningf("GCE PD %s already exists after wait, reusing", volKey.Name) return nil } - return fmt.Errorf("unknown Insert disk operation error: %v", err) + return fmt.Errorf("unknown Insert disk operation error: %w", err) } return nil } @@ -604,7 +604,7 @@ func (cloud *CloudProvider) insertZonalDisk( klog.Warningf("GCE PD %s already exists, reusing", volKey.Name) return nil } - return fmt.Errorf("unknown Insert disk error: %v", err) + return fmt.Errorf("unknown Insert disk error: %w", err) } klog.V(5).Infof("InsertDisk operation %s for disk %s", opName, diskToCreate.Name) @@ -626,7 +626,7 @@ func (cloud *CloudProvider) insertZonalDisk( klog.Warningf("GCE PD %s already exists after wait, reusing", volKey.Name) return nil } - return fmt.Errorf("unknown Insert disk operation error: %v", err) + return fmt.Errorf("unknown Insert disk operation error: %w", err) } return nil } @@ -685,7 +685,7 @@ func (cloud *CloudProvider) AttachDisk(ctx context.Context, project string, volK deviceName, err := common.GetDeviceName(volKey) if err != nil { - return fmt.Errorf("failed to get device name: %v", err) + return fmt.Errorf("failed to get device name: %w", err) } attachedDiskV1 := &computev1.AttachedDisk{ DeviceName: deviceName, @@ -697,13 +697,13 @@ func (cloud *CloudProvider) AttachDisk(ctx context.Context, project string, volK op, err := cloud.service.Instances.AttachDisk(project, instanceZone, instanceName, attachedDiskV1).Context(ctx).Do() if err != nil { - return fmt.Errorf("failed cloud service attach disk call: %v", err) + return fmt.Errorf("failed cloud 
service attach disk call: %w", err) } klog.V(5).Infof("AttachDisk operation %s for disk %s", op.Name, attachedDiskV1.DeviceName) err = cloud.waitForZonalOp(ctx, project, op.Name, instanceZone) if err != nil { - return fmt.Errorf("failed when waiting for zonal op: %v", err) + return fmt.Errorf("failed when waiting for zonal op: %w", err) } return nil } @@ -814,7 +814,7 @@ func (cloud *CloudProvider) WaitForAttach(ctx context.Context, project string, v klog.V(6).Infof("Polling for attach of disk %v to instance %v to complete for %v", volKey.Name, instanceName, time.Since(start)) disk, err := cloud.GetDisk(ctx, project, volKey, GCEAPIVersionV1) if err != nil { - return false, fmt.Errorf("GetDisk failed to get disk: %v", err) + return false, fmt.Errorf("GetDisk failed to get disk: %w", err) } if disk == nil { @@ -946,7 +946,7 @@ func (cloud *CloudProvider) waitForImageCreation(ctx context.Context, project, i klog.V(6).Infof("Checking GCE Image %s.", imageName) image, err := cloud.GetImage(ctx, project, imageName) if err != nil { - klog.Warningf("Error in getting image %s, %w", imageName, err) + klog.Warningf("Error in getting image %s, %v", imageName, err.Error()) } else if image != nil { if image.Status != "PENDING" { klog.V(6).Infof("Image %s status is %s", imageName, image.Status) @@ -1009,7 +1009,7 @@ func (cloud *CloudProvider) ResizeDisk(ctx context.Context, project string, volK klog.V(5).Infof("Resizing disk %v to size %v", volKey, requestBytes) cloudDisk, err := cloud.GetDisk(ctx, project, volKey, GCEAPIVersionV1) if err != nil { - return -1, fmt.Errorf("failed to get disk: %v", err) + return -1, fmt.Errorf("failed to get disk: %w", err) } sizeGb := cloudDisk.GetSizeGb() @@ -1037,13 +1037,13 @@ func (cloud *CloudProvider) resizeZonalDisk(ctx context.Context, project string, } op, err := cloud.service.Disks.Resize(project, volKey.Zone, volKey.Name, resizeReq).Context(ctx).Do() if err != nil { - return -1, fmt.Errorf("failed to resize zonal volume %v: %v", volKey.String(), err) + return -1, fmt.Errorf("failed to resize zonal volume %v: %w", volKey.String(), err) } klog.V(5).Infof("ResizeDisk operation %s for disk %s", op.Name, volKey.Name) err = cloud.waitForZonalOp(ctx, project, op.Name, volKey.Zone) if err != nil { - return -1, fmt.Errorf("failed waiting for op for zonal resize for %s: %v", volKey.String(), err) + return -1, fmt.Errorf("failed waiting for op for zonal resize for %s: %w", volKey.String(), err) } return requestGb, nil @@ -1056,13 +1056,13 @@ func (cloud *CloudProvider) resizeRegionalDisk(ctx context.Context, project stri op, err := cloud.service.RegionDisks.Resize(project, volKey.Region, volKey.Name, resizeReq).Context(ctx).Do() if err != nil { - return -1, fmt.Errorf("failed to resize regional volume %v: %v", volKey.String(), err) + return -1, fmt.Errorf("failed to resize regional volume %v: %w", volKey.String(), err) } klog.V(5).Infof("ResizeDisk operation %s for disk %s", op.Name, volKey.Name) err = cloud.waitForRegionalOp(ctx, project, op.Name, volKey.Region) if err != nil { - return -1, fmt.Errorf("failed waiting for op for regional resize for %s: %v", volKey.String(), err) + return -1, fmt.Errorf("failed waiting for op for regional resize for %s: %w", volKey.String(), err) } return requestGb, nil @@ -1112,7 +1112,7 @@ func (cloud *CloudProvider) waitForSnapshotCreation(ctx context.Context, project klog.V(6).Infof("Checking GCE Snapshot %s.", snapshotName) snapshot, err := cloud.GetSnapshot(ctx, project, snapshotName) if err != nil { - klog.Warningf("Error in 
getting snapshot %s, %w", snapshotName, err) + klog.Warningf("Error in getting snapshot %s, %v", snapshotName, err.Error()) } else if snapshot != nil { if snapshot.Status != "CREATING" { klog.V(6).Infof("Snapshot %s status is %s", snapshotName, snapshot.Status) @@ -1159,7 +1159,7 @@ func encodeTags(tags map[string]string) (string, error) { enc, err := json.Marshal(tags) if err != nil { - return "", fmt.Errorf("failed to encodeTags %v: %v", tags, err) + return "", fmt.Errorf("failed to encodeTags %v: %w", tags, err) } return string(enc), nil } diff --git a/pkg/gce-cloud-provider/compute/gce.go b/pkg/gce-cloud-provider/compute/gce.go index e949bd6cb..5f7317798 100644 --- a/pkg/gce-cloud-provider/compute/gce.go +++ b/pkg/gce-cloud-provider/compute/gce.go @@ -100,7 +100,7 @@ func CreateCloudProvider(ctx context.Context, vendorVersion string, configPath s project, zone, err := getProjectAndZone(configFile) if err != nil { - return nil, fmt.Errorf("Failed getting Project and Zone: %v", err) + return nil, fmt.Errorf("Failed getting Project and Zone: %w", err) } return &CloudProvider{ @@ -148,13 +148,13 @@ func readConfig(configPath string) (*ConfigFile, error) { reader, err := os.Open(configPath) if err != nil { - return nil, fmt.Errorf("couldn't open cloud provider configuration at %s: %v", configPath, err) + return nil, fmt.Errorf("couldn't open cloud provider configuration at %s: %w", configPath, err) } defer reader.Close() cfg := &ConfigFile{} if err := gcfg.FatalOnly(gcfg.ReadInto(cfg, reader)); err != nil { - return nil, fmt.Errorf("couldn't read cloud provider configuration at %s: %v", configPath, err) + return nil, fmt.Errorf("couldn't read cloud provider configuration at %s: %w", configPath, err) } return cfg, nil } @@ -193,7 +193,7 @@ func createCloudServiceWithDefaultServiceAccount(ctx context.Context, vendorVers func newOauthClient(ctx context.Context, tokenSource oauth2.TokenSource) (*http.Client, error) { if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) { if _, err := tokenSource.Token(); err != nil { - klog.Errorf("error fetching initial token: %w", err) + klog.Errorf("error fetching initial token: %v", err.Error()) return false, nil } return true, nil diff --git a/pkg/gce-cloud-provider/metadata/metadata.go b/pkg/gce-cloud-provider/metadata/metadata.go index 8e2d7c596..460bd00ad 100644 --- a/pkg/gce-cloud-provider/metadata/metadata.go +++ b/pkg/gce-cloud-provider/metadata/metadata.go @@ -45,19 +45,19 @@ var _ MetadataService = &metadataServiceManager{} func NewMetadataService() (MetadataService, error) { zone, err := metadata.Zone() if err != nil { - return nil, fmt.Errorf("failed to get current zone: %v", err) + return nil, fmt.Errorf("failed to get current zone: %w", err) } projectID, err := metadata.ProjectID() if err != nil { - return nil, fmt.Errorf("failed to get project: %v", err) + return nil, fmt.Errorf("failed to get project: %w", err) } name, err := metadata.InstanceName() if err != nil { - return nil, fmt.Errorf("failed to get instance name: %v", err) + return nil, fmt.Errorf("failed to get instance name: %w", err) } fullMachineType, err := metadata.Get("instance/machine-type") if err != nil { - return nil, fmt.Errorf("failed to get machine-type: %v", err) + return nil, fmt.Errorf("failed to get machine-type: %w", err) } // Response format: "projects/[NUMERIC_PROJECT_ID]/machineTypes/[MACHINE_TYPE]" splits := strings.Split(fullMachineType, "/") diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go index 
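Switching the compute-layer returns from `%v` to `%w` is what keeps classification helpers like `gce.IsGCEError(err, "notFound")` working across call layers: they must be able to see the underlying `googleapi.Error` through the wrapping. The helper's body is not part of this diff; a plausible sketch of such a check using `errors.As` (assuming the `google.golang.org/api/googleapi` dependency the driver already uses):

```go
package gcecloudprovider // sketch; the real helper lives in pkg/gce-cloud-provider/compute

import (
	"errors"

	"google.golang.org/api/googleapi"
)

// isGCEError reports whether err, or any error it wraps, is a googleapi.Error
// carrying the given reason, e.g. "notFound" or "alreadyExists".
func isGCEError(err error, reason string) bool {
	var apiErr *googleapi.Error
	if !errors.As(err, &apiErr) {
		return false
	}
	for _, e := range apiErr.Errors {
		if e.Reason == reason {
			return true
		}
	}
	return false
}
```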
61bd3c64a..c4521cfe2 100644 --- a/pkg/gce-pd-csi-driver/controller.go +++ b/pkg/gce-pd-csi-driver/controller.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "math/rand" + "regexp" "sort" "strings" "time" @@ -163,19 +164,19 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre capBytes, err := getRequestCapacity(capacityRange) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume Request Capacity is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume Request Capacity is invalid: %v", err.Error())) } err = validateVolumeCapabilities(volumeCapabilities) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapabilities is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapabilities is invalid: %v", err.Error())) } // Apply Parameters (case-insensitive). We leave validation of // the values to the cloud provider. params, err := common.ExtractAndDefaultParameters(req.GetParameters(), gceCS.Driver.name, gceCS.Driver.extraVolumeLabels) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "failed to extract parameters: %v", err) + return nil, status.Errorf(codes.InvalidArgument, "failed to extract parameters: %v", err.Error()) } // Determine multiWriter gceAPIVersion := gce.GCEAPIVersionV1 @@ -190,7 +191,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre case replicationTypeNone: zones, err = pickZones(ctx, gceCS, req.GetAccessibilityRequirements(), 1) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume failed to pick zones for disk: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume failed to pick zones for disk: %v", err.Error())) } if len(zones) != 1 { return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to pick exactly 1 zone for zonal disk, got %v instead", len(zones))) @@ -200,11 +201,11 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre case replicationTypeRegionalPD: zones, err = pickZones(ctx, gceCS, req.GetAccessibilityRequirements(), 2) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume failed to pick zones for disk: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume failed to pick zones for disk: %v", err.Error())) } region, err := common.GetRegionFromZones(zones) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume failed to get region from zones: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume failed to get region from zones: %v", err.Error())) } volKey = meta.RegionalKey(name, region) default: @@ -213,7 +214,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre volumeID, err := common.KeyToVolumeID(volKey, gceCS.CloudProvider.GetDefaultProject()) if err != nil { - return nil, status.Errorf(codes.Internal, "Failed to convert volume key to volume ID: %v", err) + return nil, LoggedError("Failed to convert volume key to volume ID: ", err) } if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired { return nil, status.Errorf(codes.Aborted, common.VolumeOperationAlreadyExistsFmt, volumeID) @@ -224,7 +225,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre existingDisk, err := gceCS.CloudProvider.GetDisk(ctx, 
gceCS.CloudProvider.GetDefaultProject(), volKey, gceAPIVersion) if err != nil { if !gce.IsGCEError(err, "notFound") { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume unknown get disk error when validating: %v", err)) + return nil, LoggedError("CreateVolume unknown get disk error when validating: ", err) } } if err == nil { @@ -234,12 +235,12 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre int64(capacityRange.GetLimitBytes()), multiWriter) if err != nil { - return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("CreateVolume disk already exists with same name and is incompatible: %v", err)) + return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("CreateVolume disk already exists with same name and is incompatible: %v", err.Error())) } ready, err := isDiskReady(existingDisk) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume disk %v had error checking ready status: %v", volKey, err)) + return nil, LoggedError("CreateVolume disk "+volKey.String()+" had error checking ready status: ", err) } if !ready { return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume existing disk %v is not ready", volKey)) @@ -260,7 +261,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre // Verify that snapshot exists sl, err := gceCS.getSnapshotByID(ctx, snapshotID) if err != nil { - return nil, status.Errorf(codes.Internal, "CreateVolume failed to get snapshot %s: %v", snapshotID, err) + return nil, LoggedError("CreateVolume failed to get snapshot "+snapshotID+": ", err) } else if len(sl.Entries) == 0 { return nil, status.Errorf(codes.NotFound, "CreateVolume source snapshot %s does not exist", snapshotID) } @@ -271,7 +272,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre // Verify that the source VolumeID is in the correct format. project, sourceVolKey, err := common.VolumeIDToKey(volumeContentSourceVolumeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume source volume id is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume source volume id is invalid: %v", err.Error())) } // Verify that the volume in VolumeContentSource exists. @@ -280,7 +281,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre if gce.IsGCEError(err, "notFound") { return nil, status.Errorf(codes.NotFound, "CreateVolume source volume %s does not exist", volumeContentSourceVolumeID) } else { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume unknown get disk error when validating: %v", err)) + return nil, LoggedError("CreateVolume unknown get disk error when validating: ", err) } } @@ -318,7 +319,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre // Verify the source disk is ready. 
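`LoggedError` is the helper this patch introduces for the internal half of the split: user mistakes keep precise gRPC codes (`InvalidArgument`, `NotFound`, `AlreadyExists`) with no extra logging, while unexpected cloud failures are logged in full server-side and surfaced as `codes.Internal`. Its definition lives in pkg/gce-pd-csi-driver/utils.go, which this excerpt does not show; a sketch consistent with the call sites, which pass a message prefix plus the raw error:

```go
package driver // sketch only; the actual definition is in pkg/gce-pd-csi-driver/utils.go

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/klog/v2"
)

// LoggedError keeps the full error text (including wrapped causes) in the
// driver log, then returns a gRPC Internal status with the same message.
func LoggedError(msg string, err error) error {
	klog.Errorf("%s", msg+err.Error())
	return status.Error(codes.Internal, msg+err.Error())
}
```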
ready, err := isDiskReady(diskFromSourceVolume) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume disk from source volume %v had error checking ready status: %v", sourceVolKey, err)) + return nil, LoggedError("CreateVolume disk from source volume "+sourceVolKey.String()+" had error checking ready status: ", err) } if !ready { return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume disk from source volume %v is not ready", sourceVolKey)) @@ -339,7 +340,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre } disk, err = createSingleZoneDisk(ctx, gceCS.CloudProvider, name, zones, params, capacityRange, capBytes, snapshotID, volumeContentSourceVolumeID, multiWriter) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume failed to create single zonal disk %#v: %v", name, err)) + return nil, LoggedError("CreateVolume failed to create single zonal disk "+name+": ", err) } case replicationTypeRegionalPD: if len(zones) != 2 { @@ -347,7 +348,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre } disk, err = createRegionalDisk(ctx, gceCS.CloudProvider, name, zones, params, capacityRange, capBytes, snapshotID, volumeContentSourceVolumeID, multiWriter) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume failed to create regional disk %#v: %v", name, err)) + return nil, LoggedError("CreateVolume failed to create regional disk "+name+": ", err) } default: return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume replication type '%s' is not supported", params.ReplicationType)) @@ -355,7 +356,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre ready, err := isDiskReady(disk) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume disk %v had error checking ready status: %v", volKey, err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume disk %v had error checking ready status: %v", volKey, err.Error())) } if !ready { return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume disk %v is not ready", volKey)) @@ -377,17 +378,17 @@ func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, req *csi.Del if err != nil { // Cannot find volume associated with this ID because VolumeID is not in // correct format, this is a success according to the Spec - klog.Warningf("DeleteVolume treating volume as deleted because volume id %s is invalid: %w", volumeID, err) + klog.Warningf("DeleteVolume treating volume as deleted because volume id %s is invalid: %v", volumeID, err.Error()) return &csi.DeleteVolumeResponse{}, nil } project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) if err != nil { if gce.IsGCENotFoundError(err) { - klog.Warningf("DeleteVolume treating volume as deleted because cannot find volume %v: %w", volumeID, err) + klog.Warningf("DeleteVolume treating volume as deleted because cannot find volume %v: %v", volumeID, err.Error()) return &csi.DeleteVolumeResponse{}, nil } - return nil, status.Errorf(codes.Internal, "DeleteVolume error repairing underspecified volume key: %v", err) + return nil, LoggedError("DeleteVolume error repairing underspecified volume key: ", err) } if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired { @@ -397,7 +398,7 @@ func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, req *csi.Del err = gceCS.CloudProvider.DeleteDisk(ctx, 
project, volKey) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Delete disk error: %v", err)) + return nil, LoggedError("unknown Delete disk error: ", err) } klog.V(4).Infof("DeleteVolume succeeded for disk %v", volKey) @@ -418,7 +419,7 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r resp, err := gceCS.executeControllerPublishVolume(ctx, req) if err != nil { - klog.Infof("For node %s adding backoff due to error for volume %s: %w", req.NodeId, req.VolumeId, err) + klog.Infof("For node %s adding backoff due to error for volume %s: %v", req.NodeId, req.VolumeId, err.Error()) gceCS.errorBackoff.next(backoffId) } else { klog.Infof("For node %s clear backoff due to successful publish of volume %v", req.NodeId, req.VolumeId) @@ -444,12 +445,12 @@ func (gceCS *GCEControllerServer) validateControllerPublishVolumeRequest(ctx con project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return "", nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerPublishVolume volume ID is invalid: %v", err)) + return "", nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerPublishVolume volume ID is invalid: %v", err.Error())) } // TODO(#253): Check volume capability matches for ALREADY_EXISTS if err = validateVolumeCapability(volumeCapability); err != nil { - return "", nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapabilities is invalid: %v", err)) + return "", nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapabilities is invalid: %v", err.Error())) } return project, volKey, nil @@ -474,9 +475,9 @@ func (gceCS *GCEControllerServer) executeControllerPublishVolume(ctx context.Con project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) if err != nil { if gce.IsGCENotFoundError(err) { - return nil, status.Errorf(codes.NotFound, "ControllerPublishVolume could not find volume with ID %v: %v", volumeID, err) + return nil, status.Errorf(codes.NotFound, "ControllerPublishVolume could not find volume with ID %v: %v", volumeID, err.Error()) } - return nil, status.Errorf(codes.Internal, "ControllerPublishVolume error repairing underspecified volume key: %v", err) + return nil, LoggedError("ControllerPublishVolume error repairing underspecified volume key: ", err) } // Acquires the lock for the volume on that node only, because we need to support the ability @@ -490,20 +491,20 @@ func (gceCS *GCEControllerServer) executeControllerPublishVolume(ctx context.Con _, err = gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionV1) if err != nil { if gce.IsGCENotFoundError(err) { - return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find disk %v: %v", volKey.String(), err)) + return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find disk %v: %v", volKey.String(), err.Error())) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get disk error: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get disk error: %v", err.Error())) } instanceZone, instanceName, err := common.NodeIDToZoneAndName(nodeID) if err != nil { - return nil, status.Error(codes.NotFound, fmt.Sprintf("could not split nodeID: %v", err)) + return nil, status.Error(codes.NotFound, fmt.Sprintf("could not split nodeID: %v", err.Error())) } instance, err := gceCS.CloudProvider.GetInstanceOrError(ctx, instanceZone, instanceName) if err != nil { if gce.IsGCENotFoundError(err) { - return nil, status.Error(codes.NotFound, 
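`ControllerPublishVolume` records failures per backoff key and short-circuits repeat attempts while the window is open, so a flapping attach cannot hammer the GCE API. The `errorBackoff` type itself sits outside this excerpt; client-go's `flowcontrol.Backoff` supports the pattern directly, roughly like this (a sketch with an invented `nodeBackoff` wrapper, not the driver's actual code):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

type nodeBackoff struct{ backoff *flowcontrol.Backoff }

func newNodeBackoff() *nodeBackoff {
	// Start at 200ms between retries, capped at 5 minutes.
	return &nodeBackoff{flowcontrol.NewBackOff(200*time.Millisecond, 5*time.Minute)}
}

func (b *nodeBackoff) blocking(id string) bool {
	return b.backoff.IsInBackOffSinceUpdate(id, b.backoff.Clock.Now())
}

func (b *nodeBackoff) next(id string)  { b.backoff.Next(id, b.backoff.Clock.Now()) }
func (b *nodeBackoff) reset(id string) { b.backoff.Reset(id) }

func main() {
	bo := newNodeBackoff()
	bo.next("node-a")                  // record a failure
	fmt.Println(bo.blocking("node-a")) // true: still inside the backoff window
	bo.reset("node-a")                 // clear after a successful publish
	fmt.Println(bo.blocking("node-a")) // false
}
```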
fmt.Sprintf("Could not find instance %v: %v", nodeID, err)) + return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find instance %v: %v", nodeID, err.Error())) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get instance error: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get instance error: %v", err.Error())) } readWrite := "READ_WRITE" @@ -513,12 +514,12 @@ func (gceCS *GCEControllerServer) executeControllerPublishVolume(ctx context.Con deviceName, err := common.GetDeviceName(volKey) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("error getting device name: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("error getting device name: %v", err.Error())) } attached, err := diskIsAttachedAndCompatible(deviceName, instance, volumeCapability, readWrite) if err != nil { - return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Disk %v already published to node %v but incompatbile: %v", volKey.Name, nodeID, err)) + return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Disk %v already published to node %v but incompatible: %v", volKey.Name, nodeID, err.Error())) } if attached { // Volume is attached to node. Success! @@ -527,16 +528,16 @@ func (gceCS *GCEControllerServer) executeControllerPublishVolume(ctx context.Con } instanceZone, instanceName, err = common.NodeIDToZoneAndName(nodeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("could not split nodeID: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("could not split nodeID: %v", err.Error())) } err = gceCS.CloudProvider.AttachDisk(ctx, project, volKey, readWrite, attachableDiskTypePersistent, instanceZone, instanceName) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Attach error: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Attach error: %v", err.Error())) } err = gceCS.CloudProvider.WaitForAttach(ctx, project, volKey, instanceZone, instanceName) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("unknown WaitForAttach error: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("unknown WaitForAttach error: %v", err.Error())) } klog.V(4).Infof("ControllerPublishVolume succeeded for disk %v to instance %v", volKey, nodeID) return pubVolResp, nil @@ -578,7 +579,7 @@ func (gceCS *GCEControllerServer) validateControllerUnpublishVolumeRequest(ctx c project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return "", nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerUnpublishVolume Volume ID is invalid: %v", err)) + return "", nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerUnpublishVolume Volume ID is invalid: %v", err.Error())) } return project, volKey, nil @@ -596,9 +597,9 @@ func (gceCS *GCEControllerServer) executeControllerUnpublishVolume(ctx context.C project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) if err != nil { if gce.IsGCENotFoundError(err) { - return nil, status.Errorf(codes.NotFound, "ControllerUnpublishVolume could not find volume with ID %v: %v", volumeID, err) + return nil, status.Errorf(codes.NotFound, "ControllerUnpublishVolume could not find volume with ID %v: %v", volumeID, err.Error()) } - return nil, status.Errorf(codes.Internal, "ControllerUnpublishVolume error repairing underspecified volume key: %v", err) + return nil, LoggedError("ControllerUnpublishVolume error repairing 
underspecified volume key: ", err) } // Acquires the lock for the volume on that node only, because we need to support the ability @@ -611,7 +612,7 @@ func (gceCS *GCEControllerServer) executeControllerUnpublishVolume(ctx context.C instanceZone, instanceName, err := common.NodeIDToZoneAndName(nodeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("could not split nodeID: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("could not split nodeID: %v", err.Error())) } instance, err := gceCS.CloudProvider.GetInstanceOrError(ctx, instanceZone, instanceName) if err != nil { @@ -620,12 +621,12 @@ func (gceCS *GCEControllerServer) executeControllerUnpublishVolume(ctx context.C klog.Warningf("Treating volume %v as unpublished because node %v could not be found", volKey.String(), instanceName) return &csi.ControllerUnpublishVolumeResponse{}, nil } - return nil, status.Error(codes.Internal, fmt.Sprintf("error getting instance: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("error getting instance: %v", err.Error())) } deviceName, err := common.GetDeviceName(volKey) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("error getting device name: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("error getting device name: %v", err.Error())) } attached := diskIsAttached(deviceName, instance) @@ -638,7 +639,7 @@ func (gceCS *GCEControllerServer) executeControllerUnpublishVolume(ctx context.C err = gceCS.CloudProvider.DetachDisk(ctx, project, deviceName, instanceZone, instanceName) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("unknown detach error: %v", err)) + return nil, LoggedError("unknown detach error: ", err) } klog.V(4).Infof("ControllerUnpublishVolume succeeded for disk %v from node %v", volKey, nodeID) @@ -655,15 +656,15 @@ func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context } project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Volume ID is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Volume ID is invalid: %v", err.Error())) } project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) if err != nil { if gce.IsGCENotFoundError(err) { - return nil, status.Errorf(codes.NotFound, "ValidateVolumeCapabilities could not find volume with ID %v: %v", volumeID, err) + return nil, status.Errorf(codes.NotFound, "ValidateVolumeCapabilities could not find volume with ID %v: %v", volumeID, err.Error()) } - return nil, status.Errorf(codes.Internal, "ValidateVolumeCapabilities error repairing underspecified volume key: %v", err) + return nil, LoggedError("ValidateVolumeCapabilities error repairing underspecified volume key: ", err) } if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired { @@ -674,9 +675,9 @@ func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context disk, err := gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionV1) if err != nil { if gce.IsGCENotFoundError(err) { - return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find disk %v: %v", volKey.Name, err)) + return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find disk %v: %v", volKey.Name, err.Error())) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get disk error: %v", err)) + return nil, LoggedError("Unknown get disk error: ", err) } // Check 
Volume Context is Empty @@ -686,16 +687,16 @@ func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context // Check volume capabilities supported by PD. These are the same for any PD if err := validateVolumeCapabilities(req.GetVolumeCapabilities()); err != nil { - return generateFailedValidationMessage("VolumeCapabilities not valid: %v", err), nil + return generateFailedValidationMessage("VolumeCapabilities not valid: %v", err.Error()), nil } // Validate the disk parameters match the disk we GET params, err := common.ExtractAndDefaultParameters(req.GetParameters(), gceCS.Driver.name, gceCS.Driver.extraVolumeLabels) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "failed to extract parameters: %v", err) + return nil, status.Errorf(codes.InvalidArgument, "failed to extract parameters: %v", err.Error()) } if err := gce.ValidateDiskParameters(disk, params); err != nil { - return generateFailedValidationMessage("Parameters %v do not match given disk %s: %v", req.GetParameters(), disk.GetName(), err), nil + return generateFailedValidationMessage("Parameters %v do not match given disk %s: %v", req.GetParameters(), disk.GetName(), err.Error()), nil } // Ignore secrets @@ -737,9 +738,9 @@ func (gceCS *GCEControllerServer) ListVolumes(ctx context.Context, req *csi.List diskList, _, err := gceCS.CloudProvider.ListDisks(ctx) if err != nil { if gce.IsGCEInvalidError(err) { - return nil, status.Error(codes.Aborted, fmt.Sprintf("ListVolumes error with invalid request: %v", err)) + return nil, status.Error(codes.Aborted, fmt.Sprintf("ListVolumes error with invalid request: %v", err.Error())) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown list disk error: %v", err)) + return nil, LoggedError("Unknown list disk error: ", err) } gceCS.disks = diskList gceCS.seen = map[string]int{} @@ -808,7 +809,7 @@ func (gceCS *GCEControllerServer) CreateSnapshot(ctx context.Context, req *csi.C } project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateSnapshot Volume ID is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateSnapshot Volume ID is invalid: %v", err.Error())) } if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired { @@ -820,14 +821,14 @@ func (gceCS *GCEControllerServer) CreateSnapshot(ctx context.Context, req *csi.C _, err = gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionV1) if err != nil { if gce.IsGCENotFoundError(err) { - return nil, status.Error(codes.NotFound, fmt.Sprintf("CreateSnapshot could not find disk %v: %v", volKey.String(), err)) + return nil, status.Error(codes.NotFound, fmt.Sprintf("CreateSnapshot could not find disk %v: %v", volKey.String(), err.Error())) } - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateSnapshot unknown get disk error: %v", err)) + return nil, LoggedError("CreateSnapshot unknown get disk error: ", err) } snapshotParams, err := common.ExtractAndDefaultSnapshotParameters(req.GetParameters(), gceCS.Driver.name) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Invalid snapshot parameters: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Invalid snapshot parameters: %v", err.Error())) } var snapshot *csi.Snapshot @@ -861,31 +862,31 @@ func (gceCS *GCEControllerServer) createPDSnapshot(ctx context.Context, project snapshot, err = gceCS.CloudProvider.GetSnapshot(ctx, project, snapshotName) if err != nil { 
if !gce.IsGCEError(err, "notFound") { - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get snapshot error: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get snapshot error: %v", err.Error())) } // If we could not find the snapshot, we create a new one snapshot, err = gceCS.CloudProvider.CreateSnapshot(ctx, project, volKey, snapshotName, snapshotParams) if err != nil { if gce.IsGCEError(err, "notFound") { - return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volKey.String(), err)) + return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volKey.String(), err.Error())) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown create snapshot error: %v", err)) + return nil, LoggedError("Unknown create snapshot error: ", err) } } err = gceCS.validateExistingSnapshot(snapshot, volKey) if err != nil { - return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Error in creating snapshot: %v", err)) + return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Error in creating snapshot: %v", err.Error())) } timestamp, err := parseTimestamp(snapshot.CreationTimestamp) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to covert creation timestamp: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to convert creation timestamp: %v", err.Error())) } ready, err := isCSISnapshotReady(snapshot.Status) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Snapshot had error checking ready status: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("Snapshot had error checking ready status: %v", err.Error())) } return &csi.Snapshot{ @@ -908,31 +909,31 @@ func (gceCS *GCEControllerServer) createImage(ctx context.Context, project strin image, err = gceCS.CloudProvider.GetImage(ctx, project, imageName) if err != nil { if !gce.IsGCEError(err, "notFound") { - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get image error: %v", err)) + return nil, LoggedError("Unknown get image error: ", err) } // create a new image image, err = gceCS.CloudProvider.CreateImage(ctx, project, volKey, imageName, snapshotParams) if err != nil { if gce.IsGCEError(err, "notFound") { - return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volKey.String(), err)) + return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volKey.String(), err.Error())) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown create image error: %v", err)) + return nil, LoggedError("Unknown create image error: ", err) } } err = gceCS.validateExistingImage(image, volKey) if err != nil { - return nil, status.Errorf(codes.AlreadyExists, fmt.Sprintf("Error in creating snapshot: %v", err)) + return nil, status.Errorf(codes.AlreadyExists, fmt.Sprintf("Error in creating snapshot: %v", err.Error())) } timestamp, err := parseTimestamp(image.CreationTimestamp) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to covert creation timestamp: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to convert creation timestamp: %v", err.Error())) } ready, err := isImageReady(image.Status) if err != nil { - return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to check image status: %v", err)) + return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to check image status: %v", err.Error())) } return 
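`createPDSnapshot` and `createImage` both follow an idempotent get-then-create shape: GET the resource, treat `notFound` as permission to create, and validate any pre-existing resource against the request so a mismatch maps to `AlreadyExists`. Distilled into a sketch, with `getOrCreate`, `Resource`, and the function parameters as hypothetical stand-ins for the cloud calls:

```go
package controller

import (
	"errors"
	"fmt"
)

// Resource is a stand-in for a compute.Snapshot or compute.Image.
type Resource struct{ Name string }

var errNotFound = errors.New("notFound")

// getOrCreate reuses an existing resource when it is compatible with the
// request, and creates it only when the GET reports notFound.
func getOrCreate(
	get, create func() (*Resource, error),
	validate func(*Resource) error,
) (*Resource, error) {
	res, err := get()
	if err != nil {
		if !errors.Is(err, errNotFound) {
			return nil, fmt.Errorf("unknown get error: %w", err) // internal; becomes LoggedError at the RPC boundary
		}
		if res, err = create(); err != nil {
			return nil, fmt.Errorf("unknown create error: %w", err)
		}
	}
	// A pre-existing resource that does not match the request maps to AlreadyExists.
	if err := validate(res); err != nil {
		return nil, fmt.Errorf("incompatible existing resource: %w", err)
	}
	return res, nil
}
```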
&csi.Snapshot{ @@ -951,7 +952,7 @@ func (gceCS *GCEControllerServer) validateExistingImage(image *compute.Image, vo _, sourceKey, err := common.VolumeIDToKey(cleanSelfLink(image.SourceDisk)) if err != nil { - return fmt.Errorf("fail to get source disk key %s, %v", image.SourceDisk, err) + return fmt.Errorf("fail to get source disk key %s, %w", image.SourceDisk, err) } if sourceKey.String() != volKey.String() { @@ -1002,7 +1003,7 @@ func (gceCS *GCEControllerServer) validateExistingSnapshot(snapshot *compute.Sna _, sourceKey, err := common.VolumeIDToKey(cleanSelfLink(snapshot.SourceDisk)) if err != nil { - return fmt.Errorf("fail to get source disk key %s, %v", snapshot.SourceDisk, err) + return fmt.Errorf("fail to get source disk key %s, %w", snapshot.SourceDisk, err) } if sourceKey.String() != volKey.String() { @@ -1046,12 +1047,12 @@ func (gceCS *GCEControllerServer) DeleteSnapshot(ctx context.Context, req *csi.D case common.DiskSnapshotType: err = gceCS.CloudProvider.DeleteSnapshot(ctx, project, key) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Delete snapshot error: %v", err)) + return nil, LoggedError("unknown Delete snapshot error: ", err) } case common.DiskImageType: err = gceCS.CloudProvider.DeleteImage(ctx, project, key) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Delete image error: %v", err)) + return nil, LoggedError("unknown Delete image error: ", err) } default: return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("unknown snapshot type %s", snapshotType)) @@ -1081,9 +1082,9 @@ func (gceCS *GCEControllerServer) ListSnapshots(ctx context.Context, req *csi.Li snapshotList, err := gceCS.getSnapshots(ctx, req) if err != nil { if gce.IsGCEInvalidError(err) { - return nil, status.Error(codes.Aborted, fmt.Sprintf("ListSnapshots error with invalid request: %v", err)) + return nil, status.Error(codes.Aborted, fmt.Sprintf("ListSnapshots error with invalid request: %v", err.Error())) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown list snapshots error: %v", err)) + return nil, LoggedError("Unknown list snapshots error: ", err) } gceCS.snapshots = snapshotList gceCS.snapshotTokens = map[string]int{} @@ -1119,25 +1120,25 @@ func (gceCS *GCEControllerServer) ControllerExpandVolume(ctx context.Context, re capacityRange := req.GetCapacityRange() reqBytes, err := getRequestCapacity(capacityRange) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerExpandVolume capacity range is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerExpandVolume capacity range is invalid: %v", err.Error())) } project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerExpandVolume Volume ID is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerExpandVolume Volume ID is invalid: %v", err.Error())) } project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) if err != nil { if gce.IsGCENotFoundError(err) { - return nil, status.Errorf(codes.NotFound, "ControllerExpandVolume could not find volume with ID %v: %v", volumeID, err) + return nil, status.Errorf(codes.NotFound, "ControllerExpandVolume could not find volume with ID %v: %v", volumeID, err.Error()) } - return nil, status.Errorf(codes.Internal, "ControllerExpandVolume error repairing underspecified volume key: %v", err) + return 
nil, LoggedError("ControllerExpandVolume error repairing underspecified volume key: ", err) } resizedGb, err := gceCS.CloudProvider.ResizeDisk(ctx, project, volKey, reqBytes) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("ControllerExpandVolume failed to resize disk: %v", err)) + return nil, LoggedError("ControllerExpandVolume failed to resize disk: ", err) } klog.V(4).Infof("ControllerExpandVolume succeeded for disk %v to size %v", volKey, resizedGb) @@ -1158,17 +1159,17 @@ func (gceCS *GCEControllerServer) getSnapshots(ctx context.Context, req *csi.Lis snapshots, _, err = gceCS.CloudProvider.ListSnapshots(ctx, filter) if err != nil { if gce.IsGCEError(err, "invalid") { - return nil, status.Error(codes.Aborted, fmt.Sprintf("Invalid error: %v", err)) + return nil, status.Error(codes.Aborted, fmt.Sprintf("Invalid error: %v", err.Error())) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown list snapshot error: %v", err)) + return nil, LoggedError("Unknown list snapshot error: ", err) } images, _, err = gceCS.CloudProvider.ListImages(ctx, filter) if err != nil { if gce.IsGCEError(err, "invalid") { - return nil, status.Error(codes.Aborted, fmt.Sprintf("Invalid error: %v", err)) + return nil, status.Error(codes.Aborted, fmt.Sprintf("Invalid error: %v", err.Error())) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown list image error: %v", err)) + return nil, LoggedError("Unknown list image error: ", err) } entries := []*csi.ListSnapshotsResponse_Entry{} @@ -1176,7 +1177,7 @@ func (gceCS *GCEControllerServer) getSnapshots(ctx context.Context, req *csi.Lis for _, snapshot := range snapshots { entry, err := generateDiskSnapshotEntry(snapshot) if err != nil { - return nil, fmt.Errorf("failed to generate snapshot entry: %v", err) + return nil, fmt.Errorf("failed to generate snapshot entry: %w", err) } entries = append(entries, entry) } @@ -1184,7 +1185,7 @@ func (gceCS *GCEControllerServer) getSnapshots(ctx context.Context, req *csi.Lis for _, image := range images { entry, err := generateDiskImageEntry(image) if err != nil { - return nil, fmt.Errorf("failed to generate image entry: %v", err) + return nil, fmt.Errorf("failed to generate image entry: %w", err) } entries = append(entries, entry) } @@ -1209,11 +1210,11 @@ func (gceCS *GCEControllerServer) getSnapshotByID(ctx context.Context, snapshotI // return empty list if no snapshot is found return &csi.ListSnapshotsResponse{}, nil } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown list snapshot error: %v", err)) + return nil, LoggedError("Unknown list snapshot error: ", err) } e, err := generateDiskSnapshotEntry(snapshot) if err != nil { - return nil, fmt.Errorf("failed to generate snapshot entry: %v", err) + return nil, fmt.Errorf("failed to generate snapshot entry: %w", err) } entries = []*csi.ListSnapshotsResponse_Entry{e} case common.DiskImageType: @@ -1226,7 +1227,7 @@ func (gceCS *GCEControllerServer) getSnapshotByID(ctx context.Context, snapshotI } e, err := generateImageEntry(image) if err != nil { - return nil, fmt.Errorf("failed to generate image entry: %v", err) + return nil, fmt.Errorf("failed to generate image entry: %w", err) } entries = []*csi.ListSnapshotsResponse_Entry{e} } @@ -1241,9 +1242,9 @@ func (gceCS *GCEControllerServer) getSnapshotByID(ctx context.Context, snapshotI func generateDiskSnapshotEntry(snapshot *compute.Snapshot) (*csi.ListSnapshotsResponse_Entry, error) { t, _ := time.Parse(time.RFC3339, snapshot.CreationTimestamp) - tp, err := 
ptypes.TimestampProto(t) - if err != nil { - return nil, fmt.Errorf("Failed to covert creation timestamp: %v", err) + tp := timestamppb.New(t) + if err := tp.CheckValid(); err != nil { + return nil, fmt.Errorf("Failed to convert creation timestamp: %w", err) } // We ignore the error intentionally here since we are just listing snapshots @@ -1266,9 +1267,9 @@ func generateDiskSnapshotEntry(snapshot *compute.Snapshot) (*csi.ListSnapshotsRe func generateDiskImageEntry(image *compute.Image) (*csi.ListSnapshotsResponse_Entry, error) { t, _ := time.Parse(time.RFC3339, image.CreationTimestamp) - tp, err := ptypes.TimestampProto(t) - if err != nil { - return nil, fmt.Errorf("Failed to covert creation timestamp: %v", err) + tp := timestamppb.New(t) + if err := tp.CheckValid(); err != nil { + return nil, fmt.Errorf("failed to convert creation timestamp: %w", err) } ready, _ := isImageReady(image.Status) @@ -1288,7 +1289,7 @@ func generateImageEntry(image *compute.Image) (*csi.ListSnapshotsResponse_Entry, error) { timestamp, err := parseTimestamp(image.CreationTimestamp) if err != nil { - return nil, fmt.Errorf("Failed to covert creation timestamp: %v", err) + return nil, fmt.Errorf("Failed to convert creation timestamp: %w", err) } // ignore the error intentionally here since we are just listing images @@ -1366,11 +1367,11 @@ func diskIsAttachedAndCompatible(deviceName string, instance *compute.Instance, func pickZonesFromTopology(top *csi.TopologyRequirement, numZones int) ([]string, error) { reqZones, err := getZonesFromTopology(top.GetRequisite()) if err != nil { - return nil, fmt.Errorf("could not get zones from requisite topology: %v", err) + return nil, fmt.Errorf("could not get zones from requisite topology: %w", err) } prefZones, err := getZonesFromTopology(top.GetPreferred()) if err != nil { - return nil, fmt.Errorf("could not get zones from preferred topology: %v", err) + return nil, fmt.Errorf("could not get zones from preferred topology: %w", err) } if numZones <= len(prefZones) { @@ -1408,7 +1409,7 @@ func getZonesFromTopology(topList []*csi.Topology) ([]string, error) { // GCE PD cloud provider Create has no restrictions so just create in top preferred zone zone, err := getZoneFromSegment(top.GetSegments()) if err != nil { - return nil, fmt.Errorf("could not get zone from preferred topology: %v", err) + return nil, fmt.Errorf("could not get zone from preferred topology: %w", err) } zones = append(zones, zone) } @@ -1437,12 +1438,12 @@ func pickZones(ctx context.Context, gceCS *GCEControllerServer, top *csi.Topolog if top != nil { zones, err = pickZonesFromTopology(top, numZones) if err != nil { - return nil, fmt.Errorf("failed to pick zones from topology: %v", err) + return nil, fmt.Errorf("failed to pick zones from topology: %w", err) } } else { zones, err = getDefaultZonesInRegion(ctx, gceCS, []string{gceCS.CloudProvider.GetDefaultZone()}, numZones) if err != nil { - return nil, fmt.Errorf("failed to get default %v zones in region: %v", numZones, err) + return nil, fmt.Errorf("failed to get default %v zones in region: %w", numZones, err) } klog.Warningf("No zones have been specified in either topology or params, picking default zone: %v", zones) @@ -1453,12 +1454,12 @@ func getDefaultZonesInRegion(ctx context.Context, gceCS *GCEControllerServer, existingZones []string, numZones int) ([]string, error) { region, err := 
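The listing helpers above also migrate off the deprecated `github.com/golang/protobuf/ptypes` API: `ptypes.TimestampProto(t)` returned `(tp, err)`, while `timestamppb.New` from `google.golang.org/protobuf/types/known/timestamppb` cannot fail and validity is checked separately via `CheckValid`. The shape of the migration:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	t, _ := time.Parse(time.RFC3339, "2022-11-22T14:30:03-08:00")

	// Old (deprecated): tp, err := ptypes.TimestampProto(t)
	// New: construct unconditionally, then validate explicitly.
	tp := timestamppb.New(t)
	if err := tp.CheckValid(); err != nil {
		fmt.Println("failed to convert creation timestamp:", err)
		return
	}
	fmt.Println(tp.AsTime().Format(time.RFC3339))
}
```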
common.GetRegionFromZones(existingZones) if err != nil { - return nil, fmt.Errorf("failed to get region from zones: %v", err) + return nil, fmt.Errorf("failed to get region from zones: %w", err) } needToGet := numZones - len(existingZones) totZones, err := gceCS.CloudProvider.ListZones(ctx, region) if err != nil { - return nil, fmt.Errorf("failed to list zones from cloud provider: %v", err) + return nil, fmt.Errorf("failed to list zones from cloud provider: %w", err) } remainingZones := sets.NewString(totZones...).Difference(sets.NewString(existingZones...)) l := remainingZones.List() @@ -1536,7 +1537,7 @@ func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name project := cloudProvider.GetDefaultProject() region, err := common.GetRegionFromZones(zones) if err != nil { - return nil, fmt.Errorf("failed to get region from zones: %v", err) + return nil, fmt.Errorf("failed to get region from zones: %w", err) } fullyQualifiedReplicaZones := []string{} @@ -1547,7 +1548,7 @@ func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name err = cloudProvider.InsertDisk(ctx, project, meta.RegionalKey(name, region), params, capBytes, capacityRange, fullyQualifiedReplicaZones, snapshotID, volumeContentSourceVolumeID, multiWriter) if err != nil { - return nil, fmt.Errorf("failed to insert regional disk: %v", err) + return nil, fmt.Errorf("failed to insert regional disk: %w", err) } gceAPIVersion := gce.GCEAPIVersionV1 @@ -1557,7 +1558,7 @@ func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name disk, err := cloudProvider.GetDisk(ctx, project, meta.RegionalKey(name, region), gceAPIVersion) if err != nil { - return nil, fmt.Errorf("failed to get disk after creating regional disk: %v", err) + return nil, fmt.Errorf("failed to get disk after creating regional disk: %w", err) } return disk, nil } @@ -1570,7 +1571,7 @@ func createSingleZoneDisk(ctx context.Context, cloudProvider gce.GCECompute, nam diskZone := zones[0] err := cloudProvider.InsertDisk(ctx, project, meta.ZonalKey(name, diskZone), params, capBytes, capacityRange, nil, snapshotID, volumeContentSourceVolumeID, multiWriter) if err != nil { - return nil, fmt.Errorf("failed to insert zonal disk: %v", err) + return nil, fmt.Errorf("failed to insert zonal disk: %w", err) } gceAPIVersion := gce.GCEAPIVersionV1 diff --git a/pkg/gce-pd-csi-driver/node.go b/pkg/gce-pd-csi-driver/node.go index 550ce97d7..340caa5a2 100644 --- a/pkg/gce-pd-csi-driver/node.go +++ b/pkg/gce-pd-csi-driver/node.go @@ -79,7 +79,6 @@ func (ns *GCENodeServer) isVolumePathMounted(path string) bool { 1) Target Path MUST be the vol referenced by vol ID 2) TODO(#253): Check volume capability matches for ALREADY_EXISTS 3) Readonly MUST match - */ return true } @@ -112,7 +111,7 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub defer ns.volumeLocks.Release(volumeID) if err := validateVolumeCapability(volumeCapability); err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapability is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapability is invalid: %v", err.Error())) } if ns.isVolumePathMounted(targetPath) { @@ -142,7 +141,7 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub sourcePath = stagingTargetPath if err := preparePublishPath(targetPath, ns.Mounter); err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("mkdir failed on disk %s (%v)", targetPath, err)) + return 
nil, status.Error(codes.Internal, fmt.Sprintf("mkdir failed on disk %s (%v)", targetPath, err.Error())) } } else if blk := volumeCapability.GetBlock(); blk != nil { @@ -155,16 +154,16 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub sourcePath, err = getDevicePath(ns, volumeID, partition) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Error when getting device path: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("Error when getting device path: %v", err.Error())) } // Expose block volume as file at target path err = makeFile(targetPath) if err != nil { if removeErr := os.Remove(targetPath); removeErr != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Error removing block file at target path %v: %v, mounti error: %v", targetPath, removeErr, err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("Error removing block file at target path %v: %v, mount error: %v", targetPath, removeErr, err.Error())) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to create block file at target path %v: %v", targetPath, err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to create block file at target path %v: %v", targetPath, err.Error())) } } else { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("NodePublishVolume volume capability must specify either mount or block mode")) @@ -172,35 +171,35 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub err = ns.Mounter.Interface.Mount(sourcePath, targetPath, fstype, options) if err != nil { - klog.Errorf("Mount of disk %s failed: %w", targetPath, err) + klog.Errorf("Mount of disk %s failed: %v", targetPath, err.Error()) notMnt, mntErr := ns.Mounter.Interface.IsLikelyNotMountPoint(targetPath) if mntErr != nil { - klog.Errorf("IsLikelyNotMountPoint check failed: %w", mntErr) - return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume failed to check whether target path is a mount point: %v", err)) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr.Error()) + return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume failed to check whether target path is a mount point: %v", err.Error())) } if !notMnt { // TODO: check the logic here again. If mntErr == nil & notMnt == false, it means volume is actually mounted. // Why need to unmount? klog.Warningf("Although volume mount failed, but IsLikelyNotMountPoint returns volume %s is mounted already at %s", volumeID, targetPath) if mntErr = ns.Mounter.Interface.Unmount(targetPath); mntErr != nil { - klog.Errorf("Failed to unmount: %w", mntErr) - return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume failed to unmount target path: %v", err)) + klog.Errorf("Failed to unmount: %v", mntErr.Error()) + return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume failed to unmount target path: %v", err.Error())) } notMnt, mntErr := ns.Mounter.Interface.IsLikelyNotMountPoint(targetPath) if mntErr != nil { - klog.Errorf("IsLikelyNotMountPoint check failed: %w", mntErr) - return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume failed to check whether target path is a mount point: %v", err.Error())) } if !notMnt { // This is very odd, we don't expect it. 
We'll try again next sync loop. klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", targetPath) - return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume something is wrong with mounting: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume something is wrong with mounting: %v", err.Error())) } } if err := os.Remove(targetPath); err != nil { - klog.Errorf("failed to remove targetPath %s: %v", targetPath, err) + klog.Errorf("failed to remove targetPath %s: %v", targetPath, err.Error()) } - return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume mount of disk failed: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume mount of disk failed: %v", err.Error())) } klog.V(4).Infof("NodePublishVolume succeeded on volume %v to %s", volumeID, targetPath) @@ -211,10 +210,10 @@ func makeFile(path string) error { // Create file newFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0750) if err != nil { - return fmt.Errorf("failed to open file %s: %v", path, err) + return fmt.Errorf("failed to open file %s: %w", path, err) } if err := newFile.Close(); err != nil { - return fmt.Errorf("failed to close file %s: %v", path, err) + return fmt.Errorf("failed to close file %s: %w", path, err) } return nil } @@ -236,7 +235,7 @@ func (ns *GCENodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeU defer ns.volumeLocks.Release(volumeID) if err := cleanupPublishPath(targetPath, ns.Mounter); err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Unmount failed: %v\nUnmounting arguments: %s\n", err, targetPath)) + return nil, status.Error(codes.Internal, fmt.Sprintf("Unmount failed: %v\nUnmounting arguments: %s\n", err.Error(), targetPath)) } klog.V(4).Infof("NodeUnpublishVolume succeeded on %v from %s", volumeID, targetPath) return &csi.NodeUnpublishVolumeResponse{}, nil @@ -263,14 +262,14 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage defer ns.volumeLocks.Release(volumeID) if err := validateVolumeCapability(volumeCapability); err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapability is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapability is invalid: %v", err.Error())) } // TODO(#253): Check volume capability matches for ALREADY_EXISTS _, volumeKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("NodeStageVolume Volume ID is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("NodeStageVolume Volume ID is invalid: %v", err.Error())) } // Part 1: Get device path of attached device @@ -282,7 +281,7 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage devicePath, err := getDevicePath(ns, volumeID, partition) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Error when getting device path: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("Error when getting device path: %v", err.Error())) } klog.V(4).Infof("Successfully found attached GCE PD %q at device path %s.", volumeKey.Name, devicePath) @@ -294,7 +293,7 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage } if err := prepareStagePath(stagingTargetPath, ns.Mounter); err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("mkdir failed on disk %s (%v)", stagingTargetPath, 
err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("mkdir failed on disk %s (%v)", stagingTargetPath, err.Error())) } // Part 3: Mount device to stagingTargetPath @@ -337,7 +336,7 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage } return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to format and mount device from (%q) to (%q) with fstype (%q) and options (%q): %v", - devicePath, stagingTargetPath, fstype, options, err)) + devicePath, stagingTargetPath, fstype, options, err.Error())) } klog.V(4).Infof("NodeStageVolume succeeded on %v to %s", volumeID, stagingTargetPath) @@ -361,9 +360,19 @@ func (ns *GCENodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUns defer ns.volumeLocks.Release(volumeID) if err := cleanupStagePath(stagingTargetPath, ns.Mounter); err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("NodeUnstageVolume failed: %v\nUnmounting arguments: %s\n", err, stagingTargetPath)) + return nil, status.Error(codes.Internal, fmt.Sprintf("NodeUnstageVolume failed: %v\nUnmounting arguments: %s\n", err.Error(), stagingTargetPath)) + } + +<<<<<<< HEAD +======= + devicePath, err := getDevicePath(ns, volumeID, "" /* partition, which is unused */) + if err != nil { + klog.Errorf("Failed to find device path for volume %s. Device may not be detached cleanly (error is ignored and unstaging is continuing): %v", volumeID, err.Error()) + } else if err := ns.DeviceUtils.DisableDevice(devicePath); err != nil { + klog.Errorf("Failed to disable device %s for volume %s. Device may not be detached cleanly (error is ignored and unstaging is continuing): %v", devicePath, volumeID, err.Error()) } +>>>>>>> Separate user errors from internal errors klog.V(4).Infof("NodeUnstageVolume succeeded on %v from %s", volumeID, stagingTargetPath) return &csi.NodeUnstageVolumeResponse{}, nil } @@ -404,17 +413,17 @@ func (ns *GCENodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGe if os.IsNotExist(err) { return nil, status.Errorf(codes.NotFound, "path %s does not exist", req.VolumePath) } - return nil, status.Errorf(codes.Internal, "unknown error when stat on %s: %v", req.VolumePath, err) + return nil, status.Errorf(codes.Internal, "unknown error when stat on %s: %v", req.VolumePath, err.Error()) } isBlock, err := ns.VolumeStatter.IsBlockDevice(req.VolumePath) if err != nil { - return nil, status.Errorf(codes.Internal, "failed to determine whether %s is block device: %v", req.VolumePath, err) + return nil, status.Errorf(codes.Internal, "failed to determine whether %s is block device: %v", req.VolumePath, err.Error()) } if isBlock { bcap, err := getBlockSizeBytes(req.VolumePath, ns.Mounter) if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get block capacity on path %s: %v", req.VolumePath, err) + return nil, status.Errorf(codes.Internal, "failed to get block capacity on path %s: %v", req.VolumePath, err.Error()) } return &csi.NodeGetVolumeStatsResponse{ Usage: []*csi.VolumeUsage{ @@ -427,7 +436,7 @@ func (ns *GCENodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGe } available, capacity, used, inodesFree, inodes, inodesUsed, err := ns.VolumeStatter.StatFS(req.VolumePath) if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get fs info on path %s: %v", req.VolumePath, err) + return nil, status.Errorf(codes.Internal, "failed to get fs info on path %s: %v", req.VolumePath, err.Error()) } return &csi.NodeGetVolumeStatsResponse{ @@ -456,7 +465,7 @@ func (ns *GCENodeServer) 
NodeExpandVolume(ctx context.Context, req *csi.NodeExpa capacityRange := req.GetCapacityRange() reqBytes, err := getRequestCapacity(capacityRange) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("capacity range is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("capacity range is invalid: %v", err.Error())) } volumePath := req.GetVolumePath() @@ -466,19 +475,19 @@ func (ns *GCENodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpa _, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("volume ID is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("volume ID is invalid: %v", err.Error())) } devicePath, err := getDevicePath(ns, volumeID, "") if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("error when getting device path for %s: %v", volumeID, err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("error when getting device path for %s: %v", volumeID, err.Error())) } volumeCapability := req.GetVolumeCapability() if volumeCapability != nil { // VolumeCapability is optional, if specified, validate it if err := validateVolumeCapability(volumeCapability); err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapability is invalid: %v", err)) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapability is invalid: %v", err.Error())) } if blk := volumeCapability.GetBlock(); blk != nil { @@ -492,7 +501,11 @@ func (ns *GCENodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpa resizer := resizefs.NewResizeFs(ns.Mounter) _, err = resizer.Resize(devicePath, volumePath) if err != nil { +<<<<<<< HEAD return nil, status.Error(codes.Internal, fmt.Sprintf("error when resizing volume %s: %v", volKey.String(), err)) +======= + return nil, status.Error(codes.Internal, fmt.Sprintf("error when resizing volume %s from device '%s' at path '%s': %v", volKey.String(), devicePath, volumePath, err.Error())) +>>>>>>> Separate user errors from internal errors } @@ -510,12 +523,12 @@ func (ns *GCENodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpa /* format, err := ns.Mounter.GetDiskFormat(devicePath) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("ControllerExpandVolume error checking format for device %s: %v", devicePath, err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("ControllerExpandVolume error checking format for device %s: %v", devicePath, err.Error())) } gotSizeBytes, err = ns.getFSSizeBytes(devicePath) if err != nil { - return nil, status.Errorf(codes.Internal, "ControllerExpandVolume resize could not get fs size of %s: %v", volumePath, err) + return nil, status.Errorf(codes.Internal, "ControllerExpandVolume resize could not get fs size of %s: %v", volumePath, err.Error()) } if gotSizeBytes != reqBytes { return nil, status.Errorf(codes.Internal, "ControllerExpandVolume resize requested for size %v but after resize volume was size %v", reqBytes, gotSizeBytes) diff --git a/pkg/gce-pd-csi-driver/server.go b/pkg/gce-pd-csi-driver/server.go index eb5108611..48f3af692 100644 --- a/pkg/gce-pd-csi-driver/server.go +++ b/pkg/gce-pd-csi-driver/server.go @@ -106,7 +106,7 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, c klog.V(4).Infof("Start listening with scheme %v, addr %v", u.Scheme, addr) listener, err := net.Listen(u.Scheme, addr) if err != nil { - 
klog.Fatalf("Failed to listen: %w", err) + klog.Fatalf("Failed to listen: %v", err.Error()) } server := grpc.NewServer(opts...) @@ -125,7 +125,7 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, c klog.V(4).Infof("Listening for connections on address: %#v", listener.Addr()) if err := server.Serve(listener); err != nil { - klog.Fatalf("Failed to serve: %w", err) + klog.Fatalf("Failed to serve: %v", err.Error()) } } diff --git a/pkg/gce-pd-csi-driver/utils.go b/pkg/gce-pd-csi-driver/utils.go index a7af4da79..a60f8fc6c 100644 --- a/pkg/gce-pd-csi-driver/utils.go +++ b/pkg/gce-pd-csi-driver/utils.go @@ -19,11 +19,15 @@ package gceGCEDriver import ( "errors" "fmt" + "net/http" "context" csi "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/api/googleapi" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "k8s.io/klog/v2" ) @@ -67,7 +71,7 @@ func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, h klog.V(4).Infof("%s called with request: %s", info.FullMethod, req) resp, err := handler(ctx, req) if err != nil { - klog.Errorf("%s returned with error: %w", info.FullMethod, err) + klog.Errorf("%s returned with error: %v", info.FullMethod, err.Error()) } else { klog.V(4).Infof("%s returned with response: %s", info.FullMethod, resp) } @@ -209,3 +213,36 @@ func containsZone(zones []string, zone string) bool { return false } + +// CodeForError returns a pointer to the grpc error code that maps to the http +// error code for the passed in user googleapi error. Returns codes.Internal if +// the given error is not a googleapi error caused by the user. The following +// http error codes are considered user errors: +// (1) http 400 Bad Request, returns grpc InvalidArgument, +// (2) http 403 Forbidden, returns grpc PermissionDenied, +// (3) http 404 Not Found, returns grpc NotFound +// (4) http 429 Too Many Requests, returns grpc ResourceExhausted +func CodeForError(err error) *codes.Code { + internalErrorCode := codes.Internal + // Upwrap the error + var apiErr *googleapi.Error + if !errors.As(err, &apiErr) { + return &internalErrorCode + } + + userErrors := map[int]codes.Code{ + http.StatusForbidden: codes.PermissionDenied, + http.StatusBadRequest: codes.InvalidArgument, + http.StatusTooManyRequests: codes.ResourceExhausted, + http.StatusNotFound: codes.NotFound, + } + if code, ok := userErrors[apiErr.Code]; ok { + return &code + } + return &internalErrorCode +} + +func LoggedError(msg string, err error) error { + klog.Errorf(msg+"%v", err.Error()) + return status.Errorf(*CodeForError(err), msg+"%v", err.Error()) +} diff --git a/pkg/gce-pd-csi-driver/utils_linux.go b/pkg/gce-pd-csi-driver/utils_linux.go index 9247b78b2..8f1f03673 100644 --- a/pkg/gce-pd-csi-driver/utils_linux.go +++ b/pkg/gce-pd-csi-driver/utils_linux.go @@ -34,12 +34,12 @@ func getDevicePath(ns *GCENodeServer, volumeID, partition string) (string, error } deviceName, err := common.GetDeviceName(volumeKey) if err != nil { - return "", fmt.Errorf("error getting device name: %v", err) + return "", fmt.Errorf("error getting device name: %w", err) } devicePaths := ns.DeviceUtils.GetDiskByIdPaths(deviceName, partition) devicePath, err := ns.DeviceUtils.VerifyDevicePath(devicePaths, deviceName) if err != nil { - return "", status.Error(codes.Internal, fmt.Sprintf("error verifying GCE PD (%q) is attached: %v", deviceName, err)) + return "", status.Error(codes.Internal, fmt.Sprintf("error verifying GCE PD (%q) is 
attached: %v", deviceName, err.Error())) } if devicePath == "" { return "", status.Error(codes.Internal, fmt.Sprintf("Unable to find device path out of attempted paths: %v", devicePaths)) @@ -70,7 +70,7 @@ func cleanupStagePath(path string, m *mount.SafeFormatAndMount) error { func getBlockSizeBytes(devicePath string, m *mount.SafeFormatAndMount) (int64, error) { output, err := m.Exec.Command("blockdev", "--getsize64", devicePath).CombinedOutput() if err != nil { - return -1, fmt.Errorf("error when getting size of block volume at path %s: output: %s, err: %v", devicePath, string(output), err) + return -1, fmt.Errorf("error when getting size of block volume at path %s: output: %s, err: %w", devicePath, string(output), err) } strOut := strings.TrimSpace(string(output)) gotSizeBytes, err := strconv.ParseInt(strOut, 10, 64) diff --git a/pkg/gce-pd-csi-driver/utils_test.go b/pkg/gce-pd-csi-driver/utils_test.go index 2a0fd5f45..ab92e45ee 100644 --- a/pkg/gce-pd-csi-driver/utils_test.go +++ b/pkg/gce-pd-csi-driver/utils_test.go @@ -18,9 +18,13 @@ limitations under the License. package gceGCEDriver import ( + "errors" + "net/http" "testing" csi "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/api/googleapi" + "google.golang.org/grpc/codes" ) var ( @@ -291,3 +295,37 @@ func TestGetReadOnlyFromCapabilities(t *testing.T) { } } } + +func TestCodeForError(t *testing.T) { + internalErrorCode := codes.Internal + userErrorCode := codes.InvalidArgument + testCases := []struct { + name string + inputErr error + expCode *codes.Code + }{ + { + name: "Not googleapi.Error", + inputErr: errors.New("I am not a googleapi.Error"), + expCode: &internalErrorCode, + }, + { + name: "User error", + inputErr: &googleapi.Error{Code: http.StatusBadRequest, Message: "User error with bad request"}, + expCode: &userErrorCode, + }, + { + name: "googleapi.Error but not a user error", + inputErr: &googleapi.Error{Code: http.StatusInternalServerError, Message: "Internal error"}, + expCode: &internalErrorCode, + }, + } + + for _, tc := range testCases { + t.Logf("Running test: %v", tc.name) + actualCode := *CodeForError(tc.inputErr) + if *tc.expCode != actualCode { + t.Fatalf("Expected error code '%v' but got '%v'", tc.expCode, actualCode) + } + } +} diff --git a/pkg/gce-pd-csi-driver/utils_windows.go b/pkg/gce-pd-csi-driver/utils_windows.go index 42fcf881f..09ece4eae 100644 --- a/pkg/gce-pd-csi-driver/utils_windows.go +++ b/pkg/gce-pd-csi-driver/utils_windows.go @@ -83,7 +83,7 @@ func getDevicePath(ns *GCENodeServer, volumeID, partition string) (string, error } deviceName, err := common.GetDeviceName(volumeKey) if err != nil { - return "", fmt.Errorf("error getting device name: %v", err) + return "", fmt.Errorf("error getting device name: %w", err) } proxy, ok := ns.Mounter.Interface.(mounter.CSIProxyMounter) diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 3d44c6d9d..cae379231 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -101,7 +101,7 @@ func (mm *metricsManager) InitializeHttpHandler(address, path string) { go func() { klog.Infof("Metric server listening at %q", address) if err := http.ListenAndServe(address, mux); err != nil { - klog.Fatalf("Failed to start metric server at specified address (%q) and path (%q): %w", address, path, err) + klog.Fatalf("Failed to start metric server at specified address (%q) and path (%q): %v", address, path, err.Error()) } }() } diff --git a/pkg/mount-manager/device-utils.go b/pkg/mount-manager/device-utils.go index 
diff --git a/pkg/gce-pd-csi-driver/utils_windows.go b/pkg/gce-pd-csi-driver/utils_windows.go index 42fcf881f..09ece4eae 100644 --- a/pkg/gce-pd-csi-driver/utils_windows.go +++ b/pkg/gce-pd-csi-driver/utils_windows.go @@ -83,7 +83,7 @@ func getDevicePath(ns *GCENodeServer, volumeID, partition string) (string, error } deviceName, err := common.GetDeviceName(volumeKey) if err != nil { - return "", fmt.Errorf("error getting device name: %v", err) + return "", fmt.Errorf("error getting device name: %w", err) } proxy, ok := ns.Mounter.Interface.(mounter.CSIProxyMounter) diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 3d44c6d9d..cae379231 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -101,7 +101,7 @@ func (mm *metricsManager) InitializeHttpHandler(address, path string) { go func() { klog.Infof("Metric server listening at %q", address) if err := http.ListenAndServe(address, mux); err != nil { - klog.Fatalf("Failed to start metric server at specified address (%q) and path (%q): %w", address, path, err) + klog.Fatalf("Failed to start metric server at specified address (%q) and path (%q): %v", address, path, err.Error()) } }() } diff --git a/pkg/mount-manager/device-utils.go b/pkg/mount-manager/device-utils.go index f399f4af2..873f82fe9 100644 --- a/pkg/mount-manager/device-utils.go +++ b/pkg/mount-manager/device-utils.go @@ -110,7 +110,7 @@ func (m *deviceUtils) GetDiskByIdPaths(deviceName string, partition string) []st func existingDevicePath(devicePaths []string) (string, error) { for _, devicePath := range devicePaths { if pathExists, err := pathExists(devicePath); err != nil { - return "", fmt.Errorf("error checking if path exists: %v", err) + return "", fmt.Errorf("error checking if path exists: %w", err) } else if pathExists { return devicePath, nil } @@ -129,7 +129,7 @@ func getScsiSerial(devicePath string) (string, error) { "--whitelisted", fmt.Sprintf("--device=%v", devicePath)).CombinedOutput() if err != nil { - return "", fmt.Errorf("scsi_id failed for device %q with output %s: %v", devicePath, string(out), err) + return "", fmt.Errorf("scsi_id failed for device %q with output %s: %w", devicePath, string(out), err) } return parseScsiSerial(string(out)) @@ -155,7 +155,7 @@ func getNvmeSerial(devicePath string) (string, error) { nvmeIdPath, fmt.Sprintf("-d%s", devicePath)).CombinedOutput() if err != nil { - return "", fmt.Errorf("google_nvme_id failed for device %q with output %v: %v", devicePath, out, err) + return "", fmt.Errorf("google_nvme_id failed for device %q with output %v: %w", devicePath, out, err) } return parseNvmeSerial(string(out)) @@ -174,7 +174,7 @@ func parseNvmeSerial(output string) (string, error) { func ensureUdevToolExists(toolPath string) error { exists, err := pathutils.Exists(pathutils.CheckFollowSymlink, toolPath) if err != nil { - return fmt.Errorf("failed to check existence of %q: %v", toolPath, err) + return fmt.Errorf("failed to check existence of %q: %w", toolPath, err) } if !exists { // The driver should be containerized with the tool so maybe something is @@ -217,7 +217,7 @@ func (m *deviceUtils) VerifyDevicePath(devicePaths []string, deviceName string) devicePath, innerErr = existingDevicePath(devicePaths) if innerErr != nil { - return false, fmt.Errorf("failed to check for existing device path: %v", innerErr) + return false, fmt.Errorf("failed to check for existing device path: %w", innerErr) } if len(devicePath) == 0 { @@ -226,7 +226,7 @@ func (m *deviceUtils) VerifyDevicePath(devicePaths []string, deviceName string) // to repair the symlink. innerErr := udevadmTriggerForDiskIfExists(deviceName) if innerErr != nil { - return false, fmt.Errorf("failed to trigger udevadm fix of non existent disk for %q: %v", deviceName, innerErr) + return false, fmt.Errorf("failed to trigger udevadm fix of non existent disk for %q: %w", deviceName, innerErr) } // Go to next retry loop to get the deviceName again after // potentially fixing it with the udev command @@ -237,12 +237,12 @@ func (m *deviceUtils) VerifyDevicePath(devicePaths []string, deviceName string) // expected disk at devicePath by matching device Serial to the disk name devFsPath, innerErr := filepath.EvalSymlinks(devicePath) if innerErr != nil { - return false, fmt.Errorf("filepath.EvalSymlinks(%q) failed with %v", devicePath, innerErr) + return false, fmt.Errorf("filepath.EvalSymlinks(%q) failed with %w", devicePath, innerErr) } devFsSerial, innerErr := getDevFsSerial(devFsPath) if innerErr != nil { - return false, fmt.Errorf("couldn't get serial number for disk %s at path %s: %v", deviceName, devFsPath, innerErr) + return false, fmt.Errorf("couldn't get serial number for disk %s at path %s: %w", deviceName, devFsPath, innerErr) }
// SUCCESS! devicePath points to a /dev/* path that has a serial // equivalent to our disk name @@ -255,7 +255,7 @@ func (m *deviceUtils) VerifyDevicePath(devicePaths []string, deviceName string) // Attempt a repair innerErr = udevadmTriggerForDiskIfExists(deviceName) if innerErr != nil { - return false, fmt.Errorf("failed to trigger udevadm fix of misconfigured disk for %q: %v", deviceName, innerErr) + return false, fmt.Errorf("failed to trigger udevadm fix of misconfigured disk for %q: %w", deviceName, innerErr) } // Go to next retry loop to get the deviceName again after // potentially fixing it with the udev command @@ -263,7 +263,7 @@ func (m *deviceUtils) VerifyDevicePath(devicePaths []string, deviceName string) }) if err != nil { - return "", fmt.Errorf("failed to find and re-link disk %s with udevadm after retrying for %v: %v", deviceName, pollTimeout, err) + return "", fmt.Errorf("failed to find and re-link disk %s with udevadm after retrying for %v: %w", deviceName, pollTimeout, err) } return devicePath, nil
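VerifyDevicePath above retries until pollTimeout, attempting a udevadm repair whenever the by-id symlink is missing or points at a disk with the wrong serial. A condensed sketch of that poll-and-repair shape, assuming k8s.io/apimachinery's wait package; checkPath and repair are hypothetical stand-ins for the symlink lookup and the udevadm trigger:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForDevice(checkPath func() (string, error), repair func() error) (string, error) {
	var devicePath string
	err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
		p, err := checkPath()
		if err != nil {
			// A real error aborts the poll, mirroring the innerErr returns above.
			return false, fmt.Errorf("failed to check for existing device path: %w", err)
		}
		if p == "" {
			// Symlink missing: attempt a repair, then poll again.
			if err := repair(); err != nil {
				return false, fmt.Errorf("udevadm repair failed: %w", err)
			}
			return false, nil
		}
		devicePath = p
		return true, nil
	})
	return devicePath, err
}

func main() {
	tries := 0
	p, err := waitForDevice(
		func() (string, error) {
			if tries++; tries < 3 {
				return "", nil // not linked yet
			}
			return "/dev/disk/by-id/google-example", nil
		},
		func() error { return nil }, // pretend the udevadm trigger succeeded
	)
	fmt.Println(p, err)
}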
@@ -285,11 +285,11 @@ func getDevFsSerial(devFsPath string) (string, error) { func findAvailableDevFsPaths() ([]string, error) { diskSDPaths, err := filepath.Glob(diskSDPattern) if err != nil { - return nil, fmt.Errorf("failed to filepath.Glob(\"%s\"): %v", diskSDPattern, err) + return nil, fmt.Errorf("failed to filepath.Glob(\"%s\"): %w", diskSDPattern, err) } diskNvmePaths, err := filepath.Glob(diskNvmePattern) if err != nil { - return nil, fmt.Errorf("failed to filepath.Glob(\"%s\"): %v", diskNvmePattern, err) + return nil, fmt.Errorf("failed to filepath.Glob(\"%s\"): %w", diskNvmePattern, err) } return append(diskSDPaths, diskNvmePaths...), nil } @@ -305,7 +305,7 @@ func udevadmTriggerForDiskIfExists(deviceName string) error { if err != nil || len(devFsSerial) == 0 { // If we get an error, ignore. Either this isn't a block device, or it // isn't something we can get a serial number from - klog.V(7).Infof("failed to get Serial num for disk %s at path %s: %v", deviceName, devFsPath, err) + klog.V(7).Infof("failed to get Serial num for disk %s at path %s: %v", deviceName, devFsPath, err.Error()) continue } devFsPathToSerial[devFsPath] = devFsSerial @@ -315,7 +315,7 @@ func udevadmTriggerForDiskIfExists(deviceName string) error { klog.Warningf("udevadm --trigger running to fix disk at path %s which has serial numberID %s", devFsPath, devFsSerial) err := udevadmChangeToDrive(devFsPath) if err != nil { - return fmt.Errorf("failed to fix disk which has serial numberID %s: %v", devFsSerial, err) + return fmt.Errorf("failed to fix disk which has serial numberID %s: %w", devFsSerial, err) } return nil } @@ -340,7 +340,7 @@ func udevadmChangeToDrive(devFsPath string) error { "--action=change", fmt.Sprintf("--property-match=DEVNAME=%s", devFsPath)).CombinedOutput() if err != nil { - return fmt.Errorf("udevadmChangeToDrive: udevadm trigger failed for drive %q with output %s: %v.", devFsPath, string(out), err) + return fmt.Errorf("udevadmChangeToDrive: udevadm trigger failed for drive %q with output %s: %w.", devFsPath, string(out), err) } return nil } diff --git a/pkg/mount-manager/safe-mounter_windows.go b/pkg/mount-manager/safe-mounter_windows.go index 4a5114d5a..23e6dd36a 100644 --- a/pkg/mount-manager/safe-mounter_windows.go +++ b/pkg/mount-manager/safe-mounter_windows.go @@ -67,7 +67,7 @@ func NewSafeMounter() (*mount.SafeFormatAndMount, error) { Exec: utilexec.New(), }, nil } - klog.V(4).Infof("failed to connect to csi-proxy v1 with error=%v, will try with v1Beta", err) + klog.V(4).Infof("failed to connect to csi-proxy v1 with error=%v, will try with v1Beta", err.Error()) csiProxyMounterV1Beta, err := NewCSIProxyMounterV1Beta() if err == nil { @@ -77,6 +77,6 @@ func NewSafeMounter() (*mount.SafeFormatAndMount, error) { Exec: utilexec.New(), }, nil } - klog.V(4).Infof("failed to connect to csi-proxy v1beta with error=%v", err) + klog.V(4).Infof("failed to connect to csi-proxy v1beta with error=%v", err.Error()) return nil, err } diff --git a/pkg/mount-manager/statter_linux.go b/pkg/mount-manager/statter_linux.go index a4e0b1947..d75c39b32 100644 --- a/pkg/mount-manager/statter_linux.go +++ b/pkg/mount-manager/statter_linux.go @@ -45,7 +45,7 @@ func (*realStatter) StatFS(path string) (available, capacity, used, inodesFree, statfs := &unix.Statfs_t{} err = unix.Statfs(path, statfs) if err != nil { - err = fmt.Errorf("failed to get fs info on path %s: %v", path, err) + err = fmt.Errorf("failed to get fs info on path %s: %w", path, err) return } diff --git a/test/k8s-integration/cluster.go b/test/k8s-integration/cluster.go index 309b94d08..e85cbe844 100644 --- a/test/k8s-integration/cluster.go +++ b/test/k8s-integration/cluster.go @@ -372,7 +372,7 @@ func getKubeClusterVersion() (string, error) { func mustGetKubeClusterVersion() string { ver, err := getKubeClusterVersion() if err != nil { - klog.Fatalf("Error: %w", err) + klog.Fatalf("Error: %v", err.Error()) } return ver } diff --git a/test/remote/ssh.go b/test/remote/ssh.go index 9f3445c11..7036dcb2b 100644 --- a/test/remote/ssh.go +++ b/test/remote/ssh.go @@ -34,7 +34,7 @@ var ( func init() { usr, err := user.Current() if err != nil { - klog.Fatal(err) + klog.Fatal(err.Error()) } sshDefaultKey = fmt.Sprintf("%s/.ssh/google_compute_engine", usr.HomeDir)
diff --git a/test/sanity/sanity_test.go b/test/sanity/sanity_test.go index d3ac9b36d..9eac7c836 100644 --- a/test/sanity/sanity_test.go +++ b/test/sanity/sanity_test.go @@ -56,7 +56,7 @@ func TestSanity(t *testing.T) { cloudProvider, err := gce.CreateFakeCloudProvider(project, zone, nil) if err != nil { - t.Fatalf("Failed to get cloud provider: %v", err) + t.Fatalf("Failed to get cloud provider: %v", err.Error()) } mounter := mountmanager.NewFakeSafeMounter() @@ -68,7 +68,7 @@ func TestSanity(t *testing.T) { nodeServer := driver.NewNodeServer(gceDriver, mounter, deviceUtils, metadataservice.NewFakeService(), mountmanager.NewFakeStatter(mounter)) err = gceDriver.SetupGCEDriver(driverName, vendorVersion, extraLabels, identityServer, controllerServer, nodeServer) if err != nil { - t.Fatalf("Failed to initialize GCE CSI Driver: %v", err) + t.Fatalf("Failed to initialize GCE CSI Driver: %v", err.Error()) } instance := &compute.Instance{ @@ -79,13 +79,13 @@ func TestSanity(t *testing.T) { err = os.MkdirAll(tmpDir, 0755) if err != nil { - t.Fatalf("Failed to create sanity temp working dir %s: %v", tmpDir, err) + t.Fatalf("Failed to create sanity temp working dir %s: %v", tmpDir, err.Error()) } defer func() { // Clean up tmp dir if err = os.RemoveAll(tmpDir); err != nil { - t.Fatalf("Failed to clean up sanity temp working dir %s: %v", tmpDir, err) + t.Fatalf("Failed to clean up sanity temp working dir %s: %v", tmpDir, err.Error()) } }() From 7f52192594db446ab80c87120bd0f61e048d4bdf Mon Sep 17 00:00:00 2001 From: Sunny Song Date: Tue, 20 Dec 2022 14:40:33 -0800 Subject: [PATCH 2/2] Change status.Error to status.Errorf in controller.go to avoid the Sprintf call --- pkg/gce-pd-csi-driver/controller.go | 127 ++++++++++++++-------------- pkg/gce-pd-csi-driver/node.go | 14 --- 2 files changed, 64 insertions(+), 77 deletions(-)
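The second commit is mechanical: everywhere controller.go built a message with fmt.Sprintf only to pass it to status.Error, it now calls status.Errorf directly. The two forms produce identical status errors, as this small standalone sketch illustrates ("unknown-type" is an arbitrary placeholder):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Before: a Sprintf nested inside status.Error.
	before := status.Error(codes.InvalidArgument,
		fmt.Sprintf("CreateVolume replication type '%s' is not supported", "unknown-type"))
	// After: status.Errorf formats directly, one call instead of two.
	after := status.Errorf(codes.InvalidArgument,
		"CreateVolume replication type '%s' is not supported", "unknown-type")
	fmt.Println(before.Error() == after.Error()) // true: same code, same message
}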
diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go index c4521cfe2..a867b6b27 100644 --- a/pkg/gce-pd-csi-driver/controller.go +++ b/pkg/gce-pd-csi-driver/controller.go @@ -18,12 +18,13 @@ import ( "context" "fmt" "math/rand" - "regexp" "sort" "strings" "time" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "google.golang.org/protobuf/types/known/timestamppb" + csi "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/timestamp" @@ -164,12 +165,12 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre capBytes, err := getRequestCapacity(capacityRange) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume Request Capacity is invalid: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "CreateVolume Request Capacity is invalid: %v", err.Error()) } err = validateVolumeCapabilities(volumeCapabilities) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapabilities is invalid: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "VolumeCapabilities is invalid: %v", err.Error()) } // Apply Parameters (case-insensitive). We leave validation of @@ -191,25 +192,25 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre case replicationTypeNone: zones, err = pickZones(ctx, gceCS, req.GetAccessibilityRequirements(), 1) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume failed to pick zones for disk: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "CreateVolume failed to pick zones for disk: %v", err.Error()) } if len(zones) != 1 { - return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to pick exactly 1 zone for zonal disk, got %v instead", len(zones))) + return nil, status.Errorf(codes.Internal, "Failed to pick exactly 1 zone for zonal disk, got %v instead", len(zones)) } volKey = meta.ZonalKey(name, zones[0]) case replicationTypeRegionalPD: zones, err = pickZones(ctx, gceCS, req.GetAccessibilityRequirements(), 2) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume failed to pick zones for disk: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "CreateVolume failed to pick zones for disk: %v", err.Error()) } region, err := common.GetRegionFromZones(zones) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume failed to get region from zones: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "CreateVolume failed to get region from zones: %v", err.Error()) } volKey = meta.RegionalKey(name, region) default: - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume replication type '%s' is not supported", params.ReplicationType)) + return nil, status.Errorf(codes.InvalidArgument, "CreateVolume replication type '%s' is not supported", params.ReplicationType) } volumeID, err := common.KeyToVolumeID(volKey, gceCS.CloudProvider.GetDefaultProject()) @@ -235,7 +236,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre int64(capacityRange.GetLimitBytes()), multiWriter) if err != nil { - return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("CreateVolume disk already exists with same name and is incompatible: %v", err.Error())) + return nil, status.Errorf(codes.AlreadyExists, "CreateVolume disk already exists with same name and is incompatible: %v", err.Error()) } ready, err := isDiskReady(existingDisk) @@ -243,7 +244,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre return nil, LoggedError("CreateVolume disk "+volKey.String()+" had error checking ready status: ", err) } if !ready { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume existing disk %v is not ready", volKey)) + return nil, status.Errorf(codes.Internal, "CreateVolume existing disk %v is not ready", volKey) } // If there is no validation error, immediately return success @@ -272,7 +273,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre // Verify that the source VolumeID is in the correct format. project, sourceVolKey, err := common.VolumeIDToKey(volumeContentSourceVolumeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume source volume id is invalid: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "CreateVolume source volume id is invalid: %v", err.Error()) } // Verify that the volume in VolumeContentSource exists. 
@@ -322,7 +323,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre return nil, LoggedError("CreateVolume disk from source volume "+sourceVolKey.String()+" had error checking ready status: ", err) } if !ready { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume disk from source volume %v is not ready", sourceVolKey)) + return nil, status.Errorf(codes.Internal, "CreateVolume disk from source volume %v is not ready", sourceVolKey) } } else { // if VolumeContentSource is nil, validate access mode is not read only @@ -336,7 +337,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre switch params.ReplicationType { case replicationTypeNone: if len(zones) != 1 { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume failed to get a single zone for creating zonal disk, instead got: %v", zones)) + return nil, status.Errorf(codes.Internal, "CreateVolume failed to get a single zone for creating zonal disk, instead got: %v", zones) } disk, err = createSingleZoneDisk(ctx, gceCS.CloudProvider, name, zones, params, capacityRange, capBytes, snapshotID, volumeContentSourceVolumeID, multiWriter) if err != nil { @@ -344,22 +345,22 @@ } case replicationTypeRegionalPD: if len(zones) != 2 { - return nil, status.Errorf(codes.Internal, fmt.Sprintf("CreateVolume failed to get a 2 zones for creating regional disk, instead got: %v", zones)) + return nil, status.Errorf(codes.Internal, "CreateVolume failed to get 2 zones for creating regional disk, instead got: %v", zones) } disk, err = createRegionalDisk(ctx, gceCS.CloudProvider, name, zones, params, capacityRange, capBytes, snapshotID, volumeContentSourceVolumeID, multiWriter) if err != nil { return nil, LoggedError("CreateVolume failed to create regional disk "+name+": ", err) } default: - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume replication type '%s' is not supported", params.ReplicationType)) + return nil, status.Errorf(codes.InvalidArgument, "CreateVolume replication type '%s' is not supported", params.ReplicationType) } ready, err := isDiskReady(disk) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume disk %v had error checking ready status: %v", volKey, err.Error())) + return nil, status.Errorf(codes.Internal, "CreateVolume disk %v had error checking ready status: %v", volKey, err.Error()) } if !ready { - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume disk %v is not ready", volKey)) + return nil, status.Errorf(codes.Internal, "CreateVolume disk %v is not ready", volKey) } klog.V(4).Infof("CreateVolume succeeded for disk %v", volKey) @@ -445,12 +446,12 @@ func (gceCS *GCEControllerServer) validateControllerPublishVolumeRequest(ctx con project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return "", nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerPublishVolume volume ID is invalid: %v", err.Error())) + return "", nil, status.Errorf(codes.InvalidArgument, "ControllerPublishVolume volume ID is invalid: %v", err.Error()) } // TODO(#253): Check volume capability matches for ALREADY_EXISTS if err = validateVolumeCapability(volumeCapability); err != nil { - return "", nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapabilities is invalid: %v", err.Error())) + return "", nil, status.Errorf(codes.InvalidArgument, "VolumeCapabilities is invalid: %v", 
err.Error()) } return project, volKey, nil @@ -491,20 +492,20 @@ func (gceCS *GCEControllerServer) executeControllerPublishVolume(ctx context.Con _, err = gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionV1) if err != nil { if gce.IsGCENotFoundError(err) { - return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find disk %v: %v", volKey.String(), err.Error())) + return nil, status.Errorf(codes.NotFound, "Could not find disk %v: %v", volKey.String(), err.Error()) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get disk error: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "Unknown get disk error: %v", err.Error()) } instanceZone, instanceName, err := common.NodeIDToZoneAndName(nodeID) if err != nil { - return nil, status.Error(codes.NotFound, fmt.Sprintf("could not split nodeID: %v", err.Error())) + return nil, status.Errorf(codes.NotFound, "could not split nodeID: %v", err.Error()) } instance, err := gceCS.CloudProvider.GetInstanceOrError(ctx, instanceZone, instanceName) if err != nil { if gce.IsGCENotFoundError(err) { - return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find instance %v: %v", nodeID, err.Error())) + return nil, status.Errorf(codes.NotFound, "Could not find instance %v: %v", nodeID, err.Error()) } - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get instance error: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "Unknown get instance error: %v", err.Error()) } readWrite := "READ_WRITE" @@ -514,12 +515,12 @@ func (gceCS *GCEControllerServer) executeControllerPublishVolume(ctx context.Con deviceName, err := common.GetDeviceName(volKey) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("error getting device name: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "error getting device name: %v", err.Error()) } attached, err := diskIsAttachedAndCompatible(deviceName, instance, volumeCapability, readWrite) if err != nil { - return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Disk %v already published to node %v but incompatible: %v", volKey.Name, nodeID, err.Error())) + return nil, status.Errorf(codes.AlreadyExists, "Disk %v already published to node %v but incompatible: %v", volKey.Name, nodeID, err.Error()) } if attached { // Volume is attached to node. Success! 
@@ -528,16 +529,16 @@ func (gceCS *GCEControllerServer) executeControllerPublishVolume(ctx context.Con } instanceZone, instanceName, err = common.NodeIDToZoneAndName(nodeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("could not split nodeID: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "could not split nodeID: %v", err.Error()) } err = gceCS.CloudProvider.AttachDisk(ctx, project, volKey, readWrite, attachableDiskTypePersistent, instanceZone, instanceName) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Attach error: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "unknown Attach error: %v", err.Error()) } err = gceCS.CloudProvider.WaitForAttach(ctx, project, volKey, instanceZone, instanceName) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("unknown WaitForAttach error: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "unknown WaitForAttach error: %v", err.Error()) } klog.V(4).Infof("ControllerPublishVolume succeeded for disk %v to instance %v", volKey, nodeID) return pubVolResp, nil @@ -579,7 +580,7 @@ func (gceCS *GCEControllerServer) validateControllerUnpublishVolumeRequest(ctx c project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return "", nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerUnpublishVolume Volume ID is invalid: %v", err.Error())) + return "", nil, status.Errorf(codes.InvalidArgument, "ControllerUnpublishVolume Volume ID is invalid: %v", err.Error()) } return project, volKey, nil @@ -612,7 +613,7 @@ func (gceCS *GCEControllerServer) executeControllerUnpublishVolume(ctx context.C instanceZone, instanceName, err := common.NodeIDToZoneAndName(nodeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("could not split nodeID: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "could not split nodeID: %v", err.Error()) } instance, err := gceCS.CloudProvider.GetInstanceOrError(ctx, instanceZone, instanceName) if err != nil { @@ -621,12 +622,12 @@ func (gceCS *GCEControllerServer) executeControllerUnpublishVolume(ctx context.C klog.Warningf("Treating volume %v as unpublished because node %v could not be found", volKey.String(), instanceName) return &csi.ControllerUnpublishVolumeResponse{}, nil } - return nil, status.Error(codes.Internal, fmt.Sprintf("error getting instance: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "error getting instance: %v", err.Error()) } deviceName, err := common.GetDeviceName(volKey) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("error getting device name: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "error getting device name: %v", err.Error()) } attached := diskIsAttached(deviceName, instance) @@ -656,7 +657,7 @@ func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context } project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Volume ID is invalid: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "Volume ID is invalid: %v", err.Error()) } project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) @@ -675,7 +676,7 @@ func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context disk, err := gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionV1) if err != nil { if 
gce.IsGCENotFoundError(err) { - return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find disk %v: %v", volKey.Name, err.Error())) + return nil, status.Errorf(codes.NotFound, "Could not find disk %v: %v", volKey.Name, err.Error()) } return nil, LoggedError("Unknown get disk error: ", err) } @@ -728,8 +729,8 @@ func generateFailedValidationMessage(format string, a ...interface{}) *csi.Valid func (gceCS *GCEControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { // https://cloud.google.com/compute/docs/reference/beta/disks/list if req.MaxEntries < 0 { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf( - "ListVolumes got max entries request %v. GCE only supports values >0", req.MaxEntries)) + return nil, status.Errorf(codes.InvalidArgument, + "ListVolumes got max entries request %v. GCE only supports values >0", req.MaxEntries) } offset := 0 @@ -738,7 +739,7 @@ func (gceCS *GCEControllerServer) ListVolumes(ctx context.Context, req *csi.List diskList, _, err := gceCS.CloudProvider.ListDisks(ctx) if err != nil { if gce.IsGCEInvalidError(err) { - return nil, status.Error(codes.Aborted, fmt.Sprintf("ListVolumes error with invalid request: %v", err.Error())) + return nil, status.Errorf(codes.Aborted, "ListVolumes error with invalid request: %v", err.Error()) } return nil, LoggedError("Unknown list disk error: ", err) } @@ -747,7 +748,7 @@ func (gceCS *GCEControllerServer) ListVolumes(ctx context.Context, req *csi.List } else { offset, ok = gceCS.seen[req.StartingToken] if !ok { - return nil, status.Error(codes.Aborted, fmt.Sprintf("ListVolumes error with invalid startingToken: %s", req.StartingToken)) + return nil, status.Errorf(codes.Aborted, "ListVolumes error with invalid startingToken: %s", req.StartingToken) } } @@ -809,7 +810,7 @@ func (gceCS *GCEControllerServer) CreateSnapshot(ctx context.Context, req *csi.C } project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateSnapshot Volume ID is invalid: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "CreateSnapshot Volume ID is invalid: %v", err.Error()) } if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired { @@ -821,14 +822,14 @@ func (gceCS *GCEControllerServer) CreateSnapshot(ctx context.Context, req *csi.C _, err = gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionV1) if err != nil { if gce.IsGCENotFoundError(err) { - return nil, status.Error(codes.NotFound, fmt.Sprintf("CreateSnapshot could not find disk %v: %v", volKey.String(), err.Error())) + return nil, status.Errorf(codes.NotFound, "CreateSnapshot could not find disk %v: %v", volKey.String(), err.Error()) } return nil, LoggedError("CreateSnapshot unknown get disk error: ", err) } snapshotParams, err := common.ExtractAndDefaultSnapshotParameters(req.GetParameters(), gceCS.Driver.name) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Invalid snapshot parameters: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "Invalid snapshot parameters: %v", err.Error()) } var snapshot *csi.Snapshot @@ -844,7 +845,7 @@ func (gceCS *GCEControllerServer) CreateSnapshot(ctx context.Context, req *csi.C return nil, err } default: - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Invalid snapshot type: %s", snapshotParams.SnapshotType)) + return nil, status.Errorf(codes.InvalidArgument, "Invalid snapshot type: %s", 
snapshotParams.SnapshotType) } klog.V(4).Infof("CreateSnapshot succeeded for snapshot %s on volume %s", snapshot.SnapshotId, volumeID) @@ -854,7 +855,7 @@ func (gceCS *GCEControllerServer) createPDSnapshot(ctx context.Context, project string, volKey *meta.Key, snapshotName string, snapshotParams common.SnapshotParameters) (*csi.Snapshot, error) { volumeID, err := common.KeyToVolumeID(volKey, project) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Invalid volume key: %v", volKey)) + return nil, status.Errorf(codes.InvalidArgument, "Invalid volume key: %v", volKey) } // Check if PD snapshot already exists @@ -862,13 +863,13 @@ func (gceCS *GCEControllerServer) createPDSnapshot(ctx context.Context, project snapshot, err = gceCS.CloudProvider.GetSnapshot(ctx, project, snapshotName) if err != nil { if !gce.IsGCEError(err, "notFound") { - return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get snapshot error: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "Unknown get snapshot error: %v", err.Error()) } // If we could not find the snapshot, we create a new one snapshot, err = gceCS.CloudProvider.CreateSnapshot(ctx, project, volKey, snapshotName, snapshotParams) if err != nil { if gce.IsGCEError(err, "notFound") { - return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volKey.String(), err.Error())) + return nil, status.Errorf(codes.NotFound, "Could not find volume with ID %v: %v", volKey.String(), err.Error()) } return nil, LoggedError("Unknown create snapshot error: ", err) } @@ -876,17 +877,17 @@ func (gceCS *GCEControllerServer) createPDSnapshot(ctx context.Context, project err = gceCS.validateExistingSnapshot(snapshot, volKey) if err != nil { - return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Error in creating snapshot: %v", err.Error())) + return nil, status.Errorf(codes.AlreadyExists, "Error in creating snapshot: %v", err.Error()) } timestamp, err := parseTimestamp(snapshot.CreationTimestamp) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to covert creation timestamp: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "Failed to convert creation timestamp: %v", err.Error()) } ready, err := isCSISnapshotReady(snapshot.Status) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Snapshot had error checking ready status: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "Snapshot had error checking ready status: %v", err.Error()) } return &csi.Snapshot{ @@ -901,7 +902,7 @@ func (gceCS *GCEControllerServer) createPDSnapshot(ctx context.Context, project func (gceCS *GCEControllerServer) createImage(ctx context.Context, project string, volKey *meta.Key, imageName string, snapshotParams common.SnapshotParameters) (*csi.Snapshot, error) { volumeID, err := common.KeyToVolumeID(volKey, project) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Invalid volume key: %v", volKey)) + return nil, status.Errorf(codes.InvalidArgument, "Invalid volume key: %v", volKey) } // Check if image already exists @@ -915,7 +916,7 @@ func (gceCS *GCEControllerServer) createImage(ctx context.Context, project strin image, err = gceCS.CloudProvider.CreateImage(ctx, project, volKey, imageName, snapshotParams) if err != nil { if gce.IsGCEError(err, "notFound") {
volume with ID %v: %v", volKey.String(), err.Error())) + return nil, status.Errorf(codes.NotFound, "Could not find volume with ID %v: %v", volKey.String(), err.Error()) } return nil, LoggedError("Unknown create image error: ", err) } @@ -923,17 +924,17 @@ func (gceCS *GCEControllerServer) createImage(ctx context.Context, project strin err = gceCS.validateExistingImage(image, volKey) if err != nil { - return nil, status.Errorf(codes.AlreadyExists, fmt.Sprintf("Error in creating snapshot: %v", err.Error())) + return nil, status.Errorf(codes.AlreadyExists, "Error in creating snapshot: %v", err.Error()) } timestamp, err := parseTimestamp(image.CreationTimestamp) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to covert creation timestamp: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "Failed to covert creation timestamp: %v", err.Error()) } ready, err := isImageReady(image.Status) if err != nil { - return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to check image status: %v", err.Error())) + return nil, status.Errorf(codes.Internal, "Failed to check image status: %v", err.Error()) } return &csi.Snapshot{ @@ -1055,7 +1056,7 @@ func (gceCS *GCEControllerServer) DeleteSnapshot(ctx context.Context, req *csi.D return nil, LoggedError("unknown Delete image error: ", err) } default: - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("unknown snapshot type %s", snapshotType)) + return nil, status.Errorf(codes.InvalidArgument, "unknown snapshot type %s", snapshotType) } return &csi.DeleteSnapshotResponse{}, nil @@ -1070,8 +1071,8 @@ func (gceCS *GCEControllerServer) ListSnapshots(ctx context.Context, req *csi.Li // case 2: no SnapshotId is set, so we return all the snapshots that satify the reqeust. var maxEntries int = int(req.MaxEntries) if maxEntries < 0 { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf( - "ListSnapshots got max entries request %v. GCE only supports values >0", maxEntries)) + return nil, status.Errorf(codes.InvalidArgument, + "ListSnapshots got max entries request %v. 
@@ -1070,8 +1071,8 @@ func (gceCS *GCEControllerServer) ListSnapshots(ctx context.Context, req *csi.Li // case 2: no SnapshotId is set, so we return all the snapshots that satify the reqeust. var maxEntries int = int(req.MaxEntries) if maxEntries < 0 { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf( - "ListSnapshots got max entries request %v. GCE only supports values >0", maxEntries)) + return nil, status.Errorf(codes.InvalidArgument, + "ListSnapshots got max entries request %v. GCE only supports values >0", maxEntries) } var offset int @@ -1082,7 +1083,7 @@ func (gceCS *GCEControllerServer) ListSnapshots(ctx context.Context, req *csi.Li snapshotList, err := gceCS.getSnapshots(ctx, req) if err != nil { if gce.IsGCEInvalidError(err) { - return nil, status.Error(codes.Aborted, fmt.Sprintf("ListSnapshots error with invalid request: %v", err.Error())) + return nil, status.Errorf(codes.Aborted, "ListSnapshots error with invalid request: %v", err.Error()) } return nil, LoggedError("Unknown list snapshots error: ", err) } @@ -1091,7 +1092,7 @@ } else { offset, ok = gceCS.snapshotTokens[req.StartingToken] if !ok { - return nil, status.Error(codes.Aborted, fmt.Sprintf("ListSnapshots error with invalid startingToken: %s", req.StartingToken)) + return nil, status.Errorf(codes.Aborted, "ListSnapshots error with invalid startingToken: %s", req.StartingToken) } } @@ -1120,12 +1121,12 @@ func (gceCS *GCEControllerServer) ControllerExpandVolume(ctx context.Context, re capacityRange := req.GetCapacityRange() reqBytes, err := getRequestCapacity(capacityRange) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerExpandVolume capacity range is invalid: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "ControllerExpandVolume capacity range is invalid: %v", err.Error()) } project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerExpandVolume Volume ID is invalid: %v", err.Error())) + return nil, status.Errorf(codes.InvalidArgument, "ControllerExpandVolume Volume ID is invalid: %v", err.Error()) } project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) @@ -1159,7 +1160,7 @@ func (gceCS *GCEControllerServer) getSnapshots(ctx context.Context, req *csi.Lis snapshots, _, err = gceCS.CloudProvider.ListSnapshots(ctx, filter) if err != nil { if gce.IsGCEError(err, "invalid") { - return nil, status.Error(codes.Aborted, fmt.Sprintf("Invalid error: %v", err.Error())) + return nil, status.Errorf(codes.Aborted, "Invalid error: %v", err.Error()) } return nil, LoggedError("Unknown list snapshot error: ", err) } @@ -1167,7 +1168,7 @@ func (gceCS *GCEControllerServer) getSnapshots(ctx context.Context, req *csi.Lis images, _, err = gceCS.CloudProvider.ListImages(ctx, filter) if err != nil { if gce.IsGCEError(err, "invalid") { - return nil, status.Error(codes.Aborted, fmt.Sprintf("Invalid error: %v", err.Error())) + return nil, status.Errorf(codes.Aborted, "Invalid error: %v", err.Error()) } return nil, LoggedError("Unknown list image error: ", err) }
diff --git a/pkg/gce-pd-csi-driver/node.go b/pkg/gce-pd-csi-driver/node.go index 340caa5a2..4f6748eb5 100644 --- a/pkg/gce-pd-csi-driver/node.go +++ b/pkg/gce-pd-csi-driver/node.go @@ -363,16 +363,6 @@ func (ns *GCENodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUns return nil, status.Error(codes.Internal, fmt.Sprintf("NodeUnstageVolume failed: %v\nUnmounting arguments: %s\n", err.Error(), stagingTargetPath)) } -<<<<<<< HEAD -======= - devicePath, err := getDevicePath(ns, volumeID, "" /* partition, which is unused */) - if err != nil { - klog.Errorf("Failed to find device path for volume %s. Device may not be detached cleanly (error is ignored and unstaging is continuing): %v", volumeID, err.Error()) - } else if err := ns.DeviceUtils.DisableDevice(devicePath); err != nil { - klog.Errorf("Failed to disable device %s for volume %s. Device may not be detached cleanly (error is ignored and unstaging is continuing): %v", devicePath, volumeID, err.Error()) - } - ->>>>>>> Separate user errors from internal errors klog.V(4).Infof("NodeUnstageVolume succeeded on %v from %s", volumeID, stagingTargetPath) return &csi.NodeUnstageVolumeResponse{}, nil } @@ -501,11 +491,7 @@ func (ns *GCENodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpa resizer := resizefs.NewResizeFs(ns.Mounter) _, err = resizer.Resize(devicePath, volumePath) if err != nil { -<<<<<<< HEAD - return nil, status.Error(codes.Internal, fmt.Sprintf("error when resizing volume %s: %v", volKey.String(), err)) -======= return nil, status.Error(codes.Internal, fmt.Sprintf("error when resizing volume %s from device '%s' at path '%s': %v", volKey.String(), devicePath, volumePath, err.Error())) ->>>>>>> Separate user errors from internal errors }
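For reference, the block removed above came from an unresolved merge left in the first commit; the DisableDevice cleanup inside it followed a best-effort style: unmount failures fail the RPC, while device-disable failures are only logged so unstaging can still complete. A minimal sketch of that pattern; unmount and disableDevice are hypothetical stand-ins, not driver code:

package main

import (
	"errors"
	"fmt"
)

func unstage(unmount, disableDevice func() error) error {
	if err := unmount(); err != nil {
		// A failed unmount must fail the CSI call.
		return fmt.Errorf("NodeUnstageVolume failed: %w", err)
	}
	if err := disableDevice(); err != nil {
		// Ignored on purpose: the volume is already unmounted and a
		// failure here should not wedge the unstage operation.
		fmt.Printf("ignoring disable error: %v\n", err)
	}
	return nil
}

func main() {
	err := unstage(
		func() error { return nil },
		func() error { return errors.New("device busy") },
	)
	fmt.Println(err) // <nil>: unstaging still succeeds
}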