From eb56ce885e0742c2e6ddad2e5a810833f5cf07c9 Mon Sep 17 00:00:00 2001 From: David Zhu Date: Tue, 15 Oct 2019 16:37:10 -0700 Subject: [PATCH] Clean up all logging for the driver, log successes in all cases --- cmd/main.go | 7 +++- pkg/gce-cloud-provider/compute/gce-compute.go | 39 +++++++++++-------- pkg/gce-cloud-provider/compute/gce.go | 12 +++--- pkg/gce-pd-csi-driver/controller.go | 39 ++++++------------- pkg/gce-pd-csi-driver/identity.go | 5 --- pkg/gce-pd-csi-driver/node.go | 23 +++++------ pkg/gce-pd-csi-driver/utils.go | 8 ++-- pkg/mount-manager/device-utils.go | 4 +- 8 files changed, 61 insertions(+), 76 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index dfcf99f6a..822667c4b 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -43,6 +43,11 @@ const ( ) func init() { + // klog verbosity guide for this package + // Use V(2) for one time config information + // Use V(4) for general debug information logging + // Use V(5) for GCE Cloud Provider Call informational logging + // Use V(6) for extra repeated/polling information klog.InitFlags(flag.CommandLine) } @@ -57,7 +62,7 @@ func handle() { if vendorVersion == "" { klog.Fatalf("vendorVersion must be set at compile time") } - klog.V(4).Infof("Driver vendor version %v", vendorVersion) + klog.V(2).Infof("Driver vendor version %v", vendorVersion) gceDriver := driver.GetGCEDriver() diff --git a/pkg/gce-cloud-provider/compute/gce-compute.go b/pkg/gce-cloud-provider/compute/gce-compute.go index ca564152f..e3962ccf9 100644 --- a/pkg/gce-cloud-provider/compute/gce-compute.go +++ b/pkg/gce-cloud-provider/compute/gce-compute.go @@ -65,6 +65,7 @@ type GCECompute interface { // RepairUnderspecifiedVolumeKey will query the cloud provider and check each zone for the disk specified // by the volume key and return a volume key with a correct zone func (cloud *CloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, volumeKey *meta.Key) (*meta.Key, error) { + klog.V(5).Infof("Repairing potentially underspecified volume 
key %v", volumeKey) region, err := common.GetRegionFromZones([]string{cloud.zone}) if err != nil { return nil, fmt.Errorf("failed to get region from zones: %v", err) @@ -114,6 +115,7 @@ func (cloud *CloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, v } func (cloud *CloudProvider) ListZones(ctx context.Context, region string) ([]string, error) { + klog.V(5).Infof("Listing zones in region: %v", region) if len(cloud.zonesCache[region]) > 0 { return cloud.zonesCache[region], nil } @@ -131,6 +133,7 @@ func (cloud *CloudProvider) ListZones(ctx context.Context, region string) ([]str } func (cloud *CloudProvider) ListSnapshots(ctx context.Context, filter string, maxEntries int64, pageToken string) ([]*computev1.Snapshot, string, error) { + klog.V(5).Infof("Listing snapshots with filter: %s, max entries: %v, page token: %s", filter, maxEntries, pageToken) snapshots := []*computev1.Snapshot{} snapshotList, err := cloud.service.Snapshots.List(cloud.project).Filter(filter).MaxResults(maxEntries).PageToken(pageToken).Do() if err != nil { @@ -144,6 +147,7 @@ func (cloud *CloudProvider) ListSnapshots(ctx context.Context, filter string, ma } func (cloud *CloudProvider) GetDisk(ctx context.Context, key *meta.Key) (*CloudDisk, error) { + klog.V(5).Infof("Getting disk %v", key) switch key.Type() { case meta.Zonal: disk, err := cloud.getZonalDiskOrError(ctx, key.Zone, key.Name) @@ -159,23 +163,20 @@ func (cloud *CloudProvider) GetDisk(ctx context.Context, key *meta.Key) (*CloudD func (cloud *CloudProvider) getZonalDiskOrError(ctx context.Context, volumeZone, volumeName string) (*computev1.Disk, error) { svc := cloud.service project := cloud.project - klog.V(4).Infof("Getting disk %v from zone %v", volumeName, volumeZone) + disk, err := svc.Disks.Get(project, volumeZone, volumeName).Context(ctx).Do() if err != nil { return nil, err } - klog.V(4).Infof("Got disk %v from zone %v", volumeName, volumeZone) return disk, nil } func (cloud *CloudProvider) 
getRegionalDiskOrError(ctx context.Context, volumeRegion, volumeName string) (*computev1.Disk, error) { project := cloud.project - klog.V(4).Infof("Getting disk %v from region %v", volumeName, volumeRegion) disk, err := cloud.service.RegionDisks.Get(project, volumeRegion, volumeName).Context(ctx).Do() if err != nil { return nil, err } - klog.V(4).Infof("Got disk %v from region %v", volumeName, volumeRegion) return disk, nil } @@ -194,6 +195,7 @@ func (cloud *CloudProvider) getRegionURI(region string) string { } func (cloud *CloudProvider) ValidateExistingDisk(ctx context.Context, resp *CloudDisk, diskType string, reqBytes, limBytes int64) error { + klog.V(5).Infof("Validating existing disk %v with diskType: %s, requested bytes: %v, limit bytes: %v", resp, diskType, reqBytes, limBytes) if resp == nil { return fmt.Errorf("disk does not exist") } @@ -212,13 +214,11 @@ func (cloud *CloudProvider) ValidateExistingDisk(ctx context.Context, resp *Clou return fmt.Errorf("disk already exists with incompatible type. Need %v. Got %v", diskType, respType[len(respType)-1]) } - - // Volume exists with matching name, capacity, type. - klog.V(4).Infof("Compatible disk already exists. 
Reusing existing.") return nil } func (cloud *CloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key, diskType string, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID, diskEncryptionKmsKey string) error { + klog.V(5).Infof("Inserting disk %v", volKey) switch volKey.Type() { case meta.Zonal: return cloud.insertZonalDisk(ctx, volKey, diskType, capBytes, capacityRange, snapshotID, diskEncryptionKmsKey) @@ -349,6 +349,7 @@ func (cloud *CloudProvider) insertZonalDisk(ctx context.Context, volKey *meta.Ke } func (cloud *CloudProvider) DeleteDisk(ctx context.Context, volKey *meta.Key) error { + klog.V(5).Infof("Deleting disk: %v", volKey) switch volKey.Type() { case meta.Zonal: return cloud.deleteZonalDisk(ctx, volKey.Zone, volKey.Name) @@ -392,6 +393,7 @@ func (cloud *CloudProvider) deleteRegionalDisk(ctx context.Context, region, name } func (cloud *CloudProvider) AttachDisk(ctx context.Context, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error { + klog.V(5).Infof("Attaching disk %v to %s", volKey, instanceName) source := cloud.GetDiskSourceURI(volKey) deviceName, err := common.GetDeviceName(volKey) @@ -418,6 +420,7 @@ func (cloud *CloudProvider) AttachDisk(ctx context.Context, volKey *meta.Key, re } func (cloud *CloudProvider) DetachDisk(ctx context.Context, deviceName, instanceZone, instanceName string) error { + klog.V(5).Infof("Detaching disk %v from %v", deviceName, instanceName) op, err := cloud.service.Instances.DetachDisk(cloud.project, instanceZone, instanceName, deviceName).Context(ctx).Do() if err != nil { return err @@ -516,11 +519,13 @@ func (cloud *CloudProvider) waitForGlobalOp(ctx context.Context, op *computev1.O } func (cloud *CloudProvider) WaitForAttach(ctx context.Context, volKey *meta.Key, instanceZone, instanceName string) error { + klog.V(5).Infof("Waiting for attach of disk %v to instance %v to complete...", volKey.Name, instanceName) + start := time.Now() return 
wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) { + klog.V(6).Infof("Polling for attach of disk %v to instance %v to complete for %v", volKey.Name, instanceName, time.Since(start)) disk, err := cloud.GetDisk(ctx, volKey) if err != nil { - klog.Errorf("GetDisk failed to get disk: %v", err) - return false, err + return false, fmt.Errorf("GetDisk failed to get disk: %v", err) } if disk == nil { @@ -547,31 +552,29 @@ func opIsDone(op *computev1.Operation) (bool, error) { } func (cloud *CloudProvider) GetInstanceOrError(ctx context.Context, instanceZone, instanceName string) (*computev1.Instance, error) { + klog.V(5).Infof("Getting instance %v from zone %v", instanceName, instanceZone) svc := cloud.service project := cloud.project - klog.V(4).Infof("Getting instance %v from zone %v", instanceName, instanceZone) - instance, err := svc.Instances.Get(project, instanceZone, instanceName).Do() if err != nil { return nil, err } - klog.V(4).Infof("Got instance %v from zone %v", instanceName, instanceZone) return instance, nil } func (cloud *CloudProvider) GetSnapshot(ctx context.Context, snapshotName string) (*computev1.Snapshot, error) { + klog.V(5).Infof("Getting snapshot %v", snapshotName) svc := cloud.service project := cloud.project - klog.V(4).Infof("Getting snapshot %v", snapshotName) snapshot, err := svc.Snapshots.Get(project, snapshotName).Context(ctx).Do() if err != nil { return nil, err } - klog.V(4).Infof("Got snapshot %v", snapshotName) return snapshot, nil } func (cloud *CloudProvider) DeleteSnapshot(ctx context.Context, snapshotName string) error { + klog.V(5).Infof("Deleting snapshot %v", snapshotName) op, err := cloud.service.Snapshots.Delete(cloud.project, snapshotName).Context(ctx).Do() if err != nil { if IsGCEError(err, "notFound") { @@ -588,6 +591,7 @@ func (cloud *CloudProvider) DeleteSnapshot(ctx context.Context, snapshotName str } func (cloud *CloudProvider) CreateSnapshot(ctx context.Context, volKey *meta.Key, snapshotName string) 
(*computev1.Snapshot, error) { + klog.V(5).Infof("Creating snapshot %s for volume %v", snapshotName, volKey) switch volKey.Type() { case meta.Zonal: return cloud.createZonalDiskSnapshot(ctx, volKey, snapshotName) @@ -599,6 +603,7 @@ func (cloud *CloudProvider) CreateSnapshot(ctx context.Context, volKey *meta.Key } func (cloud *CloudProvider) ResizeDisk(ctx context.Context, volKey *meta.Key, requestBytes int64) (int64, error) { + klog.V(5).Infof("Resizing disk %v to size %v", volKey, requestBytes) cloudDisk, err := cloud.GetDisk(ctx, volKey) if err != nil { return -1, fmt.Errorf("failed to get disk: %v", err) @@ -694,16 +699,16 @@ func (cloud *CloudProvider) waitForSnapshotCreation(ctx context.Context, snapsho for { select { case <-ticker.C: - klog.V(5).Infof("Checking GCE Snapshot %s.", snapshotName) + klog.V(6).Infof("Checking GCE Snapshot %s.", snapshotName) snapshot, err := cloud.GetSnapshot(ctx, snapshotName) if err != nil { klog.Warningf("Error in getting snapshot %s, %v", snapshotName, err) } else if snapshot != nil { if snapshot.Status != "CREATING" { - klog.V(5).Infof("Snapshot %s status is %s", snapshotName, snapshot.Status) + klog.V(6).Infof("Snapshot %s status is %s", snapshotName, snapshot.Status) return snapshot, nil } else { - klog.V(5).Infof("Snapshot %s is still creating ...", snapshotName) + klog.V(6).Infof("Snapshot %s is still creating ...", snapshotName) } } case <-timer.C: diff --git a/pkg/gce-cloud-provider/compute/gce.go b/pkg/gce-cloud-provider/compute/gce.go index c9fda001c..9432da86d 100644 --- a/pkg/gce-cloud-provider/compute/gce.go +++ b/pkg/gce-cloud-provider/compute/gce.go @@ -76,7 +76,7 @@ func CreateCloudProvider(vendorVersion string, configPath string) (*CloudProvide // At this point configFile could still be nil. // Any following code that uses configFile should handle nil pointer gracefully. 
- klog.V(1).Infof("Using GCE provider config %+v", configFile) + klog.V(2).Infof("Using GCE provider config %+v", configFile) tokenSource, err := generateTokenSource(configFile) if err != nil { @@ -109,7 +109,7 @@ func generateTokenSource(configFile *ConfigFile) (oauth2.TokenSource, error) { // Use AltTokenSource tokenSource := NewAltTokenSource(configFile.Global.TokenURL, configFile.Global.TokenBody) - klog.V(4).Infof("Using AltTokenSource %#v", tokenSource) + klog.V(2).Infof("Using AltTokenSource %#v", tokenSource) return tokenSource, nil } @@ -122,11 +122,11 @@ func generateTokenSource(configFile *ConfigFile) (oauth2.TokenSource, error) { // DefaultTokenSource relies on GOOGLE_APPLICATION_CREDENTIALS env var being set. if gac, ok := os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS"); ok { - klog.V(4).Infof("GOOGLE_APPLICATION_CREDENTIALS env var set %v", gac) + klog.V(2).Infof("GOOGLE_APPLICATION_CREDENTIALS env var set %v", gac) } else { klog.Warningf("GOOGLE_APPLICATION_CREDENTIALS env var not set") } - klog.V(4).Infof("Using DefaultTokenSource %#v", tokenSource) + klog.V(2).Infof("Using DefaultTokenSource %#v", tokenSource) return tokenSource, err } @@ -198,10 +198,10 @@ func getProjectAndZone(config *ConfigFile) (string, string, error) { if err != nil { return "", "", err } - klog.V(4).Infof("Using GCP project ID from the Metadata server: %q", projectID) + klog.V(2).Infof("Using GCP project ID from the Metadata server: %q", projectID) } else { projectID = config.Global.ProjectId - klog.V(4).Infof("Using GCP project ID from the local GCE cloud provider config file: %q", projectID) + klog.V(2).Infof("Using GCP project ID from the local GCE cloud provider config file: %q", projectID) } return projectID, zone, nil diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go index 9b418ed6d..44dab8ee9 100644 --- a/pkg/gce-pd-csi-driver/controller.go +++ b/pkg/gce-pd-csi-driver/controller.go @@ -43,8 +43,9 @@ type GCEControllerServer struct { 
CloudProvider gce.GCECompute MetadataService metadataservice.MetadataService - // A map storing all volumes with ongoing operations so that additional operations - // for that same volume (as defined by Volume Key) return an Aborted error + // A map storing all volumes with ongoing operations so that additional + // operations for that same volume (as defined by Volume Key) return an + // Aborted error volumeLocks *common.VolumeLocks } @@ -67,8 +68,6 @@ const ( func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { var err error - klog.V(4).Infof("CreateVolume called with request %v", *req) - // Validate arguments volumeCapabilities := req.GetVolumeCapabilities() name := req.GetName() @@ -167,6 +166,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("CreateVolume disk already exists with same name and is incompatible: %v", err)) } // If there is no validation error, immediately return success + klog.V(4).Infof("CreateVolume succeeded for disk %v, it already exists and was compatible", volKey) return generateCreateVolumeResponse(existingDisk, capBytes, zones), nil } @@ -209,13 +209,12 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre default: return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume replication type '%s' is not supported", replicationType)) } + klog.V(4).Infof("CreateVolume succeeded for disk %v", volKey) return generateCreateVolumeResponse(disk, capBytes, zones), nil } func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { - klog.V(4).Infof("DeleteVolume called with request %v", *req) - // Validate arguments volumeID := req.GetVolumeId() if len(volumeID) == 0 { @@ -249,12 +248,11 @@ func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, 
req *csi.Del return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Delete disk error: %v", err)) } + klog.V(4).Infof("DeleteVolume succeeded for disk %v", volKey) return &csi.DeleteVolumeResponse{}, nil } func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { - klog.V(4).Infof("ControllerPublishVolume called with request %v", *req) - // Validate arguments volumeID := req.GetVolumeId() readOnly := req.GetReadonly() @@ -335,7 +333,7 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r } if attached { // Volume is attached to node. Success! - klog.V(4).Infof("Attach operation is successful. PD %q was already attached to node %q.", volKey.Name, nodeID) + klog.V(4).Infof("ControllerPublishVolume succeeded for disk %v to instance %v, already attached.", volKey, nodeID) return pubVolResp, nil } instanceZone, instanceName, err = common.NodeIDToZoneAndName(nodeID) @@ -347,20 +345,16 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Attach error: %v", err)) } - klog.V(4).Infof("Waiting for attach of disk %v to instance %v to complete...", volKey.Name, nodeID) - err = gceCS.CloudProvider.WaitForAttach(ctx, volKey, instanceZone, instanceName) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("unknown WaitForAttach error: %v", err)) } - klog.V(4).Infof("Disk %v attached to instance %v successfully", volKey.Name, nodeID) + klog.V(4).Infof("ControllerPublishVolume succeeded for disk %v to instance %v", volKey, nodeID) return pubVolResp, nil } func (gceCS *GCEControllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { - klog.V(4).Infof("ControllerUnpublishVolume called with request %v", *req) - // Validate arguments 
volumeID := req.GetVolumeId() nodeID := req.GetNodeId() @@ -407,7 +401,7 @@ func (gceCS *GCEControllerServer) ControllerUnpublishVolume(ctx context.Context, if !attached { // Volume is not attached to node. Success! - klog.V(4).Infof("Detach operation is successful. PD %q was not attached to node %q.", volKey.Name, nodeID) + klog.V(4).Infof("ControllerUnpublishVolume succeeded for disk %v from node %v. Already not attached.", volKey, nodeID) return &csi.ControllerUnpublishVolumeResponse{}, nil } @@ -416,6 +410,7 @@ func (gceCS *GCEControllerServer) ControllerUnpublishVolume(ctx context.Context, return nil, status.Error(codes.Internal, fmt.Sprintf("unknown detach error: %v", err)) } + klog.V(4).Infof("ControllerUnpublishVolume succeeded for disk %v from node %v", volKey, nodeID) return &csi.ControllerUnpublishVolumeResponse{}, nil } @@ -533,8 +528,6 @@ func (gceCS *GCEControllerServer) ControllerGetCapabilities(ctx context.Context, } func (gceCS *GCEControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { - klog.V(4).Infof("CreateSnapshot called with request %v", *req) - // Validate arguments volumeID := req.GetSourceVolumeId() if len(req.Name) == 0 { @@ -607,6 +600,7 @@ func (gceCS *GCEControllerServer) CreateSnapshot(ctx context.Context, req *csi.C ReadyToUse: ready, }, } + klog.V(4).Infof("CreateSnapshot succeeded for snapshot %s on volume %s", cleanSelfLink(snapshot.SelfLink), volumeID) return createResp, nil } @@ -643,8 +637,6 @@ func isCSISnapshotReady(status string) (bool, error) { } func (gceCS *GCEControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { - klog.V(4).Infof("DeleteSnapshot called with request %v", *req) - // Validate arguments snapshotID := req.GetSnapshotId() if len(snapshotID) == 0 { @@ -668,8 +660,6 @@ func (gceCS *GCEControllerServer) DeleteSnapshot(ctx context.Context, req *csi.D } func (gceCS 
*GCEControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { - klog.V(4).Infof("ListSnapshots called with request %v", *req) - // case 1: SnapshotId is not empty, return snapshots that match the snapshot id. if len(req.GetSnapshotId()) != 0 { return gceCS.getSnapshotByID(ctx, req.GetSnapshotId()) @@ -700,6 +690,7 @@ func (gceCS *GCEControllerServer) ControllerExpandVolume(ctx context.Context, re return nil, status.Error(codes.Internal, fmt.Sprintf("ControllerExpandVolume failed to resize disk: %v", err)) } + klog.V(4).Infof("ControllerExpandVolume succeeded for disk %v to size %v", volKey, resizedGb) return &csi.ControllerExpandVolumeResponse{ CapacityBytes: common.GbToBytes(resizedGb), NodeExpansionRequired: true, @@ -735,7 +726,6 @@ func (gceCS *GCEControllerServer) getSnapshots(ctx context.Context, req *csi.Lis NextToken: nextToken, } return listSnapshotResp, nil - } func (gceCS *GCEControllerServer) getSnapshotByID(ctx context.Context, snapshotID string) (*csi.ListSnapshotsResponse, error) { @@ -984,7 +974,6 @@ func generateCreateVolumeResponse(disk *gce.CloudDisk, capBytes int64, zones []s }, } createResp.Volume.ContentSource = source - } return createResp } @@ -1011,12 +1000,10 @@ func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name return nil, fmt.Errorf("failed to insert regional disk: %v", err) } - klog.V(4).Infof("Completed creation of disk %v", name) disk, err := cloudProvider.GetDisk(ctx, meta.RegionalKey(name, region)) if err != nil { return nil, fmt.Errorf("failed to get disk after creating regional disk: %v", err) } - klog.Warningf("GCE PD %s already exists after wait, reusing", name) return disk, nil } @@ -1029,8 +1016,6 @@ func createSingleZoneDisk(ctx context.Context, cloudProvider gce.GCECompute, nam if err != nil { return nil, fmt.Errorf("failed to insert zonal disk: %v", err) } - - klog.V(4).Infof("Completed creation of disk %v", name) disk, err := 
cloudProvider.GetDisk(ctx, meta.ZonalKey(name, diskZone)) if err != nil { return nil, err diff --git a/pkg/gce-pd-csi-driver/identity.go b/pkg/gce-pd-csi-driver/identity.go index e879c3750..898122f82 100644 --- a/pkg/gce-pd-csi-driver/identity.go +++ b/pkg/gce-pd-csi-driver/identity.go @@ -20,7 +20,6 @@ import ( csi "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "k8s.io/klog" ) type GCEIdentityServer struct { @@ -29,8 +28,6 @@ type GCEIdentityServer struct { // GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) func (gceIdentity *GCEIdentityServer) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { - klog.V(5).Infof("Using default GetPluginInfo") - if gceIdentity.Driver.name == "" { return nil, status.Error(codes.Unavailable, "Driver name not configured") } @@ -42,7 +39,6 @@ func (gceIdentity *GCEIdentityServer) GetPluginInfo(ctx context.Context, req *cs } func (gceIdentity *GCEIdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { - klog.V(5).Infof("Using default GetPluginCapabilities") return &csi.GetPluginCapabilitiesResponse{ Capabilities: []*csi.PluginCapability{ { @@ -78,6 +74,5 @@ func (gceIdentity *GCEIdentityServer) GetPluginCapabilities(ctx context.Context, } func (gceIdentity *GCEIdentityServer) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) { - klog.V(4).Infof("Probe called with args: %#v", req) return &csi.ProbeResponse{}, nil } diff --git a/pkg/gce-pd-csi-driver/node.go b/pkg/gce-pd-csi-driver/node.go index 295ac0210..0ef57cb46 100644 --- a/pkg/gce-pd-csi-driver/node.go +++ b/pkg/gce-pd-csi-driver/node.go @@ -58,8 +58,6 @@ const ( ) func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, 
error) { - klog.V(4).Infof("NodePublishVolume called with req: %#v", req) - // Validate Arguments targetPath := req.GetTargetPath() stagingTargetPath := req.GetStagingTargetPath() @@ -100,6 +98,7 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub 3) Readonly MUST match */ + klog.V(4).Infof("NodePublishVolume succeeded on volume %v to %s, mount already exists.", volumeID, targetPath) return &csi.NodePublishVolumeResponse{}, nil } @@ -183,13 +182,11 @@ func (ns *GCENodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePub return nil, status.Error(codes.Internal, fmt.Sprintf("NodePublishVolume mount of disk failed: %v", err)) } - klog.V(4).Infof("Successfully mounted %s", targetPath) + klog.V(4).Infof("NodePublishVolume succeeded on volume %v to %s", volumeID, targetPath) return &csi.NodePublishVolumeResponse{}, nil } func (ns *GCENodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { - klog.V(4).Infof("NodeUnpublishVolume called with args: %v", req) - // Validate Arguments targetPath := req.GetTargetPath() volumeID := req.GetVolumeId() @@ -210,12 +207,11 @@ func (ns *GCENodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeU return nil, status.Error(codes.Internal, fmt.Sprintf("Unmount failed: %v\nUnmounting arguments: %s\n", err, targetPath)) } + klog.V(4).Infof("NodeUnpublishVolume succeeded on %v from %s", volumeID, targetPath) return &csi.NodeUnpublishVolumeResponse{}, nil } func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { - klog.V(4).Infof("NodeStageVolume called with req: %#v", req) - // Validate Arguments volumeID := req.GetVolumeId() stagingTargetPath := req.GetStagingTargetPath() @@ -281,6 +277,8 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage 3) Readonly MUST match */ + + klog.V(4).Infof("NodeStageVolume 
succeeded on %v to %s, mount already exists.", volumeID, stagingTargetPath) return &csi.NodeStageVolumeResponse{}, nil } @@ -298,6 +296,7 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage } } else if blk := volumeCapability.GetBlock(); blk != nil { // Noop for Block NodeStageVolume + klog.V(4).Infof("NodeStageVolume succeeded on %v to %s, capability is block so this is a no-op", volumeID, stagingTargetPath) return &csi.NodeStageVolumeResponse{}, nil } @@ -308,12 +307,11 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage devicePath, stagingTargetPath, fstype, options, err)) } + klog.V(4).Infof("NodeStageVolume succeeded on %v to %s", volumeID, stagingTargetPath) return &csi.NodeStageVolumeResponse{}, nil } func (ns *GCENodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { - klog.V(4).Infof("NodeUnstageVolume called with req: %#v", req) - // Validate arguments volumeID := req.GetVolumeId() stagingTargetPath := req.GetStagingTargetPath() @@ -333,20 +331,18 @@ func (ns *GCENodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUns if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("NodeUnstageVolume failed to unmount at path %s: %v", stagingTargetPath, err)) } + + klog.V(4).Infof("NodeUnstageVolume succeeded on %v from %s", volumeID, stagingTargetPath) return &csi.NodeUnstageVolumeResponse{}, nil } func (ns *GCENodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { - klog.V(4).Infof("NodeGetCapabilities called with req: %#v", req) - return &csi.NodeGetCapabilitiesResponse{ Capabilities: ns.Driver.nscap, }, nil } func (ns *GCENodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { - klog.V(4).Infof("NodeGetInfo called with req: %#v", req) - top := &csi.Topology{ Segments: 
map[string]string{common.TopologyKeyZone: ns.MetadataService.GetZone()}, } @@ -432,6 +428,7 @@ func (ns *GCENodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpa */ // Respond + klog.V(4).Infof("NodeExpandVolume succeeded on volume %v to size %v", volKey, reqBytes) return &csi.NodeExpandVolumeResponse{ CapacityBytes: reqBytes, }, nil diff --git a/pkg/gce-pd-csi-driver/utils.go b/pkg/gce-pd-csi-driver/utils.go index c5937f08e..00966254f 100644 --- a/pkg/gce-pd-csi-driver/utils.go +++ b/pkg/gce-pd-csi-driver/utils.go @@ -21,6 +21,7 @@ import ( "fmt" "context" + csi "github.com/container-storage-interface/spec/lib/go/csi" "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" "google.golang.org/grpc" @@ -52,13 +53,12 @@ func NewNodeServiceCapability(cap csi.NodeServiceCapability_RPC_Type) *csi.NodeS } func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - klog.V(3).Infof("GRPC call: %s", info.FullMethod) - klog.V(5).Infof("GRPC request: %+v", protosanitizer.StripSecrets(req)) + klog.V(4).Infof("%s called with request: %+v", info.FullMethod, protosanitizer.StripSecrets(req)) resp, err := handler(ctx, req) if err != nil { - klog.Errorf("GRPC error: %v", err) + klog.Errorf("%s returned with error: %v", info.FullMethod, err) } else { - klog.V(5).Infof("GRPC response: %+v", protosanitizer.StripSecrets(resp)) + klog.V(4).Infof("%s returned with response: %+v", info.FullMethod, protosanitizer.StripSecrets(resp)) } return resp, err } diff --git a/pkg/mount-manager/device-utils.go b/pkg/mount-manager/device-utils.go index 2afaefabc..ec9c88091 100644 --- a/pkg/mount-manager/device-utils.go +++ b/pkg/mount-manager/device-utils.go @@ -133,15 +133,13 @@ func udevadmChangeToNewDrives(sdBeforeSet sets.String) error { // drivePath must be the block device path to trigger on, in the format "/dev/sd*", or a symlink to it. // This is workaround for Issue #7972. 
Once the underlying issue has been resolved, this may be removed. func udevadmChangeToDrive(drivePath string) error { - klog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath) + klog.V(4).Infof("udevadmChangeToDrive: drive=%q", drivePath) // Evaluate symlink, if any drive, err := filepath.EvalSymlinks(drivePath) if err != nil { return fmt.Errorf("udevadmChangeToDrive: filepath.EvalSymlinks(%q) failed with %v.", drivePath, err) } - klog.V(5).Infof("udevadmChangeToDrive: symlink path is %q", drive) - // Check to make sure input is "/dev/sd*" if !strings.Contains(drive, diskSDPath) { return fmt.Errorf("udevadmChangeToDrive: expected input in the form \"%s\" but drive is %q.", diskSDPattern, drive)