From fb2f27d4819f5d1e8fc0627a8bc02174e083d8f7 Mon Sep 17 00:00:00 2001
From: Alexis MacAskill
Date: Fri, 17 Feb 2023 01:18:24 +0000
Subject: [PATCH 1/2] fix bug where volume cloning topology requirements are ignored when choosing the location of the volume

---
 pkg/gce-pd-csi-driver/controller.go      | 132 +++-
 pkg/gce-pd-csi-driver/controller_test.go | 850 +++++++++++++++++++++--
 test/e2e/tests/single_zone_e2e_test.go   | 185 ++++-
 3 files changed, 1112 insertions(+), 55 deletions(-)

diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go
index 86d96d05f..e76b26fe9 100644
--- a/pkg/gce-pd-csi-driver/controller.go
+++ b/pkg/gce-pd-csi-driver/controller.go
@@ -36,6 +36,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/klog/v2"
+	"k8s.io/utils/strings/slices"

 	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
 	gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute"
@@ -107,6 +108,14 @@ type workItem struct {
 	unpublishReq *csi.ControllerUnpublishVolumeRequest
 }

+// locationRequirements are additional location topology requirements that must be respected when creating a volume.
+type locationRequirements struct {
+	srcVolRegion         string
+	srcVolZone           string
+	srcReplicationType   string
+	cloneReplicationType string
+}
+
 var _ csi.ControllerServer = &GCEControllerServer{}

 const (
@@ -151,6 +160,44 @@ func isDiskReady(disk *gce.CloudDisk) (bool, error) {
 	return false, nil
 }

+// cloningLocationRequirements returns additional location requirements to be applied to the given create volume request's topology.
+// If the CreateVolumeRequest will use volume cloning, location requirements in compliance with the volume cloning limitations
+// will be returned: https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/volume-cloning#limitations.
+func cloningLocationRequirements(req *csi.CreateVolumeRequest, cloneReplicationType string) (*locationRequirements, error) {
+	if !useVolumeCloning(req) {
+		return nil, nil
+	}
+	// If we are using volume cloning, this will be set.
+	volSrc := req.VolumeContentSource.GetVolume()
+	volSrcVolID := volSrc.GetVolumeId()
+
+	_, sourceVolKey, err := common.VolumeIDToKey(volSrcVolID)
+	if err != nil {
+		return nil, fmt.Errorf("volume ID is invalid: %w", err)
+	}
+
+	isZonalSrcVol := sourceVolKey.Type() == meta.Zonal
+	if isZonalSrcVol {
+		region, err := common.GetRegionFromZones([]string{sourceVolKey.Zone})
+		if err != nil {
+			return nil, fmt.Errorf("failed to get region from zones: %w", err)
+		}
+		sourceVolKey.Region = region
+	}
+
+	srcReplicationType := replicationTypeNone
+	if !isZonalSrcVol {
+		srcReplicationType = replicationTypeRegionalPD
+	}
+
+	return &locationRequirements{srcVolZone: sourceVolKey.Zone, srcVolRegion: sourceVolKey.Region, srcReplicationType: srcReplicationType, cloneReplicationType: cloneReplicationType}, nil
+}
+
+// useVolumeCloning returns true if the volume in the given CreateVolumeRequest should be created with volume cloning.
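+// This is the case when the request specifies an existing volume, rather than a snapshot, as its VolumeContentSource.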
+func useVolumeCloning(req *csi.CreateVolumeRequest) bool { + return req.VolumeContentSource != nil && req.VolumeContentSource.GetVolume() != nil +} + func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { var err error // Validate arguments @@ -186,12 +233,21 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre if multiWriter { gceAPIVersion = gce.GCEAPIVersionBeta } + + var locationTopReq *locationRequirements + if useVolumeCloning(req) { + locationTopReq, err = cloningLocationRequirements(req, params.ReplicationType) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "failed to get location requirements: %v", err.Error()) + } + } + // Determine the zone or zones+region of the disk var zones []string var volKey *meta.Key switch params.ReplicationType { case replicationTypeNone: - zones, err = pickZones(ctx, gceCS, req.GetAccessibilityRequirements(), 1) + zones, err = pickZones(ctx, gceCS, req.GetAccessibilityRequirements(), 1, locationTopReq) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "CreateVolume failed to pick zones for disk: %v", err.Error()) } @@ -201,7 +257,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre volKey = meta.ZonalKey(name, zones[0]) case replicationTypeRegionalPD: - zones, err = pickZones(ctx, gceCS, req.GetAccessibilityRequirements(), 2) + zones, err = pickZones(ctx, gceCS, req.GetAccessibilityRequirements(), 2, locationTopReq) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "CreateVolume failed to pick zones for disk: %v", err.Error()) } @@ -1382,7 +1438,29 @@ func diskIsAttachedAndCompatible(deviceName string, instance *compute.Instance, return false, nil } -func pickZonesFromTopology(top *csi.TopologyRequirement, numZones int) ([]string, error) { +// pickZonesInRegion will remove any zones that are not in the given region. +func pickZonesInRegion(region string, zones []string) []string { + refinedZones := []string{} + for _, zone := range zones { + if strings.Contains(zone, region) { + refinedZones = append(refinedZones, zone) + } + } + return refinedZones +} + +func prependZone(zone string, zones []string) []string { + newZones := []string{zone} + for i := 0; i < len(zones); i++ { + // Do not add a zone if it is equal to the zone that is already prepended to newZones. + if zones[i] != zone { + newZones = append(newZones, zones[i]) + } + } + return newZones +} + +func pickZonesFromTopology(top *csi.TopologyRequirement, numZones int, locationTopReq *locationRequirements) ([]string, error) { reqZones, err := getZonesFromTopology(top.GetRequisite()) if err != nil { return nil, fmt.Errorf("could not get zones from requisite topology: %w", err) @@ -1392,6 +1470,39 @@ func pickZonesFromTopology(top *csi.TopologyRequirement, numZones int) ([]string return nil, fmt.Errorf("could not get zones from preferred topology: %w", err) } + if locationTopReq != nil { + srcVolZone := locationTopReq.srcVolZone + switch locationTopReq.cloneReplicationType { + // For zonal -> zonal cloning, the source disk zone must match the destination disk zone. + case replicationTypeNone: + // If the source volume zone is not in the topology requirement, we return an error. 
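+			// For example, a clone of a source disk in us-central1-a can only be provisioned in us-central1-a, so that zone must appear in the requisite or preferred topology.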
+ if !slices.Contains(prefZones, srcVolZone) && !slices.Contains(reqZones, srcVolZone) { + volumeCloningReq := fmt.Sprintf("clone zone must match source disk zone: %s", srcVolZone) + return nil, fmt.Errorf("failed to find zone from topology %v: %s", top, volumeCloningReq) + } + return []string{srcVolZone}, nil + // For zonal or regional -> regional disk cloning, the source disk region must match the destination disk region. + case replicationTypeRegionalPD: + srcVolRegion := locationTopReq.srcVolRegion + prefZones = pickZonesInRegion(srcVolRegion, prefZones) + reqZones = pickZonesInRegion(srcVolRegion, reqZones) + + if len(prefZones) == 0 && len(reqZones) == 0 { + volumeCloningReq := fmt.Sprintf("clone zone must reside in source disk region %s", srcVolRegion) + return nil, fmt.Errorf("failed to find zone from topology %v: %s", top, volumeCloningReq) + } + + // For zonal -> regional disk cloning, one of the replicated zones must match the source zone. + if locationTopReq.srcReplicationType == replicationTypeNone { + if !slices.Contains(prefZones, srcVolZone) && !slices.Contains(reqZones, srcVolZone) { + volumeCloningReq := fmt.Sprintf("one of the replica zones of the clone must match the source disk zone: %s", srcVolZone) + return nil, fmt.Errorf("failed to find zone from topology %v: %s", top, volumeCloningReq) + } + prefZones = prependZone(srcVolZone, prefZones) + } + } + } + if numZones <= len(prefZones) { return prefZones[0:numZones], nil } else { @@ -1450,16 +1561,25 @@ func getZoneFromSegment(seg map[string]string) (string, error) { return zone, nil } -func pickZones(ctx context.Context, gceCS *GCEControllerServer, top *csi.TopologyRequirement, numZones int) ([]string, error) { +func pickZones(ctx context.Context, gceCS *GCEControllerServer, top *csi.TopologyRequirement, numZones int, locationTopReq *locationRequirements) ([]string, error) { var zones []string var err error if top != nil { - zones, err = pickZonesFromTopology(top, numZones) + zones, err = pickZonesFromTopology(top, numZones, locationTopReq) if err != nil { return nil, fmt.Errorf("failed to pick zones from topology: %w", err) } } else { - zones, err = getDefaultZonesInRegion(ctx, gceCS, []string{gceCS.CloudProvider.GetDefaultZone()}, numZones) + existingZones := []string{gceCS.CloudProvider.GetDefaultZone()} + // We set existingZones to the source volume zone so that for zonal -> zonal cloning, the clone is provisioned + // in the same zone as the source volume, and for zonal -> regional, one of the replicated zones will always + // be the zone of the source volume. For regional -> regional cloning, the srcVolZone will not be set, so we + // just use the default zone. + if locationTopReq != nil && locationTopReq.srcReplicationType == replicationTypeNone { + existingZones = []string{locationTopReq.srcVolZone} + } + // If topology is nil, then the Immediate binding mode was used without setting allowedTopologies in the storageclass. 
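+		// In that case, fall back to the existing zones chosen above and let getDefaultZonesInRegion pick the remaining zones of the region.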
+ zones, err = getDefaultZonesInRegion(ctx, gceCS, existingZones, numZones) if err != nil { return nil, fmt.Errorf("failed to get default %v zones in region: %w", numZones, err) } diff --git a/pkg/gce-pd-csi-driver/controller_test.go b/pkg/gce-pd-csi-driver/controller_test.go index ecf2915b9..e785453be 100644 --- a/pkg/gce-pd-csi-driver/controller_test.go +++ b/pkg/gce-pd-csi-driver/controller_test.go @@ -1072,8 +1072,112 @@ func TestCreateVolumeWithVolumeSourceFromSnapshot(t *testing.T) { } } +func TestCloningLocationRequirements(t *testing.T) { + testSourceVolumeName := "test-volume-source-name" + testZonalVolumeSourceID := fmt.Sprintf("projects/%s/zones/%s/disks/%s", project, zone, testSourceVolumeName) + testRegionalVolumeSourceID := fmt.Sprintf("projects/%s/regions/%s/disks/%s", project, region, testSourceVolumeName) + + testCases := []struct { + name string + sourceVolumeID string + nilVolumeContentSource bool + reqParameters map[string]string + requestCapacityRange *csi.CapacityRange + replicationType string + expectedLocationRequirements *locationRequirements + expectedErr bool + }{ + { + name: "success zonal disk clone of zonal source disk", + sourceVolumeID: testZonalVolumeSourceID, + requestCapacityRange: stdCapRange, + reqParameters: map[string]string{ + common.ParameterKeyReplicationType: replicationTypeNone, + }, + replicationType: replicationTypeNone, + expectedLocationRequirements: &locationRequirements{srcVolRegion: region, srcVolZone: zone, srcReplicationType: replicationTypeNone, cloneReplicationType: replicationTypeNone}, + expectedErr: false, + }, + { + name: "success regional disk clone of regional source disk", + sourceVolumeID: testRegionalVolumeSourceID, + requestCapacityRange: stdCapRange, + reqParameters: map[string]string{ + common.ParameterKeyReplicationType: replicationTypeRegionalPD, + }, + replicationType: replicationTypeRegionalPD, + expectedLocationRequirements: &locationRequirements{srcVolRegion: region, srcVolZone: "", srcReplicationType: replicationTypeRegionalPD, cloneReplicationType: replicationTypeRegionalPD}, + expectedErr: false, + }, + { + name: "success regional disk clone of zonal data source", + sourceVolumeID: testZonalVolumeSourceID, + requestCapacityRange: stdCapRange, + reqParameters: map[string]string{ + common.ParameterKeyReplicationType: replicationTypeRegionalPD, + }, + replicationType: replicationTypeRegionalPD, + expectedLocationRequirements: &locationRequirements{srcVolRegion: region, srcVolZone: zone, srcReplicationType: replicationTypeNone, cloneReplicationType: replicationTypeRegionalPD}, + expectedErr: false, + }, + { + name: "non-cloning CreateVolumeRequest", + nilVolumeContentSource: true, + requestCapacityRange: stdCapRange, + reqParameters: map[string]string{ + common.ParameterKeyReplicationType: replicationTypeRegionalPD, + }, + replicationType: replicationTypeRegionalPD, + expectedLocationRequirements: nil, + expectedErr: false, + }, + { + name: "failure invalid volumeID", + sourceVolumeID: fmt.Sprintf("projects/%s/disks/%s", project, testSourceVolumeName), + requestCapacityRange: stdCapRange, + reqParameters: map[string]string{ + common.ParameterKeyReplicationType: replicationTypeNone, + }, + replicationType: replicationTypeNone, + expectedLocationRequirements: nil, + expectedErr: true, + }, + } + + for _, tc := range testCases { + t.Logf("test case: %s", tc.name) + req := &csi.CreateVolumeRequest{ + Name: name, + CapacityRange: tc.requestCapacityRange, + VolumeCapabilities: stdVolCaps, + Parameters: tc.reqParameters, + 
VolumeContentSource: &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Volume{ + Volume: &csi.VolumeContentSource_VolumeSource{ + VolumeId: tc.sourceVolumeID, + }, + }, + }, + } + if tc.nilVolumeContentSource { + req.VolumeContentSource = nil + } + + locationRequirements, err := cloningLocationRequirements(req, tc.replicationType) + + if err != nil != tc.expectedErr { + t.Fatalf("Got error %v, expected error %t", err, tc.expectedErr) + } + input := fmt.Sprintf("cloningLocationRequirements(%v, %s", req, tc.replicationType) + if fmt.Sprintf("%v", tc.expectedLocationRequirements) != fmt.Sprintf("%v", locationRequirements) { + t.Fatalf("%s returned unexpected diff got: %v, want %v", input, locationRequirements, tc.expectedLocationRequirements) + } + } +} + func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { testSourceVolumeName := "test-volume-source-name" + testCloneVolumeName := "test-volume-clone" testZonalVolumeSourceID := fmt.Sprintf("projects/%s/zones/%s/disks/%s", project, zone, testSourceVolumeName) testRegionalVolumeSourceID := fmt.Sprintf("projects/%s/regions/%s/disks/%s", project, region, testSourceVolumeName) testSecondZonalVolumeSourceID := fmt.Sprintf("projects/%s/zones/%s/disks/%s", project, "different-zone1", testSourceVolumeName) @@ -1085,14 +1189,33 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { common.ParameterKeyType: "test-type", common.ParameterKeyReplicationType: replicationTypeRegionalPD, common.ParameterKeyDiskEncryptionKmsKey: "encryption-key", } - topology := &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ - { - Segments: map[string]string{common.TopologyKeyZone: zone}, - }, - { - Segments: map[string]string{common.TopologyKeyZone: secondZone}, - }, + requisiteTopology := []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: zone}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: secondZone}, + }, + } + + requisiteAllRegionZonesTopology := []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-fakethirdzone"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: zone}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: secondZone}, + }, + } + + prefTopology := []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: zone}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: secondZone}, }, } @@ -1108,39 +1231,326 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { requestCapacityRange *csi.CapacityRange sourceTopology *csi.TopologyRequirement requestTopology *csi.TopologyRequirement + expCloneKey *meta.Key + // Accessible topologies validates that the replica zones are valid for regional disk clones. 
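+		// For zonal clones this is the single zone of the clone; for regional clones it is the pair of replica zones.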
+ expAccessibleTop []*csi.Topology }{ + { - name: "success zonal disk clone of zonal source disk", + name: "success zonal -> zonal cloning, nil topology: immediate binding w/ no allowedTopologies", volumeOnCloud: true, sourceVolumeID: testZonalVolumeSourceID, requestCapacityRange: stdCapRange, sourceCapacityRange: stdCapRange, reqParameters: zonalParams, sourceReqParameters: zonalParams, - sourceTopology: topology, - requestTopology: topology, + // Source volume will be in the zone that is the first element of preferred topologies (country-region-zone) + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: nil, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: zone, Region: ""}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + }, }, { - name: "success regional disk clone of regional source disk", + name: "success zonal -> zonal cloning, req = allowedTopologies, pref = req w/ randomly selected zone as first element: immediate binding w/ allowedTopologies", volumeOnCloud: true, - sourceVolumeID: testRegionalVolumeSourceID, + sourceVolumeID: testZonalVolumeSourceID, + requestCapacityRange: stdCapRange, + sourceCapacityRange: stdCapRange, + reqParameters: zonalParams, + sourceReqParameters: zonalParams, + // Source volume will be in the zone that is the first element of preferred topologies (country-region-zone) + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: secondZone}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: zone}, + }, + }, + }, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: zone, Region: ""}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + }, + }, + { + name: "success zonal -> zonal cloning, req = allowedTopologies, pref = req w/ src zone as first element: delayed binding w/ allowedTopologies", + volumeOnCloud: true, + sourceVolumeID: testZonalVolumeSourceID, + requestCapacityRange: stdCapRange, + sourceCapacityRange: stdCapRange, + reqParameters: zonalParams, + sourceReqParameters: zonalParams, + // Source volume will be in the zone that is the first element of preferred topologies (country-region-zone) + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: zone, Region: ""}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + }, + }, + { + name: "success zonal -> zonal cloning, req = all zones in region, pref = req w/ src zone as first element: delayed binding without allowedTopologies", + volumeOnCloud: true, + sourceVolumeID: testZonalVolumeSourceID, + requestCapacityRange: stdCapRange, + sourceCapacityRange: stdCapRange, + reqParameters: zonalParams, + sourceReqParameters: zonalParams, + // Source volume will be in the zone that is the first element of preferred topologies (country-region-zone) + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + 
requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteAllRegionZonesTopology, + Preferred: prefTopology, + }, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: zone, Region: ""}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + }, + }, + { + name: "success zonal -> regional cloning, nil topology: immediate binding w/ no allowedTopologies", + volumeOnCloud: true, + sourceVolumeID: testZonalVolumeSourceID, requestCapacityRange: stdCapRange, sourceCapacityRange: stdCapRange, reqParameters: regionalParams, - sourceReqParameters: regionalParams, - sourceTopology: topology, - requestTopology: topology, + sourceReqParameters: zonalParams, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: nil, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: "", Region: "country-region"}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-fakesecondzone"}, + }, + }, }, { - name: "success regional disk clone of zonal data source", + name: "success zonal -> regional cloning, req = allowedTopologies, pref = req w/ randomly selected zone as first element: immediate binding w/ allowedTopologies", + volumeOnCloud: true, + sourceVolumeID: testZonalVolumeSourceID, + requestCapacityRange: stdCapRange, + sourceCapacityRange: stdCapRange, + reqParameters: regionalParams, + sourceReqParameters: zonalParams, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: secondZone}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: zone}, + }, + }, + }, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: "", Region: "country-region"}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-fakesecondzone"}, + }, + }, + }, + { + name: "success zonal -> regional cloning, req = allowedTopologies, pref = req w/ src zone as first element: delayed binding w/ allowedTopologies", + volumeOnCloud: true, + sourceVolumeID: testZonalVolumeSourceID, + requestCapacityRange: stdCapRange, + sourceCapacityRange: stdCapRange, + reqParameters: regionalParams, + sourceReqParameters: zonalParams, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: "", Region: "country-region"}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-fakesecondzone"}, + }, + }, + }, + { + name: "success zonal -> regional cloning, req = all zones in region, pref = req w/ src zone as first element: delayed binding without allowedTopologies", volumeOnCloud: true, sourceVolumeID: testZonalVolumeSourceID, requestCapacityRange: stdCapRange, sourceCapacityRange: stdCapRange, reqParameters: regionalParams, sourceReqParameters: 
zonalParams, - sourceTopology: topology, - requestTopology: topology, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteAllRegionZonesTopology, + Preferred: prefTopology, + }, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: "", Region: "country-region"}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-fakesecondzone"}, + }, + }, + }, + { + name: "success regional -> regional cloning, nil topology: immediate binding w/ no allowedTopologies", + volumeOnCloud: true, + sourceVolumeID: testRegionalVolumeSourceID, + requestCapacityRange: stdCapRange, + sourceCapacityRange: stdCapRange, + reqParameters: regionalParams, + sourceReqParameters: regionalParams, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: nil, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: "", Region: "country-region"}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-fakesecondzone"}, + }, + }, + }, + { + name: "success regional -> regional cloning, req = allowedTopologies, pref = req w/ randomly selected zone as first element: immediate binding w/ allowedTopologies", + volumeOnCloud: true, + sourceVolumeID: testRegionalVolumeSourceID, + requestCapacityRange: stdCapRange, + sourceCapacityRange: stdCapRange, + reqParameters: regionalParams, + sourceReqParameters: regionalParams, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: secondZone}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: zone}, + }, + }, + }, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: "", Region: "country-region"}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-fakesecondzone"}, + }, + }, + }, + { + name: "success regional -> regional cloning, req = allowedTopologies, pref = req w/ src zone as first element: delayed binding w/ allowedTopologies", + volumeOnCloud: true, + sourceVolumeID: testRegionalVolumeSourceID, + requestCapacityRange: stdCapRange, + sourceCapacityRange: stdCapRange, + reqParameters: regionalParams, + sourceReqParameters: regionalParams, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: "", Region: "country-region"}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-fakesecondzone"}, + }, + }, + }, + { + name: "success regional -> regional cloning, req = all zones in region, pref = req w/ src zone as first element: delayed binding without allowedTopologies", + 
volumeOnCloud: true, + sourceVolumeID: testRegionalVolumeSourceID, + requestCapacityRange: stdCapRange, + sourceCapacityRange: stdCapRange, + reqParameters: regionalParams, + sourceReqParameters: regionalParams, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + expCloneKey: &meta.Key{Name: testCloneVolumeName, Zone: "", Region: "country-region"}, + expAccessibleTop: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-zone"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "country-region-fakesecondzone"}, + }, + }, }, { name: "fail regional disk clone with no matching replica zone of zonal data source", @@ -1151,7 +1561,10 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { sourceCapacityRange: stdCapRange, reqParameters: regionalParams, sourceReqParameters: zonalParams, - sourceTopology: topology, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, requestTopology: &csi.TopologyRequirement{ Requisite: []*csi.Topology{ { @@ -1174,8 +1587,14 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { sourceReqParameters: map[string]string{ common.ParameterKeyType: "different-type", }, - sourceTopology: topology, - requestTopology: topology, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, }, { name: "fail zonal disk clone with different DiskEncryptionKMSKey", @@ -1189,8 +1608,14 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { common.ParameterKeyType: "test-type", common.ParameterKeyReplicationType: replicationTypeNone, common.ParameterKeyDiskEncryptionKmsKey: "different-encryption-key", }, - sourceTopology: topology, - requestTopology: topology, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, }, { name: "fail zonal disk clone with different zone", @@ -1211,7 +1636,10 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { }, }, }, - requestTopology: topology, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, }, { name: "fail zonal disk clone of regional data source", @@ -1222,8 +1650,14 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { sourceCapacityRange: stdCapRange, reqParameters: zonalParams, sourceReqParameters: regionalParams, - sourceTopology: topology, - requestTopology: topology, + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, }, { @@ -1235,7 +1669,10 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { sourceCapacityRange: stdCapRange, reqParameters: stdParams, sourceReqParameters: stdParams, - requestTopology: topology, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, }, { name: "fail invalid source disk volume id format", @@ -1246,7 +1683,10 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { 
sourceCapacityRange: stdCapRange, reqParameters: stdParams, sourceReqParameters: stdParams, - requestTopology: topology, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, }, { name: "fail zonal disk clone with smaller disk capacity", @@ -1259,16 +1699,23 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { }, reqParameters: zonalParams, sourceReqParameters: zonalParams, - sourceTopology: topology, - requestTopology: topology, - }} + sourceTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + requestTopology: &csi.TopologyRequirement{ + Requisite: requisiteTopology, + Preferred: prefTopology, + }, + }, + } for _, tc := range testCases { t.Logf("test case: %s", tc.name) gceDriver := initGCEDriver(t, nil) req := &csi.CreateVolumeRequest{ - Name: name, + Name: testCloneVolumeName, CapacityRange: tc.requestCapacityRange, VolumeCapabilities: stdVolCaps, Parameters: tc.reqParameters, @@ -1303,7 +1750,6 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { } resp, err := gceDriver.cs.CreateVolume(context.Background(), req) - t.Logf("response: %v err: %v", resp, err) if err != nil { serverError, ok := status.FromError(err) if !ok { @@ -1319,14 +1765,39 @@ func TestCreateVolumeWithVolumeSourceFromVolume(t *testing.T) { } // Make sure the response has the source volume. - sourceVolume := resp.GetVolume() - if sourceVolume.ContentSource == nil || sourceVolume.ContentSource.Type == nil || - sourceVolume.ContentSource.GetVolume() == nil || sourceVolume.ContentSource.GetVolume().VolumeId == "" { + respVolume := resp.GetVolume() + if respVolume.ContentSource == nil || respVolume.ContentSource.Type == nil || + respVolume.ContentSource.GetVolume() == nil || respVolume.ContentSource.GetVolume().VolumeId == "" { t.Fatalf("Expected volume content source to have volume ID, got none") } + // Validate that the cloned volume is in the region/zone that we expect + cloneVolID := respVolume.VolumeId + _, cloneVolKey, err := common.VolumeIDToKey(cloneVolID) + if err != nil { + t.Fatalf("failed to get key from volume id %q: %v", cloneVolID, err) + } + if cloneVolKey.String() != tc.expCloneKey.String() { + t.Fatalf("got clone volume key: %q, expected clone volume key: %q", cloneVolKey.String(), tc.expCloneKey.String()) + } + if !accessibleTopologiesEqual(respVolume.AccessibleTopology, tc.expAccessibleTop) { + t.Fatalf("got accessible topology: %q, expected accessible topology: %q", fmt.Sprintf("%+v", respVolume.AccessibleTopology), fmt.Sprintf("%+v", tc.expAccessibleTop)) + + } } } +func sortTopologies(in []*csi.Topology) { + sort.Slice(in, func(i, j int) bool { + return in[i].Segments[common.TopologyKeyZone] < in[j].Segments[common.TopologyKeyZone] + }) +} + +func accessibleTopologiesEqual(got []*csi.Topology, expected []*csi.Topology) bool { + sortTopologies(got) + sortTopologies(expected) + return fmt.Sprintf("%+v", got) == fmt.Sprintf("%+v", expected) +} + func TestCreateVolumeRandomRequisiteTopology(t *testing.T) { req := &csi.CreateVolumeRequest{ Name: "test-name", @@ -1757,10 +2228,93 @@ func TestGetZonesFromTopology(t *testing.T) { } } +func TestPickZonesInRegion(t *testing.T) { + testCases := []struct { + name string + region string + zones []string + expZones []string + }{ + { + name: "all zones in region", + region: "us-central1", + zones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, + expZones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, + }, + 
{ + name: "removes zones not in region", + region: "us-central1", + zones: []string{"us-central1-a", "us-central1-b", "us-central1-c", "us-east1-a, us-west1-a"}, + expZones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, + }, + { + name: "region not in zones", + region: "us-west1", + zones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, + expZones: []string{}, + }, + { + name: "empty zones", + region: "us-central1", + zones: []string{}, + expZones: []string{}, + }, + } + for _, tc := range testCases { + t.Logf("test case: %s", tc.name) + gotZones := pickZonesInRegion(tc.region, tc.zones) + if !sets.NewString(gotZones...).Equal(sets.NewString(tc.expZones...)) { + t.Errorf("Got zones: %v, expected: %v", gotZones, tc.expZones) + } + } +} + +func TestPrependZone(t *testing.T) { + testCases := []struct { + name string + zone string + zones []string + expZones []string + }{ + { + name: "zone already at index 0", + zone: "us-central1-a", + zones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, + expZones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, + }, + { + name: "zone at index 1", + zone: "us-central1-b", + zones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, + expZones: []string{"us-central1-b", "us-central1-a", "us-central1-c"}, + }, + { + name: "zone not in zones", + zone: "us-central1-f", + zones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, + expZones: []string{"us-central1-f", "us-central1-a", "us-central1-b", "us-central1-c"}, + }, + { + name: "empty zones", + zone: "us-central1-a", + zones: []string{}, + expZones: []string{"us-central1-a"}, + }, + } + for _, tc := range testCases { + t.Logf("test case: %s", tc.name) + gotZones := prependZone(tc.zone, tc.zones) + if !zonesEqual(gotZones, tc.expZones) { + t.Errorf("Got zones: %v, expected: %v", gotZones, tc.expZones) + } + } +} + func TestPickZonesFromTopology(t *testing.T) { testCases := []struct { name string top *csi.TopologyRequirement + locReq *locationRequirements numZones int expZones []string expErr bool @@ -1794,6 +2348,36 @@ func TestPickZonesFromTopology(t *testing.T) { numZones: 2, expZones: []string{"topology-zone2", "topology-zone3"}, }, + { + name: "success: preferred, locationRequirements[region:us-central1, zone:us-central1-a, srcReplicationType:none, cloneReplicationType:none]", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + }, + }, + locReq: &locationRequirements{srcVolRegion: "us-central1", srcVolZone: "us-central1-a", srcReplicationType: replicationTypeNone, cloneReplicationType: replicationTypeNone}, + numZones: 1, + expZones: []string{"us-central1-a"}, + }, { name: "success: preferred and requisite", top: &csi.TopologyRequirement{ @@ -1829,6 +2413,72 @@ func TestPickZonesFromTopology(t *testing.T) { numZones: 5, expZones: []string{"topology-zone2", "topology-zone3", "topology-zone1", "topology-zone5", "topology-zone6"}, }, + { + name: "success: preferred and requisite, 
locationRequirements[region:us-central1, zone:us-central1-a, srcReplicationType:regional-pd, cloneReplicationType:regional-pd]", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-d"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-f"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-west1-a"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-east1-a"}, + }, + }, + }, + locReq: &locationRequirements{srcVolRegion: "us-central1", srcVolZone: "us-central1-a", srcReplicationType: replicationTypeRegionalPD, cloneReplicationType: replicationTypeRegionalPD}, + numZones: 5, + expZones: []string{"us-central1-b", "us-central1-c", "us-central1-a", "us-central1-d", "us-central1-f"}, + }, + { + name: "success: preferred and requisite, locationRequirements[region:us-central1, zone:us-central1-a, srcReplicationType:none, cloneReplicationType:regional-pd]", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-d"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-f"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-west1-a"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-east1-a"}, + }, + }, + }, + locReq: &locationRequirements{srcVolRegion: "us-central1", srcVolZone: "us-central1-a", srcReplicationType: replicationTypeNone, cloneReplicationType: replicationTypeRegionalPD}, + numZones: 5, + expZones: []string{"us-central1-a", "us-central1-b", "us-central1-c", "us-central1-d", "us-central1-f"}, + }, { name: "fail: not enough topologies", top: &csi.TopologyRequirement{ @@ -1858,6 +2508,114 @@ func TestPickZonesFromTopology(t *testing.T) { numZones: 4, expErr: true, }, + { + name: "fail: no topologies that match locationRequirment, locationRequirements[region:us-east1, zone:us-east1-a, replicationType:none]", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + }, + }, + locReq: &locationRequirements{srcVolRegion: "us-east1", srcVolZone: "us-east1-a", cloneReplicationType: replicationTypeNone}, + numZones: 1, + expErr: true, + }, + { + name: "fail: no topologies that match locationRequirment, locationRequirements[region:us-east1, zone:us-east1-a, replicationType:regional-pd]", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + 
Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + }, + }, + locReq: &locationRequirements{srcVolRegion: "us-east1", srcVolZone: "us-east1-a", cloneReplicationType: replicationTypeRegionalPD}, + numZones: 2, + expErr: true, + }, + { + name: "fail: not enough topologies, locationRequirements[region:us-central1, zone:us-central1-a, replicationType:regional-pd]", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + }, + }, + locReq: &locationRequirements{srcVolRegion: "us-central1", srcVolZone: "us-central1-a", cloneReplicationType: replicationTypeRegionalPD}, + numZones: 4, + expErr: true, + }, + { + name: "success: only requisite, locationRequirements[region:us-central1, zone:us-central1-a, replicationType:regional-pd", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + }, + }, + numZones: 3, + expZones: []string{"us-central1-b", "us-central1-c", "us-central1-a"}, + }, { name: "success: only requisite", top: &csi.TopologyRequirement{ @@ -1879,12 +2637,12 @@ func TestPickZonesFromTopology(t *testing.T) { } for _, tc := range testCases { t.Logf("test case: %s", tc.name) - gotZones, err := pickZonesFromTopology(tc.top, tc.numZones) + gotZones, err := pickZonesFromTopology(tc.top, tc.numZones, tc.locReq) if err != nil && !tc.expErr { - t.Errorf("Did not expect error but got: %v", err) + t.Errorf("got error: %v, but did not expect error", err) } if err == nil && tc.expErr { - t.Errorf("Expected error but got none") + t.Errorf("got no error, but expected error") } if !sets.NewString(gotZones...).Equal(sets.NewString(tc.expZones...)) { t.Errorf("Expected zones: %v, but got: %v", tc.expZones, gotZones) @@ -1892,6 +2650,18 @@ func TestPickZonesFromTopology(t *testing.T) { } } +func zonesEqual(gotZones, expectedZones []string) bool { + if len(gotZones) != len(expectedZones) { + return false + } + for i := 0; i < len(gotZones); i++ { + if gotZones[i] != expectedZones[i] { + return false + } + } + return true +} + func TestPickRandAndConsecutive(t *testing.T) { rand.Seed(time.Now().UnixNano()) testCases := []struct { diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go index 1d301c33f..13e3efa47 100644 --- a/test/e2e/tests/single_zone_e2e_test.go +++ b/test/e2e/tests/single_zone_e2e_test.go @@ -314,8 +314,7 @@ var _ = Describe("GCE 
PD CSI Driver", func() { Expect(cloudDisk.Name).To(Equal(volName)) Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) for _, replicaZone := range cloudDisk.ReplicaZones { - tokens := strings.Split(replicaZone, "/") - actualZone := tokens[len(tokens)-1] + actualZone := zoneFromURL(replicaZone) gotRegion, err := common.GetRegionFromZones([]string{actualZone}) Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") @@ -668,8 +667,7 @@ var _ = Describe("GCE PD CSI Driver", func() { Expect(cloudDisk.Name).To(Equal(volName)) Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) for _, replicaZone := range cloudDisk.ReplicaZones { - tokens := strings.Split(replicaZone, "/") - actualZone := tokens[len(tokens)-1] + actualZone := zoneFromURL(replicaZone) gotRegion, err := common.GetRegionFromZones([]string{actualZone}) Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") @@ -1055,6 +1053,10 @@ var _ = Describe("GCE PD CSI Driver", func() { Expect(cloudDisk.Status).To(Equal(readyState)) Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb)) Expect(cloudDisk.Name).To(Equal(volName)) + // Validate the the clone disk zone matches the source disk zone. + _, srcKey, err := common.VolumeIDToKey(srcVolID) + Expect(err).To(BeNil(), "Could not get source volume key from id") + Expect(zoneFromURL(cloudDisk.Zone)).To(Equal(srcKey.Zone)) defer func() { // Delete Disk controllerClient.DeleteVolume(volID) @@ -1106,9 +1108,77 @@ var _ = Describe("GCE PD CSI Driver", func() { Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) Expect(cloudDisk.Name).To(Equal(volName)) Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) + replicaZonesCompatible := false + _, srcKey, err := common.VolumeIDToKey(srcVolID) + Expect(err).To(BeNil(), "Could not get source volume key from id") for _, replicaZone := range cloudDisk.ReplicaZones { - tokens := strings.Split(replicaZone, "/") - actualZone := tokens[len(tokens)-1] + actualZone := zoneFromURL(replicaZone) + if actualZone == srcKey.Zone { + replicaZonesCompatible = true + } + gotRegion, err := common.GetRegionFromZones([]string{actualZone}) + Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) + Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") + } + // Validate that one of the replicaZones of the clone matches the zone of the source disk. 
+ Expect(replicaZonesCompatible).To(Equal(true)) + defer func() { + // Delete Disk + controllerClient.DeleteVolume(volID) + Expect(err).To(BeNil(), "DeleteVolume failed") + + // Validate Disk Deleted + _, err = computeService.RegionDisks.Get(p, region, volName).Do() + Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found") + }() + }) + + It("Should successfully create RePD from a RePD VolumeContentSource", func() { + Expect(testContexts).ToNot(BeEmpty()) + testContext := getRandomTestContext() + + controllerInstance := testContext.Instance + controllerClient := testContext.Client + + p, z, _ := controllerInstance.GetIdentity() + + region, err := common.GetRegionFromZones([]string{z}) + Expect(err).To(BeNil(), "Failed to get region from zones") + + // Create Source Disk + srcVolName := testNamePrefix + string(uuid.NewUUID()) + srcVolID, err := controllerClient.CreateVolume(srcVolName, map[string]string{ + common.ParameterKeyReplicationType: "regional-pd", + }, defaultRepdSizeGb, nil, nil) + // Create Disk + volName := testNamePrefix + string(uuid.NewUUID()) + volID, err := controllerClient.CreateVolume(volName, map[string]string{ + common.ParameterKeyReplicationType: "regional-pd", + }, defaultRepdSizeGb, nil, + &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Volume{ + Volume: &csi.VolumeContentSource_VolumeSource{ + VolumeId: srcVolID, + }, + }, + }) + + Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err) + + // Validate Disk Created + cloudDisk, err := computeService.RegionDisks.Get(p, region, volName).Do() + Expect(err).To(BeNil(), "Could not get disk from cloud directly") + Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType)) + Expect(cloudDisk.Status).To(Equal(readyState)) + Expect(cloudDisk.SizeGb).To(Equal(defaultRepdSizeGb)) + Expect(cloudDisk.Name).To(Equal(volName)) + Expect(len(cloudDisk.ReplicaZones)).To(Equal(2)) + // Validate that the replicaZones of the clone match the replicaZones of the source disk. + srcCloudDisk, err := computeService.RegionDisks.Get(p, region, srcVolName).Do() + Expect(err).To(BeNil(), "Could not get source disk from cloud directly") + Expect(srcCloudDisk.ReplicaZones).To(Equal(cloudDisk.ReplicaZones)) + for _, replicaZone := range cloudDisk.ReplicaZones { + actualZone := zoneFromURL(replicaZone) gotRegion, err := common.GetRegionFromZones([]string{actualZone}) Expect(err).To(BeNil(), "failed to get region from actual zone %v", actualZone) Expect(gotRegion).To(Equal(region), "Got region from replica zone that did not match supplied region") @@ -1204,7 +1274,104 @@ func createAndValidateUniqueZonalMultiWriterDisk(client *remote.CsiClient, proje } func cleanSelfLink(selfLink string) string { - temp := strings.TrimPrefix(selfLink, gce.GCEComputeAPIEndpoint) - temp = strings.TrimPrefix(temp, gce.GCEComputeBetaAPIEndpoint) - return strings.TrimPrefix(temp, gce.GCEComputeAlphaAPIEndpoint) + r, _ := regexp.Compile("https:\\/\\/www.*apis.com\\/.*(v1|beta|alpha)\\/") + return r.ReplaceAllString(selfLink, "") +} + +// Returns the zone from the URL with the format https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}. +// Returns the empty string if the zone cannot be abstracted from the URL. 
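+// For example, a URL ending in "/zones/us-central1-a" returns "us-central1-a".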
+func zoneFromURL(url string) string { + tokens := strings.Split(url, "/") + if len(tokens) == 0 { + return "" + } + return tokens[len(tokens)-1] +} + +func setupKeyRing(ctx context.Context, parentName string, keyRingId string) (*kmspb.CryptoKey, []string) { + // Create KeyRing + ringReq := &kmspb.CreateKeyRingRequest{ + Parent: parentName, + KeyRingId: keyRingId, + } + keyRing, err := kmsClient.CreateKeyRing(ctx, ringReq) + if !gce.IsGCEError(err, "alreadyExists") { + getKeyRingReq := &kmspb.GetKeyRingRequest{ + Name: fmt.Sprintf("%s/keyRings/%s", parentName, keyRingId), + } + keyRing, err = kmsClient.GetKeyRing(ctx, getKeyRingReq) + + } + Expect(err).To(BeNil(), "Failed to create or get key ring %v", keyRingId) + + // Create CryptoKey in KeyRing + keyId := "test-key-" + string(uuid.NewUUID()) + keyReq := &kmspb.CreateCryptoKeyRequest{ + Parent: keyRing.Name, + CryptoKeyId: keyId, + CryptoKey: &kmspb.CryptoKey{ + Purpose: kmspb.CryptoKey_ENCRYPT_DECRYPT, + VersionTemplate: &kmspb.CryptoKeyVersionTemplate{ + Algorithm: kmspb.CryptoKeyVersion_GOOGLE_SYMMETRIC_ENCRYPTION, + }, + }, + } + key, err := kmsClient.CreateCryptoKey(ctx, keyReq) + Expect(err).To(BeNil(), "Failed to create crypto key %v in key ring %v", keyId, keyRing.Name) + + keyVersions := []string{} + keyVersionReq := &kmspb.ListCryptoKeyVersionsRequest{ + Parent: key.Name, + } + + it := kmsClient.ListCryptoKeyVersions(ctx, keyVersionReq) + + for { + keyVersion, err := it.Next() + if err == iterator.Done { + break + } + Expect(err).To(BeNil(), "Failed to list crypto key versions") + + keyVersions = append(keyVersions, keyVersion.Name) + } + return key, keyVersions +} + +type disk struct { + params map[string]string + validate func(disk *compute.Disk) +} + +var typeToDisk = map[string]*disk{ + standardDiskType: { + params: map[string]string{ + common.ParameterKeyType: standardDiskType, + }, + validate: func(disk *compute.Disk) { + Expect(disk.Type).To(ContainSubstring(standardDiskType)) + }, + }, + extremeDiskType: { + params: map[string]string{ + common.ParameterKeyType: extremeDiskType, + common.ParameterKeyProvisionedIOPSOnCreate: provisionedIOPSOnCreate, + }, + validate: func(disk *compute.Disk) { + Expect(disk.Type).To(ContainSubstring(extremeDiskType)) + Expect(disk.ProvisionedIops).To(Equal(provisionedIOPSOnCreateInt)) + }, + }, +} + +func merge(a, b map[string]string) map[string]string { + res := map[string]string{} + for k, v := range a { + res[k] = v + } + for k, v := range b { + res[k] = v + } + return res +>>>>>>> b9028774 (fix bug where volume cloning topology requirements are ignored when chosing the location of the volume) } From 3e2fe553d7c2973fe32c110098a40ceb165f8b59 Mon Sep 17 00:00:00 2001 From: Sunny Song Date: Wed, 19 Jul 2023 23:14:47 +0000 Subject: [PATCH 2/2] Update go.mod --- go.mod | 7 +- go.sum | 8 +- test/e2e/tests/single_zone_e2e_test.go | 89 +--- vendor/k8s.io/klog/v2/OWNERS | 1 + vendor/k8s.io/klog/v2/README.md | 1 - vendor/k8s.io/klog/v2/contextual.go | 43 +- vendor/k8s.io/klog/v2/internal/dbg/dbg.go | 42 ++ .../klog/v2/internal/serialize/keyvalues.go | 120 ++++-- vendor/k8s.io/klog/v2/k8s_references.go | 64 +++ vendor/k8s.io/klog/v2/klog.go | 395 +++++++++++------- vendor/k8s.io/klog/v2/klogr.go | 8 +- vendor/k8s.io/utils/exec/exec.go | 10 +- vendor/k8s.io/utils/exec/fixup_go118.go | 32 ++ vendor/k8s.io/utils/exec/fixup_go119.go | 40 ++ vendor/k8s.io/utils/exec/testing/fake_exec.go | 33 +- vendor/k8s.io/utils/strings/slices/slices.go | 82 ++++ vendor/modules.txt | 8 +- 17 files changed, 648 
insertions(+), 335 deletions(-) create mode 100644 vendor/k8s.io/klog/v2/internal/dbg/dbg.go create mode 100644 vendor/k8s.io/utils/exec/fixup_go118.go create mode 100644 vendor/k8s.io/utils/exec/fixup_go119.go create mode 100644 vendor/k8s.io/utils/strings/slices/slices.go diff --git a/go.mod b/go.mod index d962061b0..403e3e281 100644 --- a/go.mod +++ b/go.mod @@ -22,13 +22,15 @@ require ( k8s.io/apimachinery v0.22.0 k8s.io/client-go v11.0.1-0.20190805182717-6502b5e7b1b5+incompatible k8s.io/component-base v0.22.0 - k8s.io/klog/v2 v2.60.1 + k8s.io/klog/v2 v2.80.1 k8s.io/kubernetes v1.22.0 k8s.io/mount-utils v0.22.0 k8s.io/test-infra v0.0.0-20200115230622-70a5174aa78d - k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 + k8s.io/utils v0.0.0-20230711102312-30195339c3c7 ) +require google.golang.org/protobuf v1.26.0 + require ( github.com/Microsoft/go-winio v0.4.16 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -65,7 +67,6 @@ require ( golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.6 // indirect - google.golang.org/protobuf v1.26.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index c8e3c8eb0..dae9dbd9f 100644 --- a/go.sum +++ b/go.sum @@ -647,7 +647,6 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -1218,8 +1217,8 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-aggregator v0.22.0/go.mod h1:zHTepg0Q4tKzru7Pwg1QYHWrU/wrvIXM8hUdDAH66qg= k8s.io/kube-controller-manager v0.22.0/go.mod h1:E/EYMoCj8bbPRmu19JF4B9QLyQL8Tywg+9Q/rg+F80U= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= @@ -1245,8 +1244,9 @@ k8s.io/test-infra v0.0.0-20200115230622-70a5174aa78d/go.mod h1:d8SKryJBXAwfCFVL4 k8s.io/utils v0.0.0-20181019225348-5e321f9a457c/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM= k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= knative.dev/pkg v0.0.0-20191101194912-56c2594e4f11/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go index 13e3efa47..8d2bfcc66 100644 --- a/test/e2e/tests/single_zone_e2e_test.go +++ b/test/e2e/tests/single_zone_e2e_test.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "path/filepath" + "regexp" "strings" "time" @@ -1287,91 +1288,3 @@ func zoneFromURL(url string) string { } return tokens[len(tokens)-1] } - -func setupKeyRing(ctx context.Context, parentName string, keyRingId string) (*kmspb.CryptoKey, []string) { - // Create KeyRing - ringReq := &kmspb.CreateKeyRingRequest{ - Parent: parentName, - KeyRingId: keyRingId, - } - keyRing, err := kmsClient.CreateKeyRing(ctx, ringReq) - if !gce.IsGCEError(err, "alreadyExists") { - getKeyRingReq := &kmspb.GetKeyRingRequest{ - Name: fmt.Sprintf("%s/keyRings/%s", parentName, keyRingId), - } - keyRing, err = kmsClient.GetKeyRing(ctx, getKeyRingReq) - - } - Expect(err).To(BeNil(), "Failed to create or get key ring %v", keyRingId) - - // Create CryptoKey in KeyRing - keyId := "test-key-" + string(uuid.NewUUID()) - keyReq := &kmspb.CreateCryptoKeyRequest{ - Parent: keyRing.Name, - CryptoKeyId: keyId, - CryptoKey: &kmspb.CryptoKey{ - Purpose: kmspb.CryptoKey_ENCRYPT_DECRYPT, - VersionTemplate: &kmspb.CryptoKeyVersionTemplate{ - Algorithm: kmspb.CryptoKeyVersion_GOOGLE_SYMMETRIC_ENCRYPTION, - }, - }, - } - key, err := kmsClient.CreateCryptoKey(ctx, keyReq) - Expect(err).To(BeNil(), "Failed to create crypto key %v in key ring %v", keyId, keyRing.Name) - - keyVersions := []string{} - keyVersionReq := &kmspb.ListCryptoKeyVersionsRequest{ - Parent: key.Name, - } - - it := kmsClient.ListCryptoKeyVersions(ctx, keyVersionReq) - - for { - keyVersion, err := it.Next() - if err == iterator.Done { - break - } - Expect(err).To(BeNil(), "Failed to list crypto key versions") - - keyVersions = append(keyVersions, keyVersion.Name) - } - return key, keyVersions -} - -type disk struct { - params map[string]string - validate func(disk *compute.Disk) -} - -var typeToDisk = map[string]*disk{ - standardDiskType: { - params: map[string]string{ - common.ParameterKeyType: standardDiskType, - }, - validate: func(disk *compute.Disk) { - Expect(disk.Type).To(ContainSubstring(standardDiskType)) - }, - }, - extremeDiskType: { - params: map[string]string{ - common.ParameterKeyType: extremeDiskType, - common.ParameterKeyProvisionedIOPSOnCreate: provisionedIOPSOnCreate, - }, - validate: func(disk *compute.Disk) { - Expect(disk.Type).To(ContainSubstring(extremeDiskType)) - Expect(disk.ProvisionedIops).To(Equal(provisionedIOPSOnCreateInt)) - }, - }, -} - -func merge(a, b map[string]string) map[string]string { - res := map[string]string{} - for k, v := range a { - res[k] = v - } - for k, v := range b { - res[k] = v - } - return res ->>>>>>> b9028774 (fix bug where volume cloning topology requirements 
are ignored when chosing the location of the volume) -} diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index 8cccebf2e..a2fe8f351 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -1,5 +1,6 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: + - harshanarayana - pohly approvers: - dims diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index 7de2212cc..d45cbe172 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -28,7 +28,6 @@ Historical context is available here: Semantic versioning is used in this repository. It contains several Go modules with different levels of stability: - `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags -- `k8s.io/hack/tools` - no stable API yet (may change eventually or get moved to separate repo), `hack/tools/v0.Y.Z` tags - `examples` - no stable API, no tags, no intention to ever stabilize Exempt from the API stability guarantee are items (packages, functions, etc.) diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go index 0bf19280e..2428963c0 100644 --- a/vendor/k8s.io/klog/v2/contextual.go +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -34,18 +34,6 @@ import ( // mutex locking. var ( - // contextualLoggingEnabled controls whether contextual logging is - // active. Disabling it may have some small performance benefit. - contextualLoggingEnabled = true - - // globalLogger is the global Logger chosen by users of klog, nil if - // none is available. - globalLogger *Logger - - // globalLoggerOptions contains the options that were supplied for - // globalLogger. - globalLoggerOptions loggerOptions - // klogLogger is used as fallback for logging through the normal klog code // when no Logger is set. klogLogger logr.Logger = logr.New(&klogger{}) @@ -59,8 +47,9 @@ var ( // If set, all log lines will be suppressed from the regular output, and // redirected to the logr implementation. // Use as: -// ... -// klog.SetLogger(zapr.NewLogger(zapLog)) +// +// ... +// klog.SetLogger(zapr.NewLogger(zapLog)) // // To remove a backing logr implemention, use ClearLogger. Setting an // empty logger with SetLogger(logr.Logger{}) does not work. @@ -81,10 +70,10 @@ func SetLogger(logger logr.Logger) { // routing log entries through klogr into klog and then into the actual Logger // backend. func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { - globalLogger = &logger - globalLoggerOptions = loggerOptions{} + logging.logger = &logger + logging.loggerOptions = loggerOptions{} for _, opt := range opts { - opt(&globalLoggerOptions) + opt(&logging.loggerOptions) } } @@ -119,8 +108,8 @@ type loggerOptions struct { // Modifying the logger is not thread-safe and should be done while no other // goroutines invoke log calls, usually during program initialization. func ClearLogger() { - globalLogger = nil - globalLoggerOptions = loggerOptions{} + logging.logger = nil + logging.loggerOptions = loggerOptions{} } // EnableContextualLogging controls whether contextual logging is enabled. @@ -132,14 +121,14 @@ func ClearLogger() { // // This must be called during initialization before goroutines are started. func EnableContextualLogging(enabled bool) { - contextualLoggingEnabled = enabled + logging.contextualLoggingEnabled = enabled } // FromContext retrieves a logger set by the caller or, if not set, // falls back to the program's global logger (a Logger instance or klog // itself). 
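The contextual.go hunk above folds the contextual-logging globals into the shared logging state without changing the public API. As a minimal caller-side sketch, not part of the patch and with purely illustrative names, this is the usual pattern for attaching a Logger to a context and retrieving it again:

    package main

    import (
        "context"

        "k8s.io/klog/v2"
    )

    func doWork(ctx context.Context) {
        // FromContext falls back to the global logger when no Logger is attached.
        logger := klog.FromContext(ctx)
        logger.Info("reconciling", "attempt", 1)
    }

    func main() {
        logger := klog.Background().WithName("controller")
        ctx := klog.NewContext(context.Background(), logger)
        doWork(ctx)
        klog.Flush()
    }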
func FromContext(ctx context.Context) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { if logger, err := logr.FromContext(ctx); err == nil { return logger } @@ -160,10 +149,10 @@ func TODO() Logger { // better receive a logger via its parameters. TODO can be used as a temporary // solution for such code. func Background() Logger { - if globalLoggerOptions.contextualLogger { - // Is non-nil because globalLoggerOptions.contextualLogger is + if logging.loggerOptions.contextualLogger { + // Is non-nil because logging.loggerOptions.contextualLogger is // only true if a logger was set. - return *globalLogger + return *logging.logger } return klogLogger @@ -172,7 +161,7 @@ func Background() Logger { // LoggerWithValues returns logger.WithValues(...kv) when // contextual logging is enabled, otherwise the logger. func LoggerWithValues(logger Logger, kv ...interface{}) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logger.WithValues(kv...) } return logger @@ -181,7 +170,7 @@ func LoggerWithValues(logger Logger, kv ...interface{}) Logger { // LoggerWithName returns logger.WithName(name) when contextual logging is // enabled, otherwise the logger. func LoggerWithName(logger Logger, name string) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logger.WithName(name) } return logger @@ -190,7 +179,7 @@ func LoggerWithName(logger Logger, name string) Logger { // NewContext returns logr.NewContext(ctx, logger) when // contextual logging is enabled, otherwise ctx. func NewContext(ctx context.Context, logger Logger) context.Context { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logr.NewContext(ctx, logger) } return ctx diff --git a/vendor/k8s.io/klog/v2/internal/dbg/dbg.go b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go new file mode 100644 index 000000000..f27bd1447 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go @@ -0,0 +1,42 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dbg provides some helper code for call traces. +package dbg + +import ( + "runtime" +) + +// Stacks is a wrapper for runtime.Stack that attempts to recover the data for +// all goroutines or the calling one. +func Stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. 
+ n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index d89731368..ad6bf1116 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -20,6 +20,8 @@ import ( "bytes" "fmt" "strconv" + + "github.com/go-logr/logr" ) // WithValues implements LogSink.WithValues. The old key/value pairs are @@ -44,53 +46,49 @@ func WithValues(oldKV, newKV []interface{}) []interface{} { return kv } -// TrimDuplicates deduplicates elements provided in multiple key/value tuple -// slices, whilst maintaining the distinction between where the items are -// contained. -func TrimDuplicates(kvLists ...[]interface{}) [][]interface{} { - // maintain a map of all seen keys - seenKeys := map[interface{}]struct{}{} - // build the same number of output slices as inputs - outs := make([][]interface{}, len(kvLists)) - // iterate over the input slices backwards, as 'later' kv specifications - // of the same key will take precedence over earlier ones - for i := len(kvLists) - 1; i >= 0; i-- { - // initialise this output slice - outs[i] = []interface{}{} - // obtain a reference to the kvList we are processing - // and make sure it has an even number of entries - kvList := kvLists[i] - if len(kvList)%2 != 0 { - kvList = append(kvList, missingValue) - } +// MergeKVs deduplicates elements provided in two key/value slices. +// +// Keys in each slice are expected to be unique, so duplicates can only occur +// when the first and second slice contain the same key. When that happens, the +// key/value pair from the second slice is used. The first slice must be well-formed +// (= even key/value pairs). The second one may have a missing value, in which +// case the special "missing value" is added to the result. +func MergeKVs(first, second []interface{}) []interface{} { + maxLength := len(first) + (len(second)+1)/2*2 + if maxLength == 0 { + // Nothing to do at all. + return nil + } - // start iterating at len(kvList) - 2 (i.e. the 2nd last item) for - // slices that have an even number of elements. - // We add (len(kvList) % 2) here to handle the case where there is an - // odd number of elements in a kvList. - // If there is an odd number, then the last element in the slice will - // have the value 'null'. - for i2 := len(kvList) - 2 + (len(kvList) % 2); i2 >= 0; i2 -= 2 { - k := kvList[i2] - // if we have already seen this key, do not include it again - if _, ok := seenKeys[k]; ok { - continue - } - // make a note that we've observed a new key - seenKeys[k] = struct{}{} - // attempt to obtain the value of the key - var v interface{} - // i2+1 should only ever be out of bounds if we handling the first - // iteration over a slice with an odd number of elements - if i2+1 < len(kvList) { - v = kvList[i2+1] - } - // add this KV tuple to the *start* of the output list to maintain - // the original order as we are iterating over the slice backwards - outs[i] = append([]interface{}{k, v}, outs[i]...) + if len(first) == 0 && len(second)%2 == 0 { + // Nothing to be overridden, second slice is well-formed + // and can be used directly. 
+ return second + } + + // Determine which keys are in the second slice so that we can skip + // them when iterating over the first one. The code intentionally + // favors performance over completeness: we assume that keys are string + // constants and thus compare equal when the string values are equal. A + // string constant being overridden by, for example, a fmt.Stringer is + // not handled. + overrides := map[interface{}]bool{} + for i := 0; i < len(second); i += 2 { + overrides[second[i]] = true + } + merged := make([]interface{}, 0, maxLength) + for i := 0; i+1 < len(first); i += 2 { + key := first[i] + if overrides[key] { + continue } + merged = append(merged, key, first[i+1]) } - return outs + merged = append(merged, second...) + if len(merged)%2 != 0 { + merged = append(merged, missingValue) + } + return merged } const missingValue = "(MISSING)" @@ -111,10 +109,10 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments // for the sake of performance. Keys with spaces, // special characters, etc. will break parsing. - if k, ok := k.(string); ok { + if sK, ok := k.(string); ok { // Avoid one allocation when the key is a string, which // normally it should be. - b.WriteString(k) + b.WriteString(sK) } else { b.WriteString(fmt.Sprintf("%s", k)) } @@ -131,6 +129,24 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { writeStringValue(b, true, v) case error: writeStringValue(b, true, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, true, value) + default: + writeStringValue(b, false, fmt.Sprintf("%+v", value)) + } case []byte: // In https://github.com/kubernetes/klog/pull/237 it was decided // to format byte slices with "%+q". The advantages of that are: @@ -163,6 +179,18 @@ func StringerToString(s fmt.Stringer) (ret string) { return } +// MarshalerToValue invokes a marshaler and catches +// panics. +func MarshalerToValue(m logr.Marshaler) (ret interface{}) { + defer func() { + if err := recover(); err != nil { + ret = fmt.Sprintf("", err) + } + }() + ret = m.MarshalLog() + return +} + // ErrorToString converts an error to a string, // handling panics if they occur. func ErrorToString(err error) (ret string) { diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go index db58f8baa..2c218f698 100644 --- a/vendor/k8s.io/klog/v2/k8s_references.go +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -77,6 +77,8 @@ func KRef(namespace, name string) ObjectRef { } // KObjs returns slice of ObjectRef from an slice of ObjectMeta +// +// DEPRECATED: Use KObjSlice instead, it has better performance. 
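The serialize hunk above replaces TrimDuplicates with MergeKVs, under which a key supplied at the call site overrides the same key stored earlier via WithValues. A short sketch of the observable behavior, assuming the default klog text backend; not part of the patch:

    package main

    import "k8s.io/klog/v2"

    func main() {
        logger := klog.Background().WithValues("pod", "stale-name")
        // The call-site "pod" value overrides the WithValues one, so only
        // pod="fresh-name" appears in the emitted log line.
        logger.Info("status updated", "pod", "fresh-name")
        klog.Flush()
    }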
func KObjs(arg interface{}) []ObjectRef { s := reflect.ValueOf(arg) if s.Kind() != reflect.Slice { @@ -92,3 +94,65 @@ func KObjs(arg interface{}) []ObjectRef { } return objectRefs } + +// KObjSlice takes a slice of objects that implement the KMetadata interface +// and returns an object that gets logged as a slice of ObjectRef values or a +// string containing those values, depending on whether the logger prefers text +// output or structured output. +// +// An error string is logged when KObjSlice is not passed a suitable slice. +// +// Processing of the argument is delayed until the value actually gets logged, +// in contrast to KObjs where that overhead is incurred regardless of whether +// the result is needed. +func KObjSlice(arg interface{}) interface{} { + return kobjSlice{arg: arg} +} + +type kobjSlice struct { + arg interface{} +} + +var _ fmt.Stringer = kobjSlice{} +var _ logr.Marshaler = kobjSlice{} + +func (ks kobjSlice) String() string { + objectRefs, err := ks.process() + if err != nil { + return err.Error() + } + return fmt.Sprintf("%v", objectRefs) +} + +func (ks kobjSlice) MarshalLog() interface{} { + objectRefs, err := ks.process() + if err != nil { + return err.Error() + } + return objectRefs +} + +func (ks kobjSlice) process() ([]interface{}, error) { + s := reflect.ValueOf(ks.arg) + switch s.Kind() { + case reflect.Invalid: + // nil parameter, print as nil. + return nil, nil + case reflect.Slice: + // Okay, handle below. + default: + return nil, fmt.Errorf("", ks.arg) + } + objectRefs := make([]interface{}, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + item := s.Index(i).Interface() + if item == nil { + objectRefs = append(objectRefs, nil) + } else if v, ok := item.(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil, fmt.Errorf("", item) + } + } + return objectRefs, nil +} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index cb04590fe..1bd11b675 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -39,35 +39,38 @@ // This package provides several flags that modify this behavior. // As a result, flag.Parse must be called before any logging is done. // -// -logtostderr=true -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. +// -logtostderr=true +// Logs are written to standard error instead of to files. +// This shortcuts most of the usual output routing: +// -alsologtostderr, -stderrthreshold and -log_dir have no +// effect and output redirection at runtime with SetOutput is +// ignored. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. // -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) 
-// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". +// Other flags provide aids to debugging. // +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". package klog import ( @@ -92,6 +95,7 @@ import ( "k8s.io/klog/v2/internal/buffer" "k8s.io/klog/v2/internal/clock" + "k8s.io/klog/v2/internal/dbg" "k8s.io/klog/v2/internal/serialize" "k8s.io/klog/v2/internal/severity" ) @@ -242,6 +246,10 @@ func (m *moduleSpec) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() + return m.serialize() +} + +func (m *moduleSpec) serialize() string { var b bytes.Buffer for i, f := range m.filter { if i > 0 { @@ -263,6 +271,17 @@ var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of // Set will sets module value // Syntax: -vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { + filter, err := parseModuleSpec(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +func parseModuleSpec(value string) ([]modulePat, error) { var filter []modulePat for _, pat := range strings.Split(value, ",") { if len(pat) == 0 { @@ -271,15 +290,15 @@ func (m *moduleSpec) Set(value string) error { } patLev := strings.Split(pat, "=") if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax + return nil, errVmoduleSyntax } pattern := patLev[0] v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") + return nil, errors.New("syntax error: expect comma-separated list of filename=N") } if v < 0 { - return errors.New("negative value for vmodule level") + return nil, errors.New("negative value for vmodule level") } if v == 0 { continue // Ignore. It's harmless but no point in paying the overhead. @@ -287,10 +306,7 @@ func (m *moduleSpec) Set(value string) error { // TODO: check syntax of filter? filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil + return filter, nil } // isLiteral reports whether the pattern is a literal string, that is, has no metacharacters @@ -380,45 +396,48 @@ type flushSyncWriter interface { io.Writer } -// init sets up the defaults. +var logging loggingT +var commandLine flag.FlagSet + +// init sets up the defaults and creates command line flags. 
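The klog.go changes around this point register every flag on a package-private commandLine FlagSet during init and let InitFlags copy them into the caller's FlagSet. Caller-side wiring is unchanged; a minimal sketch with a hypothetical program:

    package main

    import (
        "flag"

        "k8s.io/klog/v2"
    )

    func main() {
        klog.InitFlags(nil) // nil registers the klog flags on flag.CommandLine
        flag.Parse()        // e.g. invoked with -v=2 -logtostderr=true
        klog.V(2).InfoS("starting up", "component", "example")
        klog.Flush()
    }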
func init() { + commandLine.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory (no effect when -logtostderr=true)") + commandLine.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file (no effect when -logtostderr=true)") + commandLine.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800, + "Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. "+ + "If the value is 0, the maximum file size is unlimited.") + commandLine.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files") + commandLine.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files (no effect when -logtostderr=true)") + logging.setVState(0, nil, false) + commandLine.Var(&logging.verbosity, "v", "number for the log level verbosity") + commandLine.BoolVar(&logging.addDirHeader, "add_dir_header", false, "If true, adds the file directory to the header of the log messages") + commandLine.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") + commandLine.BoolVar(&logging.oneOutput, "one_output", false, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)") + commandLine.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files (no effect when -logtostderr=true)") logging.stderrThreshold = severityValue{ Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. } - logging.setVState(0, nil, false) - logging.logDir = "" - logging.logFile = "" - logging.logFileMaxSizeMB = 1800 - logging.toStderr = true - logging.alsoToStderr = false - logging.skipHeaders = false - logging.addDirHeader = false - logging.skipLogHeaders = false - logging.oneOutput = false + commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)") + commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + logging.settings.contextualLoggingEnabled = true logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil) } // InitFlags is for explicitly initializing the flags. +// It may get called repeatedly for different flagsets, but not +// twice for the same one. May get called concurrently +// to other goroutines using klog. However, only some flags +// may get set concurrently (see implementation). func InitFlags(flagset *flag.FlagSet) { if flagset == nil { flagset = flag.CommandLine } - flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") - flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") - flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, - "Defines the maximum size a log file can grow to. Unit is megabytes. 
"+ - "If the value is 0, the maximum file size is unlimited.") - flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") - flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") - flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") - flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") - flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level)") - flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") - flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + commandLine.VisitAll(func(f *flag.Flag) { + flagset.Var(f.Value, f.Name, f.Usage) + }) } // Flush flushes all pending log I/O. @@ -426,8 +445,20 @@ func Flush() { logging.lockAndFlushAll() } -// loggingT collects all the global state of the logging setup. -type loggingT struct { +// settings collects global settings. +type settings struct { + // contextualLoggingEnabled controls whether contextual logging is + // active. Disabling it may have some small performance benefit. + contextualLoggingEnabled bool + + // logger is the global Logger chosen by users of klog, nil if + // none is available. + logger *Logger + + // loggerOptions contains the options that were supplied for + // globalLogger. + loggerOptions loggerOptions + // Boolean flags. Not handled atomically because the flag.Value interface // does not let us avoid the =true, and that shorthand is necessary for // compatibility. TODO: does this matter enough to fix? Seems unlikely. @@ -437,26 +468,14 @@ type loggingT struct { // Level flag. Handled atomically. stderrThreshold severityValue // The -stderrthreshold flag. - // bufferCache maintains the free list. It uses its own mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - bufferCache buffer.Buffers + // Access to all of the following fields must be protected via a mutex. - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex // file holds writer for each of the log types. file [severity.NumSeverity]flushSyncWriter - // flushD holds a flushDaemon that frequently flushes log file buffers. - flushD *flushDaemon // flushInterval is the interval for periodic flushing. If zero, // the global default will be used. flushInterval time.Duration - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater // than zero, it means vmodule is enabled. 
It may be read safely // using sync.LoadInt32, but is only modified under mu. @@ -496,7 +515,42 @@ type loggingT struct { filter LogFilter } -var logging loggingT +// deepCopy creates a copy that doesn't share anything with the original +// instance. +func (s settings) deepCopy() settings { + // vmodule is a slice and would be shared, so we have copy it. + filter := make([]modulePat, len(s.vmodule.filter)) + for i := range s.vmodule.filter { + filter[i] = s.vmodule.filter[i] + } + s.vmodule.filter = filter + + return s +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + settings + + // bufferCache maintains the free list. It uses its own mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + bufferCache buffer.Buffers + + // flushD holds a flushDaemon that frequently flushes log file buffers. + // Uses its own mutex. + flushD *flushDaemon + + // mu protects the remaining elements of this structure and the fields + // in settingsT which need a mutex lock. + mu sync.Mutex + + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level +} // setVState sets a consistent state for V logging. // l.mu is held. @@ -520,14 +574,66 @@ func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool var timeNow = time.Now // Stubbed out for testing. +// CaptureState gathers information about all current klog settings. +// The result can be used to restore those settings. +func CaptureState() State { + logging.mu.Lock() + defer logging.mu.Unlock() + return &state{ + settings: logging.settings.deepCopy(), + flushDRunning: logging.flushD.isRunning(), + maxSize: MaxSize, + } +} + +// State stores a snapshot of klog settings. It gets created with CaptureState +// and can be used to restore the entire state. Modifying individual settings +// is supported via the command line flags. +type State interface { + // Restore restore the entire state. It may get called more than once. + Restore() +} + +type state struct { + settings + + flushDRunning bool + maxSize uint64 +} + +func (s *state) Restore() { + // This needs to be done before mutex locking. + if s.flushDRunning && !logging.flushD.isRunning() { + // This is not quite accurate: StartFlushDaemon might + // have been called with some different interval. + interval := s.flushInterval + if interval == 0 { + interval = flushInterval + } + logging.flushD.run(interval) + } else if !s.flushDRunning && logging.flushD.isRunning() { + logging.flushD.stop() + } + + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.settings = s.settings + logging.setVState(s.verbosity, s.vmodule.filter, true) + MaxSize = s.maxSize +} + /* header formats a log header as defined by the C++ implementation. It returns a buffer containing the formatted header and the user's file and line number. The depth specifies how many stack frames above lives the source line to be identified in the log message. Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... 
+ where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) mm The month (zero padded; ie May is '05') dd The day (zero padded) @@ -688,7 +794,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, "err", err) } serialize.KVListFormat(&b.Buffer, keysAndValues...) - l.printDepth(s, globalLogger, nil, depth+1, &b.Buffer) + l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. l.bufferCache.PutBuffer(b) } @@ -757,7 +863,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) + buf.Write(dbg.Stacks(false)) } } data := buf.Bytes() @@ -765,7 +871,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == severity.ErrorLog { - globalLogger.WithCallDepth(depth+3).Error(nil, string(data)) + logging.logger.WithCallDepth(depth+3).Error(nil, string(data)) } else { log.WithCallDepth(depth + 3).Info(string(data)) } @@ -822,12 +928,15 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf OsExit(1) } // Dump all goroutine stacks before exiting. - trace := stacks(true) - // Write the stack trace for all goroutines to the stderr. - if l.toStderr || l.alsoToStderr || s >= l.stderrThreshold.get() || alsoToStderr { - os.Stderr.Write(trace) + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(dbg.Stacks(false)) } + // Write the stack trace for all goroutines to the files. + trace := dbg.Stacks(true) logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. @@ -847,25 +956,6 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf } } -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - // logExitFunc provides a simple mechanism to override the default behavior // of exiting on error. Used in testing and to guarantee we reach a required exit // for fatal logs. Instead, exit could be a function rather than a method but that @@ -1077,9 +1167,9 @@ func (f *flushDaemon) isRunning() bool { return f.stopC != nil } -// StopFlushDaemon stops the flush daemon, if running. +// StopFlushDaemon stops the flush daemon, if running, and flushes once. // This prevents klog from leaking goroutines on shutdown. After stopping -// the daemon, you can still manually flush buffers by calling Flush(). +// the daemon, you can still manually flush buffers again by calling Flush(). 
func StopFlushDaemon() { logging.flushD.stop() } @@ -1109,8 +1199,8 @@ func (l *loggingT) flushAll() { file.Sync() // ignore error } } - if globalLoggerOptions.flush != nil { - globalLoggerOptions.flush() + if logging.loggerOptions.flush != nil { + logging.loggerOptions.flush() } } @@ -1158,7 +1248,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity.Severity(lb), globalLogger, logging.filter, file, line, true, text) + logging.printWithFileLine(severity.Severity(lb), logging.logger, logging.filter, file, line, true, text) return len(b), nil } @@ -1196,10 +1286,10 @@ type Verbose struct { } func newVerbose(level Level, b bool) Verbose { - if globalLogger == nil { + if logging.logger == nil { return Verbose{b, nil} } - v := globalLogger.V(int(level)) + v := logging.logger.V(int(level)) return Verbose{b, &v} } @@ -1207,9 +1297,13 @@ func newVerbose(level Level, b bool) Verbose { // The returned value is a struct of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either +// // if klog.V(2).Enabled() { klog.Info("log this") } +// // or +// // klog.V(2).Info("log this") +// // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -1318,7 +1412,7 @@ func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { // InfoSDepth acts as InfoS but uses depth to determine which call frame to log. // InfoSDepth(0, "msg") is the same as InfoS("msg"). func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { - logging.infoS(globalLogger, logging.filter, depth, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, depth, msg, keysAndValues...) } // InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v. @@ -1347,37 +1441,37 @@ func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(severity.InfoLog, globalLogger, logging.filter, args...) + logging.print(severity.InfoLog, logging.logger, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(severity.InfoLog, globalLogger, logging.filter, args...) + logging.println(severity.InfoLog, logging.logger, logging.filter, args...) } // InfolnDepth acts as Infoln but uses depth to determine which call frame to log. // InfolnDepth(0, "msg") is the same as Infoln("msg"). func InfolnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. 
func Infof(format string, args ...interface{}) { - logging.printf(severity.InfoLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.InfoLog, logging.logger, logging.filter, format, args...) } // InfofDepth acts as Infof but uses depth to determine which call frame to log. // InfofDepth(0, "msg", args...) is the same as Infof("msg", args...). func InfofDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.InfoLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.InfoLog, logging.logger, logging.filter, depth, format, args...) } // InfoS structured logs to the INFO log. @@ -1389,79 +1483,79 @@ func InfofDepth(depth int, format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(globalLogger, logging.filter, 0, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(severity.WarningLog, globalLogger, logging.filter, args...) + logging.print(severity.WarningLog, logging.logger, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(severity.WarningLog, globalLogger, logging.filter, args...) + logging.println(severity.WarningLog, logging.logger, logging.filter, args...) } // WarninglnDepth acts as Warningln but uses depth to determine which call frame to log. // WarninglnDepth(0, "msg") is the same as Warningln("msg"). func WarninglnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(severity.WarningLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.WarningLog, logging.logger, logging.filter, format, args...) } // WarningfDepth acts as Warningf but uses depth to determine which call frame to log. // WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...). func WarningfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.WarningLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.WarningLog, logging.logger, logging.filter, depth, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(severity.ErrorLog, globalLogger, logging.filter, args...) 
+ logging.print(severity.ErrorLog, logging.logger, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(severity.ErrorLog, globalLogger, logging.filter, args...) + logging.println(severity.ErrorLog, logging.logger, logging.filter, args...) } // ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log. // ErrorlnDepth(0, "msg") is the same as Errorln("msg"). func ErrorlnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(severity.ErrorLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.ErrorLog, logging.logger, logging.filter, format, args...) } // ErrorfDepth acts as Errorf but uses depth to determine which call frame to log. // ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...). func ErrorfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.ErrorLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.ErrorLog, logging.logger, logging.filter, depth, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1474,52 +1568,63 @@ func ErrorfDepth(depth int, format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, globalLogger, logging.filter, 0, msg, keysAndValues...) + logging.errorS(err, logging.logger, logging.filter, 0, msg, keysAndValues...) } // ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. // ErrorSDepth(0, "msg") is the same as ErrorS("msg"). func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, globalLogger, logging.filter, depth, msg, keysAndValues...) + logging.errorS(err, logging.logger, logging.filter, depth, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls OsExit(255). +// prints stack trace(s), then calls OsExit(255). +// +// Stderr only receives a dump of the current goroutine's stack trace. Log files, +// if there are any, receive a dump of the stack traces in all goroutines. 
+// +// Callers who want more control over handling of fatal events may instead use a +// combination of different functions: +// - some info or error logging function, optionally with a stack trace +// value generated by github.com/go-logr/lib/dbg.Backtrace +// - Flush to flush pending log data +// - panic, os.Exit or returning to the caller with an error +// // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(severity.FatalLog, globalLogger, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(severity.FatalLog, globalLogger, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) } // FatallnDepth acts as Fatalln but uses depth to determine which call frame to log. // FatallnDepth(0, "msg") is the same as Fatalln("msg"). func FatallnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) } // FatalfDepth acts as Fatalf but uses depth to determine which call frame to log. // FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...). func FatalfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. @@ -1530,41 +1635,41 @@ var fatalNoStacks uint32 // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(severity.FatalLog, globalLogger, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) 
} // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(severity.FatalLog, globalLogger, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) } // ExitlnDepth acts as Exitln but uses depth to determine which call frame to log. // ExitlnDepth(0, "msg") is the same as Exitln("msg"). func ExitlnDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) } // ExitfDepth acts as Exitf but uses depth to determine which call frame to log. // ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...). func ExitfDepth(depth int, format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // LogFilter is a collection of functions that can filter all logging calls, diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index 351d7a740..027a4014a 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -43,11 +43,11 @@ func (l *klogger) Init(info logr.RuntimeInfo) { } func (l klogger) Info(level int, msg string, kvList ...interface{}) { - trimmed := serialize.TrimDuplicates(l.values, kvList) + merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } - V(Level(level)).InfoSDepth(l.callDepth+1, msg, append(trimmed[0], trimmed[1]...)...) + V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } func (l klogger) Enabled(level int) bool { @@ -55,11 +55,11 @@ func (l klogger) Enabled(level int) bool { } func (l klogger) Error(err error, msg string, kvList ...interface{}) { - trimmed := serialize.TrimDuplicates(l.values, kvList) + merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } - ErrorSDepth(l.callDepth+1, err, msg, append(trimmed[0], trimmed[1]...)...) + ErrorSDepth(l.callDepth+1, err, msg, merged...) } // WithName returns a new logr.Logger with the specified name appended. klogr diff --git a/vendor/k8s.io/utils/exec/exec.go b/vendor/k8s.io/utils/exec/exec.go index 96bec01ca..d9c91e3ca 100644 --- a/vendor/k8s.io/utils/exec/exec.go +++ b/vendor/k8s.io/utils/exec/exec.go @@ -19,6 +19,7 @@ package exec import ( "context" "io" + "io/fs" osexec "os/exec" "syscall" "time" @@ -98,17 +99,18 @@ func New() Interface { // Command is part of the Interface interface. func (executor *executor) Command(cmd string, args ...string) Cmd { - return (*cmdWrapper)(osexec.Command(cmd, args...)) + return (*cmdWrapper)(maskErrDotCmd(osexec.Command(cmd, args...))) } // CommandContext is part of the Interface interface. 
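The exec.go hunk above routes command construction through maskErrDotCmd and maps fs.PathError lookup failures to ErrExecutableNotFound. A minimal consumer-side sketch; the binary name is made up:

    package main

    import (
        "errors"
        "fmt"

        "k8s.io/utils/exec"
    )

    func main() {
        runner := exec.New()
        out, err := runner.Command("definitely-not-installed").CombinedOutput()
        if errors.Is(err, exec.ErrExecutableNotFound) {
            fmt.Println("executable not found on PATH")
            return
        }
        fmt.Printf("output=%q err=%v\n", out, err)
    }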
func (executor *executor) CommandContext(ctx context.Context, cmd string, args ...string) Cmd { - return (*cmdWrapper)(osexec.CommandContext(ctx, cmd, args...)) + return (*cmdWrapper)(maskErrDotCmd(osexec.CommandContext(ctx, cmd, args...))) } // LookPath is part of the Interface interface func (executor *executor) LookPath(file string) (string, error) { - return osexec.LookPath(file) + path, err := osexec.LookPath(file) + return path, handleError(maskErrDot(err)) } // Wraps exec.Cmd so we can capture errors. @@ -198,6 +200,8 @@ func handleError(err error) error { switch e := err.(type) { case *osexec.ExitError: return &ExitErrorWrapper{e} + case *fs.PathError: + return ErrExecutableNotFound case *osexec.Error: if e.Err == osexec.ErrNotFound { return ErrExecutableNotFound diff --git a/vendor/k8s.io/utils/exec/fixup_go118.go b/vendor/k8s.io/utils/exec/fixup_go118.go new file mode 100644 index 000000000..acf45f1cd --- /dev/null +++ b/vendor/k8s.io/utils/exec/fixup_go118.go @@ -0,0 +1,32 @@ +//go:build !go1.19 +// +build !go1.19 + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + osexec "os/exec" +) + +func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd { + return cmd +} + +func maskErrDot(err error) error { + return err +} diff --git a/vendor/k8s.io/utils/exec/fixup_go119.go b/vendor/k8s.io/utils/exec/fixup_go119.go new file mode 100644 index 000000000..55874c929 --- /dev/null +++ b/vendor/k8s.io/utils/exec/fixup_go119.go @@ -0,0 +1,40 @@ +//go:build go1.19 +// +build go1.19 + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + "errors" + osexec "os/exec" +) + +// maskErrDotCmd reverts the behavior of osexec.Cmd to what it was before go1.19 +// specifically set the Err field to nil (LookPath returns a new error when the file +// is resolved to the current directory. 
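The maskErrDot helpers defined just below clear osexec.ErrDot so relative-PATH lookups keep their pre-Go-1.19 behavior. Purely as an illustration of the stdlib behavior being masked, with a hypothetical binary name and requiring Go 1.19 or newer:

    package main

    import (
        "errors"
        "fmt"
        osexec "os/exec"
    )

    func main() {
        // On Go 1.19+, resolving a bare name through a relative PATH entry (such as ".")
        // sets cmd.Err to exec.ErrDot; maskErrDotCmd clears exactly this error.
        cmd := osexec.Command("prog")
        if errors.Is(cmd.Err, osexec.ErrDot) {
            cmd.Err = nil
        }
        fmt.Println(cmd.Path, cmd.Err)
    }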
diff --git a/vendor/k8s.io/utils/exec/fixup_go119.go b/vendor/k8s.io/utils/exec/fixup_go119.go
new file mode 100644
index 000000000..55874c929
--- /dev/null
+++ b/vendor/k8s.io/utils/exec/fixup_go119.go
@@ -0,0 +1,40 @@
+//go:build go1.19
+// +build go1.19
+
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"errors"
+	osexec "os/exec"
+)
+
+// maskErrDotCmd reverts the behavior of osexec.Cmd to what it was before go1.19
+// specifically set the Err field to nil (LookPath returns a new error when the file
+// is resolved to the current directory.
+func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd {
+	cmd.Err = maskErrDot(cmd.Err)
+	return cmd
+}
+
+func maskErrDot(err error) error {
+	if err != nil && errors.Is(err, osexec.ErrDot) {
+		return nil
+	}
+	return err
+}
diff --git a/vendor/k8s.io/utils/exec/testing/fake_exec.go b/vendor/k8s.io/utils/exec/testing/fake_exec.go
index 738068925..7c125a6b4 100644
--- a/vendor/k8s.io/utils/exec/testing/fake_exec.go
+++ b/vendor/k8s.io/utils/exec/testing/fake_exec.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"sync"
 
 	"k8s.io/utils/exec"
 )
@@ -32,11 +33,12 @@ type FakeExec struct {
 	// ExactOrder enforces that commands are called in the order they are scripted,
 	// and with the exact same arguments
 	ExactOrder bool
-	// DisableScripts removes the requirement that a slice of FakeCommandAction be
-	// populated before calling Command(). This makes the fakeexec (and subsequent
-	// calls to Run() or CombinedOutput() always return success and there is no
-	// ability to set their output.
+	// DisableScripts removes the requirement that CommandScripts be populated
+	// before calling Command(). This makes Command() and subsequent calls to
+	// Run() or CombinedOutput() always return success and empty output.
 	DisableScripts bool
+
+	mu sync.Mutex
 }
 
 var _ exec.Interface = &FakeExec{}
@@ -44,18 +46,15 @@ var _ exec.Interface = &FakeExec{}
 // FakeCommandAction is the function to be executed
 type FakeCommandAction func(cmd string, args ...string) exec.Cmd
 
-// Command is to track the commands that are executed
+// Command returns the next unexecuted command in CommandScripts.
+// This function is safe for concurrent access as long as the underlying
+// FakeExec struct is not modified during execution.
 func (fake *FakeExec) Command(cmd string, args ...string) exec.Cmd {
 	if fake.DisableScripts {
 		fakeCmd := &FakeCmd{DisableScripts: true}
 		return InitFakeCmd(fakeCmd, cmd, args...)
 	}
-	if fake.CommandCalls > len(fake.CommandScript)-1 {
-		panic(fmt.Sprintf("ran out of Command() actions. Could not handle command [%d]: %s args: %v", fake.CommandCalls, cmd, args))
-	}
-	i := fake.CommandCalls
-	fake.CommandCalls++
-	fakeCmd := fake.CommandScript[i](cmd, args...)
+	fakeCmd := fake.nextCommand(cmd, args)
 	if fake.ExactOrder {
 		argv := append([]string{cmd}, args...)
 		fc := fakeCmd.(*FakeCmd)
@@ -74,6 +73,18 @@ func (fake *FakeExec) Command(cmd string, args ...string) exec.Cmd {
 	return fakeCmd
 }
 
+func (fake *FakeExec) nextCommand(cmd string, args []string) exec.Cmd {
+	fake.mu.Lock()
+	defer fake.mu.Unlock()
+
+	if fake.CommandCalls > len(fake.CommandScript)-1 {
+		panic(fmt.Sprintf("ran out of Command() actions. Could not handle command [%d]: %s args: %v", fake.CommandCalls, cmd, args))
+	}
+	i := fake.CommandCalls
+	fake.CommandCalls++
+	return fake.CommandScript[i](cmd, args...)
+}
+
 // CommandContext wraps arguments into exec.Cmd
 func (fake *FakeExec) CommandContext(ctx context.Context, cmd string, args ...string) exec.Cmd {
 	return fake.Command(cmd, args...)
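The fake_exec.go change moves the scripted-command bookkeeping into nextCommand behind a mutex, so concurrent Command() callers no longer race on CommandCalls. A small usage sketch follows, assuming the usual FakeCmd/FakeAction helpers from k8s.io/utils/exec/testing; the command and its output are made up.

package main

import (
	"fmt"

	"k8s.io/utils/exec"
	fakeexec "k8s.io/utils/exec/testing"
)

func main() {
	// Script one command whose CombinedOutput returns canned bytes.
	fcmd := fakeexec.FakeCmd{
		CombinedOutputScript: []fakeexec.FakeAction{
			func() ([]byte, []byte, error) { return []byte("512\n"), nil, nil },
		},
	}
	fake := &fakeexec.FakeExec{
		CommandScript: []fakeexec.FakeCommandAction{
			func(cmd string, args ...string) exec.Cmd {
				return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
			},
		},
	}

	// Command() now consumes CommandScript entries under the mutex, so parallel
	// test goroutines each receive a distinct scripted command.
	out, err := fake.Command("blockdev", "--getbsz", "/dev/fake").CombinedOutput()
	fmt.Println(string(out), err)
}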
diff --git a/vendor/k8s.io/utils/strings/slices/slices.go b/vendor/k8s.io/utils/strings/slices/slices.go
new file mode 100644
index 000000000..8e21838f2
--- /dev/null
+++ b/vendor/k8s.io/utils/strings/slices/slices.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package slices defines various functions useful with slices of string type.
+// The goal is to be as close as possible to
+// https://github.com/golang/go/issues/45955. Ideal would be if we can just
+// replace "stringslices" if the "slices" package becomes standard.
+package slices
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in index order, and the
+// comparison stops at the first unequal pair.
+func Equal(s1, s2 []string) bool {
+	if len(s1) != len(s2) {
+		return false
+	}
+	for i, n := range s1 {
+		if n != s2[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Filter appends to d each element e of s for which keep(e) returns true.
+// It returns the modified d. d may be s[:0], in which case the kept
+// elements will be stored in the same slice.
+// if the slices overlap in some other way, the results are unspecified.
+// To create a new slice with the filtered results, pass nil for d.
+func Filter(d, s []string, keep func(string) bool) []string {
+	for _, n := range s {
+		if keep(n) {
+			d = append(d, n)
+		}
+	}
+	return d
+}
+
+// Contains reports whether v is present in s.
+func Contains(s []string, v string) bool {
+	return Index(s, v) >= 0
+}
+
+// Index returns the index of the first occurrence of v in s, or -1 if
+// not present.
+func Index(s []string, v string) int {
+	// "Contains" may be replaced with "Index(s, v) >= 0":
+	// https://github.com/golang/go/issues/45955#issuecomment-873377947
+	for i, n := range s {
+		if n == v {
+			return i
+		}
+	}
+	return -1
+}
+
+// Functions below are not in https://github.com/golang/go/issues/45955
+
+// Clone returns a new clone of s.
+func Clone(s []string) []string {
+	// https://github.com/go101/go101/wiki/There-is-not-a-perfect-way-to-clone-slices-in-Go
+	if s == nil {
+		return nil
+	}
+	c := make([]string, len(s))
+	copy(c, s)
+	return c
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f50ba11d9..07149465b 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -615,11 +615,12 @@ k8s.io/client-go/util/workqueue
 ## explicit; go 1.16
 k8s.io/component-base/metrics
 k8s.io/component-base/version
-# k8s.io/klog/v2 v2.60.1
+# k8s.io/klog/v2 v2.80.1
 ## explicit; go 1.13
 k8s.io/klog/v2
 k8s.io/klog/v2/internal/buffer
 k8s.io/klog/v2/internal/clock
+k8s.io/klog/v2/internal/dbg
 k8s.io/klog/v2/internal/serialize
 k8s.io/klog/v2/internal/severity
 # k8s.io/kubernetes v1.22.0
@@ -635,14 +636,15 @@ k8s.io/test-infra/boskos/common
 k8s.io/test-infra/boskos/storage
 k8s.io/test-infra/prow/config/secret
 k8s.io/test-infra/prow/logrusutil
-# k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9
-## explicit; go 1.12
+# k8s.io/utils v0.0.0-20230711102312-30195339c3c7
+## explicit; go 1.18
 k8s.io/utils/exec
 k8s.io/utils/exec/testing
 k8s.io/utils/integer
 k8s.io/utils/io
 k8s.io/utils/keymutex
 k8s.io/utils/path
+k8s.io/utils/strings/slices
 # sigs.k8s.io/structured-merge-diff/v4 v4.1.2
 ## explicit; go 1.13
 sigs.k8s.io/structured-merge-diff/v4/fieldpath
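The newly vendored k8s.io/utils/strings/slices package above supplies the string-slice helpers imported by pkg/gce-pd-csi-driver/controller.go in this patch. Below is only an illustrative sketch of the helpers; the zone names are invented and this is not the driver's actual filtering code.

package main

import (
	"fmt"
	"strings"

	"k8s.io/utils/strings/slices"
)

func main() {
	zones := []string{"us-central1-a", "us-central1-b", "europe-west1-b"}

	// Contains replaces hand-rolled membership loops.
	fmt.Println(slices.Contains(zones, "us-central1-b")) // true

	// Filter keeps elements matching the predicate; passing nil for d
	// allocates a fresh result slice.
	sameRegion := slices.Filter(nil, zones, func(z string) bool {
		return strings.HasPrefix(z, "us-central1-")
	})
	fmt.Println(sameRegion) // [us-central1-a us-central1-b]
}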