From b2eff3fad3e47adcfaadf46debc2017c62d1a99a Mon Sep 17 00:00:00 2001
From: juliankatz
Date: Fri, 7 Feb 2025 14:10:38 -0800
Subject: [PATCH 1/5] Update NodeGetInfo to retrieve the node by its GCE MDS
 node name and add the labels starting with topology.gke.io/ to the Topology
 info returned in NodeGetInfoResponse

---
 cmd/gce-pd-csi-driver/main.go          | 18 ++++++++++-
 pkg/gce-pd-csi-driver/gce-pd-driver.go |  4 ++-
 pkg/gce-pd-csi-driver/node.go          | 44 ++++++++++++++++++++++++--
 pkg/gce-pd-csi-driver/node_test.go     |  6 ++--
 test/sanity/sanity_test.go             |  2 +-
 5 files changed, 66 insertions(+), 8 deletions(-)

diff --git a/cmd/gce-pd-csi-driver/main.go b/cmd/gce-pd-csi-driver/main.go
index 2447572bc..9406639fb 100644
--- a/cmd/gce-pd-csi-driver/main.go
+++ b/cmd/gce-pd-csi-driver/main.go
@@ -27,6 +27,8 @@ import (
 	"strings"
 	"time"
 
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/strings/slices"
 
@@ -76,6 +78,7 @@ var (
 	fallbackRequisiteZonesFlag = flag.String("fallback-requisite-zones", "", "Comma separated list of requisite zones that will be used if there are not sufficient zones present in requisite topologies when provisioning a disk")
 	enableStoragePoolsFlag     = flag.Bool("enable-storage-pools", false, "If set to true, the CSI Driver will allow volumes to be provisioned in Storage Pools")
 	enableHdHAFlag             = flag.Bool("allow-hdha-provisioning", false, "If set to true, will allow the driver to provision Hyperdisk-balanced High Availability disks")
+	nodeName                   = flag.String("node-name", "", "The name of the node this driver is running on")
 
 	multiZoneVolumeHandleDiskTypesFlag = flag.String("multi-zone-volume-handle-disk-types", "", "Comma separated list of allowed disk types that can use the multi-zone volumeHandle. Used only if --multi-zone-volume-handle-enable")
 	multiZoneVolumeHandleEnableFlag    = flag.Bool("multi-zone-volume-handle-enable", false, "If set to true, the multi-zone volumeHandle feature will be enabled")
 
@@ -244,11 +247,24 @@ func handle() {
 	if err != nil {
 		klog.Fatalf("Failed to set up metadata service: %v", err.Error())
 	}
+
+	// Instantiate a kubeClient and pass it to the node server.
+ klog.V(2).Infof("Setting up kubeClient") + cfg, err := rest.InClusterConfig() + if err != nil { + klog.Fatalf("Failed to create REST Config for k8s client: %v", err.Error()) + } + kubeClient, err := kubernetes.NewForConfig(cfg) + if err != nil { + klog.Fatalf("Failed to create k8s client: %v", err.Error()) + } + nsArgs := driver.NodeServerArgs{ EnableDeviceInUseCheck: *enableDeviceInUseCheck, DeviceInUseTimeout: *deviceInUseTimeout, } - nodeServer = driver.NewNodeServer(gceDriver, mounter, deviceUtils, meta, statter, nsArgs) + + nodeServer = driver.NewNodeServer(gceDriver, mounter, deviceUtils, meta, statter, kubeClient, nsArgs) if *maxConcurrentFormatAndMount > 0 { nodeServer = nodeServer.WithSerializedFormatAndMount(*formatAndMountTimeout, *maxConcurrentFormatAndMount) } diff --git a/pkg/gce-pd-csi-driver/gce-pd-driver.go b/pkg/gce-pd-csi-driver/gce-pd-driver.go index 9133ce035..156ed57a8 100644 --- a/pkg/gce-pd-csi-driver/gce-pd-driver.go +++ b/pkg/gce-pd-csi-driver/gce-pd-driver.go @@ -21,6 +21,7 @@ import ( csi "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" "k8s.io/mount-utils" common "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common" @@ -145,7 +146,7 @@ func NewIdentityServer(gceDriver *GCEDriver) *GCEIdentityServer { } } -func NewNodeServer(gceDriver *GCEDriver, mounter *mount.SafeFormatAndMount, deviceUtils deviceutils.DeviceUtils, meta metadataservice.MetadataService, statter mountmanager.Statter, args NodeServerArgs) *GCENodeServer { +func NewNodeServer(gceDriver *GCEDriver, mounter *mount.SafeFormatAndMount, deviceUtils deviceutils.DeviceUtils, meta metadataservice.MetadataService, statter mountmanager.Statter, kubeClient *kubernetes.Clientset, args NodeServerArgs) *GCENodeServer { return &GCENodeServer{ Driver: gceDriver, Mounter: mounter, @@ -155,6 +156,7 @@ func NewNodeServer(gceDriver *GCEDriver, mounter *mount.SafeFormatAndMount, devi VolumeStatter: statter, enableDeviceInUseCheck: args.EnableDeviceInUseCheck, deviceInUseErrors: newDeviceErrMap(args.DeviceInUseTimeout), + kubeClient: kubeClient, } } diff --git a/pkg/gce-pd-csi-driver/node.go b/pkg/gce-pd-csi-driver/node.go index 7bc934036..47708ff02 100644 --- a/pkg/gce-pd-csi-driver/node.go +++ b/pkg/gce-pd-csi-driver/node.go @@ -23,6 +23,7 @@ import ( "regexp" "runtime" "strconv" + "strings" "time" "google.golang.org/grpc/codes" @@ -30,6 +31,8 @@ import ( csi "github.com/container-storage-interface/spec/lib/go/csi" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" "k8s.io/mount-utils" @@ -40,6 +43,8 @@ import ( "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/resizefs" ) +const gkeTopologyLabelPrefix = "topology.gke.io/" + type GCENodeServer struct { Driver *GCEDriver Mounter *mount.SafeFormatAndMount @@ -47,6 +52,8 @@ type GCENodeServer struct { VolumeStatter mountmanager.Statter MetadataService metadataservice.MetadataService + kubeClient *kubernetes.Clientset + // A map storing all volumes with ongoing operations so that additional operations // for that same volume (as defined by VolumeID) return an Aborted error volumeLocks *common.VolumeLocks @@ -520,12 +527,20 @@ func (ns *GCENodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeG } func (ns *GCENodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { + labels, err := ns.gkeTopologyLabels(ctx, 
ns.MetadataService.GetName()) + if err != nil { + // Perhaps we don't want to fail here. We are introducing a new + // dependency and we might be better off allowing this failure to + // happen and moving on to retrieve the zone from GCE MDS. + return nil, err + } + + labels[common.TopologyKeyZone] = ns.MetadataService.GetZone() top := &csi.Topology{ - Segments: map[string]string{common.TopologyKeyZone: ns.MetadataService.GetZone()}, + Segments: labels, } nodeID := common.CreateNodeID(ns.MetadataService.GetProject(), ns.MetadataService.GetZone(), ns.MetadataService.GetName()) - volumeLimits, err := ns.GetVolumeLimits() resp := &csi.NodeGetInfoResponse{ @@ -536,6 +551,31 @@ func (ns *GCENodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRe return resp, err } +// gkeTopologyLabels retrieves the node labels with the prefix +// `topology.gke.io/`. +func (ns *GCENodeServer) gkeTopologyLabels(ctx context.Context, nodeName string) (map[string]string, error) { + klog.V(2).Infof("Retrieving node topology labels for node %q", nodeName) + + node, err := ns.kubeClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + if err != nil { + // We should retry instead. Need to figure out how much wrong-ness can be tolerated and how often CSINode gets refreshed. + return nil, err + } + + topology := make(map[string]string) + for k, v := range node.GetLabels() { + if isGKETopologyLabel(k) { + topology[k] = v + } + } + + return topology, nil +} + +func isGKETopologyLabel(key string) bool { + return strings.HasPrefix(key, gkeTopologyLabelPrefix) +} + func (ns *GCENodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { if len(req.VolumeId) == 0 { return nil, status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume ID was empty") diff --git a/pkg/gce-pd-csi-driver/node_test.go b/pkg/gce-pd-csi-driver/node_test.go index d1d0920bf..1d856bdae 100644 --- a/pkg/gce-pd-csi-driver/node_test.go +++ b/pkg/gce-pd-csi-driver/node_test.go @@ -51,7 +51,7 @@ func getTestGCEDriverWithCustomMounter(t *testing.T, mounter *mount.SafeFormatAn func getCustomTestGCEDriver(t *testing.T, mounter *mount.SafeFormatAndMount, deviceUtils deviceutils.DeviceUtils, metaService metadataservice.MetadataService) *GCEDriver { gceDriver := GetGCEDriver() - nodeServer := NewNodeServer(gceDriver, mounter, deviceUtils, metaService, mountmanager.NewFakeStatter(mounter), NodeServerArgs{true, 0}) + nodeServer := NewNodeServer(gceDriver, mounter, deviceUtils, metaService, mountmanager.NewFakeStatter(mounter), nil, NodeServerArgs{true, 0}) err := gceDriver.SetupGCEDriver(driver, "test-vendor", nil, nil, nil, nil, nodeServer) if err != nil { t.Fatalf("Failed to setup GCE Driver: %v", err) @@ -62,7 +62,7 @@ func getCustomTestGCEDriver(t *testing.T, mounter *mount.SafeFormatAndMount, dev func getTestBlockingMountGCEDriver(t *testing.T, readyToExecute chan chan struct{}) *GCEDriver { gceDriver := GetGCEDriver() mounter := mountmanager.NewFakeSafeBlockingMounter(readyToExecute) - nodeServer := NewNodeServer(gceDriver, mounter, deviceutils.NewFakeDeviceUtils(false), metadataservice.NewFakeService(), mountmanager.NewFakeStatter(mounter), NodeServerArgs{true, 0}) + nodeServer := NewNodeServer(gceDriver, mounter, deviceutils.NewFakeDeviceUtils(false), metadataservice.NewFakeService(), mountmanager.NewFakeStatter(mounter), nil, NodeServerArgs{true, 0}) err := gceDriver.SetupGCEDriver(driver, "test-vendor", nil, nil, nil, nil, nodeServer) if err != nil { 
t.Fatalf("Failed to setup GCE Driver: %v", err) @@ -73,7 +73,7 @@ func getTestBlockingMountGCEDriver(t *testing.T, readyToExecute chan chan struct func getTestBlockingFormatAndMountGCEDriver(t *testing.T, readyToExecute chan chan struct{}) *GCEDriver { gceDriver := GetGCEDriver() mounter := mountmanager.NewFakeSafeBlockingMounter(readyToExecute) - nodeServer := NewNodeServer(gceDriver, mounter, deviceutils.NewFakeDeviceUtils(false), metadataservice.NewFakeService(), mountmanager.NewFakeStatter(mounter), NodeServerArgs{true, 0}).WithSerializedFormatAndMount(5*time.Second, 1) + nodeServer := NewNodeServer(gceDriver, mounter, deviceutils.NewFakeDeviceUtils(false), metadataservice.NewFakeService(), mountmanager.NewFakeStatter(mounter), nil, NodeServerArgs{true, 0}).WithSerializedFormatAndMount(5*time.Second, 1) err := gceDriver.SetupGCEDriver(driver, "test-vendor", nil, nil, nil, nil, nodeServer) if err != nil { diff --git a/test/sanity/sanity_test.go b/test/sanity/sanity_test.go index 1ff33a41b..c7f1290c5 100644 --- a/test/sanity/sanity_test.go +++ b/test/sanity/sanity_test.go @@ -77,7 +77,7 @@ func TestSanity(t *testing.T) { identityServer := driver.NewIdentityServer(gceDriver) controllerServer := driver.NewControllerServer(gceDriver, cloudProvider, 0, 5*time.Minute, fallbackRequisiteZones, enableStoragePools, multiZoneVolumeHandleConfig, listVolumesConfig, provisionableDisksConfig, true) fakeStatter := mountmanager.NewFakeStatterWithOptions(mounter, mountmanager.FakeStatterOptions{IsBlock: false}) - nodeServer := driver.NewNodeServer(gceDriver, mounter, deviceUtils, metadataservice.NewFakeService(), fakeStatter, driver.NodeServerArgs{EnableDeviceInUseCheck: true, DeviceInUseTimeout: 0}) + nodeServer := driver.NewNodeServer(gceDriver, mounter, deviceUtils, metadataservice.NewFakeService(), fakeStatter, nil, driver.NodeServerArgs{EnableDeviceInUseCheck: true, DeviceInUseTimeout: 0}) err = gceDriver.SetupGCEDriver(driverName, vendorVersion, extraLabels, nil, identityServer, controllerServer, nodeServer) if err != nil { t.Fatalf("Failed to initialize GCE CSI Driver: %v", err.Error()) From 494d2e6ccbc1a3e5c67ed68cdaeae54abeb8b294 Mon Sep 17 00:00:00 2001 From: juliankatz Date: Mon, 10 Feb 2025 13:23:27 -0800 Subject: [PATCH 2/5] Further changes --- deploy/kubernetes/deploy-driver.sh | 2 ++ .../images/stable-master/image.yaml | 4 ++-- deploy/setup-project.sh | 4 ++++ pkg/common/constants.go | 3 ++- pkg/gce-pd-csi-driver/controller.go | 22 ++++++++++++++++++- pkg/gce-pd-csi-driver/node.go | 5 +++++ 6 files changed, 36 insertions(+), 4 deletions(-) diff --git a/deploy/kubernetes/deploy-driver.sh b/deploy/kubernetes/deploy-driver.sh index 3c4a893cd..320402d73 100755 --- a/deploy/kubernetes/deploy-driver.sh +++ b/deploy/kubernetes/deploy-driver.sh @@ -54,6 +54,8 @@ function check_service_account() # Grepping for a line with client email returning anything quoted after the colon readonly IAM_NAME=$(grep -Po '"client_email": *\K"[^"]*"' "${GCE_PD_SA_DIR}/cloud-sa.json" | tr -d '"') readonly PROJECT=$(grep -Po '"project_id": *\K"[^"]*"' "${GCE_PD_SA_DIR}/cloud-sa.json" | tr -d '"') + echo "FOUND PROJECT: $PROJECT" + echo "FOUND IAM_NAME: $IAM_NAME" readonly GOTTEN_BIND_ROLES=$(gcloud projects get-iam-policy "${PROJECT}" --flatten="bindings[].members" --format='table(bindings.role)' --filter="bindings.members:${IAM_NAME}") readonly BIND_ROLES=$(get_needed_roles) MISSING_ROLES=false diff --git a/deploy/kubernetes/images/stable-master/image.yaml b/deploy/kubernetes/images/stable-master/image.yaml index 
2d2df2367..aea2b6566 100644 --- a/deploy/kubernetes/images/stable-master/image.yaml +++ b/deploy/kubernetes/images/stable-master/image.yaml @@ -51,6 +51,6 @@ imageTag: name: gke.gcr.io/gcp-compute-persistent-disk-csi-driver # Don't change stable image without changing pdImagePlaceholder in # test/k8s-integration/main.go - newName: registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver - newTag: "v1.15.0" + newName: us-central1-docker.pkg.dev/juliankatz-joonix/csi-dev/gcp-compute-persistent-disk-csi-driver + newTag: "latest" --- diff --git a/deploy/setup-project.sh b/deploy/setup-project.sh index c992dc1d1..b7cc706c0 100755 --- a/deploy/setup-project.sh +++ b/deploy/setup-project.sh @@ -72,6 +72,7 @@ then if [ "${CREATE_SA_KEY}" = true ]; then if [ -f "${GCE_PD_SA_DIR}/cloud-sa.json" ]; then + echo "REMOVING cloud-sa.json" rm "${GCE_PD_SA_DIR}/cloud-sa.json" fi fi @@ -121,7 +122,10 @@ then fi # Export key if needed +echo "EXPORT KEY?" if [ "${CREATE_SA}" = true ] && [ "${CREATE_SA_KEY}" = true ]; then + echo "WRITING KEY FILE" + echo "USER: $USER" gcloud iam service-accounts keys create "${GCE_PD_SA_DIR}/cloud-sa.json" --iam-account "${IAM_NAME}" --project "${PROJECT}" fi diff --git a/pkg/common/constants.go b/pkg/common/constants.go index 9851b11d1..7cbaa8b48 100644 --- a/pkg/common/constants.go +++ b/pkg/common/constants.go @@ -17,8 +17,9 @@ limitations under the License. package common const ( + TopologyKeyPrefix = "topology.gke.io/" // Keys for Topology. This key will be shared amongst drivers from GCP - TopologyKeyZone = "topology.gke.io/zone" + TopologyKeyZone = TopologyKeyPrefix + "zone" // VolumeAttributes for Partition VolumeAttributePartition = "partition" diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go index 117df6413..8b4e639fc 100644 --- a/pkg/gce-pd-csi-driver/controller.go +++ b/pkg/gce-pd-csi-driver/controller.go @@ -2342,11 +2342,21 @@ func extractVolumeContext(context map[string]string) (*PDCSIContext, error) { func generateCreateVolumeResponseWithVolumeId(disk *gce.CloudDisk, zones []string, params common.DiskParameters, volumeId string) *csi.CreateVolumeResponse { tops := []*csi.Topology{} + + // Parsing the disk's type field will allow this to continue to work after + // generic volumes makes PDCSI select a disk type dynamically. Does this + // also work for hyperdisk? for _, zone := range zones { tops = append(tops, &csi.Topology{ - Segments: map[string]string{common.TopologyKeyZone: zone}, + // Each segment is AND'd into a single matchable block in the + // matchExpressions field of a PV's NodeAffinity. + Segments: map[string]string{ + common.TopologyKeyZone: zone, + common.TopologyKeyPrefix + diskTypeFromDisk(disk): "true", + }, }) } + realDiskSizeBytes := common.GbToBytes(disk.GetSizeGb()) createResp := &csi.CreateVolumeResponse{ Volume: &csi.Volume{ @@ -2356,6 +2366,7 @@ func generateCreateVolumeResponseWithVolumeId(disk *gce.CloudDisk, zones []strin AccessibleTopology: tops, }, } + snapshotID := disk.GetSnapshotId() imageID := disk.GetImageId() diskID := disk.GetSourceDiskId() @@ -2390,9 +2401,18 @@ func generateCreateVolumeResponseWithVolumeId(disk *gce.CloudDisk, zones []strin } createResp.Volume.ContentSource = contentSource } + return createResp } +// diskTypeFromDisk parses the DiskType field on the disk, which returns a +// path like `projects/dummy-project/zones/us-central1-a/diskTypes/pd-ssd`, and +// returns the last token (`pd-ssd` in the example). 
+func diskTypeFromDisk(disk *gce.CloudDisk) string {
+	tokens := strings.Split(disk.GetPDType(), "/")
+	return tokens[len(tokens)-1]
+}
+
 func getResourceId(resourceLink string) (string, error) {
 	url, err := neturl.Parse(resourceLink)
 	if err != nil {
diff --git a/pkg/gce-pd-csi-driver/node.go b/pkg/gce-pd-csi-driver/node.go
index 47708ff02..1527b8895 100644
--- a/pkg/gce-pd-csi-driver/node.go
+++ b/pkg/gce-pd-csi-driver/node.go
@@ -536,6 +536,11 @@ func (ns *GCENodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRe
 	}
 
 	labels[common.TopologyKeyZone] = ns.MetadataService.GetZone()
+
+	// Each "Topology" struct will later be translated into an individual
+	// 'matchExpressions' block in the PV's NodeAffinity. Because we always
+	// need to match on both the zone AND the disk type, both the zone and the
+	// supported disk types belong as segments on a single Topology.
 	top := &csi.Topology{
 		Segments: labels,
 	}

From 22c25ee0be50bc84c84ae006262a3bcb33b45c5b Mon Sep 17 00:00:00 2001
From: juliankatz
Date: Tue, 11 Feb 2025 17:30:50 -0800
Subject: [PATCH 3/5] Add node read permission

---
 deploy/kubernetes/base/controller/cluster_setup.yaml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/deploy/kubernetes/base/controller/cluster_setup.yaml b/deploy/kubernetes/base/controller/cluster_setup.yaml
index a6941a119..e1462493f 100644
--- a/deploy/kubernetes/base/controller/cluster_setup.yaml
+++ b/deploy/kubernetes/base/controller/cluster_setup.yaml
@@ -205,6 +205,9 @@ rules:
     verbs: ['use']
     resourceNames:
     - csi-gce-pd-node-psp
+  - apiGroups: [""] # The core API group
+    resources: ["nodes"]
+    verbs: ["get", "list"]
 ---
 
 kind: ClusterRole
@@ -217,6 +220,9 @@ rules:
     verbs: ['use']
     resourceNames:
     - csi-gce-pd-node-psp-win
+  - apiGroups: [""] # The core API group
+    resources: ["nodes"]
+    verbs: ["get", "list"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1

From 0e5045ab5fade9a88f353286e821e471f9c84d86 Mon Sep 17 00:00:00 2001
From: juliankatz
Date: Tue, 11 Feb 2025 17:31:15 -0800
Subject: [PATCH 4/5] Move the GKE topology label check into pkg/common and
 revert the controller topology changes

---
 pkg/common/utils.go                 | 10 ++++++++++
 pkg/gce-pd-csi-driver/controller.go | 22 +---------------------
 pkg/gce-pd-csi-driver/node.go       | 13 +++++--------
 3 files changed, 16 insertions(+), 29 deletions(-)

diff --git a/pkg/common/utils.go b/pkg/common/utils.go
index a69d30453..e83796504 100644
--- a/pkg/common/utils.go
+++ b/pkg/common/utils.go
@@ -78,6 +78,8 @@ const (
 	// Full or partial URL of the zone resource, in the format:
 	// projects/{project}/zones/{zone}
 	zoneURIPattern = "projects/[^/]+/zones/([^/]+)$"
+
+	gkeTopologyLabelPrefix = "topology.gke.io/"
 )
 
 var (
@@ -696,3 +698,11 @@ func NewLimiter(limit, burst int, emptyBucket bool) *rate.Limiter {
 
 	return limiter
 }
+
+// IsGKETopologyLabel reports whether the given label key is a GKE
+// topology label, i.e. whether it carries the prefix `topology.gke.io/`.
+// Labels matching this check are surfaced as Topology segments in
+// NodeGetInfoResponse.
+func IsGKETopologyLabel(key string) bool {
+	return strings.HasPrefix(key, gkeTopologyLabelPrefix)
+}
diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go
index 8b4e639fc..117df6413 100644
--- a/pkg/gce-pd-csi-driver/controller.go
+++ b/pkg/gce-pd-csi-driver/controller.go
@@ -2342,21 +2342,11 @@ func extractVolumeContext(context map[string]string) (*PDCSIContext, error) {
 
 func generateCreateVolumeResponseWithVolumeId(disk *gce.CloudDisk, zones []string, params common.DiskParameters, volumeId string) *csi.CreateVolumeResponse {
 	tops := []*csi.Topology{}
-
-	// Parsing the disk's type field will allow this to continue to work after
-	// generic volumes makes PDCSI select a disk type dynamically. Does this
-	// also work for hyperdisk?
 	for _, zone := range zones {
 		tops = append(tops, &csi.Topology{
-			// Each segment is AND'd into a single matchable block in the
-			// matchExpressions field of a PV's NodeAffinity.
-			Segments: map[string]string{
-				common.TopologyKeyZone: zone,
-				common.TopologyKeyPrefix + diskTypeFromDisk(disk): "true",
-			},
+			Segments: map[string]string{common.TopologyKeyZone: zone},
 		})
 	}
-
 	realDiskSizeBytes := common.GbToBytes(disk.GetSizeGb())
 	createResp := &csi.CreateVolumeResponse{
 		Volume: &csi.Volume{
@@ -2366,7 +2356,6 @@ func generateCreateVolumeResponseWithVolumeId(disk *gce.CloudDisk, zones []strin
 			AccessibleTopology: tops,
 		},
 	}
-
 	snapshotID := disk.GetSnapshotId()
 	imageID := disk.GetImageId()
 	diskID := disk.GetSourceDiskId()
@@ -2401,18 +2390,9 @@ func generateCreateVolumeResponseWithVolumeId(disk *gce.CloudDisk, zones []strin
 		}
 		createResp.Volume.ContentSource = contentSource
 	}
-
 	return createResp
 }
 
-// diskTypeFromDisk parses the DiskType field on the disk, which holds a
-// path like `projects/dummy-project/zones/us-central1-a/diskTypes/pd-ssd`, and
-// returns the last token (`pd-ssd` in the example).
-func diskTypeFromDisk(disk *gce.CloudDisk) string {
-	tokens := strings.Split(disk.GetPDType(), "/")
-	return tokens[len(tokens)-1]
-}
-
 func getResourceId(resourceLink string) (string, error) {
 	url, err := neturl.Parse(resourceLink)
 	if err != nil {
diff --git a/pkg/gce-pd-csi-driver/node.go b/pkg/gce-pd-csi-driver/node.go
index 1527b8895..a87a8b892 100644
--- a/pkg/gce-pd-csi-driver/node.go
+++ b/pkg/gce-pd-csi-driver/node.go
@@ -23,7 +23,6 @@ import (
 	"regexp"
 	"runtime"
 	"strconv"
-	"strings"
 	"time"
 
 	"google.golang.org/grpc/codes"
@@ -43,8 +42,6 @@ import (
 	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/resizefs"
 )
 
-const gkeTopologyLabelPrefix = "topology.gke.io/"
-
 type GCENodeServer struct {
 	Driver          *GCEDriver
 	Mounter         *mount.SafeFormatAndMount
@@ -553,6 +550,9 @@ func (ns *GCENodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRe
 		MaxVolumesPerNode:  volumeLimits,
 		AccessibleTopology: top,
 	}
+
+	klog.V(2).Infof("Returning NodeGetInfoResponse: %+v", resp)
+
 	return resp, err
 }
 
@@ -569,7 +569,8 @@ func (ns *GCENodeServer) gkeTopologyLabels(ctx context.Context, nodeName string)
 
 	topology := make(map[string]string)
 	for k, v := range node.GetLabels() {
-		if isGKETopologyLabel(k) {
+		if common.IsGKETopologyLabel(k) {
+			klog.V(2).Infof("Including node topology label %q=%q", k, v)
 			topology[k] = v
 		}
 	}
@@ -577,10 +578,6 @@ func (ns *GCENodeServer) gkeTopologyLabels(ctx context.Context, nodeName string)
 	return topology, nil
 }
 
-func isGKETopologyLabel(key string) bool {
-	return strings.HasPrefix(key, gkeTopologyLabelPrefix)
-}
-
 func (ns *GCENodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
 	if len(req.VolumeId) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume ID was empty")

From 96276a563e539bd04852527babe062cd45e5abe8 Mon Sep 17 00:00:00 2001
From: juliankatz
Date: Tue, 11 Feb 2025 17:43:27 -0800
Subject: [PATCH 5/5] Revert some changes to deploy scripts

---
 deploy/kubernetes/deploy-driver.sh | 2 --
 deploy/setup-project.sh            | 4 ----
 2 files changed, 6 deletions(-)

diff --git a/deploy/kubernetes/deploy-driver.sh b/deploy/kubernetes/deploy-driver.sh
index 320402d73..3c4a893cd 100755
--- a/deploy/kubernetes/deploy-driver.sh
+++ b/deploy/kubernetes/deploy-driver.sh
@@ -54,8 +54,6 @@ function check_service_account()
   # Grepping for a line with client email returning anything quoted after the colon
   readonly IAM_NAME=$(grep -Po '"client_email": *\K"[^"]*"' "${GCE_PD_SA_DIR}/cloud-sa.json" | tr -d '"')
   readonly PROJECT=$(grep -Po '"project_id": *\K"[^"]*"' "${GCE_PD_SA_DIR}/cloud-sa.json" | tr -d '"')
-  echo "FOUND PROJECT: $PROJECT"
-  echo "FOUND IAM_NAME: $IAM_NAME"
   readonly GOTTEN_BIND_ROLES=$(gcloud projects get-iam-policy "${PROJECT}" --flatten="bindings[].members" --format='table(bindings.role)' --filter="bindings.members:${IAM_NAME}")
   readonly BIND_ROLES=$(get_needed_roles)
   MISSING_ROLES=false
diff --git a/deploy/setup-project.sh b/deploy/setup-project.sh
index b7cc706c0..c992dc1d1 100755
--- a/deploy/setup-project.sh
+++ b/deploy/setup-project.sh
@@ -72,7 +72,6 @@ then
   if [ "${CREATE_SA_KEY}" = true ]; then
     if [ -f "${GCE_PD_SA_DIR}/cloud-sa.json" ]; then
-      echo "REMOVING cloud-sa.json"
       rm "${GCE_PD_SA_DIR}/cloud-sa.json"
     fi
   fi
@@ -122,10 +121,7 @@ then
 fi
 
 # Export key if needed
-echo "EXPORT KEY?"
 if [ "${CREATE_SA}" = true ] && [ "${CREATE_SA_KEY}" = true ]; then
-  echo "WRITING KEY FILE"
-  echo "USER: $USER"
   gcloud iam service-accounts keys create "${GCE_PD_SA_DIR}/cloud-sa.json" --iam-account "${IAM_NAME}" --project "${PROJECT}"
 fi
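
Reviewer note: below is a minimal, self-contained sketch of the label filtering that NodeGetInfo performs after this series, exercised against a fake clientset. It is not part of the series: it assumes the node server's kubeClient field is widened from *kubernetes.Clientset to the kubernetes.Interface that type already satisfies (the concrete type cannot be replaced by a fake), and the node name "example-node" and the label "topology.gke.io/example-disk-support" are invented for illustration.

package main

import (
	"context"
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

const gkeTopologyLabelPrefix = "topology.gke.io/"

// gkeTopologyLabels mirrors the helper added in this series, with the client
// parameter widened to kubernetes.Interface so a fake clientset can stand in.
func gkeTopologyLabels(ctx context.Context, client kubernetes.Interface, nodeName string) (map[string]string, error) {
	node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// Keep only the labels carrying the GKE topology prefix.
	topology := make(map[string]string)
	for k, v := range node.GetLabels() {
		if strings.HasPrefix(k, gkeTopologyLabelPrefix) {
			topology[k] = v
		}
	}
	return topology, nil
}

func main() {
	// A fake node carrying one GKE topology label and one unrelated label;
	// both the node name and the label key are hypothetical.
	client := fake.NewSimpleClientset(&corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-node",
			Labels: map[string]string{
				"topology.gke.io/example-disk-support": "true",
				"kubernetes.io/hostname":               "example-node",
			},
		},
	})

	labels, err := gkeTopologyLabels(context.Background(), client, "example-node")
	if err != nil {
		panic(err)
	}
	// Prints only the prefixed label:
	// map[topology.gke.io/example-disk-support:true]
	fmt.Println(labels)
}

Accepting kubernetes.Interface rather than *kubernetes.Clientset would also let the unit tests and sanity test in PATCH 1 inject a fake clientset instead of passing nil.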