@@ -30,9 +30,11 @@ import (
 
 	csi "github.com/container-storage-interface/spec/lib/go/csi"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog/v2"
 	"k8s.io/mount-utils"
 
+	"k8s.io/client-go/kubernetes"
 	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
 	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/deviceutils"
 	metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
@@ -49,6 +51,8 @@ type GCENodeServer struct {
 	EnableDataCache          bool
 	DataCacheEnabledNodePool bool
 
+	KubeClient *kubernetes.Clientset
+
 	// A map storing all volumes with ongoing operations so that additional operations
 	// for that same volume (as defined by VolumeID) return an Aborted error
 	volumeLocks *common.VolumeLocks
@@ -84,6 +88,9 @@ type NodeServerArgs struct {
 	EnableDataCache bool
 
 	DataCacheEnabledNodePool bool
+
+	// Kubernetes client for API server interactions
+	KubeClient *kubernetes.Clientset
 }
 
 var _ csi.NodeServer = &GCENodeServer{}
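
Nothing in this diff shows how the new KubeClient field gets populated. A minimal sketch, assuming the node plugin runs in-cluster and authenticates with its pod service account (the helper name below is hypothetical and not part of this change):

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// newNodeKubeClient is a hypothetical helper: it builds a Clientset from the
// pod's in-cluster service account credentials, which could then be passed
// into NodeServerArgs.KubeClient. The actual PR may wire the client up
// differently (e.g. from a --kubeconfig flag).
func newNodeKubeClient() (*kubernetes.Clientset, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("building in-cluster config: %w", err)
	}
	return kubernetes.NewForConfig(cfg)
}

func main() {
	client, err := newNodeKubeClient()
	if err != nil {
		fmt.Println("failed to create kube client:", err)
		return
	}
	fmt.Printf("created clientset: %T\n", client)
}

Building the client once at driver start-up and passing it through NodeServerArgs keeps the node server itself free of any kubeconfig handling.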
@@ -556,22 +563,58 @@ func (ns *GCENodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeG
 }
 
 func (ns *GCENodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
+	labels, err := ns.gkeTopologyLabels(ctx, ns.MetadataService.GetName())
+	if err != nil {
+		// Perhaps we don't want to fail here. We are introducing a new
+		// dependency and we might be better off allowing this failure to
+		// happen and moving on to retrieve the zone from GCE MDS.
+		return nil, err
+	}
+
+	// Each "Topology" struct will later be translated into an individual
+	// 'matchExpressions' block in the PV's NodeAffinity. Because we always
+	// need to match on both the zone AND the disk type, both the zone and the
+	// supported disks belong as segments on a single Topology.
 	top := &csi.Topology{
-		Segments: map[string]string{common.TopologyKeyZone: ns.MetadataService.GetZone()},
+		Segments: labels,
 	}
 
 	nodeID := common.CreateNodeID(ns.MetadataService.GetProject(), ns.MetadataService.GetZone(), ns.MetadataService.GetName())
-
 	volumeLimits, err := ns.GetVolumeLimits()
 
 	resp := &csi.NodeGetInfoResponse{
 		NodeId:             nodeID,
 		MaxVolumesPerNode:  volumeLimits,
 		AccessibleTopology: top,
 	}
+
+	klog.V(2).Infof("Returning NodeGetInfoResponse: %+v", resp)
+
 	return resp, err
 }
 
+// gkeTopologyLabels retrieves the node labels with the prefix
+// `topology.gke.io/`.
+func (ns *GCENodeServer) gkeTopologyLabels(ctx context.Context, nodeName string) (map[string]string, error) {
+	klog.V(2).Infof("Retrieving node topology labels for node %q", nodeName)
+
+	node, err := ns.KubeClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+	if err != nil {
+		// We should retry instead. Need to figure out how much wrong-ness can be tolerated and how often CSINode gets refreshed.
+		return nil, err
+	}
+
+	topology := make(map[string]string)
+	for k, v := range node.GetLabels() {
+		if common.IsGKETopologyLabel(k) {
+			klog.V(2).Infof("Including node topology label %q=%q", k, v)
+			topology[k] = v
+		}
+	}
+
+	return topology, nil
+}
+
 func (ns *GCENodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
 	if len(req.VolumeId) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume ID was empty")
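
common.IsGKETopologyLabel is referenced by the new hunk but not defined in this diff. Going only by the doc comment on gkeTopologyLabels (labels with the prefix `topology.gke.io/`), a plausible sketch of the predicate is a simple prefix check; the real helper in pkg/common may differ:

package common

import "strings"

// gkeTopologyPrefix is assumed from the gkeTopologyLabels doc comment above.
const gkeTopologyPrefix = "topology.gke.io/"

// IsGKETopologyLabel reports whether a node label key carries GKE topology
// information. Sketch only, not the implementation from this repository.
func IsGKETopologyLabel(key string) bool {
	return strings.HasPrefix(key, gkeTopologyPrefix)
}

Each label that passes this check becomes one segment of the single csi.Topology returned by NodeGetInfo, which, per the comment in the hunk above, later expands into a combined matchExpressions block (zone plus supported disk types) in the PV's NodeAffinity.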