@@ -47,8 +47,8 @@ type GCEControllerServer struct {
 	CloudProvider gce.GCECompute
 	Metrics metrics.MetricsManager
 
-	disks []*compute.Disk
-	seen  map[string]int
+	volumeEntries     []*csi.ListVolumesResponse_Entry
+	volumeEntriesSeen map[string]int
 
 	snapshots      []*csi.ListSnapshotsResponse_Entry
 	snapshotTokens map[string]int
@@ -603,14 +603,24 @@ func (gceCS *GCEControllerServer) executeControllerPublishVolume(ctx context.Con
 		PublishContext: nil,
 	}
 
-	project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey)
+	instanceZone, instanceName, err := common.NodeIDToZoneAndName(nodeID)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "could not split nodeID: %v", err.Error()), nil
+	}
+
+	project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKeyForZone(ctx, project, volKey, instanceZone)
 	if err != nil {
 		if gce.IsGCENotFoundError(err) {
 			return nil, status.Errorf(codes.NotFound, "ControllerPublishVolume could not find volume with ID %v: %v", volumeID, err.Error()), nil
 		}
 		return nil, common.LoggedError("ControllerPublishVolume error repairing underspecified volume key: ", err), nil
 	}
 
+	// Only allow read-only attachment for "multi-zone" volumes
+	if volKey.Type() == meta.Zonal && volKey.Zone == common.MultiZoneValue && !readOnly {
+		return nil, status.Errorf(codes.InvalidArgument, "'multi-zone' volume %v only supports READ_ONLY", volumeID), nil
+	}
+
 
 	// Acquires the lock for the volume on that node only, because we need to support the ability
 	// to publish the same volume onto different nodes concurrently
 	lockingVolumeID := fmt.Sprintf("%s/%s", nodeID, volumeID)
@@ -625,10 +635,6 @@ func (gceCS *GCEControllerServer) executeControllerPublishVolume(ctx context.Con
 		}
 		return nil, common.LoggedError("Failed to getDisk: ", err), disk
 	}
-	instanceZone, instanceName, err := common.NodeIDToZoneAndName(nodeID)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "could not split nodeID: %v", err.Error()), disk
-	}
 	instance, err := gceCS.CloudProvider.GetInstanceOrError(ctx, instanceZone, instanceName)
 	if err != nil {
 		if gce.IsGCENotFoundError(err) {
@@ -739,7 +745,13 @@ func (gceCS *GCEControllerServer) executeControllerUnpublishVolume(ctx context.C
 
 	volumeID := req.GetVolumeId()
 	nodeID := req.GetNodeId()
-	project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey)
+
+	instanceZone, instanceName, err := common.NodeIDToZoneAndName(nodeID)
+	if err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "could not split nodeID: %v", err.Error()), nil
+	}
+
+	project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKeyForZone(ctx, project, volKey, instanceZone)
 	if err != nil {
 		if gce.IsGCENotFoundError(err) {
 			klog.Warningf("Treating volume %v as unpublished because it could not be found", volumeID)
@@ -756,10 +768,6 @@ func (gceCS *GCEControllerServer) executeControllerUnpublishVolume(ctx context.C
 	}
 	defer gceCS.volumeLocks.Release(lockingVolumeID)
 	diskToUnpublish, _ := gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionV1)
-	instanceZone, instanceName, err := common.NodeIDToZoneAndName(nodeID)
-	if err != nil {
-		return nil, status.Errorf(codes.InvalidArgument, "could not split nodeID: %v", err.Error()), diskToUnpublish
-	}
 	instance, err := gceCS.CloudProvider.GetInstanceOrError(ctx, instanceZone, instanceName)
 	if err != nil {
 		if gce.IsGCENotFoundError(err) {
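Note on the two hunks above: both the attach and detach paths now parse the instance zone out of the CSI node ID before repairing the volume key, so the repair can be scoped to that zone. As a rough illustration of what a helper like common.NodeIDToZoneAndName does, here is a minimal, self-contained Go sketch; the node ID layout (projects/{project}/zones/{zone}/instances/{instance}) and the helper body are assumptions made for the example, not the driver's actual implementation.

package main

import (
	"fmt"
	"strings"
)

// nodeIDToZoneAndName is a hypothetical stand-in for common.NodeIDToZoneAndName.
// It assumes node IDs of the form "projects/{project}/zones/{zone}/instances/{instance}".
func nodeIDToZoneAndName(nodeID string) (string, string, error) {
	parts := strings.Split(nodeID, "/")
	if len(parts) != 6 || parts[0] != "projects" || parts[2] != "zones" || parts[4] != "instances" {
		return "", "", fmt.Errorf("could not split node ID %q into zone and instance name", nodeID)
	}
	return parts[3], parts[5], nil
}

func main() {
	zone, name, err := nodeIDToZoneAndName("projects/my-project/zones/us-central1-a/instances/my-node")
	if err != nil {
		panic(err)
	}
	fmt.Println(zone, name) // prints: us-central1-a my-node
}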
@@ -810,6 +818,7 @@ func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context
 	if err != nil {
 		return nil, status.Errorf(codes.InvalidArgument, "Volume ID is invalid: %v", err.Error())
 	}
+
 	project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey)
 	if err != nil {
 		if gce.IsGCENotFoundError(err) {
@@ -879,8 +888,9 @@ func (gceCS *GCEControllerServer) ListVolumes(ctx context.Context, req *csi.List
 			"ListVolumes got max entries request %v. GCE only supports values >0", req.MaxEntries)
 	}
 
-	offset := 0
+	offsetLow := 0
 	var ok bool
+	var volumeEntries []*csi.ListVolumesResponse_Entry
 	if req.StartingToken == "" {
 		diskList, _, err := gceCS.CloudProvider.ListDisks(ctx)
 		if err != nil {
@@ -889,10 +899,14 @@ func (gceCS *GCEControllerServer) ListVolumes(ctx context.Context, req *csi.List
 			}
 			return nil, common.LoggedError("Failed to list disk: ", err)
 		}
-		gceCS.disks = diskList
-		gceCS.seen = map[string]int{}
+		volumeEntries = disksToVolumeEntries(diskList)
+	}
+
+	if req.StartingToken == "" {
+		gceCS.volumeEntries = volumeEntries
+		gceCS.volumeEntriesSeen = map[string]int{}
 	} else {
-		offset, ok = gceCS.seen[req.StartingToken]
+		offsetLow, ok = gceCS.volumeEntriesSeen[req.StartingToken]
 		if !ok {
 			return nil, status.Errorf(codes.Aborted, "ListVolumes error with invalid startingToken: %s", req.StartingToken)
 		}
@@ -903,9 +917,50 @@ func (gceCS *GCEControllerServer) ListVolumes(ctx context.Context, req *csi.List
 		maxEntries = maxListVolumesResponseEntries
 	}
 
+	nextToken := ""
+	offsetHigh := offsetLow + maxEntries
+	if offsetHigh < len(gceCS.volumeEntries) {
+		nextToken = string(uuid.NewUUID())
+		gceCS.volumeEntriesSeen[nextToken] = offsetHigh
+	} else {
+		offsetHigh = len(gceCS.volumeEntries)
+	}
+
+	return &csi.ListVolumesResponse{
+		Entries:   gceCS.volumeEntries[offsetLow:offsetHigh],
+		NextToken: nextToken,
+	}, nil
+}
+
+// isMultiZoneDisk returns the multi-zone volumeId of a disk if it is
+// "multi-zone", otherwise returns an empty string.
+// The second return value indicates whether it is a "multi-zone" disk.
+func isMultiZoneDisk(diskRsrc string, diskLabels map[string]string) (string, bool) {
+	isMultiZoneDisk := false
+	for l := range diskLabels {
+		if l == common.MultiZoneLabel {
+			isMultiZoneDisk = true
+		}
+	}
+	if !isMultiZoneDisk {
+		return "", false
+	}
+
+	multiZoneVolumeId, err := common.VolumeIdAsMultiZone(diskRsrc)
+	if err != nil {
+		klog.Warningf("Error converting multi-zone volume handle for disk %s, skipped: %v", diskRsrc, err)
+		return "", false
+	}
+	return multiZoneVolumeId, true
+}
+
+// disksToVolumeEntries converts a list of disks to a list of CSI ListVolumesResponse entries.
+// It appends "multi-zone" volumeHandles at the end. These are volumeHandles which
+// map to multiple volumeHandles in different zones.
+func disksToVolumeEntries(disks []*compute.Disk) []*csi.ListVolumesResponse_Entry {
+	multiZoneNodesByVolumeId := map[string][]string{}
 	entries := []*csi.ListVolumesResponse_Entry{}
-	for i := 0; i+offset < len(gceCS.disks) && i < maxEntries; i++ {
-		d := gceCS.disks[i+offset]
+	for _, d := range disks {
 		diskRsrc, err := getResourceId(d.SelfLink)
 		if err != nil {
 			klog.Warningf("Bad ListVolumes disk resource %s, skipped: %v (%+v)", d.SelfLink, err, d)
@@ -920,6 +975,13 @@ func (gceCS *GCEControllerServer) ListVolumes(ctx context.Context, req *csi.List
 				users = append(users, rsrc)
 			}
 		}
+		if multiZoneVolumeId, isMultiZone := isMultiZoneDisk(diskRsrc, d.Labels); isMultiZone {
+			_, ok := multiZoneNodesByVolumeId[multiZoneVolumeId]
+			if !ok {
+				multiZoneNodesByVolumeId[multiZoneVolumeId] = []string{}
+			}
+			multiZoneNodesByVolumeId[multiZoneVolumeId] = append(multiZoneNodesByVolumeId[multiZoneVolumeId], users...)
+		}
 		entries = append(entries, &csi.ListVolumesResponse_Entry{
 			Volume: &csi.Volume{
 				VolumeId: diskRsrc,
@@ -929,17 +991,17 @@ func (gceCS *GCEControllerServer) ListVolumes(ctx context.Context, req *csi.List
 			},
 		})
 	}
-
-	nextToken := ""
-	if len(entries)+offset < len(gceCS.disks) {
-		nextToken = string(uuid.NewUUID())
-		gceCS.seen[nextToken] = len(entries) + offset
+	for volumeId, nodeIds := range multiZoneNodesByVolumeId {
+		entries = append(entries, &csi.ListVolumesResponse_Entry{
+			Volume: &csi.Volume{
+				VolumeId: volumeId,
+			},
+			Status: &csi.ListVolumesResponse_VolumeStatus{
+				PublishedNodeIds: nodeIds,
+			},
+		})
 	}
-
-	return &csi.ListVolumesResponse{
-		Entries:   entries,
-		NextToken: nextToken,
-	}, nil
+	return entries
 }
 
 func (gceCS *GCEControllerServer) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
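For context on the ListVolumes paging in the hunks above: the server caches the full entry list, and each NextToken is a random UUID that maps back to the offset where the next page begins. The sketch below is a standalone illustration of that token-to-offset pattern; the paginator type and its page method are made-up names, and it uses github.com/google/uuid in place of the k8s.io/apimachinery UUID helper the driver imports.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// paginator mirrors the pattern above: cache the full result set once, and hand out
// a random token per page that maps back to the offset where the next page starts.
type paginator struct {
	entries []string
	seen    map[string]int
}

// page returns up to maxEntries entries starting at the offset recorded for startingToken,
// plus a next token ("" once the listing is exhausted).
func (p *paginator) page(startingToken string, maxEntries int) ([]string, string, error) {
	offsetLow := 0
	if startingToken != "" {
		offset, ok := p.seen[startingToken]
		if !ok {
			return nil, "", fmt.Errorf("invalid starting token %q", startingToken)
		}
		offsetLow = offset
	}
	offsetHigh := offsetLow + maxEntries
	nextToken := ""
	if offsetHigh < len(p.entries) {
		nextToken = uuid.NewString()
		p.seen[nextToken] = offsetHigh
	} else {
		offsetHigh = len(p.entries)
	}
	return p.entries[offsetLow:offsetHigh], nextToken, nil
}

func main() {
	p := &paginator{entries: []string{"disk-a", "disk-b", "disk-c", "disk-d", "disk-e"}, seen: map[string]int{}}
	token := ""
	for {
		page, next, err := p.page(token, 2)
		if err != nil {
			panic(err)
		}
		fmt.Println(page)
		if next == "" {
			break
		}
		token = next
	}
}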