From 0b7121d0a644b024986a85f785260002b7764788 Mon Sep 17 00:00:00 2001
From: Peter Schuurman
Date: Mon, 4 Dec 2023 13:53:43 -0800
Subject: [PATCH] Add --fallback-requisite-zones command line argument

---
 cmd/gce-pd-csi-driver/main.go               |   8 +-
 pkg/gce-pd-csi-driver/controller.go         |  67 +++--
 pkg/gce-pd-csi-driver/controller_test.go    | 282 ++++++++++++++------
 pkg/gce-pd-csi-driver/gce-pd-driver.go      |  13 +-
 pkg/gce-pd-csi-driver/gce-pd-driver_test.go |   3 +-
 test/sanity/sanity_test.go                  |   4 +-
 6 files changed, 261 insertions(+), 116 deletions(-)

diff --git a/cmd/gce-pd-csi-driver/main.go b/cmd/gce-pd-csi-driver/main.go
index 479559afe..74872b03f 100644
--- a/cmd/gce-pd-csi-driver/main.go
+++ b/cmd/gce-pd-csi-driver/main.go
@@ -21,6 +21,7 @@ import (
 	"math/rand"
 	"os"
 	"runtime"
+	"strings"
 	"time"
 
 	"k8s.io/klog/v2"
@@ -66,6 +67,8 @@ var (
 	maxConcurrentFormatAndMount = flag.Int("max-concurrent-format-and-mount", 1, "If set then format and mount operations are serialized on each node. This is stronger than max-concurrent-format as it includes fsck and other mount operations")
 	formatAndMountTimeout = flag.Duration("format-and-mount-timeout", 1*time.Minute, "The maximum duration of a format and mount operation before another such operation will be started. Used only if --serialize-format-and-mount")
 
+	fallbackRequisiteZonesFlag = flag.String("fallback-requisite-zones", "", "Comma separated list of requisite zones that will be used if there are not sufficient zones present in requisite topologies when provisioning a disk")
+
 	version string
 )
 
@@ -128,6 +131,9 @@ func handle() {
 	// Initialize identity server
 	identityServer := driver.NewIdentityServer(gceDriver)
 
+	// Initialize requisite zones
+	fallbackRequisiteZones := strings.Split(*fallbackRequisiteZonesFlag, ",")
+
 	// Initialize requirements for the controller service
 	var controllerServer *driver.GCEControllerServer
 	if *runControllerService {
@@ -137,7 +143,7 @@ func handle() {
 		}
 		initialBackoffDuration := time.Duration(*errorBackoffInitialDurationMs) * time.Millisecond
 		maxBackoffDuration := time.Duration(*errorBackoffMaxDurationMs) * time.Millisecond
-		controllerServer = driver.NewControllerServer(gceDriver, cloudProvider, initialBackoffDuration, maxBackoffDuration)
+		controllerServer = driver.NewControllerServer(gceDriver, cloudProvider, initialBackoffDuration, maxBackoffDuration, fallbackRequisiteZones)
 	} else if *cloudConfigFilePath != "" {
 		klog.Warningf("controller service is disabled but cloud config given - it has no effect")
 	}
diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go
index 38ba10091..26537adc9 100644
--- a/pkg/gce-pd-csi-driver/controller.go
+++ b/pkg/gce-pd-csi-driver/controller.go
@@ -91,6 +91,15 @@ type GCEControllerServer struct {
 	// publish/unpublish call will clear the backoff condition for a node and
 	// disk.
 	errorBackoff *csiErrorBackoff
+
+	// Requisite zones to fall back to when provisioning a disk.
+	// If there is an insufficient number of zones available in the union
+	// of preferred/requisite topology, this list is used instead of
+	// the passed in requisite topology.
+	// The main use case of this field is to support Regional Persistent Disk
+	// provisioning in GKE Autopilot, where a GKE cluster may
+	// be scaled down to 1 zone.
+ fallbackRequisiteZones []string } type csiErrorBackoffId string @@ -272,7 +281,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre var volKey *meta.Key switch params.ReplicationType { case replicationTypeNone: - zones, err = pickZones(ctx, gceCS, req.GetAccessibilityRequirements(), 1, locationTopReq) + zones, err = gceCS.pickZones(ctx, req.GetAccessibilityRequirements(), 1, locationTopReq) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "CreateVolume failed to pick zones for disk: %v", err.Error()) } @@ -282,7 +291,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre volKey = meta.ZonalKey(name, zones[0]) case replicationTypeRegionalPD: - zones, err = pickZones(ctx, gceCS, req.GetAccessibilityRequirements(), 2, locationTopReq) + zones, err = gceCS.pickZones(ctx, req.GetAccessibilityRequirements(), 2, locationTopReq) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "CreateVolume failed to pick zones for disk: %v", err.Error()) } @@ -1551,7 +1560,7 @@ func prependZone(zone string, zones []string) []string { return newZones } -func pickZonesFromTopology(top *csi.TopologyRequirement, numZones int, locationTopReq *locationRequirements) ([]string, error) { +func pickZonesFromTopology(top *csi.TopologyRequirement, numZones int, locationTopReq *locationRequirements, fallbackRequisiteZones []string) ([]string, error) { reqZones, err := getZonesFromTopology(top.GetRequisite()) if err != nil { return nil, fmt.Errorf("could not get zones from requisite topology: %w", err) @@ -1596,27 +1605,39 @@ func pickZonesFromTopology(top *csi.TopologyRequirement, numZones int, locationT if numZones <= len(prefZones) { return prefZones[0:numZones], nil - } else { - zones := sets.String{} - // Add all preferred zones into zones - zones.Insert(prefZones...) - remainingNumZones := numZones - len(prefZones) - // Take all of the remaining zones from requisite zones - reqSet := sets.NewString(reqZones...) - prefSet := sets.NewString(prefZones...) - remainingZones := reqSet.Difference(prefSet) - - if remainingZones.Len() < remainingNumZones { + } + + remainingNumZones := numZones - len(prefZones) + // Take all of the remaining zones from requisite zones + reqSet := sets.NewString(reqZones...) + prefSet := sets.NewString(prefZones...) + remainingZones := reqSet.Difference(prefSet) + + if remainingZones.Len() < remainingNumZones { + fallbackSet := sets.NewString(fallbackRequisiteZones...) + remainingFallbackZones := fallbackSet.Difference(prefSet) + if remainingFallbackZones.Len() >= remainingNumZones { + remainingZones = remainingFallbackZones + } else { return nil, fmt.Errorf("need %v zones from topology, only got %v unique zones", numZones, reqSet.Union(prefSet).Len()) } - // Add the remaining number of zones into the set - nSlice, err := pickRandAndConsecutive(remainingZones.List(), remainingNumZones) - if err != nil { - return nil, err - } - zones.Insert(nSlice...) - return zones.List(), nil } + + allZones := prefSet.Union(remainingZones).List() + sort.Strings(allZones) + var shiftIndex int + if len(prefZones) == 0 { + // Random shift the requisite zones, since there is no preferred start. + shiftIndex = rand.Intn(len(allZones)) + } else { + shiftIndex = slices.Index(allZones, prefZones[0]) + } + shiftedZones := append(allZones[shiftIndex:], allZones[:shiftIndex]...) 
+ sortedShiftedReqZones := slices.Filter(nil, shiftedZones, func(v string) bool { return !prefSet.Has(v) }) + zones := make([]string, 0, numZones) + zones = append(zones, prefZones...) + zones = append(zones, sortedShiftedReqZones...) + return zones[:numZones], nil } func getZonesFromTopology(topList []*csi.Topology) ([]string, error) { @@ -1652,11 +1673,11 @@ func getZoneFromSegment(seg map[string]string) (string, error) { return zone, nil } -func pickZones(ctx context.Context, gceCS *GCEControllerServer, top *csi.TopologyRequirement, numZones int, locationTopReq *locationRequirements) ([]string, error) { +func (gceCS *GCEControllerServer) pickZones(ctx context.Context, top *csi.TopologyRequirement, numZones int, locationTopReq *locationRequirements) ([]string, error) { var zones []string var err error if top != nil { - zones, err = pickZonesFromTopology(top, numZones, locationTopReq) + zones, err = pickZonesFromTopology(top, numZones, locationTopReq, gceCS.fallbackRequisiteZones) if err != nil { return nil, fmt.Errorf("failed to pick zones from topology: %w", err) } diff --git a/pkg/gce-pd-csi-driver/controller_test.go b/pkg/gce-pd-csi-driver/controller_test.go index 8c5e611b8..0aca02db2 100644 --- a/pkg/gce-pd-csi-driver/controller_test.go +++ b/pkg/gce-pd-csi-driver/controller_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/flowcontrol" + "k8s.io/utils/strings/slices" csi "github.com/container-storage-interface/spec/lib/go/csi" "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common" @@ -2401,12 +2402,13 @@ func TestPrependZone(t *testing.T) { func TestPickZonesFromTopology(t *testing.T) { testCases := []struct { - name string - top *csi.TopologyRequirement - locReq *locationRequirements - numZones int - expZones []string - expErr bool + name string + top *csi.TopologyRequirement + locReq *locationRequirements + numZones int + fallbackRequisiteZones []string + expZones []string + expErr bool }{ { name: "success: preferred", @@ -2467,6 +2469,29 @@ func TestPickZonesFromTopology(t *testing.T) { numZones: 1, expZones: []string{"us-central1-a"}, }, + { + name: "success: requisite, locationRequirements[region:us-central1, zone:us-central1-a, srcReplicationType:none, cloneReplicationType:regional-pd]", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-f"}, + }, + }, + Preferred: []*csi.Topology{}, + }, + locReq: &locationRequirements{srcVolRegion: "us-central1", srcVolZone: "us-central1-c", srcReplicationType: replicationTypeNone, cloneReplicationType: replicationTypeRegionalPD}, + numZones: 2, + expZones: []string{"us-central1-c", "us-central1-f"}, + }, { name: "success: preferred and requisite", top: &csi.TopologyRequirement{ @@ -2533,7 +2558,7 @@ func TestPickZonesFromTopology(t *testing.T) { }, locReq: &locationRequirements{srcVolRegion: "us-central1", srcVolZone: "us-central1-a", srcReplicationType: replicationTypeRegionalPD, cloneReplicationType: replicationTypeRegionalPD}, numZones: 5, - expZones: []string{"us-central1-b", "us-central1-c", "us-central1-a", "us-central1-d", "us-central1-f"}, + expZones: []string{"us-central1-b", "us-central1-a", "us-central1-c", 
"us-central1-d", "us-central1-f"}, }, { name: "success: preferred and requisite, locationRequirements[region:us-central1, zone:us-central1-a, srcReplicationType:none, cloneReplicationType:regional-pd]", @@ -2568,6 +2593,149 @@ func TestPickZonesFromTopology(t *testing.T) { numZones: 5, expZones: []string{"us-central1-a", "us-central1-b", "us-central1-c", "us-central1-d", "us-central1-f"}, }, + { + name: "success: preferred and requisite, locationRequirements[region:us-central1, zone:us-central1-a, srcReplicationType:none, cloneReplicationType:regional-pd], 3 zones {a, b, c}", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-f"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + }, + }, + locReq: &locationRequirements{srcVolRegion: "us-central1", srcVolZone: "us-central1-a", srcReplicationType: replicationTypeNone, cloneReplicationType: replicationTypeRegionalPD}, + numZones: 3, + expZones: []string{"us-central1-a", "us-central1-c", "us-central1-b"}, + }, + { + name: "success: preferred and requisite, locationRequirements[region:us-central1, zone:us-central1-a, srcReplicationType:none, cloneReplicationType:regional-pd], 3 zones {b, c, f}", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-f"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + }, + }, + locReq: &locationRequirements{srcVolRegion: "us-central1", srcVolZone: "us-central1-b", srcReplicationType: replicationTypeNone, cloneReplicationType: replicationTypeRegionalPD}, + numZones: 3, + expZones: []string{"us-central1-b", "us-central1-c", "us-central1-f"}, + }, + { + name: "success: preferred and requisite, locationRequirements[region:us-central1, zone:us-central1-a, srcReplicationType:none, cloneReplicationType:regional-pd], fallback topologies specified but unused", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-c"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + }, + }, + fallbackRequisiteZones: []string{"us-central1-a", "us-central1-f", "us-central1-g"}, + locReq: &locationRequirements{srcVolRegion: "us-central1", srcVolZone: "us-central1-a", srcReplicationType: replicationTypeNone, cloneReplicationType: replicationTypeRegionalPD}, + numZones: 2, + expZones: []string{"us-central1-a", "us-central1-b"}, + }, + { + name: "success: preferred and requisite, 
locationRequirements[region:us-central1, zone:us-central1-a, srcReplicationType:none, cloneReplicationType:regional-pd], fallback topologies specified", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + { + Segments: map[string]string{common.TopologyKeyZone: "us-west1-b"}, + }, + }, + }, + fallbackRequisiteZones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, + locReq: &locationRequirements{srcVolRegion: "us-central1", srcVolZone: "us-central1-b", srcReplicationType: replicationTypeNone, cloneReplicationType: replicationTypeRegionalPD}, + numZones: 2, + expZones: []string{"us-central1-b", "us-central1-c"}, + }, + { + name: "success: preferred and requisite, locationRequirements[region:us-central1, zone:us-central1-a, srcReplicationType:regional-pd, cloneReplicationType:regional-pd], fallback topologies specified", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{}, + Preferred: []*csi.Topology{ + // This is a bit contrived, a real regional PD should have two zones + // This only has one, so we can test that a second is pulled from + // fallbackRequisiteZones. + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + }, + }, + fallbackRequisiteZones: []string{"us-central1-a", "us-central1-b", "us-central1-c", "us-central1-f"}, + locReq: &locationRequirements{srcVolRegion: "us-central1", srcVolZone: "us-central1-b", srcReplicationType: replicationTypeRegionalPD, cloneReplicationType: replicationTypeRegionalPD}, + numZones: 2, + expZones: []string{"us-central1-b", "us-central1-c"}, + }, + { + name: "success: preferred and requisite, fallback topologies specified", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-b"}, + }, + }, + }, + fallbackRequisiteZones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, + numZones: 2, + expZones: []string{"us-central1-b", "us-central1-c"}, + }, { name: "fail: not enough topologies", top: &csi.TopologyRequirement{ @@ -2597,6 +2765,24 @@ func TestPickZonesFromTopology(t *testing.T) { numZones: 4, expErr: true, }, + { + name: "fail: not enough topologies, fallback topologies specified", + top: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{common.TopologyKeyZone: "us-central1-a"}, + }, + }, + }, + fallbackRequisiteZones: []string{"us-central1-a", "us-central1-b", "us-central1-c"}, + numZones: 4, + expErr: true, + }, { name: "fail: no topologies that match locationRequirment, locationRequirements[region:us-east1, zone:us-east1-a, replicationType:none]", top: &csi.TopologyRequirement{ @@ -2688,7 +2874,7 @@ func TestPickZonesFromTopology(t *testing.T) { expErr: true, }, { - name: "success: only requisite, locationRequirements[region:us-central1, zone:us-central1-a, replicationType:regional-pd", + name: "success: only requisite, all zones", top: &csi.TopologyRequirement{ Requisite: []*csi.Topology{ { @@ -2725,15 +2911,17 @@ func TestPickZonesFromTopology(t *testing.T) { }, } for _, tc := range testCases { + // Apply a deterministic seed to 
make the test that calls rand.Intn stable. + rand.Seed(8) t.Logf("test case: %s", tc.name) - gotZones, err := pickZonesFromTopology(tc.top, tc.numZones, tc.locReq) + gotZones, err := pickZonesFromTopology(tc.top, tc.numZones, tc.locReq, tc.fallbackRequisiteZones) if err != nil && !tc.expErr { t.Errorf("got error: %v, but did not expect error", err) } if err == nil && tc.expErr { t.Errorf("got no error, but expected error") } - if !sets.NewString(gotZones...).Equal(sets.NewString(tc.expZones...)) { + if !slices.Equal(gotZones, tc.expZones) { t.Errorf("Expected zones: %v, but got: %v", tc.expZones, gotZones) } } @@ -2751,80 +2939,6 @@ func zonesEqual(gotZones, expectedZones []string) bool { return true } -func TestPickRandAndConsecutive(t *testing.T) { - rand.Seed(time.Now().UnixNano()) - testCases := []struct { - name string - slice []string - n int - expErr bool - }{ - { - name: "success: normal", - slice: []string{"test", "second", "third"}, - n: 2, - }, - { - name: "success: full", - slice: []string{"test", "second", "third"}, - n: 3, - }, - { - name: "success: large", - slice: []string{"test", "second", "third", "fourth", "fifth", "sixth"}, - n: 2, - }, - { - name: "fail: n too large", - slice: []string{}, - n: 2, - expErr: true, - }, - } - for _, tc := range testCases { - t.Logf("test case: %s", tc.name) - tot := sets.String{} - sort.Strings(tc.slice) - for i := 0; i < 25; i++ { - theslice, err := pickRandAndConsecutive(tc.slice, tc.n) - if err != nil && !tc.expErr { - t.Errorf("Did not expect error but got: %v", err) - } - if err == nil && tc.expErr { - t.Errorf("Expected error but got none") - } - if err != nil { - break - } - if len(theslice) != tc.n { - t.Errorf("expected the resulting slice to be length %v, but got %v instead", tc.n, theslice) - } - // Find where it is in the slice - var idx = -1 - for j, elem := range tc.slice { - if elem == theslice[0] { - idx = j - break - } - } - if idx == -1 { - t.Errorf("could not find %v in the original slice %v", theslice[0], tc.slice) - } - for j := 0; j < tc.n; j++ { - if theslice[j] != tc.slice[(idx+j)%len(tc.slice)] { - t.Errorf("did not pick sorted consecutive values from the slice") - } - } - - tot.Insert(theslice...) 
- } - if !tot.Equal(sets.NewString(tc.slice...)) { - t.Errorf("randomly picking n from slice did not get all %v, instead got only %v", tc.slice, tot) - } - - } -} - func TestVolumeOperationConcurrency(t *testing.T) { readyToExecute := make(chan chan gce.Signal, 1) gceDriver := initBlockingGCEDriver(t, []*gce.CloudDisk{ diff --git a/pkg/gce-pd-csi-driver/gce-pd-driver.go b/pkg/gce-pd-csi-driver/gce-pd-driver.go index 20ab50b01..3f196a5f5 100644 --- a/pkg/gce-pd-csi-driver/gce-pd-driver.go +++ b/pkg/gce-pd-csi-driver/gce-pd-driver.go @@ -152,13 +152,14 @@ func NewNodeServer(gceDriver *GCEDriver, mounter *mount.SafeFormatAndMount, devi } } -func NewControllerServer(gceDriver *GCEDriver, cloudProvider gce.GCECompute, errorBackoffInitialDuration, errorBackoffMaxDuration time.Duration) *GCEControllerServer { +func NewControllerServer(gceDriver *GCEDriver, cloudProvider gce.GCECompute, errorBackoffInitialDuration, errorBackoffMaxDuration time.Duration, fallbackRequisiteZones []string) *GCEControllerServer { return &GCEControllerServer{ - Driver: gceDriver, - CloudProvider: cloudProvider, - seen: map[string]int{}, - volumeLocks: common.NewVolumeLocks(), - errorBackoff: newCsiErrorBackoff(errorBackoffInitialDuration, errorBackoffMaxDuration), + Driver: gceDriver, + CloudProvider: cloudProvider, + seen: map[string]int{}, + volumeLocks: common.NewVolumeLocks(), + errorBackoff: newCsiErrorBackoff(errorBackoffInitialDuration, errorBackoffMaxDuration), + fallbackRequisiteZones: fallbackRequisiteZones, } } diff --git a/pkg/gce-pd-csi-driver/gce-pd-driver_test.go b/pkg/gce-pd-csi-driver/gce-pd-driver_test.go index b32bc64b6..32613b5f5 100644 --- a/pkg/gce-pd-csi-driver/gce-pd-driver_test.go +++ b/pkg/gce-pd-csi-driver/gce-pd-driver_test.go @@ -46,8 +46,9 @@ func initGCEDriverWithCloudProvider(t *testing.T, cloudProvider gce.GCECompute) gceDriver := GetGCEDriver() errorBackoffInitialDuration := 200 * time.Millisecond errorBackoffMaxDuration := 5 * time.Minute + fallbackRequisiteZones := []string{} - controllerServer := NewControllerServer(gceDriver, cloudProvider, errorBackoffInitialDuration, errorBackoffMaxDuration) + controllerServer := NewControllerServer(gceDriver, cloudProvider, errorBackoffInitialDuration, errorBackoffMaxDuration, fallbackRequisiteZones) err := gceDriver.SetupGCEDriver(driver, vendorVersion, nil, nil, controllerServer, nil) if err != nil { t.Fatalf("Failed to setup GCE Driver: %v", err) diff --git a/test/sanity/sanity_test.go b/test/sanity/sanity_test.go index 358d401a8..191d33182 100644 --- a/test/sanity/sanity_test.go +++ b/test/sanity/sanity_test.go @@ -61,12 +61,14 @@ func TestSanity(t *testing.T) { t.Fatalf("Failed to get cloud provider: %v", err.Error()) } + fallbackRequisiteZones := []string{} + mounter := mountmanager.NewFakeSafeMounter() deviceUtils := deviceutils.NewFakeDeviceUtils(true) //Initialize GCE Driver identityServer := driver.NewIdentityServer(gceDriver) - controllerServer := driver.NewControllerServer(gceDriver, cloudProvider, 0, 5*time.Minute) + controllerServer := driver.NewControllerServer(gceDriver, cloudProvider, 0, 5*time.Minute, fallbackRequisiteZones) nodeServer := driver.NewNodeServer(gceDriver, mounter, deviceUtils, metadataservice.NewFakeService(), mountmanager.NewFakeStatter(mounter)) err = gceDriver.SetupGCEDriver(driverName, vendorVersion, extraLabels, identityServer, controllerServer, nodeServer) if err != nil {
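
For readers following the zone-selection change above: the sketch below is a standalone summary of the behaviour that pickZonesFromTopology implements after this patch, written for illustration only. The function and helper names (pickZonesSketch, distinctExcluding) are invented here and do not exist in the driver, and the sketch omits the locationRequirements handling that the real code applies for volume cloning.

// fallback_zones_sketch.go — illustration only; not part of the driver.
package main

import (
	"fmt"
	"math/rand"
	"sort"
)

// pickZonesSketch picks numZones zones: preferred zones first, then the
// remaining zones from requisite, and, when requisite does not contain
// enough distinct zones, from the fallback list instead.
func pickZonesSketch(preferred, requisite, fallback []string, numZones int) ([]string, error) {
	if numZones <= len(preferred) {
		return preferred[:numZones], nil
	}

	inPreferred := make(map[string]bool, len(preferred))
	for _, z := range preferred {
		inPreferred[z] = true
	}

	remaining := distinctExcluding(requisite, inPreferred)
	need := numZones - len(preferred)
	if len(remaining) < need {
		// Not enough requisite zones: use the fallback list instead.
		remaining = distinctExcluding(fallback, inPreferred)
		if len(remaining) < need {
			return nil, fmt.Errorf("need %d zones, only %d available", numZones, len(preferred)+len(remaining))
		}
	}

	// Deterministic fill order: sort every candidate zone, rotate the sorted
	// list so it starts at the first preferred zone (or at a random index if
	// there are no preferred zones), then append non-preferred entries.
	all := append(append([]string{}, preferred...), remaining...)
	sort.Strings(all)
	shift := 0
	if len(preferred) > 0 {
		shift = sort.SearchStrings(all, preferred[0])
	} else {
		shift = rand.Intn(len(all))
	}
	rotated := append(append([]string{}, all[shift:]...), all[:shift]...)

	zones := append([]string{}, preferred...)
	for _, z := range rotated {
		if len(zones) == numZones {
			break
		}
		if !inPreferred[z] {
			zones = append(zones, z)
		}
	}
	return zones, nil
}

// distinctExcluding returns the distinct elements of in that are not in skip.
func distinctExcluding(in []string, skip map[string]bool) []string {
	seen := map[string]bool{}
	out := []string{}
	for _, z := range in {
		if !skip[z] && !seen[z] {
			seen[z] = true
			out = append(out, z)
		}
	}
	return out
}

func main() {
	// Single-zone topology (e.g. an Autopilot cluster scaled down to one zone)
	// asking for a regional PD: the second zone comes from the fallback list.
	zones, err := pickZonesSketch(
		[]string{"us-central1-b"}, // preferred
		[]string{"us-central1-b"}, // requisite
		[]string{"us-central1-a", "us-central1-b", "us-central1-c"}, // fallback
		2,
	)
	fmt.Println(zones, err) // [us-central1-b us-central1-c] <nil>
}

With --fallback-requisite-zones=us-central1-a,us-central1-b,us-central1-c and a topology that only knows about us-central1-b, a two-zone request resolves to us-central1-b plus one fallback zone, matching the new "fallback topologies specified" test case above.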
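
On the wiring side, main.go turns the new --fallback-requisite-zones value into a []string with strings.Split and passes it to NewControllerServer. One detail worth remembering when reusing that pattern is that strings.Split("", ",") returns a one-element slice containing the empty string, so an unset flag yields []string{""} rather than an empty slice. The snippet below is a minimal, hypothetical illustration of such parsing; parseFallbackZones is not part of the driver.

package main

import (
	"flag"
	"fmt"
	"strings"
)

var fallbackRequisiteZonesFlag = flag.String("fallback-requisite-zones", "",
	"Comma separated list of requisite zones that will be used if there are not sufficient zones present in requisite topologies when provisioning a disk")

// parseFallbackZones splits the comma separated flag value and treats the
// empty default as "no fallback zones".
func parseFallbackZones(v string) []string {
	if v == "" {
		return nil
	}
	return strings.Split(v, ",")
}

func main() {
	flag.Parse()
	fmt.Printf("fallback requisite zones: %q\n", parseFallbackZones(*fallbackRequisiteZonesFlag))
}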