diff --git a/pkg/common/utils.go b/pkg/common/utils.go index 64acef3da..3355024dc 100644 --- a/pkg/common/utils.go +++ b/pkg/common/utils.go @@ -39,10 +39,12 @@ const ( // Snapshot ID snapshotTotalElements = 5 snapshotTopologyKey = 2 + snapshotProjectKey = 1 // Node ID Expected Format // "projects/{projectName}/zones/{zoneName}/disks/{diskName}" nodeIDFmt = "projects/%s/zones/%s/instances/%s" + nodeIDProjectValue = 1 nodeIDZoneValue = 3 nodeIDNameValue = 5 nodeIDTotalElements = 6 @@ -68,17 +70,17 @@ func GbToBytes(Gb int64) int64 { return Gb * 1024 * 1024 * 1024 } -func VolumeIDToKey(id string) (*meta.Key, error) { +func VolumeIDToKey(id string) (string, *meta.Key, error) { splitId := strings.Split(id, "/") if len(splitId) != volIDTotalElements { - return nil, fmt.Errorf("failed to get id components. Expected projects/{project}/zones/{zone}/disks/{name}. Got: %s", id) + return "", nil, fmt.Errorf("failed to get id components. Expected projects/{project}/zones/{zone}/disks/{name}. Got: %s", id) } if splitId[volIDToplogyKey] == "zones" { - return meta.ZonalKey(splitId[volIDDiskNameValue], splitId[volIDToplogyValue]), nil + return splitId[nodeIDProjectValue], meta.ZonalKey(splitId[volIDDiskNameValue], splitId[volIDToplogyValue]), nil } else if splitId[volIDToplogyKey] == "regions" { - return meta.RegionalKey(splitId[volIDDiskNameValue], splitId[volIDToplogyValue]), nil + return splitId[nodeIDProjectValue], meta.RegionalKey(splitId[volIDDiskNameValue], splitId[volIDToplogyValue]), nil } else { - return nil, fmt.Errorf("could not get id components, expected either zones or regions, got: %v", splitId[volIDToplogyKey]) + return "", nil, fmt.Errorf("could not get id components, expected either zones or regions, got: %v", splitId[volIDToplogyKey]) } } @@ -100,15 +102,15 @@ func GenerateUnderspecifiedVolumeID(diskName string, isZonal bool) string { return fmt.Sprintf(volIDRegionalFmt, UnspecifiedValue, UnspecifiedValue, diskName) } -func SnapshotIDToKey(id string) (string, 
error) { +func SnapshotIDToProjectKey(id string) (string, string, error) { splitId := strings.Split(id, "/") if len(splitId) != snapshotTotalElements { - return "", fmt.Errorf("failed to get id components. Expected projects/{project}/global/snapshot/{name}. Got: %s", id) + return "", "", fmt.Errorf("failed to get id components. Expected projects/{project}/global/snapshot/{name}. Got: %s", id) } if splitId[snapshotTopologyKey] == "global" { - return splitId[snapshotTotalElements-1], nil + return splitId[snapshotProjectKey], splitId[snapshotTotalElements-1], nil } else { - return "", fmt.Errorf("could not get id components, expected global, got: %v", splitId[snapshotTopologyKey]) + return "", "", fmt.Errorf("could not get id components, expected global, got: %v", splitId[snapshotTopologyKey]) } } diff --git a/pkg/common/utils_test.go b/pkg/common/utils_test.go index be1f31734..c9c7a6864 100644 --- a/pkg/common/utils_test.go +++ b/pkg/common/utils_test.go @@ -146,23 +146,33 @@ func TestVolumeIDToKey(t *testing.T) { testName := "test-name" testZone := "test-zone" testProject := "test-project" + testCrossProject := "test-cross-project" testRegion := "test-region" testCases := []struct { - name string - volID string - expKey *meta.Key - expErr bool + name string + volID string + expProject string + expKey *meta.Key + expErr bool }{ { - name: "normal zonal", - volID: fmt.Sprintf(volIDZoneFmt, testProject, testZone, testName), - expKey: meta.ZonalKey(testName, testZone), + name: "normal zonal", + volID: fmt.Sprintf(volIDZoneFmt, testProject, testZone, testName), + expKey: meta.ZonalKey(testName, testZone), + expProject: testProject, + }, + { + name: "cross project", + volID: fmt.Sprintf(volIDZoneFmt, testCrossProject, testZone, testName), + expKey: meta.ZonalKey(testName, testZone), + expProject: testCrossProject, }, { - name: "normal regional", - volID: fmt.Sprintf(volIDRegionFmt, testProject, testRegion, testName), - expKey: meta.RegionalKey(testName, testRegion), + 
name: "normal regional", + volID: fmt.Sprintf(volIDRegionFmt, testProject, testRegion, testName), + expKey: meta.RegionalKey(testName, testRegion), + expProject: testProject, }, { name: "malformed", @@ -177,7 +187,7 @@ func TestVolumeIDToKey(t *testing.T) { } for _, tc := range testCases { t.Logf("test case: %s", tc.name) - gotKey, err := VolumeIDToKey(tc.volID) + project, gotKey, err := VolumeIDToKey(tc.volID) if err == nil && tc.expErr { t.Errorf("Expected error but got none") } @@ -191,6 +201,10 @@ func TestVolumeIDToKey(t *testing.T) { if !reflect.DeepEqual(gotKey, tc.expKey) { t.Errorf("Got key %v, but expected %v, from volume ID %v", gotKey, tc.expKey, tc.volID) } + + if project != tc.expProject { + t.Errorf("Got project %v, but expected %v, from volume ID %v", project, tc.expProject, tc.volID) + } } } diff --git a/pkg/gce-cloud-provider/compute/fake-gce.go b/pkg/gce-cloud-provider/compute/fake-gce.go index a4d3f9a95..fc18eda7e 100644 --- a/pkg/gce-cloud-provider/compute/fake-gce.go +++ b/pkg/gce-cloud-provider/compute/fake-gce.go @@ -80,31 +80,34 @@ func (cloud *FakeCloudProvider) GetDefaultZone() string { return cloud.zone } -func (cloud *FakeCloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, volumeKey *meta.Key) (*meta.Key, error) { +func (cloud *FakeCloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, project string, volumeKey *meta.Key) (string, *meta.Key, error) { + if project == common.UnspecifiedValue { + project = cloud.project + } switch volumeKey.Type() { case meta.Zonal: if volumeKey.Zone != common.UnspecifiedValue { - return volumeKey, nil + return project, volumeKey, nil } for name, d := range cloud.disks { if name == volumeKey.Name { volumeKey.Zone = d.GetZone() - return volumeKey, nil + return project, volumeKey, nil } } - return nil, notFoundError() + return "", nil, notFoundError() case meta.Regional: if volumeKey.Region != common.UnspecifiedValue { - return volumeKey, nil + return project, volumeKey, nil } r, err 
:= common.GetRegionFromZones([]string{cloud.zone}) if err != nil { - return nil, fmt.Errorf("failed to get region from zones: %v", err) + return "", nil, fmt.Errorf("failed to get region from zones: %v", err) } volumeKey.Region = r - return volumeKey, nil + return project, volumeKey, nil default: - return nil, fmt.Errorf("Volume key %v not zonal nor regional", volumeKey.Name) + return "", nil, fmt.Errorf("Volume key %v not zonal nor regional", volumeKey.Name) } } @@ -212,7 +215,7 @@ func (cloud *FakeCloudProvider) ListSnapshots(ctx context.Context, filter string } // Disk Methods -func (cloud *FakeCloudProvider) GetDisk(ctx context.Context, volKey *meta.Key, api GCEAPIVersion) (*CloudDisk, error) { +func (cloud *FakeCloudProvider) GetDisk(ctx context.Context, project string, volKey *meta.Key, api GCEAPIVersion) (*CloudDisk, error) { disk, ok := cloud.disks[volKey.Name] if !ok { return nil, notFoundError() @@ -249,7 +252,7 @@ func (cloud *FakeCloudProvider) ValidateExistingDisk(ctx context.Context, resp * return ValidateDiskParameters(resp, params) } -func (cloud *FakeCloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key, params common.DiskParameters, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID string, multiWriter bool) error { +func (cloud *FakeCloudProvider) InsertDisk(ctx context.Context, project string, volKey *meta.Key, params common.DiskParameters, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID string, multiWriter bool) error { if disk, ok := cloud.disks[volKey.Name]; ok { err := cloud.ValidateExistingDisk(ctx, disk, params, int64(capacityRange.GetRequiredBytes()), @@ -264,7 +267,7 @@ func (cloud *FakeCloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key Name: volKey.Name, SizeGb: common.BytesToGbRoundUp(capBytes), Description: "Disk created by GCE-PD CSI Driver", - Type: cloud.GetDiskTypeURI(volKey, params.DiskType), + Type: cloud.GetDiskTypeURI(project, volKey, 
params.DiskType), SourceSnapshotId: snapshotID, Status: cloud.mockDiskStatus, Labels: params.Labels, @@ -277,10 +280,10 @@ func (cloud *FakeCloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key switch volKey.Type() { case meta.Zonal: computeDisk.Zone = volKey.Zone - computeDisk.SelfLink = fmt.Sprintf("projects/%s/zones/%s/disks/%s", cloud.project, volKey.Zone, volKey.Name) + computeDisk.SelfLink = fmt.Sprintf("projects/%s/zones/%s/disks/%s", project, volKey.Zone, volKey.Name) case meta.Regional: computeDisk.Region = volKey.Region - computeDisk.SelfLink = fmt.Sprintf("projects/%s/regions/%s/disks/%s", cloud.project, volKey.Region, volKey.Name) + computeDisk.SelfLink = fmt.Sprintf("projects/%s/regions/%s/disks/%s", project, volKey.Region, volKey.Name) default: return fmt.Errorf("could not create disk, key was neither zonal nor regional, instead got: %v", volKey.String()) } @@ -289,7 +292,7 @@ func (cloud *FakeCloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key return nil } -func (cloud *FakeCloudProvider) DeleteDisk(ctx context.Context, volKey *meta.Key) error { +func (cloud *FakeCloudProvider) DeleteDisk(ctx context.Context, project string, volKey *meta.Key) error { if _, ok := cloud.disks[volKey.Name]; !ok { return notFoundError() } @@ -297,8 +300,8 @@ func (cloud *FakeCloudProvider) DeleteDisk(ctx context.Context, volKey *meta.Key return nil } -func (cloud *FakeCloudProvider) AttachDisk(ctx context.Context, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error { - source := cloud.GetDiskSourceURI(volKey) +func (cloud *FakeCloudProvider) AttachDisk(ctx context.Context, project string, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error { + source := cloud.GetDiskSourceURI(project, volKey) attachedDiskV1 := &computev1.AttachedDisk{ DeviceName: volKey.Name, @@ -315,7 +318,7 @@ func (cloud *FakeCloudProvider) AttachDisk(ctx context.Context, volKey *meta.Key return nil } -func (cloud 
*FakeCloudProvider) DetachDisk(ctx context.Context, deviceName, instanceZone, instanceName string) error { +func (cloud *FakeCloudProvider) DetachDisk(ctx context.Context, project, deviceName, instanceZone, instanceName string) error { instance, ok := cloud.instances[instanceName] if !ok { return fmt.Errorf("Failed to get instance %v", instanceName) @@ -332,31 +335,31 @@ func (cloud *FakeCloudProvider) DetachDisk(ctx context.Context, deviceName, inst return nil } -func (cloud *FakeCloudProvider) GetDiskTypeURI(volKey *meta.Key, diskType string) string { +func (cloud *FakeCloudProvider) GetDiskTypeURI(project string, volKey *meta.Key, diskType string) string { switch volKey.Type() { case meta.Zonal: - return cloud.getZonalDiskTypeURI(volKey.Zone, diskType) + return cloud.getZonalDiskTypeURI(project, volKey.Zone, diskType) case meta.Regional: - return cloud.getRegionalDiskTypeURI(volKey.Region, diskType) + return cloud.getRegionalDiskTypeURI(project, volKey.Region, diskType) default: return fmt.Sprintf("could not get disk type uri, key was neither zonal nor regional, instead got: %v", volKey.String()) } } -func (cloud *FakeCloudProvider) getZonalDiskTypeURI(zone, diskType string) string { - return fmt.Sprintf(diskTypeURITemplateSingleZone, cloud.project, zone, diskType) +func (cloud *FakeCloudProvider) getZonalDiskTypeURI(project, zone, diskType string) string { + return fmt.Sprintf(diskTypeURITemplateSingleZone, project, zone, diskType) } -func (cloud *FakeCloudProvider) getRegionalDiskTypeURI(region, diskType string) string { - return fmt.Sprintf(diskTypeURITemplateRegional, cloud.project, region, diskType) +func (cloud *FakeCloudProvider) getRegionalDiskTypeURI(project, region, diskType string) string { + return fmt.Sprintf(diskTypeURITemplateRegional, project, region, diskType) } -func (cloud *FakeCloudProvider) WaitForAttach(ctx context.Context, volKey *meta.Key, instanceZone, instanceName string) error { +func (cloud *FakeCloudProvider) WaitForAttach(ctx 
context.Context, project string, volKey *meta.Key, instanceZone, instanceName string) error { return nil } // Regional Disk Methods -func (cloud *FakeCloudProvider) GetReplicaZoneURI(zone string) string { +func (cloud *FakeCloudProvider) GetReplicaZoneURI(project, zone string) string { return "" } @@ -375,7 +378,7 @@ func (cloud *FakeCloudProvider) GetInstanceOrError(ctx context.Context, instance } // Snapshot Methods -func (cloud *FakeCloudProvider) GetSnapshot(ctx context.Context, snapshotName string) (*computev1.Snapshot, error) { +func (cloud *FakeCloudProvider) GetSnapshot(ctx context.Context, project, snapshotName string) (*computev1.Snapshot, error) { snapshot, ok := cloud.snapshots[snapshotName] if !ok { return nil, notFoundError() @@ -384,7 +387,7 @@ func (cloud *FakeCloudProvider) GetSnapshot(ctx context.Context, snapshotName st return snapshot, nil } -func (cloud *FakeCloudProvider) CreateSnapshot(ctx context.Context, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) { +func (cloud *FakeCloudProvider) CreateSnapshot(ctx context.Context, project string, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) { if snapshot, ok := cloud.snapshots[snapshotName]; ok { return snapshot, nil } @@ -394,13 +397,13 @@ func (cloud *FakeCloudProvider) CreateSnapshot(ctx context.Context, volKey *meta DiskSizeGb: int64(DiskSizeGb), CreationTimestamp: Timestamp, Status: "UPLOADING", - SelfLink: cloud.getGlobalSnapshotURI(snapshotName), + SelfLink: cloud.getGlobalSnapshotURI(project, snapshotName), } switch volKey.Type() { case meta.Zonal: - snapshotToCreate.SourceDisk = cloud.getZonalDiskSourceURI(volKey.Name, volKey.Zone) + snapshotToCreate.SourceDisk = cloud.getZonalDiskSourceURI(project, volKey.Name, volKey.Zone) case meta.Regional: - snapshotToCreate.SourceDisk = cloud.getRegionalDiskSourceURI(volKey.Name, volKey.Region) + snapshotToCreate.SourceDisk = cloud.getRegionalDiskSourceURI(project, volKey.Name, volKey.Region) default: return 
nil, fmt.Errorf("could not create snapshot, disk key was neither zonal nor regional, instead got: %v", volKey.String()) } @@ -409,7 +412,7 @@ func (cloud *FakeCloudProvider) CreateSnapshot(ctx context.Context, volKey *meta return snapshotToCreate, nil } -func (cloud *FakeCloudProvider) ResizeDisk(ctx context.Context, volKey *meta.Key, requestBytes int64) (int64, error) { +func (cloud *FakeCloudProvider) ResizeDisk(ctx context.Context, project string, volKey *meta.Key, requestBytes int64) (int64, error) { disk, ok := cloud.disks[volKey.Name] if !ok { return -1, notFoundError() @@ -424,7 +427,7 @@ func (cloud *FakeCloudProvider) ResizeDisk(ctx context.Context, volKey *meta.Key } // Snapshot Methods -func (cloud *FakeCloudProvider) DeleteSnapshot(ctx context.Context, snapshotName string) error { +func (cloud *FakeCloudProvider) DeleteSnapshot(ctx context.Context, project, snapshotName string) error { delete(cloud.snapshots, snapshotName) return nil } @@ -434,7 +437,7 @@ func (cloud *FakeCloudProvider) ValidateExistingSnapshot(resp *computev1.Snapsho return fmt.Errorf("disk does not exist") } - diskSource := cloud.GetDiskSourceURI(volKey) + diskSource := cloud.GetDiskSourceURI(cloud.project, volKey) if resp.SourceDisk != diskSource { return status.Error(codes.AlreadyExists, fmt.Sprintf("snapshot already exists with same name but with a different disk source %s, expected disk source %s", diskSource, resp.SourceDisk)) } @@ -443,37 +446,37 @@ func (cloud *FakeCloudProvider) ValidateExistingSnapshot(resp *computev1.Snapsho return nil } -func (cloud *FakeCloudProvider) GetDiskSourceURI(volKey *meta.Key) string { +func (cloud *FakeCloudProvider) GetDiskSourceURI(project string, volKey *meta.Key) string { switch volKey.Type() { case meta.Zonal: - return cloud.getZonalDiskSourceURI(volKey.Name, volKey.Zone) + return cloud.getZonalDiskSourceURI(project, volKey.Name, volKey.Zone) case meta.Regional: - return cloud.getRegionalDiskSourceURI(volKey.Name, volKey.Region) + return 
cloud.getRegionalDiskSourceURI(project, volKey.Name, volKey.Region) default: return "" } } -func (cloud *FakeCloudProvider) getZonalDiskSourceURI(diskName, zone string) string { +func (cloud *FakeCloudProvider) getZonalDiskSourceURI(project, diskName, zone string) string { return BasePath + fmt.Sprintf( diskSourceURITemplateSingleZone, - cloud.project, + project, zone, diskName) } -func (cloud *FakeCloudProvider) getRegionalDiskSourceURI(diskName, region string) string { +func (cloud *FakeCloudProvider) getRegionalDiskSourceURI(project, diskName, region string) string { return BasePath + fmt.Sprintf( diskSourceURITemplateRegional, - cloud.project, + project, region, diskName) } -func (cloud *FakeCloudProvider) getGlobalSnapshotURI(snapshotName string) string { +func (cloud *FakeCloudProvider) getGlobalSnapshotURI(project, snapshotName string) string { return BasePath + fmt.Sprintf( snapshotURITemplateGlobal, - cloud.project, + project, snapshotName) } @@ -490,11 +493,11 @@ type FakeBlockingCloudProvider struct { // Upon starting a CreateSnapshot, it passes a chan 'executeCreateSnapshot' into readyToExecute, then blocks on executeCreateSnapshot. // The test calling this function can block on readyToExecute to ensure that the operation has started and // allowed the CreateSnapshot to continue by passing a struct into executeCreateSnapshot. 
-func (cloud *FakeBlockingCloudProvider) CreateSnapshot(ctx context.Context, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) { +func (cloud *FakeBlockingCloudProvider) CreateSnapshot(ctx context.Context, project string, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) { executeCreateSnapshot := make(chan struct{}) cloud.ReadyToExecute <- executeCreateSnapshot <-executeCreateSnapshot - return cloud.FakeCloudProvider.CreateSnapshot(ctx, volKey, snapshotName) + return cloud.FakeCloudProvider.CreateSnapshot(ctx, project, volKey, snapshotName) } func notFoundError() *googleapi.Error { diff --git a/pkg/gce-cloud-provider/compute/gce-compute.go b/pkg/gce-cloud-provider/compute/gce-compute.go index a33e2e78b..b0df5a231 100644 --- a/pkg/gce-cloud-provider/compute/gce-compute.go +++ b/pkg/gce-cloud-provider/compute/gce-compute.go @@ -53,28 +53,28 @@ type GCECompute interface { GetDefaultProject() string GetDefaultZone() string // Disk Methods - GetDisk(ctx context.Context, volumeKey *meta.Key, gceAPIVersion GCEAPIVersion) (*CloudDisk, error) - RepairUnderspecifiedVolumeKey(ctx context.Context, volumeKey *meta.Key) (*meta.Key, error) + GetDisk(ctx context.Context, project string, volumeKey *meta.Key, gceAPIVersion GCEAPIVersion) (*CloudDisk, error) + RepairUnderspecifiedVolumeKey(ctx context.Context, project string, volumeKey *meta.Key) (string, *meta.Key, error) ValidateExistingDisk(ctx context.Context, disk *CloudDisk, params common.DiskParameters, reqBytes, limBytes int64, multiWriter bool) error - InsertDisk(ctx context.Context, volKey *meta.Key, params common.DiskParameters, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID string, multiWriter bool) error - DeleteDisk(ctx context.Context, volumeKey *meta.Key) error - AttachDisk(ctx context.Context, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error - DetachDisk(ctx context.Context, deviceName string, instanceZone, 
instanceName string) error - GetDiskSourceURI(volKey *meta.Key) string - GetDiskTypeURI(volKey *meta.Key, diskType string) string - WaitForAttach(ctx context.Context, volKey *meta.Key, instanceZone, instanceName string) error - ResizeDisk(ctx context.Context, volKey *meta.Key, requestBytes int64) (int64, error) + InsertDisk(ctx context.Context, project string, volKey *meta.Key, params common.DiskParameters, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID string, multiWriter bool) error + DeleteDisk(ctx context.Context, project string, volumeKey *meta.Key) error + AttachDisk(ctx context.Context, project string, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error + DetachDisk(ctx context.Context, project, deviceName, instanceZone, instanceName string) error + GetDiskSourceURI(project string, volKey *meta.Key) string + GetDiskTypeURI(project string, volKey *meta.Key, diskType string) string + WaitForAttach(ctx context.Context, project string, volKey *meta.Key, instanceZone, instanceName string) error + ResizeDisk(ctx context.Context, project string, volKey *meta.Key, requestBytes int64) (int64, error) ListDisks(ctx context.Context, maxEntries int64, pageToken string) ([]*computev1.Disk, string, error) // Regional Disk Methods - GetReplicaZoneURI(zone string) string + GetReplicaZoneURI(project string, zone string) string // Instance Methods GetInstanceOrError(ctx context.Context, instanceZone, instanceName string) (*computev1.Instance, error) // Zone Methods ListZones(ctx context.Context, region string) ([]string, error) ListSnapshots(ctx context.Context, filter string, maxEntries int64, pageToken string) ([]*computev1.Snapshot, string, error) - GetSnapshot(ctx context.Context, snapshotName string) (*computev1.Snapshot, error) - CreateSnapshot(ctx context.Context, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) - DeleteSnapshot(ctx context.Context, snapshotName string) error + 
GetSnapshot(ctx context.Context, project, snapshotName string) (*computev1.Snapshot, error) + CreateSnapshot(ctx context.Context, project string, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) + DeleteSnapshot(ctx context.Context, project, snapshotName string) error } // GetDefaultProject returns the project that was used to instantiate this GCE client. @@ -106,11 +106,14 @@ func (cloud *CloudProvider) ListDisks(ctx context.Context, maxEntries int64, pag // RepairUnderspecifiedVolumeKey will query the cloud provider and check each zone for the disk specified // by the volume key and return a volume key with a correct zone -func (cloud *CloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, volumeKey *meta.Key) (*meta.Key, error) { +func (cloud *CloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, project string, volumeKey *meta.Key) (string, *meta.Key, error) { klog.V(5).Infof("Repairing potentially underspecified volume key %v", volumeKey) + if project == common.UnspecifiedValue { + project = cloud.project + } region, err := common.GetRegionFromZones([]string{cloud.zone}) if err != nil { - return nil, fmt.Errorf("failed to get region from zones: %v", err) + return "", nil, fmt.Errorf("failed to get region from zones: %v", err) } switch volumeKey.Type() { case meta.Zonal: @@ -119,10 +122,10 @@ func (cloud *CloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, v // list all zones, try to get disk in each zone zones, err := cloud.ListZones(ctx, region) if err != nil { - return nil, err + return "", nil, err } for _, zone := range zones { - _, err := cloud.getZonalDiskOrError(ctx, zone, volumeKey.Name) + _, err := cloud.getZonalDiskOrError(ctx, project, zone, volumeKey.Name) if err != nil { if IsGCENotFoundError(err) { // Couldn't find the disk in this zone so we keep @@ -131,28 +134,28 @@ func (cloud *CloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, v } // There is some miscellaneous error 
getting disk from zone // so we return error immediately - return nil, err + return "", nil, err } if len(foundZone) > 0 { - return nil, fmt.Errorf("found disk %s in more than one zone: %s and %s", volumeKey.Name, foundZone, zone) + return "", nil, fmt.Errorf("found disk %s in more than one zone: %s and %s", volumeKey.Name, foundZone, zone) } foundZone = zone } if len(foundZone) == 0 { - return nil, notFoundError() + return "", nil, notFoundError() } volumeKey.Zone = foundZone - return volumeKey, nil + return project, volumeKey, nil } - return volumeKey, nil + return project, volumeKey, nil case meta.Regional: if volumeKey.Region == common.UnspecifiedValue { volumeKey.Region = region } - return volumeKey, nil + return project, volumeKey, nil default: - return nil, fmt.Errorf("key was neither zonal nor regional, got: %v", volumeKey.String()) + return "", nil, fmt.Errorf("key was neither zonal nor regional, got: %v", volumeKey.String()) } } @@ -188,23 +191,23 @@ func (cloud *CloudProvider) ListSnapshots(ctx context.Context, filter string, ma } -func (cloud *CloudProvider) GetDisk(ctx context.Context, key *meta.Key, gceAPIVersion GCEAPIVersion) (*CloudDisk, error) { +func (cloud *CloudProvider) GetDisk(ctx context.Context, project string, key *meta.Key, gceAPIVersion GCEAPIVersion) (*CloudDisk, error) { klog.V(5).Infof("Getting disk %v", key) switch key.Type() { case meta.Zonal: if gceAPIVersion == GCEAPIVersionBeta { - disk, err := cloud.getZonalBetaDiskOrError(ctx, key.Zone, key.Name) + disk, err := cloud.getZonalBetaDiskOrError(ctx, project, key.Zone, key.Name) return CloudDiskFromBeta(disk), err } else { - disk, err := cloud.getZonalDiskOrError(ctx, key.Zone, key.Name) + disk, err := cloud.getZonalDiskOrError(ctx, project, key.Zone, key.Name) return CloudDiskFromV1(disk), err } case meta.Regional: if gceAPIVersion == GCEAPIVersionBeta { - disk, err := cloud.getRegionalAlphaDiskOrError(ctx, key.Region, key.Name) + disk, err := cloud.getRegionalAlphaDiskOrError(ctx, 
project, key.Region, key.Name) return CloudDiskFromBeta(disk), err } else { - disk, err := cloud.getRegionalDiskOrError(ctx, key.Region, key.Name) + disk, err := cloud.getRegionalDiskOrError(ctx, project, key.Region, key.Name) return CloudDiskFromV1(disk), err } default: @@ -212,19 +215,15 @@ func (cloud *CloudProvider) GetDisk(ctx context.Context, key *meta.Key, gceAPIVe } } -func (cloud *CloudProvider) getZonalDiskOrError(ctx context.Context, volumeZone, volumeName string) (*computev1.Disk, error) { - svc := cloud.service - project := cloud.project - - disk, err := svc.Disks.Get(project, volumeZone, volumeName).Context(ctx).Do() +func (cloud *CloudProvider) getZonalDiskOrError(ctx context.Context, project, volumeZone, volumeName string) (*computev1.Disk, error) { + disk, err := cloud.service.Disks.Get(project, volumeZone, volumeName).Context(ctx).Do() if err != nil { return nil, err } return disk, nil } -func (cloud *CloudProvider) getRegionalDiskOrError(ctx context.Context, volumeRegion, volumeName string) (*computev1.Disk, error) { - project := cloud.project +func (cloud *CloudProvider) getRegionalDiskOrError(ctx context.Context, project, volumeRegion, volumeName string) (*computev1.Disk, error) { disk, err := cloud.service.RegionDisks.Get(project, volumeRegion, volumeName).Context(ctx).Do() if err != nil { return nil, err @@ -232,8 +231,7 @@ func (cloud *CloudProvider) getRegionalDiskOrError(ctx context.Context, volumeRe return disk, nil } -func (cloud *CloudProvider) getZonalBetaDiskOrError(ctx context.Context, volumeZone, volumeName string) (*computebeta.Disk, error) { - project := cloud.project +func (cloud *CloudProvider) getZonalBetaDiskOrError(ctx context.Context, project, volumeZone, volumeName string) (*computebeta.Disk, error) { disk, err := cloud.betaService.Disks.Get(project, volumeZone, volumeName).Context(ctx).Do() if err != nil { return nil, err @@ -241,8 +239,7 @@ func (cloud *CloudProvider) getZonalBetaDiskOrError(ctx context.Context, volumeZ 
return disk, nil } -func (cloud *CloudProvider) getRegionalAlphaDiskOrError(ctx context.Context, volumeRegion, volumeName string) (*computebeta.Disk, error) { - project := cloud.project +func (cloud *CloudProvider) getRegionalAlphaDiskOrError(ctx context.Context, project, volumeRegion, volumeName string) (*computebeta.Disk, error) { disk, err := cloud.betaService.RegionDisks.Get(project, volumeRegion, volumeName).Context(ctx).Do() if err != nil { return nil, err @@ -250,17 +247,17 @@ func (cloud *CloudProvider) getRegionalAlphaDiskOrError(ctx context.Context, vol return disk, nil } -func (cloud *CloudProvider) GetReplicaZoneURI(zone string) string { +func (cloud *CloudProvider) GetReplicaZoneURI(project, zone string) string { return cloud.service.BasePath + fmt.Sprintf( replicaZoneURITemplateSingleZone, - cloud.project, + project, zone) } -func (cloud *CloudProvider) getRegionURI(region string) string { +func (cloud *CloudProvider) getRegionURI(project, region string) string { return cloud.service.BasePath + fmt.Sprintf( regionURITemplate, - cloud.project, + project, region) } @@ -306,7 +303,7 @@ func ValidateDiskParameters(disk *CloudDisk, params common.DiskParameters) error return nil } -func (cloud *CloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key, params common.DiskParameters, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID string, multiWriter bool) error { +func (cloud *CloudProvider) InsertDisk(ctx context.Context, project string, volKey *meta.Key, params common.DiskParameters, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID string, multiWriter bool) error { klog.V(5).Infof("Inserting disk %v", volKey) description, err := encodeDiskTags(params.Tags) @@ -319,12 +316,12 @@ func (cloud *CloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key, pa if description == "" { description = "Disk created by GCE-PD CSI Driver" } - return cloud.insertZonalDisk(ctx, volKey, 
params, capBytes, capacityRange, snapshotID, description, multiWriter) + return cloud.insertZonalDisk(ctx, project, volKey, params, capBytes, capacityRange, snapshotID, description, multiWriter) case meta.Regional: if description == "" { description = "Regional disk created by GCE-PD CSI Driver" } - return cloud.insertRegionalDisk(ctx, volKey, params, capBytes, capacityRange, replicaZones, snapshotID, description, multiWriter) + return cloud.insertRegionalDisk(ctx, project, volKey, params, capBytes, capacityRange, replicaZones, snapshotID, description, multiWriter) default: return fmt.Errorf("could not insert disk, key was neither zonal nor regional, instead got: %v", volKey.String()) } @@ -361,6 +358,7 @@ func convertV1DiskToBetaDisk(v1Disk *computev1.Disk) *computebeta.Disk { func (cloud *CloudProvider) insertRegionalDisk( ctx context.Context, + project string, volKey *meta.Key, params common.DiskParameters, capBytes int64, @@ -383,7 +381,7 @@ func (cloud *CloudProvider) insertRegionalDisk( Name: volKey.Name, SizeGb: common.BytesToGbRoundUp(capBytes), Description: description, - Type: cloud.GetDiskTypeURI(volKey, params.DiskType), + Type: cloud.GetDiskTypeURI(project, volKey, params.DiskType), Labels: params.Labels, } if snapshotID != "" { @@ -402,20 +400,20 @@ func (cloud *CloudProvider) insertRegionalDisk( var insertOp *computebeta.Operation betaDiskToCreate := convertV1DiskToBetaDisk(diskToCreate) betaDiskToCreate.MultiWriter = multiWriter - insertOp, err = cloud.betaService.RegionDisks.Insert(cloud.project, volKey.Region, betaDiskToCreate).Context(ctx).Do() + insertOp, err = cloud.betaService.RegionDisks.Insert(project, volKey.Region, betaDiskToCreate).Context(ctx).Do() if insertOp != nil { opName = insertOp.Name } } else { var insertOp *computev1.Operation - insertOp, err = cloud.service.RegionDisks.Insert(cloud.project, volKey.Region, diskToCreate).Context(ctx).Do() + insertOp, err = cloud.service.RegionDisks.Insert(project, volKey.Region, 
diskToCreate).Context(ctx).Do() if insertOp != nil { opName = insertOp.Name } } if err != nil { if IsGCEError(err, "alreadyExists") { - disk, err := cloud.GetDisk(ctx, volKey, gceAPIVersion) + disk, err := cloud.GetDisk(ctx, project, volKey, gceAPIVersion) if err != nil { return err } @@ -432,10 +430,10 @@ func (cloud *CloudProvider) insertRegionalDisk( return status.Error(codes.Internal, fmt.Sprintf("unknown Insert disk error: %v", err)) } - err = cloud.waitForRegionalOp(ctx, opName, volKey.Region) + err = cloud.waitForRegionalOp(ctx, project, opName, volKey.Region) if err != nil { if IsGCEError(err, "alreadyExists") { - disk, err := cloud.GetDisk(ctx, volKey, gceAPIVersion) + disk, err := cloud.GetDisk(ctx, project, volKey, gceAPIVersion) if err != nil { return err } @@ -456,6 +454,7 @@ func (cloud *CloudProvider) insertRegionalDisk( func (cloud *CloudProvider) insertZonalDisk( ctx context.Context, + project string, volKey *meta.Key, params common.DiskParameters, capBytes int64, @@ -477,7 +476,7 @@ func (cloud *CloudProvider) insertZonalDisk( Name: volKey.Name, SizeGb: common.BytesToGbRoundUp(capBytes), Description: description, - Type: cloud.GetDiskTypeURI(volKey, params.DiskType), + Type: cloud.GetDiskTypeURI(project, volKey, params.DiskType), Labels: params.Labels, } @@ -495,13 +494,13 @@ func (cloud *CloudProvider) insertZonalDisk( var insertOp *computebeta.Operation betaDiskToCreate := convertV1DiskToBetaDisk(diskToCreate) betaDiskToCreate.MultiWriter = multiWriter - insertOp, err = cloud.betaService.Disks.Insert(cloud.project, volKey.Zone, betaDiskToCreate).Context(ctx).Do() + insertOp, err = cloud.betaService.Disks.Insert(project, volKey.Zone, betaDiskToCreate).Context(ctx).Do() if insertOp != nil { opName = insertOp.Name } } else { var insertOp *computev1.Operation - insertOp, err = cloud.service.Disks.Insert(cloud.project, volKey.Zone, diskToCreate).Context(ctx).Do() + insertOp, err = cloud.service.Disks.Insert(project, volKey.Zone, 
diskToCreate).Context(ctx).Do() if insertOp != nil { opName = insertOp.Name } @@ -509,7 +508,7 @@ func (cloud *CloudProvider) insertZonalDisk( if err != nil { if IsGCEError(err, "alreadyExists") { - disk, err := cloud.GetDisk(ctx, volKey, gceAPIVersion) + disk, err := cloud.GetDisk(ctx, project, volKey, gceAPIVersion) if err != nil { return err } @@ -526,11 +525,11 @@ func (cloud *CloudProvider) insertZonalDisk( return fmt.Errorf("unknown Insert disk error: %v", err) } - err = cloud.waitForZonalOp(ctx, opName, volKey.Zone) + err = cloud.waitForZonalOp(ctx, project, opName, volKey.Zone) if err != nil { if IsGCEError(err, "alreadyExists") { - disk, err := cloud.GetDisk(ctx, volKey, gceAPIVersion) + disk, err := cloud.GetDisk(ctx, project, volKey, gceAPIVersion) if err != nil { return err } @@ -549,20 +548,20 @@ func (cloud *CloudProvider) insertZonalDisk( return nil } -func (cloud *CloudProvider) DeleteDisk(ctx context.Context, volKey *meta.Key) error { +func (cloud *CloudProvider) DeleteDisk(ctx context.Context, project string, volKey *meta.Key) error { klog.V(5).Infof("Deleting disk: %v", volKey) switch volKey.Type() { case meta.Zonal: - return cloud.deleteZonalDisk(ctx, volKey.Zone, volKey.Name) + return cloud.deleteZonalDisk(ctx, project, volKey.Zone, volKey.Name) case meta.Regional: - return cloud.deleteRegionalDisk(ctx, volKey.Region, volKey.Name) + return cloud.deleteRegionalDisk(ctx, project, volKey.Region, volKey.Name) default: return fmt.Errorf("could not delete disk, key was neither zonal nor regional, instead got: %v", volKey.String()) } } -func (cloud *CloudProvider) deleteZonalDisk(ctx context.Context, zone, name string) error { - op, err := cloud.service.Disks.Delete(cloud.project, zone, name).Context(ctx).Do() +func (cloud *CloudProvider) deleteZonalDisk(ctx context.Context, project, zone, name string) error { + op, err := cloud.service.Disks.Delete(project, zone, name).Context(ctx).Do() if err != nil { if IsGCEError(err, "notFound") { // Already 
deleted @@ -570,15 +569,15 @@ func (cloud *CloudProvider) deleteZonalDisk(ctx context.Context, zone, name stri } return err } - err = cloud.waitForZonalOp(ctx, op.Name, zone) + err = cloud.waitForZonalOp(ctx, project, op.Name, zone) if err != nil { return err } return nil } -func (cloud *CloudProvider) deleteRegionalDisk(ctx context.Context, region, name string) error { - op, err := cloud.service.RegionDisks.Delete(cloud.project, region, name).Context(ctx).Do() +func (cloud *CloudProvider) deleteRegionalDisk(ctx context.Context, project, region, name string) error { + op, err := cloud.service.RegionDisks.Delete(project, region, name).Context(ctx).Do() if err != nil { if IsGCEError(err, "notFound") { // Already deleted @@ -586,16 +585,16 @@ func (cloud *CloudProvider) deleteRegionalDisk(ctx context.Context, region, name } return err } - err = cloud.waitForRegionalOp(ctx, op.Name, region) + err = cloud.waitForRegionalOp(ctx, project, op.Name, region) if err != nil { return err } return nil } -func (cloud *CloudProvider) AttachDisk(ctx context.Context, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error { +func (cloud *CloudProvider) AttachDisk(ctx context.Context, project string, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error { klog.V(5).Infof("Attaching disk %v to %s", volKey, instanceName) - source := cloud.GetDiskSourceURI(volKey) + source := cloud.GetDiskSourceURI(project, volKey) deviceName, err := common.GetDeviceName(volKey) if err != nil { @@ -609,82 +608,80 @@ func (cloud *CloudProvider) AttachDisk(ctx context.Context, volKey *meta.Key, re Type: diskType, } - op, err := cloud.service.Instances.AttachDisk(cloud.project, instanceZone, instanceName, attachedDiskV1).Context(ctx).Do() + op, err := cloud.service.Instances.AttachDisk(project, instanceZone, instanceName, attachedDiskV1).Context(ctx).Do() if err != nil { return fmt.Errorf("failed cloud service attach disk call: %v", err) } - err = 
cloud.waitForZonalOp(ctx, op.Name, instanceZone) + err = cloud.waitForZonalOp(ctx, project, op.Name, instanceZone) if err != nil { return fmt.Errorf("failed when waiting for zonal op: %v", err) } return nil } -func (cloud *CloudProvider) DetachDisk(ctx context.Context, deviceName, instanceZone, instanceName string) error { +func (cloud *CloudProvider) DetachDisk(ctx context.Context, project, deviceName, instanceZone, instanceName string) error { klog.V(5).Infof("Detaching disk %v from %v", deviceName, instanceName) - op, err := cloud.service.Instances.DetachDisk(cloud.project, instanceZone, instanceName, deviceName).Context(ctx).Do() + op, err := cloud.service.Instances.DetachDisk(project, instanceZone, instanceName, deviceName).Context(ctx).Do() if err != nil { return err } - err = cloud.waitForZonalOp(ctx, op.Name, instanceZone) + err = cloud.waitForZonalOp(ctx, project, op.Name, instanceZone) if err != nil { return err } return nil } -func (cloud *CloudProvider) GetDiskSourceURI(volKey *meta.Key) string { +func (cloud *CloudProvider) GetDiskSourceURI(project string, volKey *meta.Key) string { switch volKey.Type() { case meta.Zonal: - return cloud.getZonalDiskSourceURI(volKey.Name, volKey.Zone) + return cloud.getZonalDiskSourceURI(project, volKey.Name, volKey.Zone) case meta.Regional: - return cloud.getRegionalDiskSourceURI(volKey.Name, volKey.Region) + return cloud.getRegionalDiskSourceURI(project, volKey.Name, volKey.Region) default: return "" } } -func (cloud *CloudProvider) getZonalDiskSourceURI(diskName, zone string) string { +func (cloud *CloudProvider) getZonalDiskSourceURI(project, diskName, zone string) string { return cloud.service.BasePath + fmt.Sprintf( diskSourceURITemplateSingleZone, - cloud.project, + project, zone, diskName) } -func (cloud *CloudProvider) getRegionalDiskSourceURI(diskName, region string) string { +func (cloud *CloudProvider) getRegionalDiskSourceURI(project, diskName, region string) string { return cloud.service.BasePath + 
fmt.Sprintf( diskSourceURITemplateRegional, - cloud.project, + project, region, diskName) } -func (cloud *CloudProvider) GetDiskTypeURI(volKey *meta.Key, diskType string) string { +func (cloud *CloudProvider) GetDiskTypeURI(project string, volKey *meta.Key, diskType string) string { switch volKey.Type() { case meta.Zonal: - return cloud.getZonalDiskTypeURI(volKey.Zone, diskType) + return cloud.getZonalDiskTypeURI(project, volKey.Zone, diskType) case meta.Regional: - return cloud.getRegionalDiskTypeURI(volKey.Region, diskType) + return cloud.getRegionalDiskTypeURI(project, volKey.Region, diskType) default: return fmt.Sprintf("could get disk type URI, key was neither zonal nor regional, instead got: %v", volKey.String()) } } -func (cloud *CloudProvider) getZonalDiskTypeURI(zone, diskType string) string { - return cloud.service.BasePath + fmt.Sprintf(diskTypeURITemplateSingleZone, cloud.project, zone, diskType) +func (cloud *CloudProvider) getZonalDiskTypeURI(project string, zone, diskType string) string { + return cloud.service.BasePath + fmt.Sprintf(diskTypeURITemplateSingleZone, project, zone, diskType) } -func (cloud *CloudProvider) getRegionalDiskTypeURI(region, diskType string) string { - return cloud.service.BasePath + fmt.Sprintf(diskTypeURITemplateRegional, cloud.project, region, diskType) +func (cloud *CloudProvider) getRegionalDiskTypeURI(project string, region, diskType string) string { + return cloud.service.BasePath + fmt.Sprintf(diskTypeURITemplateRegional, project, region, diskType) } -func (cloud *CloudProvider) waitForZonalOp(ctx context.Context, opName string, zone string) error { +func (cloud *CloudProvider) waitForZonalOp(ctx context.Context, project, opName string, zone string) error { // The v1 API can query for v1, alpha, or beta operations. 
- svc := cloud.service - project := cloud.project return wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) { - pollOp, err := svc.ZoneOperations.Get(project, zone, opName).Context(ctx).Do() + pollOp, err := cloud.service.ZoneOperations.Get(project, zone, opName).Context(ctx).Do() if err != nil { klog.Errorf("WaitForOp(op: %s, zone: %#v) failed to poll the operation", opName, zone) return false, err @@ -694,10 +691,10 @@ func (cloud *CloudProvider) waitForZonalOp(ctx context.Context, opName string, z }) } -func (cloud *CloudProvider) waitForRegionalOp(ctx context.Context, opName string, region string) error { +func (cloud *CloudProvider) waitForRegionalOp(ctx context.Context, project, opName string, region string) error { // The v1 API can query for v1, alpha, or beta operations. return wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) { - pollOp, err := cloud.service.RegionOperations.Get(cloud.project, region, opName).Context(ctx).Do() + pollOp, err := cloud.service.RegionOperations.Get(project, region, opName).Context(ctx).Do() if err != nil { klog.Errorf("WaitForOp(op: %s, region: %#v) failed to poll the operation", opName, region) return false, err @@ -707,11 +704,9 @@ func (cloud *CloudProvider) waitForRegionalOp(ctx context.Context, opName string }) } -func (cloud *CloudProvider) waitForGlobalOp(ctx context.Context, opName string) error { - svc := cloud.service - project := cloud.project +func (cloud *CloudProvider) waitForGlobalOp(ctx context.Context, project, opName string) error { return wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) { - pollOp, err := svc.GlobalOperations.Get(project, opName).Context(ctx).Do() + pollOp, err := cloud.service.GlobalOperations.Get(project, opName).Context(ctx).Do() if err != nil { klog.Errorf("waitForGlobalOp(op: %s) failed to poll the operation", opName) return false, err @@ -721,12 +716,12 @@ func (cloud *CloudProvider) waitForGlobalOp(ctx context.Context, opName string) }) } -func (cloud 
*CloudProvider) WaitForAttach(ctx context.Context, volKey *meta.Key, instanceZone, instanceName string) error { +func (cloud *CloudProvider) WaitForAttach(ctx context.Context, project string, volKey *meta.Key, instanceZone, instanceName string) error { klog.V(5).Infof("Waiting for attach of disk %v to instance %v to complete...", volKey.Name, instanceName) start := time.Now() return wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) { klog.V(6).Infof("Polling for attach of disk %v to instance %v to complete for %v", volKey.Name, instanceName, time.Since(start)) - disk, err := cloud.GetDisk(ctx, volKey, GCEAPIVersionV1) + disk, err := cloud.GetDisk(ctx, project, volKey, GCEAPIVersionV1) if err != nil { return false, fmt.Errorf("GetDisk failed to get disk: %v", err) } @@ -765,10 +760,9 @@ func (cloud *CloudProvider) GetInstanceOrError(ctx context.Context, instanceZone return instance, nil } -func (cloud *CloudProvider) GetSnapshot(ctx context.Context, snapshotName string) (*computev1.Snapshot, error) { +func (cloud *CloudProvider) GetSnapshot(ctx context.Context, project, snapshotName string) (*computev1.Snapshot, error) { klog.V(5).Infof("Getting snapshot %v", snapshotName) svc := cloud.service - project := cloud.project snapshot, err := svc.Snapshots.Get(project, snapshotName).Context(ctx).Do() if err != nil { return nil, err @@ -776,9 +770,9 @@ func (cloud *CloudProvider) GetSnapshot(ctx context.Context, snapshotName string return snapshot, nil } -func (cloud *CloudProvider) DeleteSnapshot(ctx context.Context, snapshotName string) error { +func (cloud *CloudProvider) DeleteSnapshot(ctx context.Context, project, snapshotName string) error { klog.V(5).Infof("Deleting snapshot %v", snapshotName) - op, err := cloud.service.Snapshots.Delete(cloud.project, snapshotName).Context(ctx).Do() + op, err := cloud.service.Snapshots.Delete(project, snapshotName).Context(ctx).Do() if err != nil { if IsGCEError(err, "notFound") { // Already deleted @@ -786,20 +780,20 @@ 
func (cloud *CloudProvider) DeleteSnapshot(ctx context.Context, snapshotName str } return err } - err = cloud.waitForGlobalOp(ctx, op.Name) + err = cloud.waitForGlobalOp(ctx, project, op.Name) if err != nil { return err } return nil } -func (cloud *CloudProvider) CreateSnapshot(ctx context.Context, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) { +func (cloud *CloudProvider) CreateSnapshot(ctx context.Context, project string, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) { klog.V(5).Infof("Creating snapshot %s for volume %v", snapshotName, volKey) switch volKey.Type() { case meta.Zonal: - return cloud.createZonalDiskSnapshot(ctx, volKey, snapshotName) + return cloud.createZonalDiskSnapshot(ctx, project, volKey, snapshotName) case meta.Regional: - return cloud.createRegionalDiskSnapshot(ctx, volKey, snapshotName) + return cloud.createRegionalDiskSnapshot(ctx, project, volKey, snapshotName) default: return nil, fmt.Errorf("could not create snapshot, key was neither zonal nor regional, instead got: %v", volKey.String()) } @@ -809,9 +803,9 @@ func (cloud *CloudProvider) CreateSnapshot(ctx context.Context, volKey *meta.Key // size in Gi // TODO(#461) The whole driver could benefit from standardized usage of the // k8s.io/apimachinery/quantity package for better size handling -func (cloud *CloudProvider) ResizeDisk(ctx context.Context, volKey *meta.Key, requestBytes int64) (int64, error) { +func (cloud *CloudProvider) ResizeDisk(ctx context.Context, project string, volKey *meta.Key, requestBytes int64) (int64, error) { klog.V(5).Infof("Resizing disk %v to size %v", volKey, requestBytes) - cloudDisk, err := cloud.GetDisk(ctx, volKey, GCEAPIVersionV1) + cloudDisk, err := cloud.GetDisk(ctx, project, volKey, GCEAPIVersionV1) if err != nil { return -1, fmt.Errorf("failed to get disk: %v", err) } @@ -827,24 +821,24 @@ func (cloud *CloudProvider) ResizeDisk(ctx context.Context, volKey *meta.Key, re switch volKey.Type() { case 
meta.Zonal: - return cloud.resizeZonalDisk(ctx, volKey, requestGb) + return cloud.resizeZonalDisk(ctx, project, volKey, requestGb) case meta.Regional: - return cloud.resizeRegionalDisk(ctx, volKey, requestGb) + return cloud.resizeRegionalDisk(ctx, project, volKey, requestGb) default: return -1, fmt.Errorf("could not resize disk, key was neither zonal nor regional, instead got: %v", volKey.String()) } } -func (cloud *CloudProvider) resizeZonalDisk(ctx context.Context, volKey *meta.Key, requestGb int64) (int64, error) { +func (cloud *CloudProvider) resizeZonalDisk(ctx context.Context, project string, volKey *meta.Key, requestGb int64) (int64, error) { resizeReq := &computev1.DisksResizeRequest{ SizeGb: requestGb, } - op, err := cloud.service.Disks.Resize(cloud.project, volKey.Zone, volKey.Name, resizeReq).Context(ctx).Do() + op, err := cloud.service.Disks.Resize(project, volKey.Zone, volKey.Name, resizeReq).Context(ctx).Do() if err != nil { return -1, fmt.Errorf("failed to resize zonal volume %v: %v", volKey.String(), err) } - err = cloud.waitForZonalOp(ctx, op.Name, volKey.Zone) + err = cloud.waitForZonalOp(ctx, project, op.Name, volKey.Zone) if err != nil { return -1, fmt.Errorf("failed waiting for op for zonal resize for %s: %v", volKey.String(), err) } @@ -852,17 +846,17 @@ func (cloud *CloudProvider) resizeZonalDisk(ctx context.Context, volKey *meta.Ke return requestGb, nil } -func (cloud *CloudProvider) resizeRegionalDisk(ctx context.Context, volKey *meta.Key, requestGb int64) (int64, error) { +func (cloud *CloudProvider) resizeRegionalDisk(ctx context.Context, project string, volKey *meta.Key, requestGb int64) (int64, error) { resizeReq := &computev1.RegionDisksResizeRequest{ SizeGb: requestGb, } - op, err := cloud.service.RegionDisks.Resize(cloud.project, volKey.Region, volKey.Name, resizeReq).Context(ctx).Do() + op, err := cloud.service.RegionDisks.Resize(project, volKey.Region, volKey.Name, resizeReq).Context(ctx).Do() if err != nil { return -1, 
fmt.Errorf("failed to resize regional volume %v: %v", volKey.String(), err) } - err = cloud.waitForRegionalOp(ctx, op.Name, volKey.Region) + err = cloud.waitForRegionalOp(ctx, project, op.Name, volKey.Region) if err != nil { return -1, fmt.Errorf("failed waiting for op for regional resize for %s: %v", volKey.String(), err) } @@ -870,35 +864,35 @@ func (cloud *CloudProvider) resizeRegionalDisk(ctx context.Context, volKey *meta return requestGb, nil } -func (cloud *CloudProvider) createZonalDiskSnapshot(ctx context.Context, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) { +func (cloud *CloudProvider) createZonalDiskSnapshot(ctx context.Context, project string, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) { snapshotToCreate := &computev1.Snapshot{ Name: snapshotName, } - _, err := cloud.service.Disks.CreateSnapshot(cloud.project, volKey.Zone, volKey.Name, snapshotToCreate).Context(ctx).Do() + _, err := cloud.service.Disks.CreateSnapshot(project, volKey.Zone, volKey.Name, snapshotToCreate).Context(ctx).Do() if err != nil { return nil, err } - return cloud.waitForSnapshotCreation(ctx, snapshotName) + return cloud.waitForSnapshotCreation(ctx, project, snapshotName) } -func (cloud *CloudProvider) createRegionalDiskSnapshot(ctx context.Context, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) { +func (cloud *CloudProvider) createRegionalDiskSnapshot(ctx context.Context, project string, volKey *meta.Key, snapshotName string) (*computev1.Snapshot, error) { snapshotToCreate := &computev1.Snapshot{ Name: snapshotName, } - _, err := cloud.service.RegionDisks.CreateSnapshot(cloud.project, volKey.Region, volKey.Name, snapshotToCreate).Context(ctx).Do() + _, err := cloud.service.RegionDisks.CreateSnapshot(project, volKey.Region, volKey.Name, snapshotToCreate).Context(ctx).Do() if err != nil { return nil, err } - return cloud.waitForSnapshotCreation(ctx, snapshotName) + return cloud.waitForSnapshotCreation(ctx, project, 
snapshotName) } -func (cloud *CloudProvider) waitForSnapshotCreation(ctx context.Context, snapshotName string) (*computev1.Snapshot, error) { +func (cloud *CloudProvider) waitForSnapshotCreation(ctx context.Context, project, snapshotName string) (*computev1.Snapshot, error) { ticker := time.NewTicker(time.Second) defer ticker.Stop() timer := time.NewTimer(waitForSnapshotCreationTimeOut) @@ -908,7 +902,7 @@ func (cloud *CloudProvider) waitForSnapshotCreation(ctx context.Context, snapsho select { case <-ticker.C: klog.V(6).Infof("Checking GCE Snapshot %s.", snapshotName) - snapshot, err := cloud.GetSnapshot(ctx, snapshotName) + snapshot, err := cloud.GetSnapshot(ctx, project, snapshotName) if err != nil { klog.Warningf("Error in getting snapshot %s, %v", snapshotName, err) } else if snapshot != nil { diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go index bdf2042ed..e8a16fcd6 100644 --- a/pkg/gce-pd-csi-driver/controller.go +++ b/pkg/gce-pd-csi-driver/controller.go @@ -155,7 +155,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre defer gceCS.volumeLocks.Release(volumeID) // Validate if disk already exists - existingDisk, err := gceCS.CloudProvider.GetDisk(ctx, volKey, gceAPIVersion) + existingDisk, err := gceCS.CloudProvider.GetDisk(ctx, gceCS.CloudProvider.GetDefaultProject(), volKey, gceAPIVersion) if err != nil { if !gce.IsGCEError(err, "notFound") { return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume unknown get disk error when validating: %v", err)) @@ -245,7 +245,7 @@ func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, req *csi.Del return nil, status.Error(codes.InvalidArgument, "DeleteVolume Volume ID must be provided") } - volKey, err := common.VolumeIDToKey(volumeID) + project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { // Cannot find volume associated with this ID because VolumeID is not in // correct format, this is a success according 
to the Spec @@ -253,7 +253,7 @@ func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, req *csi.Del return &csi.DeleteVolumeResponse{}, nil } - volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, volKey) + project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) if err != nil { if gce.IsGCENotFoundError(err) { klog.Warningf("DeleteVolume treating volume as deleted because cannot find volume %v: %v", volumeID, err) @@ -267,7 +267,7 @@ func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, req *csi.Del } defer gceCS.volumeLocks.Release(volumeID) - err = gceCS.CloudProvider.DeleteDisk(ctx, volKey) + err = gceCS.CloudProvider.DeleteDisk(ctx, project, volKey) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Delete disk error: %v", err)) } @@ -292,12 +292,12 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r return nil, status.Error(codes.InvalidArgument, "ControllerPublishVolume Volume capability must be provided") } - volKey, err := common.VolumeIDToKey(volumeID) + project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerPublishVolume volume ID is invalid: %v", err)) } - volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, volKey) + project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) if err != nil { if gce.IsGCENotFoundError(err) { return nil, status.Errorf(codes.NotFound, "ControllerPublishVolume could not find volume with ID %v: %v", volumeID, err) @@ -322,7 +322,7 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r PublishContext: nil, } - _, err = gceCS.CloudProvider.GetDisk(ctx, volKey, gce.GCEAPIVersionV1) + _, err = gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionV1) if err != nil { if gce.IsGCENotFoundError(err) { return nil, 
status.Error(codes.NotFound, fmt.Sprintf("Could not find disk %v: %v", volKey.String(), err)) @@ -364,12 +364,12 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r if err != nil { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("could not split nodeID: %v", err)) } - err = gceCS.CloudProvider.AttachDisk(ctx, volKey, readWrite, attachableDiskTypePersistent, instanceZone, instanceName) + err = gceCS.CloudProvider.AttachDisk(ctx, project, volKey, readWrite, attachableDiskTypePersistent, instanceZone, instanceName) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Attach error: %v", err)) } - err = gceCS.CloudProvider.WaitForAttach(ctx, volKey, instanceZone, instanceName) + err = gceCS.CloudProvider.WaitForAttach(ctx, project, volKey, instanceZone, instanceName) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("unknown WaitForAttach error: %v", err)) } @@ -389,11 +389,19 @@ func (gceCS *GCEControllerServer) ControllerUnpublishVolume(ctx context.Context, return nil, status.Error(codes.InvalidArgument, "ControllerUnpublishVolume Node ID must be provided") } - volKey, err := common.VolumeIDToKey(volumeID) + project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerUnpublishVolume Volume ID is invalid: %v", err)) } + project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) + if err != nil { + if gce.IsGCENotFoundError(err) { + return nil, status.Errorf(codes.NotFound, "ControllerUnpublishVolume could not find volume with ID %v: %v", volumeID, err) + } + return nil, status.Errorf(codes.Internal, "ControllerUnpublishVolume error repairing underspecified volume key: %v", err) + } + // Acquires the lock for the volume on that node only, because we need to support the ability // to unpublish the same volume from different nodes concurrently lockingVolumeID := 
fmt.Sprintf("%s/%s", nodeID, volumeID) @@ -429,7 +437,7 @@ func (gceCS *GCEControllerServer) ControllerUnpublishVolume(ctx context.Context, return &csi.ControllerUnpublishVolumeResponse{}, nil } - err = gceCS.CloudProvider.DetachDisk(ctx, deviceName, instanceZone, instanceName) + err = gceCS.CloudProvider.DetachDisk(ctx, project, deviceName, instanceZone, instanceName) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("unknown detach error: %v", err)) } @@ -446,17 +454,25 @@ func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context if len(volumeID) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume ID must be provided") } - volKey, err := common.VolumeIDToKey(volumeID) + project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Volume ID is invalid: %v", err)) } + project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) + if err != nil { + if gce.IsGCENotFoundError(err) { + return nil, status.Errorf(codes.NotFound, "ValidateVolumeCapabilities could not find volume with ID %v: %v", volumeID, err) + } + return nil, status.Errorf(codes.Internal, "ValidateVolumeCapabilities error repairing underspecified volume key: %v", err) + } + if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired { return nil, status.Errorf(codes.Aborted, common.VolumeOperationAlreadyExistsFmt, volumeID) } defer gceCS.volumeLocks.Release(volumeID) - disk, err := gceCS.CloudProvider.GetDisk(ctx, volKey, gce.GCEAPIVersionV1) + disk, err := gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionV1) if err != nil { if gce.IsGCENotFoundError(err) { return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find disk %v: %v", volKey.Name, err)) @@ -566,7 +582,7 @@ func (gceCS *GCEControllerServer) CreateSnapshot(ctx context.Context, req *csi.C if len(volumeID) == 0 { return nil, 
status.Error(codes.InvalidArgument, "CreateSnapshot Source Volume ID must be provided") } - volKey, err := common.VolumeIDToKey(volumeID) + project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateSnapshot Volume ID is invalid: %v", err)) } @@ -577,7 +593,7 @@ func (gceCS *GCEControllerServer) CreateSnapshot(ctx context.Context, req *csi.C defer gceCS.volumeLocks.Release(volumeID) // Check if volume exists - _, err = gceCS.CloudProvider.GetDisk(ctx, volKey, gce.GCEAPIVersionV1) + _, err = gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionV1) if err != nil { if gce.IsGCENotFoundError(err) { return nil, status.Error(codes.NotFound, fmt.Sprintf("CreateSnapshot could not find disk %v: %v", volKey.String(), err)) @@ -587,13 +603,13 @@ func (gceCS *GCEControllerServer) CreateSnapshot(ctx context.Context, req *csi.C // Check if snapshot already exists var snapshot *compute.Snapshot - snapshot, err = gceCS.CloudProvider.GetSnapshot(ctx, req.Name) + snapshot, err = gceCS.CloudProvider.GetSnapshot(ctx, project, req.Name) if err != nil { if !gce.IsGCEError(err, "notFound") { return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get snapshot error: %v", err)) } // If we could not find the snapshot, we create a new one - snapshot, err = gceCS.CloudProvider.CreateSnapshot(ctx, volKey, req.Name) + snapshot, err = gceCS.CloudProvider.CreateSnapshot(ctx, project, volKey, req.Name) if err != nil { if gce.IsGCEError(err, "notFound") { return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volKey.String(), err)) @@ -639,7 +655,7 @@ func (gceCS *GCEControllerServer) validateExistingSnapshot(snapshot *compute.Sna return fmt.Errorf("disk does not exist") } - sourceKey, err := common.VolumeIDToKey(cleanSelfLink(snapshot.SourceDisk)) + _, sourceKey, err := common.VolumeIDToKey(cleanSelfLink(snapshot.SourceDisk)) if err != nil { return 
fmt.Errorf("fail to get source disk key %s, %v", snapshot.SourceDisk, err) } @@ -673,7 +689,7 @@ func (gceCS *GCEControllerServer) DeleteSnapshot(ctx context.Context, req *csi.D return nil, status.Error(codes.InvalidArgument, "DeleteSnapshot Snapshot ID must be provided") } - key, err := common.SnapshotIDToKey(snapshotID) + project, key, err := common.SnapshotIDToProjectKey(snapshotID) if err != nil { // Cannot get snapshot ID from the passing request // This is a success according to the spec @@ -681,7 +697,7 @@ func (gceCS *GCEControllerServer) DeleteSnapshot(ctx context.Context, req *csi.D return &csi.DeleteSnapshotResponse{}, nil } - err = gceCS.CloudProvider.DeleteSnapshot(ctx, key) + err = gceCS.CloudProvider.DeleteSnapshot(ctx, project, key) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Delete snapshot error: %v", err)) } @@ -710,12 +726,20 @@ func (gceCS *GCEControllerServer) ControllerExpandVolume(ctx context.Context, re return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerExpandVolume capacity range is invalid: %v", err)) } - volKey, err := common.VolumeIDToKey(volumeID) + project, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ControllerExpandVolume Volume ID is invalid: %v", err)) } - resizedGb, err := gceCS.CloudProvider.ResizeDisk(ctx, volKey, reqBytes) + project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey) + if err != nil { + if gce.IsGCENotFoundError(err) { + return nil, status.Errorf(codes.NotFound, "ControllerExpandVolume could not find volume with ID %v: %v", volumeID, err) + } + return nil, status.Errorf(codes.Internal, "ControllerExpandVolume error repairing underspecified volume key: %v", err) + } + + resizedGb, err := gceCS.CloudProvider.ResizeDisk(ctx, project, volKey, reqBytes) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("ControllerExpandVolume failed 
to resize disk: %v", err)) } @@ -759,14 +783,14 @@ func (gceCS *GCEControllerServer) getSnapshots(ctx context.Context, req *csi.Lis } func (gceCS *GCEControllerServer) getSnapshotByID(ctx context.Context, snapshotID string) (*csi.ListSnapshotsResponse, error) { - key, err := common.SnapshotIDToKey(snapshotID) + project, key, err := common.SnapshotIDToProjectKey(snapshotID) if err != nil { // Cannot get snapshot ID from the passing request klog.Warningf("invalid snapshot id format %s", snapshotID) return &csi.ListSnapshotsResponse{}, nil } - snapshot, err := gceCS.CloudProvider.GetSnapshot(ctx, key) + snapshot, err := gceCS.CloudProvider.GetSnapshot(ctx, project, key) if err != nil { if gce.IsGCEError(err, "notFound") { // return empty list if no snapshot is found @@ -1016,6 +1040,7 @@ func cleanSelfLink(selfLink string) string { } func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name string, zones []string, params common.DiskParameters, capacityRange *csi.CapacityRange, capBytes int64, snapshotID string, multiWriter bool) (*gce.CloudDisk, error) { + project := cloudProvider.GetDefaultProject() region, err := common.GetRegionFromZones(zones) if err != nil { return nil, fmt.Errorf("failed to get region from zones: %v", err) @@ -1024,10 +1049,10 @@ func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name fullyQualifiedReplicaZones := []string{} for _, replicaZone := range zones { fullyQualifiedReplicaZones = append( - fullyQualifiedReplicaZones, cloudProvider.GetReplicaZoneURI(replicaZone)) + fullyQualifiedReplicaZones, cloudProvider.GetReplicaZoneURI(project, replicaZone)) } - err = cloudProvider.InsertDisk(ctx, meta.RegionalKey(name, region), params, capBytes, capacityRange, fullyQualifiedReplicaZones, snapshotID, multiWriter) + err = cloudProvider.InsertDisk(ctx, project, meta.RegionalKey(name, region), params, capBytes, capacityRange, fullyQualifiedReplicaZones, snapshotID, multiWriter) if err != nil { return nil, 
fmt.Errorf("failed to insert regional disk: %v", err) } @@ -1037,7 +1062,7 @@ func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name gceAPIVersion = gce.GCEAPIVersionBeta } - disk, err := cloudProvider.GetDisk(ctx, meta.RegionalKey(name, region), gceAPIVersion) + disk, err := cloudProvider.GetDisk(ctx, project, meta.RegionalKey(name, region), gceAPIVersion) if err != nil { return nil, fmt.Errorf("failed to get disk after creating regional disk: %v", err) } @@ -1045,11 +1070,12 @@ func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name } func createSingleZoneDisk(ctx context.Context, cloudProvider gce.GCECompute, name string, zones []string, params common.DiskParameters, capacityRange *csi.CapacityRange, capBytes int64, snapshotID string, multiWriter bool) (*gce.CloudDisk, error) { + project := cloudProvider.GetDefaultProject() if len(zones) != 1 { return nil, fmt.Errorf("got wrong number of zones for zonal create volume: %v", len(zones)) } diskZone := zones[0] - err := cloudProvider.InsertDisk(ctx, meta.ZonalKey(name, diskZone), params, capBytes, capacityRange, nil, snapshotID, multiWriter) + err := cloudProvider.InsertDisk(ctx, project, meta.ZonalKey(name, diskZone), params, capBytes, capacityRange, nil, snapshotID, multiWriter) if err != nil { return nil, fmt.Errorf("failed to insert zonal disk: %v", err) } @@ -1058,7 +1084,7 @@ func createSingleZoneDisk(ctx context.Context, cloudProvider gce.GCECompute, nam if multiWriter { gceAPIVersion = gce.GCEAPIVersionBeta } - disk, err := cloudProvider.GetDisk(ctx, meta.ZonalKey(name, diskZone), gceAPIVersion) + disk, err := cloudProvider.GetDisk(ctx, project, meta.ZonalKey(name, diskZone), gceAPIVersion) if err != nil { return nil, err } diff --git a/pkg/gce-pd-csi-driver/controller_test.go b/pkg/gce-pd-csi-driver/controller_test.go index a3c904f42..1a0d3f4db 100644 --- a/pkg/gce-pd-csi-driver/controller_test.go +++ b/pkg/gce-pd-csi-driver/controller_test.go @@ -807,17 
+807,20 @@ func TestCreateVolumeWithVolumeSource(t *testing.T) { // Define test cases testCases := []struct { name string + project string volKey *meta.Key snapshotOnCloud bool expErrCode codes.Code }{ { name: "success with data source of snapshot type", + project: "test-project", volKey: meta.ZonalKey("my-disk", zone), snapshotOnCloud: true, }, { name: "fail with data source of snapshot type that doesn't exist", + project: "test-project", volKey: meta.ZonalKey("my-disk", zone), snapshotOnCloud: false, expErrCode: codes.NotFound, @@ -845,7 +848,7 @@ func TestCreateVolumeWithVolumeSource(t *testing.T) { } if tc.snapshotOnCloud { - gceDriver.cs.CloudProvider.CreateSnapshot(context.Background(), tc.volKey, name) + gceDriver.cs.CloudProvider.CreateSnapshot(context.Background(), tc.project, tc.volKey, name) } resp, err := gceDriver.cs.CreateVolume(context.Background(), req) //check response diff --git a/pkg/gce-pd-csi-driver/node.go b/pkg/gce-pd-csi-driver/node.go index 0afa8217a..f195afd2a 100644 --- a/pkg/gce-pd-csi-driver/node.go +++ b/pkg/gce-pd-csi-driver/node.go @@ -270,7 +270,7 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage // TODO(#253): Check volume capability matches for ALREADY_EXISTS - volumeKey, err := common.VolumeIDToKey(volumeID) + _, volumeKey, err := common.VolumeIDToKey(volumeID) if err != nil { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("NodeStageVolume Volume ID is invalid: %v", err)) } @@ -447,7 +447,7 @@ func (ns *GCENodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpa return nil, status.Error(codes.InvalidArgument, "volume path must be provided") } - volKey, err := common.VolumeIDToKey(volumeID) + _, volKey, err := common.VolumeIDToKey(volumeID) if err != nil { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("volume ID is invalid: %v", err)) } diff --git a/pkg/gce-pd-csi-driver/utils_linux.go b/pkg/gce-pd-csi-driver/utils_linux.go index 52be9b6c1..a56a21957 100644 
--- a/pkg/gce-pd-csi-driver/utils_linux.go +++ b/pkg/gce-pd-csi-driver/utils_linux.go @@ -28,7 +28,7 @@ import ( ) func getDevicePath(ns *GCENodeServer, volumeID, partition string) (string, error) { - volumeKey, err := common.VolumeIDToKey(volumeID) + _, volumeKey, err := common.VolumeIDToKey(volumeID) if err != nil { return "", err } diff --git a/pkg/gce-pd-csi-driver/utils_windows.go b/pkg/gce-pd-csi-driver/utils_windows.go index 056ad435f..f89a2d7d6 100644 --- a/pkg/gce-pd-csi-driver/utils_windows.go +++ b/pkg/gce-pd-csi-driver/utils_windows.go @@ -77,7 +77,7 @@ func cleanupStagePath(path string, m *mount.SafeFormatAndMount) error { // search Windows disk number by volumeID func getDevicePath(ns *GCENodeServer, volumeID, partition string) (string, error) { - volumeKey, err := common.VolumeIDToKey(volumeID) + _, volumeKey, err := common.VolumeIDToKey(volumeID) if err != nil { return "", err } diff --git a/test/e2e/tests/single_zone_e2e_test.go b/test/e2e/tests/single_zone_e2e_test.go index dde521757..067a2d8f6 100644 --- a/test/e2e/tests/single_zone_e2e_test.go +++ b/test/e2e/tests/single_zone_e2e_test.go @@ -548,10 +548,10 @@ var _ = Describe("GCE PD CSI Driver", func() { nodeID := testContext.Instance.GetNodeID() _, volID := createAndValidateUniqueZonalDisk(client, p, z) - defer deleteVolumeOrError(client, volID, p) + defer deleteVolumeOrError(client, volID) _, secondVolID := createAndValidateUniqueZonalDisk(client, p, z) - defer deleteVolumeOrError(client, secondVolID, p) + defer deleteVolumeOrError(client, secondVolID) // Attach volID to current instance err := client.ControllerPublishVolume(volID, nodeID) @@ -854,13 +854,13 @@ func createAndValidateUniqueZonalDisk(client *remote.CsiClient, project, zone st return } -func deleteVolumeOrError(client *remote.CsiClient, volID, project string) { +func deleteVolumeOrError(client *remote.CsiClient, volID string) { // Delete Disk err := client.DeleteVolume(volID) Expect(err).To(BeNil(), "DeleteVolume failed") // 
Validate Disk Deleted - key, err := common.VolumeIDToKey(volID) + project, key, err := common.VolumeIDToKey(volID) Expect(err).To(BeNil(), "Failed to conver volume ID To key") _, err = computeService.Disks.Get(project, key.Zone, key.Name).Do() Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")