diff --git a/pkg/common/constants.go b/pkg/common/constants.go
index 8aaa6bd5a..9ca91f40c 100644
--- a/pkg/common/constants.go
+++ b/pkg/common/constants.go
@@ -17,11 +17,6 @@ limitations under the License.
 package common
 
 const (
-	// Keys for Storage Class Parameters
-	ParameterKeyType                 = "type"
-	ParameterKeyReplicationType      = "replication-type"
-	ParameterKeyDiskEncryptionKmsKey = "disk-encryption-kms-key"
-
 	// Keys for Topology. This key will be shared amongst drivers from GCP
 	TopologyKeyZone = "topology.gke.io/zone"
 
diff --git a/pkg/common/parameters.go b/pkg/common/parameters.go
new file mode 100644
index 000000000..26117316c
--- /dev/null
+++ b/pkg/common/parameters.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"fmt"
+	"strings"
+)
+
+const (
+	ParameterKeyType                 = "type"
+	ParameterKeyReplicationType      = "replication-type"
+	ParameterKeyDiskEncryptionKmsKey = "disk-encryption-kms-key"
+
+	replicationTypeNone = "none"
+)
+
+// DiskParameters contains normalized and defaulted disk parameters
+type DiskParameters struct {
+	// Values: pd-standard OR pd-ssd
+	// Default: pd-standard
+	DiskType string
+	// Values: "none", regional-pd
+	// Default: "none"
+	ReplicationType string
+	// Values: {string}
+	// Default: ""
+	DiskEncryptionKMSKey string
+}
+
+// ExtractAndDefaultParameters will take the relevant parameters from a map and
+// put them into a well-defined struct, making sure to default unspecified fields.
+func ExtractAndDefaultParameters(parameters map[string]string) (DiskParameters, error) {
+	p := DiskParameters{
+		DiskType:             "pd-standard",       // Default
+		ReplicationType:      replicationTypeNone, // Default
+		DiskEncryptionKMSKey: "",                  // Default
+	}
+	for k, v := range parameters {
+		if k == "csiProvisionerSecretName" || k == "csiProvisionerSecretNamespace" {
+			// These are hardcoded secret keys required to function but not needed by GCE PD
+			continue
+		}
+		switch strings.ToLower(k) {
+		case ParameterKeyType:
+			if v != "" {
+				p.DiskType = strings.ToLower(v)
+			}
+		case ParameterKeyReplicationType:
+			if v != "" {
+				p.ReplicationType = strings.ToLower(v)
+			}
+		case ParameterKeyDiskEncryptionKmsKey:
+			// Resource names (e.g. "keyRings", "cryptoKeys", etc.) are case sensitive, so do not change case
+			p.DiskEncryptionKMSKey = v
+		default:
+			return p, fmt.Errorf("parameters contains invalid option %q", k)
+		}
+	}
+	return p, nil
+}
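Usage sketch (illustrative only, not part of this patch): how a caller might exercise the new helper. It assumes it lives alongside the code above in package common (e.g. in an example test file); the parameter values are made up, and the keys match the StorageClass keys defined above, which are matched case-insensitively while the KMS key value keeps its case.

package common

import "fmt"

func ExampleExtractAndDefaultParameters() {
	// Keys arrive exactly as written in a StorageClass; matching is case-insensitive.
	sc := map[string]string{
		"type":                    "pd-ssd",
		"replication-type":        "regional-pd",
		"disk-encryption-kms-key": "projects/p/locations/global/keyRings/kr/cryptoKeys/k",
	}
	p, err := ExtractAndDefaultParameters(sc)
	if err != nil {
		fmt.Println("invalid parameters:", err)
		return
	}
	fmt.Println(p.DiskType, p.ReplicationType, p.DiskEncryptionKMSKey)
	// Output: pd-ssd regional-pd projects/p/locations/global/keyRings/kr/cryptoKeys/k
}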
diff --git a/pkg/common/parameters_test.go b/pkg/common/parameters_test.go
new file mode 100644
index 000000000..825bb2141
--- /dev/null
+++ b/pkg/common/parameters_test.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestExtractAndDefaultParameters(t *testing.T) {
+	tests := []struct {
+		name         string
+		parameters   map[string]string
+		expectParams DiskParameters
+		expectErr    bool
+	}{
+		{
+			name:       "defaults",
+			parameters: map[string]string{},
+			expectParams: DiskParameters{
+				DiskType:             "pd-standard",
+				ReplicationType:      "none",
+				DiskEncryptionKMSKey: "",
+			},
+		},
+		{
+			name:       "specified empties",
+			parameters: map[string]string{ParameterKeyType: "", ParameterKeyReplicationType: "", ParameterKeyDiskEncryptionKmsKey: ""},
+			expectParams: DiskParameters{
+				DiskType:             "pd-standard",
+				ReplicationType:      "none",
+				DiskEncryptionKMSKey: "",
+			},
+		},
+		{
+			name:       "random keys",
+			parameters: map[string]string{ParameterKeyType: "", "foo": "", ParameterKeyDiskEncryptionKmsKey: ""},
+			expectErr:  true,
+		},
+		{
+			name:       "real values",
+			parameters: map[string]string{ParameterKeyType: "pd-ssd", ParameterKeyReplicationType: "regional-pd", ParameterKeyDiskEncryptionKmsKey: "foo/key"},
+			expectParams: DiskParameters{
+				DiskType:             "pd-ssd",
+				ReplicationType:      "regional-pd",
+				DiskEncryptionKMSKey: "foo/key",
+			},
+		},
+		{
+			name:       "partial spec",
+			parameters: map[string]string{ParameterKeyDiskEncryptionKmsKey: "foo/key"},
+			expectParams: DiskParameters{
+				DiskType:             "pd-standard",
+				ReplicationType:      "none",
+				DiskEncryptionKMSKey: "foo/key",
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			p, err := ExtractAndDefaultParameters(tc.parameters)
+			if gotErr := err != nil; gotErr != tc.expectErr {
+				t.Fatalf("ExtractAndDefaultParameters(%+v) = %v; expectedErr: %v", tc.parameters, err, tc.expectErr)
+			}
+			if err != nil {
+				return
+			}
+
+			if !reflect.DeepEqual(p, tc.expectParams) {
+				t.Errorf("ExtractAndDefaultParameters(%+v) = %v; expected params: %v", tc.parameters, p, tc.expectParams)
+			}
+		})
+	}
+}
diff --git a/pkg/gce-cloud-provider/compute/cloud-disk.go b/pkg/gce-cloud-provider/compute/cloud-disk.go
index b8f63a157..23709cbe6 100644
--- a/pkg/gce-cloud-provider/compute/cloud-disk.go
+++ b/pkg/gce-cloud-provider/compute/cloud-disk.go
@@ -15,6 +15,8 @@ limitations under the License.
 package gcecloudprovider
 
 import (
+	"strings"
+
 	computev1 "google.golang.org/api/compute/v1"
 )
 
@@ -90,15 +92,21 @@ func (d *CloudDisk) GetKind() string {
 	}
 }
 
-func (d *CloudDisk) GetType() string {
+// GetPDType returns the type of the PD as either 'pd-standard' or 'pd-ssd'. The
+// "Type" field on the compute disk is stored as a URL like
+// projects/project/zones/zone/diskTypes/pd-standard
+func (d *CloudDisk) GetPDType() string {
+	var pdType string
 	switch d.Type() {
 	case Zonal:
-		return d.ZonalDisk.Type
+		pdType = d.ZonalDisk.Type
 	case Regional:
-		return d.RegionalDisk.Type
+		pdType = d.RegionalDisk.Type
 	default:
 		return ""
 	}
+	respType := strings.Split(pdType, "/")
+	return strings.TrimSpace(respType[len(respType)-1])
 }
 
 func (d *CloudDisk) GetSelfLink() string {
@@ -155,3 +163,19 @@ func (d *CloudDisk) GetSnapshotId() string {
 		return ""
 	}
 }
+
+func (d *CloudDisk) GetKMSKeyName() string {
+	var dek *computev1.CustomerEncryptionKey
+	switch d.Type() {
+	case Zonal:
+		dek = d.ZonalDisk.DiskEncryptionKey
+	case Regional:
+		dek = d.RegionalDisk.DiskEncryptionKey
+	default:
+		return ""
+	}
+	if dek == nil {
+		return ""
+	}
+	return dek.KmsKeyName
+}
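Standalone sketch (not part of this patch) of the URL-suffix parsing that GetPDType relies on; the disk-type URL below is made up for illustration.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// The compute API reports the disk type as a fully qualified URL.
	diskTypeURL := "projects/my-project/zones/us-central1-a/diskTypes/pd-ssd"
	parts := strings.Split(diskTypeURL, "/")
	// The last path segment is the bare PD type the validation code compares against.
	fmt.Println(strings.TrimSpace(parts[len(parts)-1])) // prints "pd-ssd"
}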
Got %v", - diskType, respType[len(respType)-1]) - } - klog.V(4).Infof("Compatible disk already exists") - return nil + return ValidateDiskParameters(resp, params) } -func (cloud *FakeCloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key, diskType string, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID, diskEncryptionKmsKey string) error { +func (cloud *FakeCloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key, params common.DiskParameters, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID string) error { if disk, ok := cloud.disks[volKey.Name]; ok { - err := cloud.ValidateExistingDisk(ctx, disk, diskType, + err := cloud.ValidateExistingDisk(ctx, disk, params, int64(capacityRange.GetRequiredBytes()), int64(capacityRange.GetLimitBytes())) if err != nil { @@ -247,13 +239,13 @@ func (cloud *FakeCloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key Name: volKey.Name, SizeGb: common.BytesToGb(capBytes), Description: "Disk created by GCE-PD CSI Driver", - Type: cloud.GetDiskTypeURI(volKey, diskType), + Type: cloud.GetDiskTypeURI(volKey, params.DiskType), SelfLink: fmt.Sprintf("projects/%s/zones/%s/disks/%s", cloud.project, volKey.Zone, volKey.Name), SourceSnapshotId: snapshotID, } - if diskEncryptionKmsKey != "" { + if params.DiskEncryptionKMSKey != "" { diskToCreateGA.DiskEncryptionKey = &computev1.CustomerEncryptionKey{ - KmsKeyName: diskEncryptionKmsKey, + KmsKeyName: params.DiskEncryptionKMSKey, } } diskToCreate = ZonalCloudDisk(diskToCreateGA) @@ -262,13 +254,13 @@ func (cloud *FakeCloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key Name: volKey.Name, SizeGb: common.BytesToGb(capBytes), Description: "Regional disk created by GCE-PD CSI Driver", - Type: cloud.GetDiskTypeURI(volKey, diskType), + Type: cloud.GetDiskTypeURI(volKey, params.DiskType), SelfLink: fmt.Sprintf("projects/%s/regions/%s/disks/%s", cloud.project, volKey.Region, volKey.Name), SourceSnapshotId: snapshotID, } - if diskEncryptionKmsKey != "" { + if params.DiskEncryptionKMSKey != "" { diskToCreateV1.DiskEncryptionKey = &computev1.CustomerEncryptionKey{ - KmsKeyName: diskEncryptionKmsKey, + KmsKeyName: params.DiskEncryptionKMSKey, } } diskToCreate = RegionalCloudDisk(diskToCreateV1) diff --git a/pkg/gce-cloud-provider/compute/gce-compute.go b/pkg/gce-cloud-provider/compute/gce-compute.go index b37b912a9..58f6de19d 100644 --- a/pkg/gce-cloud-provider/compute/gce-compute.go +++ b/pkg/gce-cloud-provider/compute/gce-compute.go @@ -41,8 +41,8 @@ type GCECompute interface { // Disk Methods GetDisk(ctx context.Context, volumeKey *meta.Key) (*CloudDisk, error) RepairUnderspecifiedVolumeKey(ctx context.Context, volumeKey *meta.Key) (*meta.Key, error) - ValidateExistingDisk(ctx context.Context, disk *CloudDisk, diskType string, reqBytes, limBytes int64) error - InsertDisk(ctx context.Context, volKey *meta.Key, diskType string, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID, diskEncryptionKmsKey string) error + ValidateExistingDisk(ctx context.Context, disk *CloudDisk, params common.DiskParameters, reqBytes, limBytes int64) error + InsertDisk(ctx context.Context, volKey *meta.Key, params common.DiskParameters, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID string) error DeleteDisk(ctx context.Context, volumeKey *meta.Key) error AttachDisk(ctx context.Context, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error DetachDisk(ctx 
@@ -212,8 +212,8 @@ func (cloud *CloudProvider) getRegionURI(region string) string {
 		region)
 }
 
-func (cloud *CloudProvider) ValidateExistingDisk(ctx context.Context, resp *CloudDisk, diskType string, reqBytes, limBytes int64) error {
-	klog.V(5).Infof("Validating existing disk %v with diskType: %s, reqested bytes: %v, limit bytes: %v", resp, diskType, reqBytes, limBytes)
+func (cloud *CloudProvider) ValidateExistingDisk(ctx context.Context, resp *CloudDisk, params common.DiskParameters, reqBytes, limBytes int64) error {
+	klog.V(5).Infof("Validating existing disk %v with diskType: %s, requested bytes: %v, limit bytes: %v", resp, params.DiskType, reqBytes, limBytes)
 	if resp == nil {
 		return fmt.Errorf("disk does not exist")
 	}
@@ -225,34 +225,49 @@ func (cloud *CloudProvider) ValidateExistingDisk(ctx context.Context, resp *Clou
 			reqBytes, common.GbToBytes(resp.GetSizeGb()), limBytes)
 	}
 
-	respType := strings.Split(resp.GetType(), "/")
-	typeMatch := strings.TrimSpace(respType[len(respType)-1]) == strings.TrimSpace(diskType)
-	typeDefault := diskType == "" && strings.TrimSpace(respType[len(respType)-1]) == "pd-standard"
-	if !typeMatch && !typeDefault {
-		return fmt.Errorf("disk already exists with incompatible type. Need %v. Got %v",
-			diskType, respType[len(respType)-1])
+	return ValidateDiskParameters(resp, params)
+}
+
+// ValidateDiskParameters takes a CloudDisk and returns nil if the parameters
+// specified validly describe the disk provided, and an error otherwise.
+func ValidateDiskParameters(disk *CloudDisk, params common.DiskParameters) error {
+	if disk.GetPDType() != params.DiskType {
+		return fmt.Errorf("actual pd type %s did not match the expected param %s", disk.GetPDType(), params.DiskType)
 	}
+
+	if params.ReplicationType == "none" && disk.Type() != Zonal {
+		return fmt.Errorf("actual disk replication type %v did not match expected param %s", disk.Type(), params.ReplicationType)
+	}
+
+	if params.ReplicationType == "regional-pd" && disk.Type() != Regional {
+		return fmt.Errorf("actual disk replication type %v did not match expected param %s", disk.Type(), "regional-pd")
+	}
+
+	if disk.GetKMSKeyName() != params.DiskEncryptionKMSKey {
+		return fmt.Errorf("actual disk KMS key name %s did not match expected param %s", disk.GetKMSKeyName(), params.DiskEncryptionKMSKey)
+	}
+
 	return nil
 }
 
-func (cloud *CloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key, diskType string, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID, diskEncryptionKmsKey string) error {
+func (cloud *CloudProvider) InsertDisk(ctx context.Context, volKey *meta.Key, params common.DiskParameters, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID string) error {
 	klog.V(5).Infof("Inserting disk %v", volKey)
 	switch volKey.Type() {
 	case meta.Zonal:
-		return cloud.insertZonalDisk(ctx, volKey, diskType, capBytes, capacityRange, snapshotID, diskEncryptionKmsKey)
+		return cloud.insertZonalDisk(ctx, volKey, params, capBytes, capacityRange, snapshotID)
 	case meta.Regional:
-		return cloud.insertRegionalDisk(ctx, volKey, diskType, capBytes, capacityRange, replicaZones, snapshotID, diskEncryptionKmsKey)
+		return cloud.insertRegionalDisk(ctx, volKey, params, capBytes, capacityRange, replicaZones, snapshotID)
 	default:
 		return fmt.Errorf("could not insert disk, key was neither zonal nor regional, instead got: %v", volKey.String())
 	}
 }
 
-func (cloud *CloudProvider) insertRegionalDisk(ctx context.Context, volKey *meta.Key, diskType string, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID, diskEncryptionKmsKey string) error {
+func (cloud *CloudProvider) insertRegionalDisk(ctx context.Context, volKey *meta.Key, params common.DiskParameters, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string, snapshotID string) error {
 	diskToCreate := &computev1.Disk{
 		Name:        volKey.Name,
 		SizeGb:      common.BytesToGb(capBytes),
 		Description: "Regional disk created by GCE-PD CSI Driver",
-		Type:        cloud.GetDiskTypeURI(volKey, diskType),
+		Type:        cloud.GetDiskTypeURI(volKey, params.DiskType),
 	}
 	if snapshotID != "" {
 		diskToCreate.SourceSnapshot = snapshotID
@@ -260,9 +275,9 @@ func (cloud *CloudProvider) insertRegionalDisk(ctx context.Context, volKey *meta
 	if len(replicaZones) != 0 {
 		diskToCreate.ReplicaZones = replicaZones
 	}
-	if diskEncryptionKmsKey != "" {
+	if params.DiskEncryptionKMSKey != "" {
 		diskToCreate.DiskEncryptionKey = &computev1.CustomerEncryptionKey{
-			KmsKeyName: diskEncryptionKmsKey,
+			KmsKeyName: params.DiskEncryptionKMSKey,
 		}
 	}
 
@@ -273,7 +288,7 @@ func (cloud *CloudProvider) insertRegionalDisk(ctx context.Context, volKey *meta
 		if err != nil {
 			return err
 		}
-		err = cloud.ValidateExistingDisk(ctx, disk, diskType,
+		err = cloud.ValidateExistingDisk(ctx, disk, params,
 			int64(capacityRange.GetRequiredBytes()),
 			int64(capacityRange.GetLimitBytes()))
 		if err != nil {
@@ -292,7 +307,7 @@ func (cloud *CloudProvider) insertRegionalDisk(ctx context.Context, volKey *meta
 		if err != nil {
 			return err
 		}
-		err = cloud.ValidateExistingDisk(ctx, disk, diskType,
+		err = cloud.ValidateExistingDisk(ctx, disk, params,
 			int64(capacityRange.GetRequiredBytes()),
 			int64(capacityRange.GetLimitBytes()))
 		if err != nil {
@@ -306,21 +321,21 @@ func (cloud *CloudProvider) insertRegionalDisk(ctx context.Context, volKey *meta
 	return nil
 }
 
-func (cloud *CloudProvider) insertZonalDisk(ctx context.Context, volKey *meta.Key, diskType string, capBytes int64, capacityRange *csi.CapacityRange, snapshotID, diskEncryptionKmsKey string) error {
+func (cloud *CloudProvider) insertZonalDisk(ctx context.Context, volKey *meta.Key, params common.DiskParameters, capBytes int64, capacityRange *csi.CapacityRange, snapshotID string) error {
 	diskToCreate := &computev1.Disk{
 		Name:        volKey.Name,
 		SizeGb:      common.BytesToGb(capBytes),
 		Description: "Disk created by GCE-PD CSI Driver",
-		Type:        cloud.GetDiskTypeURI(volKey, diskType),
+		Type:        cloud.GetDiskTypeURI(volKey, params.DiskType),
 	}
 
 	if snapshotID != "" {
 		diskToCreate.SourceSnapshot = snapshotID
 	}
 
-	if diskEncryptionKmsKey != "" {
+	if params.DiskEncryptionKMSKey != "" {
 		diskToCreate.DiskEncryptionKey = &computev1.CustomerEncryptionKey{
-			KmsKeyName: diskEncryptionKmsKey,
+			KmsKeyName: params.DiskEncryptionKMSKey,
 		}
 	}
 
@@ -332,7 +347,7 @@ func (cloud *CloudProvider) insertZonalDisk(ctx context.Context, volKey *meta.Ke
 		if err != nil {
 			return err
 		}
-		err = cloud.ValidateExistingDisk(ctx, disk, diskType,
+		err = cloud.ValidateExistingDisk(ctx, disk, params,
 			int64(capacityRange.GetRequiredBytes()),
 			int64(capacityRange.GetLimitBytes()))
 		if err != nil {
@@ -352,7 +367,7 @@ func (cloud *CloudProvider) insertZonalDisk(ctx context.Context, volKey *meta.Ke
 		if err != nil {
 			return err
 		}
-		err = cloud.ValidateExistingDisk(ctx, disk, diskType,
+		err = cloud.ValidateExistingDisk(ctx, disk, params,
 			int64(capacityRange.GetRequiredBytes()),
 			int64(capacityRange.GetLimitBytes()))
 		if err != nil {
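Sketch of how the pieces above compose (illustrative only, not part of this patch): a caller fetches the disk, normalizes the StorageClass parameters, and validates the two against each other, which mirrors what controller.go does below. The function name is made up and the import paths are assumed from the repo layout.

package example

import (
	"context"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"

	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
	gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute"
)

// validateDiskAgainstParameters is a hypothetical helper showing the intended flow.
func validateDiskAgainstParameters(ctx context.Context, cloud gce.GCECompute, volKey *meta.Key, scParams map[string]string) error {
	// Fetch the existing disk from GCE (zonal or regional, depending on the key).
	disk, err := cloud.GetDisk(ctx, volKey)
	if err != nil {
		return err
	}
	// Normalize and default the raw StorageClass parameters.
	params, err := common.ExtractAndDefaultParameters(scParams)
	if err != nil {
		return err
	}
	// nil means the disk's PD type, replication type, and KMS key all match the parameters.
	return gce.ValidateDiskParameters(disk, params)
}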
diff --git a/pkg/gce-pd-csi-driver/controller.go b/pkg/gce-pd-csi-driver/controller.go
index 84c62211c..a67dd0bfc 100644
--- a/pkg/gce-pd-csi-driver/controller.go
+++ b/pkg/gce-pd-csi-driver/controller.go
@@ -91,32 +91,14 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre
 	// Apply Parameters (case-insensitive). We leave validation of
 	// the values to the cloud provider.
-	diskType := "pd-standard"
-	// Start process for creating a new disk
-	replicationType := replicationTypeNone
-	diskEncryptionKmsKey := ""
-	for k, v := range req.GetParameters() {
-		if k == "csiProvisionerSecretName" || k == "csiProvisionerSecretNamespace" {
-			// These are hardcoded secrets keys required to function but not needed by GCE PD
-			continue
-		}
-		switch strings.ToLower(k) {
-		case common.ParameterKeyType:
-			klog.V(4).Infof("Setting type: %v", v)
-			diskType = v
-		case common.ParameterKeyReplicationType:
-			replicationType = strings.ToLower(v)
-		case common.ParameterKeyDiskEncryptionKmsKey:
-			// Resource names (e.g. "keyRings", "cryptoKeys", etc.) are case sensitive, so do not change case
-			diskEncryptionKmsKey = v
-		default:
-			return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume invalid option %q", k))
-		}
+	params, err := common.ExtractAndDefaultParameters(req.GetParameters())
+	if err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "failed to extract parameters: %v", err)
 	}
 	// Determine the zone or zones+region of the disk
 	var zones []string
 	var volKey *meta.Key
-	switch replicationType {
+	switch params.ReplicationType {
 	case replicationTypeNone:
 		zones, err = pickZones(gceCS, req.GetAccessibilityRequirements(), 1)
 		if err != nil {
@@ -138,7 +120,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre
 		}
 		volKey = meta.RegionalKey(name, region)
 	default:
-		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume replication type '%s' is not supported", replicationType))
+		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume replication type '%s' is not supported", params.ReplicationType))
 	}
 
 	volumeID, err := common.KeyToVolumeID(volKey, gceCS.MetadataService.GetProject())
@@ -159,7 +141,7 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre
 	}
 	if err == nil {
 		// There was no error so we want to validate the disk that we find
-		err = gceCS.CloudProvider.ValidateExistingDisk(ctx, existingDisk, diskType,
+		err = gceCS.CloudProvider.ValidateExistingDisk(ctx, existingDisk, params,
 			int64(capacityRange.GetRequiredBytes()),
 			int64(capacityRange.GetLimitBytes()))
 		if err != nil {
@@ -189,12 +171,12 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre
 	// Create the disk
 	var disk *gce.CloudDisk
-	switch replicationType {
+	switch params.ReplicationType {
 	case replicationTypeNone:
 		if len(zones) != 1 {
 			return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume failed to get a single zone for creating zonal disk, instead got: %v", zones))
 		}
-		disk, err = createSingleZoneDisk(ctx, gceCS.CloudProvider, name, zones, diskType, capacityRange, capBytes, snapshotID, diskEncryptionKmsKey)
+		disk, err = createSingleZoneDisk(ctx, gceCS.CloudProvider, name, zones, params, capacityRange, capBytes, snapshotID)
 		if err != nil {
 			return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume failed to create single zonal disk %#v: %v", name, err))
 		}
@@ -202,12 +184,12 @@ func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.Cre
 		if len(zones) != 2 {
 			return nil, status.Errorf(codes.Internal, fmt.Sprintf("CreateVolume failed to get a 2 zones for creating regional disk, instead got: %v", zones))
 		}
-		disk, err = createRegionalDisk(ctx, gceCS.CloudProvider, name, zones, diskType, capacityRange, capBytes, snapshotID, diskEncryptionKmsKey)
+		disk, err = createRegionalDisk(ctx, gceCS.CloudProvider, name, zones, params, capacityRange, capBytes, snapshotID)
 		if err != nil {
 			return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume failed to create regional disk %#v: %v", name, err))
 		}
 	default:
-		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume replication type '%s' is not supported", replicationType))
+		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume replication type '%s' is not supported", params.ReplicationType))
 	}
 	klog.V(4).Infof("CreateVolume succeeded for disk %v", volKey)
 	return generateCreateVolumeResponse(disk, capBytes, zones), nil
@@ -415,20 +397,16 @@ func (gceCS *GCEControllerServer) ControllerUnpublishVolume(ctx context.Context,
 }
 
 func (gceCS *GCEControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
-	// TODO(#162): Implement ValidateVolumeCapabilities
-
-	klog.V(5).Infof("Using default ValidateVolumeCapabilities")
-
 	// Validate Arguments
 	if req.GetVolumeCapabilities() == nil || len(req.GetVolumeCapabilities()) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "ValidateVolumeCapabilities Volume Capabilities must be provided")
+		return nil, status.Error(codes.InvalidArgument, "Volume Capabilities must be provided")
 	}
 	volumeID := req.GetVolumeId()
 	if len(volumeID) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "ValidateVolumeCapabilities Volume ID must be provided")
+		return nil, status.Error(codes.InvalidArgument, "Volume ID must be provided")
 	}
 	volKey, err := common.VolumeIDToKey(volumeID)
 	if err != nil {
-		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ValidateVolumeCapabilities Volume ID is invalid: %v", err))
+		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Volume ID is invalid: %v", err))
 	}
 
 	if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired {
@@ -436,7 +414,7 @@
 	}
 	defer gceCS.volumeLocks.Release(volumeID)
 
-	_, err = gceCS.CloudProvider.GetDisk(ctx, volKey)
+	disk, err := gceCS.CloudProvider.GetDisk(ctx, volKey)
 	if err != nil {
 		if gce.IsGCENotFoundError(err) {
 			return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find disk %v: %v", volKey.Name, err))
@@ -444,68 +422,44 @@
 		return nil, status.Error(codes.Internal, fmt.Sprintf("Unknown get disk error: %v", err))
 	}
 
+	// Check Volume Context is Empty
+	if len(req.GetVolumeContext()) != 0 {
+		return generateFailedValidationMessage("VolumeContext expected to be empty but got %v", req.GetVolumeContext()), nil
+	}
+
+	// Check volume capabilities supported by PD. These are the same for any PD
+	if err := validateVolumeCapabilities(req.GetVolumeCapabilities()); err != nil {
+		return generateFailedValidationMessage("VolumeCapabilities not valid: %v", err), nil
+	}
+
+	// Validate the disk parameters match the disk we GET
+	params, err := common.ExtractAndDefaultParameters(req.GetParameters())
+	if err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "failed to extract parameters: %v", err)
+	}
+	if err := gce.ValidateDiskParameters(disk, params); err != nil {
+		return generateFailedValidationMessage("Parameters %v do not match given disk %s: %v", req.GetParameters(), disk.GetName(), err), nil
+	}
+
+	// Ignore secrets
+	if len(req.GetSecrets()) != 0 {
+		return generateFailedValidationMessage("Secrets expected to be empty but got %v", req.GetSecrets()), nil
+	}
+
+	// All valid, return success
 	return &csi.ValidateVolumeCapabilitiesResponse{
-		Message: "ValidateVolumeCapabilities is currently unimplemented for CSI v1.0.0",
+		Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
+			VolumeContext:      req.GetVolumeContext(),
+			VolumeCapabilities: req.GetVolumeCapabilities(),
+			Parameters:         req.GetParameters(),
+		},
 	}, nil
-	/*
-		for _, c := range req.GetVolumeCapabilities() {
-			found := false
-			for _, c1 := range gceCS.Driver.vcap {
-				if c1.Mode == c.GetAccessMode().Mode {
-					found = true
-				}
-			}
-			if !found {
-				return &csi.ValidateVolumeCapabilitiesResponse{
-					Supported: false,
-					Message:   "Driver does not support mode:" + c.GetAccessMode().Mode.String(),
-				}, status.Error(codes.InvalidArgument, "Driver does not support mode:"+c.GetAccessMode().Mode.String())
-			}
-			// TODO: Ignoring mount & block types for now.
-		}
-
-		for _, top := range req.GetAccessibleTopology() {
-			for k, v := range top.GetSegments() {
-				switch k {
-				case common.TopologyKeyZone:
-					switch volKey.Type() {
-					case meta.Zonal:
-						if v == volKey.Zone {
-							// Accessible zone matches with storage zone
-							return &csi.ValidateVolumeCapabilitiesResponse{
-								Supported: true,
-							}, nil
-						}
-					case meta.Regional:
-						// TODO: This should more accurately check the disks replica Zones but that involves
-						// GET-ing the disk
-						region, err := common.GetRegionFromZones([]string{v})
-						if err != nil {
-							return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("ValidateVolumeCapabilities could not extract topology region from zone %v: %v", v, err))
-						}
-						if region == volKey.Region {
-							// Accessible region matches with storage region
-							return &csi.ValidateVolumeCapabilitiesResponse{
-								Supported: true,
-							}, nil
-						}
-					default:
-						// Accessible zone does not match
-						return &csi.ValidateVolumeCapabilitiesResponse{
-							Supported: false,
-							Message:   fmt.Sprintf("Volume %s is not accesible from topology %s:%s", volumeID, k, v),
-						}, nil
-					}
-				default:
-					return nil, status.Error(codes.InvalidArgument, "ValidateVolumeCapabilities unknown topology segment key")
-				}
-			}
-		}
+}
-		return &csi.ValidateVolumeCapabilitiesResponse{
-			Supported: true,
-		}, nil
-	*/
+func generateFailedValidationMessage(format string, a ...interface{}) *csi.ValidateVolumeCapabilitiesResponse {
+	return &csi.ValidateVolumeCapabilitiesResponse{
+		Message: fmt.Sprintf(format, a...),
+	}
 }
 
 func (gceCS *GCEControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
@@ -1017,7 +971,7 @@ func cleanSelfLink(selfLink string) string {
 	return strings.TrimPrefix(temp, gce.GCEComputeBetaAPIEndpoint)
 }
 
-func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name string, zones []string, diskType string, capacityRange *csi.CapacityRange, capBytes int64, snapshotID, diskEncryptionKmsKey string) (*gce.CloudDisk, error) {
+func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name string, zones []string, params common.DiskParameters, capacityRange *csi.CapacityRange, capBytes int64, snapshotID string) (*gce.CloudDisk, error) {
 	region, err := common.GetRegionFromZones(zones)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get region from zones: %v", err)
 	}
@@ -1029,7 +983,7 @@ func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name
 			fullyQualifiedReplicaZones, cloudProvider.GetReplicaZoneURI(replicaZone))
 	}
 
-	err = cloudProvider.InsertDisk(ctx, meta.RegionalKey(name, region), diskType, capBytes, capacityRange, fullyQualifiedReplicaZones, snapshotID, diskEncryptionKmsKey)
+	err = cloudProvider.InsertDisk(ctx, meta.RegionalKey(name, region), params, capBytes, capacityRange, fullyQualifiedReplicaZones, snapshotID)
 	if err != nil {
 		return nil, fmt.Errorf("failed to insert regional disk: %v", err)
 	}
@@ -1041,12 +995,12 @@ func createRegionalDisk(ctx context.Context, cloudProvider gce.GCECompute, name
 	return disk, nil
 }
 
-func createSingleZoneDisk(ctx context.Context, cloudProvider gce.GCECompute, name string, zones []string, diskType string, capacityRange *csi.CapacityRange, capBytes int64, snapshotID, diskEncryptionKmsKey string) (*gce.CloudDisk, error) {
+func createSingleZoneDisk(ctx context.Context, cloudProvider gce.GCECompute, name string, zones []string, params common.DiskParameters, capacityRange *csi.CapacityRange, capBytes int64, snapshotID string) (*gce.CloudDisk, error) {
 	if len(zones) != 1 {
 		return nil, fmt.Errorf("got wrong number of zones for zonal create volume: %v", len(zones))
 	}
 	diskZone := zones[0]
-	err := cloudProvider.InsertDisk(ctx, meta.ZonalKey(name, diskZone), diskType, capBytes, capacityRange, nil, snapshotID, diskEncryptionKmsKey)
+	err := cloudProvider.InsertDisk(ctx, meta.ZonalKey(name, diskZone), params, capBytes, capacityRange, nil, snapshotID)
 	if err != nil {
 		return nil, fmt.Errorf("failed to insert zonal disk: %v", err)
 	}
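For context (illustrative only, not part of this patch): how a CSI caller would interpret the response shape ValidateVolumeCapabilities now returns. Per the CSI spec, a non-nil Confirmed field means the capabilities were validated, otherwise Message carries the reason produced by generateFailedValidationMessage. The helper name below is made up.

package example

import csi "github.com/container-storage-interface/spec/lib/go/csi"

// isVolumeCompatible is a hypothetical helper on the caller side.
func isVolumeCompatible(resp *csi.ValidateVolumeCapabilitiesResponse) (bool, string) {
	if resp.GetConfirmed() != nil {
		// The driver confirmed the context, capabilities, and parameters.
		return true, ""
	}
	// Validation failed; the reason is in Message.
	return false, resp.GetMessage()
}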