Commit 1009e57

Merge pull request #495 from ialidzhikov/doc/snapshot-example
Update snapshot guide
2 parents: 2cd5cf5 + 064bbaa

8 files changed: +104 -90 lines changed


Makefile (+2 -2)

@@ -41,15 +41,15 @@ endif

 build-and-push-windows-container-ltsc2019:
 ifndef GCE_PD_CSI_STAGING_IMAGE
-$(error "Must set enviroment variable GCE_PD_CSI_STAGING_IMAGE to staging image repository")
+$(error "Must set environment variable GCE_PD_CSI_STAGING_IMAGE to staging image repository")
 endif
 @sh init-buildx.sh; \
 DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --file=Dockerfile.Windows --platform=windows \
 -t $(STAGINGIMAGE):$(STAGINGVERSION) --build-arg BASE_IMAGE=servercore --build-arg BASE_IMAGE_TAG=ltsc2019 --push .

 build-and-push-windows-container-1909:
 ifndef GCE_PD_CSI_STAGING_IMAGE
-$(error "Must set enviroment variable GCE_PD_CSI_STAGING_IMAGE to staging image repository")
+$(error "Must set environment variable GCE_PD_CSI_STAGING_IMAGE to staging image repository")
 endif
 @sh init-buildx.sh; \
 DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --file=Dockerfile.Windows --platform=windows \

deploy/kubernetes/overlays/alpha/rbac_add_snapshotter.yaml (+1 -1)

@@ -16,7 +16,7 @@ rules:
   - apiGroups: [""]
     resources: ["events"]
     verbs: ["list", "watch", "create", "update", "patch"]
-    # Secrets resource ommitted since GCE PD snapshots does not require them
+    # Secrets resource omitted since GCE PD snapshots does not require them
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotclasses"]
     verbs: ["get", "list", "watch"]

docs/kubernetes/user-guides/snapshots.md (+90 -75)

@@ -6,106 +6,121 @@

 1. [One-time per project] Create GCP service account for the CSI driver and set required roles

-```
-$ PROJECT=your-project-here # GCP project
-$ GCE_PD_SA_NAME=my-gce-pd-csi-sa # Name of the service account to create
-$ GCE_PD_SA_DIR=/my/safe/credentials/directory # Directory to save the service account key
-$ ./deploy/setup-project.sh
-```
+    ```
+    PROJECT=your-project-here # GCP project
+    GCE_PD_SA_NAME=my-gce-pd-csi-sa # Name of the service account to create
+    GCE_PD_SA_DIR=/my/safe/credentials/directory # Directory to save the service account key
+    ./deploy/setup-project.sh
+    ```

-2. Deploy driver to Kubernetes Cluster
+1. Deploy driver to Kubernetes Cluster

-```
-$ GCE_PD_SA_DIR=/my/safe/credentials/directory # Directory to get the service account key
-$ GCE_PD_DRIVER_VERSION=alpha # Driver version to deploy
-$ ./deploy/kubernetes/deploy-driver.sh
-```
+    ```
+    GCE_PD_SA_DIR=/my/safe/credentials/directory # Directory to get the service account key
+    GCE_PD_DRIVER_VERSION=alpha # Driver version to deploy
+    ./deploy/kubernetes/deploy-driver.sh
+    ```

 ### Snapshot Example

-**Create Storage Class:**
+1. Create `StorageClass`

-If you haven't created a `StorageClass` yet, create one first:
+    If you haven't created a `StorageClass` yet, create one first:

-```console
-kubectl apply -f ./examples/kubernetes/demo-zonal-sc.yaml
-```
+    ```console
+    kubectl apply -f ./examples/kubernetes/demo-zonal-sc.yaml
+    ```

-**Create Default VolumeSnapshotClass:**
+1. Create default `VolumeSnapshotClass`

-```console
-kubectl create -f ./examples/kubernetes/snapshot/default_volumesnapshotclass.yaml
-```
+    ```console
+    kubectl create -f ./examples/kubernetes/snapshot/default-volumesnapshotclass.yaml
+    ```

-**Create source PVC:**
+1. Create source PVC

-```console
-kubectl create -f ./examples/kubernetes/snapshot/source_pvc.yaml
-```
+    ```console
+    kubectl create -f ./examples/kubernetes/snapshot/source-pvc.yaml
+    ```

-**Generate sample data:**
+1. Generate sample data

-Create a sample pod with the source PVC. The source PVC is mounted into `/demo/data` directory of this pod. This pod will create a file `sample-file.txt` in `/demo/data` directory.
+    Create a sample pod with the source PVC. The source PVC is mounted into `/demo/data` directory of this pod. This pod will create a file `sample-file.txt` in `/demo/data` directory.

-```console
-kubectl create -f ./examples/kubernetes/snapshot/source_pod.yaml
-```
+    ```console
+    kubectl create -f ./examples/kubernetes/snapshot/source-pod.yaml
+    ```

-Check if the file has been created successfully:
+    Check if the file has been created successfully:

-```console
-$ kubectl exec source-pod -- ls /demo/data/
-lost+found
-sample-file.txt
-```
+    ```console
+    kubectl exec source-pod -- ls /demo/data/
+    ```

-**Create a snapshot of the source PVC:**
+    The output should be:

-```console
-kubectl create -f ./examples/kubernetes/snapshot/snapshot.yaml
-```
+    ```
+    lost+found
+    sample-file.txt
+    ```

-**Verify Snapshot has been created and it is ready to use:**
+1. Create a `VolumeSnapshot` of the source PVC

-```console
-$ kubectl get volumesnapshot snapshot-source-pvc -o yaml
-apiVersion: snapshot.storage.k8s.io/v1alpha1
-kind: VolumeSnapshot
-metadata:
-  ...
-  name: snapshot-source-pvc
-  namespace: default
-  ...
-spec:
-  snapshotClassName: default-snapshot-class
-  snapshotContentName: snapcontent-b408076b-720b-11e9-b9e3-42010a800014
-  ...
-status:
-  creationTime: "2019-05-09T03:37:01Z"
-  readyToUse: true
-  restoreSize: 6Gi
-```
+    ```console
+    kubectl create -f ./examples/kubernetes/snapshot/snapshot.yaml
+    ```

-**Restore the Snapshot into a new PVC:**
+1. Verify that `VolumeSnapshot` has been created and it is ready to use:

-Create a new PVC. Specify `spec.dataSource` section to restore from VolumeSnapshot `snapshot-source-pvc`.
+    ```console
+    kubectl get volumesnapshot snapshot-source-pvc -o yaml
+    ```

-```console
-kubectl create -f ./examples/kubernetes/snapshot/restored_pvc.yaml
-```
+    The output is similar to this:

-**Verify sample data has been restored:**
+    ```yaml
+    apiVersion: snapshot.storage.k8s.io/v1alpha1
+    kind: VolumeSnapshot
+    metadata:
+      ...
+      name: snapshot-source-pvc
+      namespace: default
+      ...
+    spec:
+      snapshotClassName: default-snapshot-class
+      snapshotContentName: snapcontent-b408076b-720b-11e9-b9e3-42010a800014
+      ...
+    status:
+      creationTime: "2019-05-09T03:37:01Z"
+      readyToUse: true
+      restoreSize: 6Gi
+    ```

-Create a sample pod with the restored PVC:
+1. Restore the `VolumeSnapshot` into a new PVC:

-```console
-kubectl create -f ./examples/kubernetes/snapshot/restored_pod.yaml
-```
+    Create a new PVC. Specify `spec.dataSource` section to restore from VolumeSnapshot `snapshot-source-pvc`.

-Check data has been restored in `/demo/data` directory:
+    ```console
+    kubectl create -f ./examples/kubernetes/snapshot/restored-pvc.yaml
+    ```

-```console
-$ kubectl exec restored-pod -- ls /demo/data/
-lost+found
-sample-file.txt
-```
+1. Verify sample data has been restored:
+
+    Create a sample pod with the restored PVC:
+
+    ```console
+    kubectl create -f ./examples/kubernetes/snapshot/restored-pod.yaml
+    ```
+
+    Check data has been restored in `/demo/data` directory:
+
+    ```console
+    kubectl exec restored-pod -- ls /demo/data/
+    ```
+
+    Verify that the output is:
+
+    ```
+    lost+found
+    sample-file.txt
+    ```

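The updated guide applies manifests from `./examples/kubernetes/snapshot/` that this commit does not show. As a rough, hypothetical sketch only (not the repository's actual files), a `VolumeSnapshot` like `snapshot.yaml` and a restore claim like `restored-pvc.yaml` might look as follows under the `snapshot.storage.k8s.io/v1alpha1` API that appears in the sample output; the source PVC name `source-pvc`, the storage class name `csi-gce-pd`, and the requested size are assumptions.

```yaml
# Hypothetical snapshot.yaml: snapshot the source PVC using the default snapshot class.
apiVersion: snapshot.storage.k8s.io/v1alpha1
kind: VolumeSnapshot
metadata:
  name: snapshot-source-pvc
spec:
  snapshotClassName: default-snapshot-class
  source:
    kind: PersistentVolumeClaim
    name: source-pvc             # assumed name of the source PVC
---
# Hypothetical restored-pvc.yaml: spec.dataSource points the new claim at the snapshot.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: restored-pvc
spec:
  dataSource:
    name: snapshot-source-pvc
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteOnce
  storageClassName: csi-gce-pd   # assumed; use the StorageClass created earlier
  resources:
    requests:
      storage: 6Gi               # matches the restoreSize shown in the sample output
```

The `spec.dataSource` block in the second manifest is the piece the guide refers to when restoring the snapshot into a new PVC.
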
examples/kubernetes/snapshot/default-volumesnapshotclass.yaml (-1)

@@ -5,4 +5,3 @@ metadata:
     snapshot.storage.kubernetes.io/is-default-class: "true"
   name: default-snapshot-class
 snapshotter: pd.csi.storage.gke.io
-

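For context, the hunk above covers only lines 5-8 of this file (it removes a trailing blank line). A plausible reconstruction of the complete manifest, assuming the `v1alpha1` snapshot API used elsewhere in this commit (the `apiVersion` and `kind` lines are not shown in the diff and are assumptions), is:

```yaml
apiVersion: snapshot.storage.k8s.io/v1alpha1
kind: VolumeSnapshotClass
metadata:
  annotations:
    # Marks this class as the default for new VolumeSnapshots
    snapshot.storage.kubernetes.io/is-default-class: "true"
  name: default-snapshot-class
snapshotter: pd.csi.storage.gke.io   # v1alpha1 uses `snapshotter`; later API versions use `driver`
```
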
pkg/gce-cloud-provider/compute/gce-compute.go (+4 -4)

@@ -310,7 +310,7 @@ func (cloud *CloudProvider) insertRegionalDisk(ctx context.Context, volKey *meta
 klog.Warningf("GCE PD %s already exists, reusing", volKey.Name)
 return nil
 }
-return status.Error(codes.Internal, fmt.Sprintf("unkown Insert disk error: %v", err))
+return status.Error(codes.Internal, fmt.Sprintf("unknown Insert disk error: %v", err))
 }

 err = cloud.waitForRegionalOp(ctx, insertOp, volKey.Region)
@@ -329,7 +329,7 @@ func (cloud *CloudProvider) insertRegionalDisk(ctx context.Context, volKey *meta
 klog.Warningf("GCE PD %s already exists after wait, reusing", volKey.Name)
 return nil
 }
-return fmt.Errorf("unkown Insert disk operation error: %v", err)
+return fmt.Errorf("unknown Insert disk operation error: %v", err)
 }
 return nil
 }
@@ -369,7 +369,7 @@ func (cloud *CloudProvider) insertZonalDisk(ctx context.Context, volKey *meta.Ke
 klog.Warningf("GCE PD %s already exists, reusing", volKey.Name)
 return nil
 }
-return fmt.Errorf("unkown Insert disk error: %v", err)
+return fmt.Errorf("unknown Insert disk error: %v", err)
 }

 err = cloud.waitForZonalOp(ctx, op, volKey.Zone)
@@ -389,7 +389,7 @@ func (cloud *CloudProvider) insertZonalDisk(ctx context.Context, volKey *meta.Ke
 klog.Warningf("GCE PD %s already exists after wait, reusing", volKey.Name)
 return nil
 }
-return fmt.Errorf("unkown Insert disk operation error: %v", err)
+return fmt.Errorf("unknown Insert disk operation error: %v", err)
 }
 return nil
 }

pkg/gce-pd-csi-driver/node.go (+5 -5)

@@ -232,7 +232,7 @@ func (ns *GCENodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeU
 if err := cleanupPublishPath(targetPath, ns.Mounter); err != nil {
 return nil, status.Error(codes.Internal, fmt.Sprintf("Unmount failed: %v\nUnmounting arguments: %s\n", err, targetPath))
 }
-klog.V(4).Infof("NodeUnpublishVolume succeded on %v from %s", volumeID, targetPath)
+klog.V(4).Infof("NodeUnpublishVolume succeeded on %v from %s", volumeID, targetPath)
 return &csi.NodeUnpublishVolumeResponse{}, nil
 }

@@ -294,7 +294,7 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage
 3) Readonly MUST match

 */
-klog.V(4).Infof("NodeStageVolume succeded on %v to %s, mount already exists.", volumeID, stagingTargetPath)
+klog.V(4).Infof("NodeStageVolume succeeded on %v to %s, mount already exists.", volumeID, stagingTargetPath)
 return &csi.NodeStageVolumeResponse{}, nil

 }
@@ -315,7 +315,7 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage
 }
 } else if blk := volumeCapability.GetBlock(); blk != nil {
 // Noop for Block NodeStageVolume
-klog.V(4).Infof("NodeStageVolume succeded on %v to %s, capability is block so this is a no-op", volumeID, stagingTargetPath)
+klog.V(4).Infof("NodeStageVolume succeeded on %v to %s, capability is block so this is a no-op", volumeID, stagingTargetPath)
 return &csi.NodeStageVolumeResponse{}, nil
 }

@@ -326,7 +326,7 @@ func (ns *GCENodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStage
 devicePath, stagingTargetPath, fstype, options, err))
 }

-klog.V(4).Infof("NodeStageVolume succeded on %v to %s", volumeID, stagingTargetPath)
+klog.V(4).Infof("NodeStageVolume succeeded on %v to %s", volumeID, stagingTargetPath)
 return &csi.NodeStageVolumeResponse{}, nil
 }

@@ -350,7 +350,7 @@ func (ns *GCENodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUns
 return nil, status.Error(codes.Internal, fmt.Sprintf("NodeUnstageVolume failed: %v\nUnmounting arguments: %s\n", err, stagingTargetPath))
 }

-klog.V(4).Infof("NodeUnstageVolume succeded on %v from %s", volumeID, stagingTargetPath)
+klog.V(4).Infof("NodeUnstageVolume succeeded on %v from %s", volumeID, stagingTargetPath)
 return &csi.NodeUnstageVolumeResponse{}, nil
 }

pkg/mount-manager/device-utils.go (+1 -1)

@@ -191,7 +191,7 @@ func (m *deviceUtils) VerifyDevicePath(devicePaths []string, deviceName string)
 return false, fmt.Errorf("couldn't get SCSI serial number for disk %s: %v", deviceName, innerErr)
 }
 // SUCCESS! devicePath points to a /dev/sdx that has a SCSI serial
-// equivilant to our disk name
+// equivalent to our disk name
 if scsiSerial == deviceName {
 return true, nil
 }

test/k8s-integration/cluster.go (+1 -1)

@@ -271,7 +271,7 @@ func getNormalizedVersion(kubeVersion, gkeVersion string) (string, error) {
 return "", fmt.Errorf("both kube version (%s) and gke version (%s) specified", kubeVersion, gkeVersion)
 }
 if kubeVersion == "" && gkeVersion == "" {
-return "", errors.New("neither kube verison nor gke verison specified")
+return "", errors.New("neither kube version nor gke version specified")
 }
 var v string
 if kubeVersion != "" {
