Commit e84da89

Merge pull request #1028 from mattcary/perdisk
backoff per {node,disk} pair instead of just node
2 parents 6e3972c + 11611ed commit e84da89

3 files changed: +154 −76 lines

pkg/gce-pd-csi-driver/controller.go

+70 −12
@@ -39,8 +39,8 @@ import (
 )
 
 const (
-	nodeBackoffInitialDuration = 200 * time.Millisecond
-	nodeBackoffMaxDuration = 5 * time.Minute
+	errorBackoffInitialDuration = 200 * time.Millisecond
+	errorBackoffMaxDuration = 5 * time.Minute
 )
 
 type GCEControllerServer struct {
@@ -58,11 +58,46 @@ type GCEControllerServer struct {
 	// Aborted error
 	volumeLocks *common.VolumeLocks
 
-	// When the attacher sidecar issues controller publish/unpublish for multiple disks for a given node, the per-instance operation queue in GCE fills up causing attach/detach disk requests to immediately return with an error until the queue drains. nodeBackoff keeps track of any active backoff condition on a given node, and the time when retry of controller publish/unpublish is permissible. A node is marked with backoff when any error is encountered by the driver during controller publish/unpublish calls.
-	// If the controller eventually allows controller publish/publish requests for volumes (because the backoff time expired), and those requests fail, the next backoff retry time will be updated on every failure and capped at 'nodeBackoffMaxDuration'. Also, any successful controller publish/unpublish call will clear the backoff condition for the node.
-	nodeBackoff *flowcontrol.Backoff
+	// There are several kinds of errors that are immediately retried by either
+	// the CSI sidecars or the k8s control plane. The retries consume GCP API
+	// quota, e.g. by doing ListVolumes, so backoff needs to be used to
+	// prevent quota exhaustion.
+	//
+	// Examples of these errors are the per-instance GCE operation queue getting
+	// full (typically only 32 operations in flight at a time are allowed), and
+	// disks being deleted out from under a PV causing unpublish errors.
+	//
+	// While we need to back off, we also need some semblance of fairness. In
+	// particular, volume unpublish retries happen very quickly, and with
+	// a single backoff per node these retries can prevent any other operation
+	// from making progress, even if it would succeed. Hence we track errors on
+	// node and disk pairs, backing off only for calls matching such a
+	// pair.
+	//
+	// An implication is that in the full operation queue situation, requests
+	// for new disks will not back off the first time. This is acceptable as a
+	// single spurious call will not cause problems for quota exhaustion or make
+	// the operation queue problem worse. This is well compensated by giving
+	// disks where no problems are occurring a chance to be processed.
+	//
+	// errorBackoff keeps track of any active backoff condition on a given node,
+	// and the time when retry of controller publish/unpublish is permissible. A
+	// node and disk pair is marked with backoff when any error is encountered
+	// by the driver during controller publish/unpublish calls. If the
+	// controller eventually allows controller publish/unpublish requests for
+	// volumes (because the backoff time expired), and those requests fail, the
+	// next backoff retry time will be updated on every failure and capped at
+	// 'errorBackoffMaxDuration'. Also, any successful controller
+	// publish/unpublish call will clear the backoff condition for a node and
+	// disk.
+	errorBackoff *csiErrorBackoff
 }
 
+type csiErrorBackoff struct {
+	backoff *flowcontrol.Backoff
+}
+type csiErrorBackoffId string
+
 type workItem struct {
 	ctx context.Context
 	publishReq *csi.ControllerPublishVolumeRequest
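
For reference (not part of this commit), the fairness argument in the comment above rests on client-go's flowcontrol.Backoff tracking each string key independently: only the key that saw an error is delayed. A minimal runnable sketch, using the same durations as errorBackoffInitialDuration/errorBackoffMaxDuration and a hypothetical {node, disk} key format:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	b := flowcontrol.NewBackOff(200*time.Millisecond, 5*time.Minute)

	// Hypothetical keys, one per {node, disk} pair.
	failing := "node-a:disk-failing"
	healthy := "node-a:disk-healthy"

	// Record an error for one pair; only that key enters backoff.
	b.Next(failing, b.Clock.Now())
	fmt.Println(b.IsInBackOffSinceUpdate(failing, b.Clock.Now())) // true
	fmt.Println(b.IsInBackOffSinceUpdate(healthy, b.Clock.Now())) // false: other disks on the node proceed

	// A successful call clears the condition for the pair.
	b.Reset(failing)
	fmt.Println(b.IsInBackOffSinceUpdate(failing, b.Clock.Now())) // false
}
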
@@ -376,17 +411,18 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r
 		return nil, err
 	}
 
-	if gceCS.nodeBackoff.IsInBackOffSinceUpdate(req.NodeId, gceCS.nodeBackoff.Clock.Now()) {
+	backoffId := gceCS.errorBackoff.backoffId(req.NodeId, req.VolumeId)
+	if gceCS.errorBackoff.blocking(backoffId) {
 		return nil, status.Errorf(codes.Unavailable, "ControllerPublish not permitted on node %q due to backoff condition", req.NodeId)
 	}
 
 	resp, err := gceCS.executeControllerPublishVolume(ctx, req)
 	if err != nil {
-		klog.Infof("For node %s adding backoff due to error for volume %s", req.NodeId, req.VolumeId)
-		gceCS.nodeBackoff.Next(req.NodeId, gceCS.nodeBackoff.Clock.Now())
+		klog.Infof("For node %s adding backoff due to error for volume %s: %v", req.NodeId, req.VolumeId, err)
+		gceCS.errorBackoff.next(backoffId)
 	} else {
 		klog.Infof("For node %s clear backoff due to successful publish of volume %v", req.NodeId, req.VolumeId)
-		gceCS.nodeBackoff.Reset(req.NodeId)
+		gceCS.errorBackoff.reset(backoffId)
 	}
 	return resp, err
 }
@@ -513,17 +549,18 @@ func (gceCS *GCEControllerServer) ControllerUnpublishVolume(ctx context.Context,
 		return nil, err
 	}
 
-	if gceCS.nodeBackoff.IsInBackOffSinceUpdate(req.NodeId, gceCS.nodeBackoff.Clock.Now()) {
+	backoffId := gceCS.errorBackoff.backoffId(req.NodeId, req.VolumeId)
+	if gceCS.errorBackoff.blocking(backoffId) {
 		return nil, status.Errorf(codes.Unavailable, "ControllerUnpublish not permitted on node %q due to backoff condition", req.NodeId)
 	}
 
 	resp, err := gceCS.executeControllerUnpublishVolume(ctx, req)
 	if err != nil {
 		klog.Infof("For node %s adding backoff due to error for volume %s", req.NodeId, req.VolumeId)
-		gceCS.nodeBackoff.Next(req.NodeId, gceCS.nodeBackoff.Clock.Now())
+		gceCS.errorBackoff.next(backoffId)
 	} else {
 		klog.Infof("For node %s clear backoff due to successful unpublish of volume %v", req.NodeId, req.VolumeId)
-		gceCS.nodeBackoff.Reset(req.NodeId)
+		gceCS.errorBackoff.reset(backoffId)
 	}
 	return resp, err
 }
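
Both ControllerPublishVolume and ControllerUnpublishVolume now follow the same gate: compute a {node, volume} backoff ID, refuse with codes.Unavailable while it is blocking, then call next on failure or reset on success. A hedged sketch of that shared shape, written against the surrounding gce-pd-csi-driver package; the withErrorBackoff helper is illustrative only and not part of the commit:

// Illustrative only; not in the commit. Assumes gceCS, status, codes and the
// csiErrorBackoff helpers from this file are in scope.
func (gceCS *GCEControllerServer) withErrorBackoff(nodeId, volumeId, opName string, op func() error) error {
	backoffId := gceCS.errorBackoff.backoffId(nodeId, volumeId)
	if gceCS.errorBackoff.blocking(backoffId) {
		return status.Errorf(codes.Unavailable, "%s not permitted on node %q due to backoff condition", opName, nodeId)
	}
	if err := op(); err != nil {
		gceCS.errorBackoff.next(backoffId) // failure: start or extend backoff for this {node, disk} pair
		return err
	}
	gceCS.errorBackoff.reset(backoffId) // success: clear any backoff for the pair
	return nil
}

A caller would capture the response in the closure, e.g. resp, opErr = gceCS.executeControllerPublishVolume(ctx, req), and return resp alongside the error from the helper.
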
@@ -1560,3 +1597,24 @@ func pickRandAndConsecutive(slice []string, n int) ([]string, error) {
 	}
 	return ret, nil
 }
+
+func newCsiErrorBackoff() *csiErrorBackoff {
+	return &csiErrorBackoff{flowcontrol.NewBackOff(errorBackoffInitialDuration, errorBackoffMaxDuration)}
+}
+
+func (_ *csiErrorBackoff) backoffId(nodeId, volumeId string) csiErrorBackoffId {
+	return csiErrorBackoffId(fmt.Sprintf("%s:%s", nodeId, volumeId))
+}
+
+func (b *csiErrorBackoff) blocking(id csiErrorBackoffId) bool {
+	blk := b.backoff.IsInBackOffSinceUpdate(string(id), b.backoff.Clock.Now())
+	return blk
+}
+
+func (b *csiErrorBackoff) next(id csiErrorBackoffId) {
+	b.backoff.Next(string(id), b.backoff.Clock.Now())
+}
+
+func (b *csiErrorBackoff) reset(id csiErrorBackoffId) {
+	b.backoff.Reset(string(id))
+}
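
The helpers above are easy to exercise in isolation. A test-style sketch, not part of this commit; it assumes a _test.go file in the same package (so the unexported identifiers and the testing import are available), and the volume IDs are made up:

func TestCsiErrorBackoffPerNodeDiskPair(t *testing.T) {
	b := newCsiErrorBackoff()

	node := "test-node"
	failing := b.backoffId(node, "projects/p/zones/z/disks/failing")
	healthy := b.backoffId(node, "projects/p/zones/z/disks/healthy")

	// An error on one disk starts backoff for that {node, disk} pair only.
	b.next(failing)
	if !b.blocking(failing) {
		t.Error("expected the failing pair to be in backoff")
	}
	if b.blocking(healthy) {
		t.Error("a different disk on the same node should not be blocked")
	}

	// A successful publish/unpublish clears the condition.
	b.reset(failing)
	if b.blocking(failing) {
		t.Error("expected backoff to be cleared after reset")
	}
}
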
