
Commit 2ebab48

Merge pull request #2169 from vincepri/graceful-shutdown-1
🐛 Allow to set GracefulShutdownTimeout to -1, disabling timeouts
2 parents 2183c1e + 4272b5a commit 2ebab48
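
Before the file-level changes, a minimal usage sketch (not part of this commit) of what the change enables from user code, assuming the GracefulShutdownTimeout field on manager.Options (a *time.Duration): any negative value now makes the manager wait indefinitely for runnables to stop.

// Minimal sketch: opting out of the graceful shutdown timeout from user code.
// Assumes manager.Options.GracefulShutdownTimeout (*time.Duration); a negative
// duration now means "wait forever for runnables to stop".
package main

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func main() {
	noTimeout := time.Duration(-1) // any value < 0 disables the timeout
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), manager.Options{
		GracefulShutdownTimeout: &noTimeout,
	})
	if err != nil {
		panic(err)
	}

	// Start blocks until the context is canceled, then waits (here: without a
	// deadline) for all runnables to return.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}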

2 files changed: +50 −1 lines changed

pkg/manager/internal.go

Lines changed: 6 additions & 1 deletion
@@ -509,7 +509,12 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e
 	//
 	// The shutdown context immediately expires if the gracefulShutdownTimeout is not set.
 	var shutdownCancel context.CancelFunc
-	cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout)
+	if cm.gracefulShutdownTimeout < 0 {
+		// We want to wait forever for the runnables to stop.
+		cm.shutdownCtx, shutdownCancel = context.WithCancel(context.Background())
+	} else {
+		cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout)
+	}
 	defer shutdownCancel()
 
 	// Start draining the errors before acquiring the lock to make sure we don't deadlock
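
For clarity, a standalone sketch (not controller-runtime code; the shutdownContext helper name is hypothetical) of the behavior this hunk introduces: a negative timeout yields a context that only ends when its cancel function is called, while a non-negative timeout also expires on its own.

package main

import (
	"context"
	"fmt"
	"time"
)

// shutdownContext mirrors the branch added above: negative timeout -> no deadline.
func shutdownContext(gracefulShutdownTimeout time.Duration) (context.Context, context.CancelFunc) {
	if gracefulShutdownTimeout < 0 {
		// Wait forever for the runnables to stop.
		return context.WithCancel(context.Background())
	}
	return context.WithTimeout(context.Background(), gracefulShutdownTimeout)
}

func main() {
	ctx, cancel := shutdownContext(-1)
	defer cancel()

	select {
	case <-ctx.Done():
		fmt.Println("context expired")
	case <-time.After(50 * time.Millisecond):
		fmt.Println("still waiting: a negative timeout never expires on its own")
	}
}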

pkg/manager/manager_test.go

Lines changed: 44 additions & 0 deletions
@@ -1073,6 +1073,50 @@ var _ = Describe("manger.Manager", func() {
 			<-runnableStopped
 		})
 
+		It("should wait forever for runnables if gracefulShutdownTimeout is <0 (-1)", func() {
+			m, err := New(cfg, options)
+			Expect(err).NotTo(HaveOccurred())
+			for _, cb := range callbacks {
+				cb(m)
+			}
+			m.(*controllerManager).gracefulShutdownTimeout = time.Duration(-1)
+
+			Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
+				<-ctx.Done()
+				time.Sleep(100 * time.Millisecond)
+				return nil
+			}))).ToNot(HaveOccurred())
+			Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
+				<-ctx.Done()
+				time.Sleep(200 * time.Millisecond)
+				return nil
+			}))).ToNot(HaveOccurred())
+			Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
+				<-ctx.Done()
+				time.Sleep(500 * time.Millisecond)
+				return nil
+			}))).ToNot(HaveOccurred())
+			Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
+				<-ctx.Done()
+				time.Sleep(1500 * time.Millisecond)
+				return nil
+			}))).ToNot(HaveOccurred())
+
+			ctx, cancel := context.WithCancel(context.Background())
+			managerStopDone := make(chan struct{})
+			go func() {
+				defer GinkgoRecover()
+				Expect(m.Start(ctx)).NotTo(HaveOccurred())
+				close(managerStopDone)
+			}()
+			<-m.Elected()
+			cancel()
+
+			beforeDone := time.Now()
+			<-managerStopDone
+			Expect(time.Since(beforeDone)).To(BeNumerically(">=", 1500*time.Millisecond))
+		})
+
 	}
 
 	Context("with defaults", func() {
