Commit 4272b5a

Allow to set GracefulShutdownTimeout to -1, disabling timeouts
Signed-off-by: Vince Prignano <[email protected]>
1 parent 6adc01f commit 4272b5a
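
For context, a minimal sketch of how a caller could opt into this behavior. It assumes the exported manager.Options.GracefulShutdownTimeout field (a *time.Duration) is the public knob that feeds the internal gracefulShutdownTimeout changed in the diff below; that wiring is an assumption for illustration, not part of this commit.

package main

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func main() {
	// Assumption: Options.GracefulShutdownTimeout (*time.Duration) plumbs
	// through to the internal gracefulShutdownTimeout field. With this
	// commit, a negative value means "wait forever for runnables to stop".
	noTimeout := time.Duration(-1)

	mgr, err := manager.New(ctrl.GetConfigOrDie(), manager.Options{
		GracefulShutdownTimeout: &noTimeout,
	})
	if err != nil {
		panic(err)
	}

	// On SIGTERM/SIGINT the signal-handler context is cancelled and the
	// manager starts its stop procedure, now waiting indefinitely.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}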

2 files changed: +50 −1 lines changed

pkg/manager/internal.go

Lines changed: 6 additions & 1 deletion

@@ -509,7 +509,12 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e
 	//
 	// The shutdown context immediately expires if the gracefulShutdownTimeout is not set.
 	var shutdownCancel context.CancelFunc
-	cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout)
+	if cm.gracefulShutdownTimeout < 0 {
+		// We want to wait forever for the runnables to stop.
+		cm.shutdownCtx, shutdownCancel = context.WithCancel(context.Background())
+	} else {
+		cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout)
+	}
 	defer shutdownCancel()
 
 	// Start draining the errors before acquiring the lock to make sure we don't deadlock
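
The branch is needed because context.WithTimeout treats a negative duration as a deadline already in the past, so a negative timeout alone could never mean "forever". A standalone sketch (not from this commit) of the difference:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// A negative timeout yields a context that is already expired.
	expired, cancelExpired := context.WithTimeout(context.Background(), time.Duration(-1))
	defer cancelExpired()
	<-expired.Done()                               // returns immediately
	fmt.Println("WithTimeout(-1):", expired.Err()) // context deadline exceeded

	// WithCancel has no deadline: it stays open until cancel is called,
	// which is what "wait forever for the runnables" requires.
	forever, cancelForever := context.WithCancel(context.Background())
	fmt.Println("WithCancel before cancel:", forever.Err()) // <nil>
	cancelForever()
	<-forever.Done()
	fmt.Println("WithCancel after cancel:", forever.Err()) // context canceled
}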

pkg/manager/manager_test.go

Lines changed: 44 additions & 0 deletions

@@ -936,6 +936,50 @@ var _ = Describe("manger.Manager", func() {
 			<-runnableStopped
 		})
 
+		It("should wait forever for runnables if gracefulShutdownTimeout is <0 (-1)", func() {
+			m, err := New(cfg, options)
+			Expect(err).NotTo(HaveOccurred())
+			for _, cb := range callbacks {
+				cb(m)
+			}
+			m.(*controllerManager).gracefulShutdownTimeout = time.Duration(-1)
+
+			Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
+				<-ctx.Done()
+				time.Sleep(100 * time.Millisecond)
+				return nil
+			}))).ToNot(HaveOccurred())
+			Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
+				<-ctx.Done()
+				time.Sleep(200 * time.Millisecond)
+				return nil
+			}))).ToNot(HaveOccurred())
+			Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
+				<-ctx.Done()
+				time.Sleep(500 * time.Millisecond)
+				return nil
+			}))).ToNot(HaveOccurred())
+			Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
+				<-ctx.Done()
+				time.Sleep(1500 * time.Millisecond)
+				return nil
+			}))).ToNot(HaveOccurred())
+
+			ctx, cancel := context.WithCancel(context.Background())
+			managerStopDone := make(chan struct{})
+			go func() {
+				defer GinkgoRecover()
+				Expect(m.Start(ctx)).NotTo(HaveOccurred())
+				close(managerStopDone)
+			}()
+			<-m.Elected()
+			cancel()
+
+			beforeDone := time.Now()
+			<-managerStopDone
+			Expect(time.Since(beforeDone)).To(BeNumerically(">=", 1500*time.Millisecond))
+		})
+
 	}
 
 	Context("with defaults", func() {
