Skip to content
21 changes: 17 additions & 4 deletions pkg/manager/runnable_group.go
Original file line number Diff line number Diff line change
Expand Up @@ -167,8 +167,6 @@ func (r *runnableGroup) Start(ctx context.Context) error {
var retErr error

r.startOnce.Do(func() {
defer close(r.startReadyCh)

// Start the internal reconciler.
go r.reconcile()

Expand All @@ -194,6 +192,12 @@ func (r *runnableGroup) Start(ctx context.Context) error {
if err := ctx.Err(); !errors.Is(err, context.Canceled) {
retErr = err
}
return
Comment thread
sbueringer marked this conversation as resolved.
case <-r.ctx.Done():
// The group's internal context was cancelled (by StopAndWait).
// This unblocks readiness waiting when senders have exited via
// r.ctx.Done() and will no longer send to startReadyCh.
return
case rn := <-r.startReadyCh:
for i, existing := range r.startQueue {
if existing == rn {
Expand All @@ -202,8 +206,10 @@ func (r *runnableGroup) Start(ctx context.Context) error {
break
}
}
// We're done waiting if the queue is empty, return.
// We're done waiting if the queue is empty.
// All senders have already sent, so it is safe to close.
if len(r.startQueue) == 0 {
close(r.startReadyCh)
return
}
}
Expand Down Expand Up @@ -245,7 +251,14 @@ func (r *runnableGroup) reconcile() {
go func() {
if rn.Check(r.ctx) {
if rn.signalReady {
r.startReadyCh <- rn
// Use select to avoid sending after Start() has returned.
// When ctx is cancelled, Start() exits and is no longer
// receiving from startReadyCh. Without this select, we'd
// either block forever or panic if the channel were closed.
select {
case <-r.ctx.Done():
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this has to be the first case; otherwise this will still always send on startReadyCh.

case r.startReadyCh <- rn:
}
}
}
}()
Expand Down
48 changes: 48 additions & 0 deletions pkg/manager/runnable_group_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -363,6 +363,54 @@ func (r leaderElectionAndWarmupRunnable) NeedLeaderElection() bool {
return r.needLeaderElection
}

// TestStartReturnsWhenContextCancelledWithPendingReadinessCheck verifies that
// runnableGroup.Start() returns promptly when the context is cancelled, even if
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Looks like there's an issue, not sure if in prod or test code

WARNING: DATA RACE
Write at 0x00c0006184e0 by goroutine 191:
  runtime.closechan()
      /home/prow/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.25.0.linux-amd64/src/runtime/chan.go:414 +0x0
  sigs.k8s.io/controller-runtime/pkg/manager.(*runnableGroup).Start.func1.deferwrap1()
      /home/prow/go/src/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go:170 +0x33
  runtime.deferreturn()
      /home/prow/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.25.0.linux-amd64/src/runtime/panic.go:589 +0x5d
  sync.(*Once).doSlow()
      /home/prow/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.25.0.linux-amd64/src/sync/once.go:78 +0xd1
  sync.(*Once).Do()
      /home/prow/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.25.0.linux-amd64/src/sync/once.go:69 +0x44
  sigs.k8s.io/controller-runtime/pkg/manager.(*runnableGroup).Start()
      /home/prow/go/src/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go:169 +0xb5
  sigs.k8s.io/controller-runtime/pkg/manager.init.func5.3.1()
      /home/prow/go/src/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group_test.go:181 +0xca

Previous read at 0x00c0006184e0 by goroutine 238:
  runtime.chansend()
      /home/prow/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.25.0.linux-amd64/src/runtime/chan.go:176 +0x0
  sigs.k8s.io/controller-runtime/pkg/manager.(*runnableGroup).reconcile.func1.1()
      /home/prow/go/src/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go:249 +0xc4

Goroutine 191 (running) created at:
  sigs.k8s.io/controller-runtime/pkg/manager.init.func5.3()
      /home/prow/go/src/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group_test.go:178 +0xf9
  github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
      /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.27.2/internal/suite.go:942 +0x6ed

Goroutine 238 (running) created at:
  sigs.k8s.io/controller-runtime/pkg/manager.(*runnableGroup).reconcile.func1()
      /home/prow/go/src/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go:246 +0xfc
  sigs.k8s.io/controller-runtime/pkg/manager.(*runnableGroup).reconcile.gowrap1()
      /home/prow/go/src/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go:284 +0x41
==================
panic: send on closed channel

goroutine 281 [running]:
sigs.k8s.io/controller-runtime/pkg/manager.(*runnableGroup).reconcile.func1.1()
	/home/prow/go/src/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go:249 +0xc5
created by sigs.k8s.io/controller-runtime/pkg/manager.(*runnableGroup).reconcile.func1 in goroutine 279
	/home/prow/go/src/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go:246 +0xfd
FAIL	sigs.k8s.io/controller-runtime/pkg/manager	5.433s

https://prow.k8s.io/view/gs/kubernetes-ci-logs/pr-logs/pull/kubernetes-sigs_controller-runtime/3440/pull-controller-runtime-test/2018359601396715520 (see https://storage.googleapis.com/kubernetes-ci-logs/pr-logs/pull/kubernetes-sigs_controller-runtime/3440/pull-controller-runtime-test/2018359601396715520/build-log.txt)

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @sbueringer, thanks for the clarification request — this turned out to be a real concurrency issue in runnableGroup, not just a test artifact.

Start() was closing startReadyCh while readiness goroutines could still send on it, which caused a data race and a "send on closed channel" panic, depending on timing. The tests just made this easier to reproduce.

Removing the channel close exposed a second shutdown issue: Start() could block waiting for readiness when StopAndWait() cancelled the group context and all senders had already exited.

The fix now handles both sides explicitly: senders stop on r.ctx.Done(), and Start() exits readiness waiting on either ctx.Done() or r.ctx.Done(). This avoids races and deadlocks while preserving normal startup behavior.

Unit tests and go test -race pass locally; envtest-based integration tests should be covered by CI.

// some runnables have not signaled readiness. This is a regression test for a bug
// where Start() would spin indefinitely on ctx.Done() without returning.
func TestStartReturnsWhenContextCancelledWithPendingReadinessCheck(t *testing.T) {
	t.Parallel()

	g := NewWithT(t)
	group := newRunnableGroup(defaultBaseContext, make(chan error, 1))

	// Register a runnable whose readiness check never succeeds,
	// mimicking e.g. a cache sync that hangs or fails.
	runnable := RunnableFunc(func(rc context.Context) error {
		<-rc.Done()
		return nil
	})
	neverReady := func(cc context.Context) bool {
		// Stay pending until cancelled; readiness is never signaled.
		<-cc.Done()
		return false
	}
	g.Expect(group.Add(runnable, neverReady)).To(Succeed())

	ctx, cancel := context.WithCancel(t.Context())

	// Run Start() concurrently so we can observe when it returns.
	done := make(chan struct{})
	go func() {
		defer close(done)
		_ = group.Start(ctx)
	}()

	// Cancel immediately; Start() must notice and unwind.
	cancel()

	// Expect Start() to come back quickly rather than hang or busy-loop.
	select {
	case <-done:
		// Start() returned after the cancellation, as required.
	case <-time.After(5 * time.Second):
		t.Fatal("Start() did not return after context was cancelled - likely spinning on ctx.Done()")
	}

	// Tear the group down.
	group.StopAndWait(t.Context())
}

func TestWarmupFunctionIsExecutedWhenWarmupGroupIsStarted(t *testing.T) {
t.Parallel()
synctest.Test(t, func(t *testing.T) {
Expand Down
Loading