refactor: use single poller with semaphore-based capacity control

Previously, capacity=N spawned N independent polling goroutines, each
making FetchTask RPCs to the Gitea server concurrently. This caused
unnecessary connection load on the server proportional to the runner's
capacity setting.

Replace the N-goroutine model with a single polling loop that uses a
buffered channel as a semaphore to control concurrent task execution.
The poller acquires a capacity slot before fetching; when at capacity,
it blocks without issuing RPCs. Fetched tasks are dispatched to
independent goroutines that release their slot on completion.

Also fix a pre-existing bug in Shutdown() where the timeout branch
used a blocking receive on p.done instead of a non-blocking select,
which prevented shutdownJobs() from ever being called on timeout.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Bo-Yi Wu
2026-04-16 14:56:13 +08:00
parent 48944e136c
commit 7abef361b0
3 changed files with 85 additions and 71 deletions

View File

@@ -37,16 +37,15 @@ type Poller struct {
done chan struct{}
}
// workerState holds the single poller's backoff state. Consecutive empty or
// error responses drive exponential backoff; a successful task fetch resets
// both counters so the next poll fires immediately.
type workerState struct {
consecutiveEmpty int64
consecutiveErrors int64
// lastBackoff is the last interval reported to the PollBackoffSeconds gauge;
// used to suppress redundant no-op Set calls when the backoff plateaus
// (e.g. at FetchIntervalMax).
lastBackoff time.Duration
}
@@ -73,22 +72,57 @@ func New(cfg *config.Config, client client.Client, runner *run.Runner) *Poller {
}
// Poll runs the polling loop until the polling context is cancelled.
//
// A buffered channel is used as a counting semaphore bounding concurrent task
// execution at Runner.Capacity: a slot is acquired before each fetch, so no
// FetchTask RPCs are issued while the runner is saturated. Each fetched task
// runs in its own goroutine and releases its slot on completion. On exit,
// Poll waits for all in-flight tasks and then closes p.done to signal that
// shutdown finished.
func (p *Poller) Poll() {
	sem := make(chan struct{}, p.cfg.Runner.Capacity)
	wg := &sync.WaitGroup{}
	s := &workerState{}
	defer func() {
		wg.Wait()
		// signal that we shutdown
		close(p.done)
	}()
	for {
		// Acquire a capacity slot before fetching; blocks (without issuing
		// RPCs) while all slots are in use.
		select {
		case sem <- struct{}{}:
		case <-p.pollingCtx.Done():
			return
		}
		task, ok := p.fetchTask(p.pollingCtx, s)
		if !ok {
			// Nothing fetched: release the unused slot and back off.
			<-sem
			if !p.waitBackoff(s) {
				return
			}
			continue
		}
		s.resetBackoff()
		wg.Add(1)
		go func(t *runnerv1.Task) {
			defer wg.Done()
			defer func() { <-sem }() // release the capacity slot
			p.runTaskWithRecover(p.jobsCtx, t)
		}(task)
	}
}
// PollOnce fetches and executes exactly one task, then returns. Empty or
// failed fetches back off (with jitter) before retrying; cancellation of the
// polling context aborts the wait. p.done is closed on return to signal that
// the single run completed.
func (p *Poller) PollOnce() {
	// signal that we're done
	defer close(p.done)
	s := &workerState{}
	for {
		task, ok := p.fetchTask(p.pollingCtx, s)
		if !ok {
			if !p.waitBackoff(s) {
				return
			}
			continue
		}
		s.resetBackoff()
		p.runTaskWithRecover(p.jobsCtx, task)
		return
	}
}
func (p *Poller) Shutdown(ctx context.Context) error {
@@ -101,13 +135,13 @@ func (p *Poller) Shutdown(ctx context.Context) error {
// our timeout for shutting down ran out
case <-ctx.Done():
// when both the timeout fires and the graceful shutdown
// completed successfully, this branch of the select may
// fire. Do a non-blocking check here against the graceful
// shutdown status to avoid sending an error if we don't need to.
_, ok := <-p.done
if !ok {
// Both the timeout and the graceful shutdown may fire
// simultaneously. Do a non-blocking check to avoid forcing
// a shutdown when graceful already completed.
select {
case <-p.done:
return nil
default:
}
// force a shutdown of all running jobs
@@ -120,18 +154,27 @@ func (p *Poller) Shutdown(ctx context.Context) error {
}
}
func (p *Poller) poll(wg *sync.WaitGroup) {
defer wg.Done()
s := &workerState{}
for {
p.pollOnce(s)
func (s *workerState) resetBackoff() {
s.consecutiveEmpty = 0
s.consecutiveErrors = 0
s.lastBackoff = 0
}
select {
case <-p.pollingCtx.Done():
return
default:
continue
}
// waitBackoff reports the current backoff interval to the PollBackoffSeconds
// gauge (suppressing redundant Sets while the interval plateaus) and sleeps
// for that interval plus jitter. It returns true once the wait elapses, or
// false if the polling context was cancelled first.
func (p *Poller) waitBackoff(s *workerState) bool {
	interval := p.calculateInterval(s)
	if s.lastBackoff != interval {
		s.lastBackoff = interval
		metrics.PollBackoffSeconds.Set(interval.Seconds())
	}
	t := time.NewTimer(addJitter(interval))
	defer t.Stop() // no-op if the timer already fired
	select {
	case <-p.pollingCtx.Done():
		return false
	case <-t.C:
		return true
	}
}
@@ -167,34 +210,6 @@ func addJitter(d time.Duration) time.Duration {
return d + time.Duration(jitter)
}
// pollOnce blocks until exactly one task has been fetched and executed, or
// the polling context is cancelled. Unsuccessful fetches sleep for the
// current backoff interval (plus jitter) before retrying; the backoff gauge
// is only updated when the interval actually changes.
func (p *Poller) pollOnce(s *workerState) {
	for {
		if task, ok := p.fetchTask(p.pollingCtx, s); ok {
			// A hit: clear the counters so subsequent polls are immediate.
			s.consecutiveEmpty = 0
			s.consecutiveErrors = 0
			p.runTaskWithRecover(p.jobsCtx, task)
			return
		}
		interval := p.calculateInterval(s)
		if interval != s.lastBackoff {
			metrics.PollBackoffSeconds.Set(interval.Seconds())
			s.lastBackoff = interval
		}
		wait := time.NewTimer(addJitter(interval))
		select {
		case <-wait.C:
		case <-p.pollingCtx.Done():
			wait.Stop()
			return
		}
	}
}
func (p *Poller) runTaskWithRecover(ctx context.Context, task *runnerv1.Task) {
defer func() {
if r := recover(); r != nil {