mirror of https://gitea.com/gitea/act_runner.git
synced 2026-05-08 08:13:25 +02:00
- Replace the fixed 1s RunDaemon timer with an event-driven select loop using separate log (3s) and state (5s) tickers for periodic flushes
- Add a batch-size threshold (default 100 rows) to flush logs immediately during bursty output like npm install
- Add a max-latency timer (default 5s) to guarantee single log lines are delivered within a bounded time
- Trigger an immediate flush on step transitions (start/stop) and on the job result for responsive frontend UX
- Skip ReportLog when no rows are pending and ReportState when the state is unchanged, eliminating no-op HTTP requests
- Replace fixed-rate polling with exponential backoff and jitter to prevent a thundering herd on idle runners
- Tune the HTTP client with MaxIdleConnsPerHost=10 and share a single http.Client between the Ping and Runner service clients
- Add configurable options: log_report_interval, log_report_max_latency, log_report_batch_size, state_report_interval, fetch_interval_max

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
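A minimal sketch of the event-driven flush loop described above. The type and every field/method name here are illustrative assumptions; only the ticker intervals, the 100-row batch threshold, and the flush triggers come from the commit message, and the max-latency timer is omitted for brevity. The real RunDaemon lives in the reporter, not in the file shown below.

package report // hypothetical sketch package, not the actual act_runner reporter

import (
	"context"
	"sync"
	"time"
)

type reporter struct {
	mu         sync.Mutex
	rows       []string      // pending log rows (string stands in for the real row type)
	stateDirty bool          // set when the job/step state changes
	flushNow   chan struct{} // signalled on step start/stop and on the job result
}

func (r *reporter) flushLogs()  { /* send pending rows via ReportLog */ }
func (r *reporter) flushState() { /* send the current state via ReportState */ }

// runDaemon replaces the old fixed 1s timer with an event-driven select loop.
func (r *reporter) runDaemon(ctx context.Context) {
	logTicker := time.NewTicker(3 * time.Second)   // log_report_interval
	stateTicker := time.NewTicker(5 * time.Second) // state_report_interval
	defer logTicker.Stop()
	defer stateTicker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-r.flushNow:
			// Step transition or job result: flush immediately for a responsive frontend.
			r.flushLogs()
			r.flushState()
		case <-logTicker.C:
			r.mu.Lock()
			pending := len(r.rows) > 0
			r.mu.Unlock()
			if pending { // skip no-op ReportLog requests
				r.flushLogs()
			}
		case <-stateTicker.C:
			r.mu.Lock()
			dirty := r.stateDirty
			r.mu.Unlock()
			if dirty { // skip no-op ReportState requests
				r.flushState()
			}
		}
	}
}

// appendRow shows the batch-size threshold: bursty output (e.g. npm install)
// triggers a flush as soon as 100 rows accumulate instead of waiting for a tick.
func (r *reporter) appendRow(row string) {
	r.mu.Lock()
	r.rows = append(r.rows, row)
	full := len(r.rows) >= 100 // log_report_batch_size
	r.mu.Unlock()
	if full {
		r.flushLogs()
	}
}

The HTTP tuning in the commit message amounts to raising MaxIdleConnsPerHost from net/http's default of 2, roughly:

	transport := http.DefaultTransport.(*http.Transport).Clone()
	transport.MaxIdleConnsPerHost = 10
	httpClient := &http.Client{Transport: transport} // shared by the Ping and Runner clients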
236 lines
5.4 KiB
Go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package poll

import (
	"context"
	"errors"
	"fmt"
	"math/rand/v2"
	"sync"
	"sync/atomic"
	"time"

	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
	"connectrpc.com/connect"
	log "github.com/sirupsen/logrus"

	"gitea.com/gitea/act_runner/internal/app/run"
	"gitea.com/gitea/act_runner/internal/pkg/client"
	"gitea.com/gitea/act_runner/internal/pkg/config"
)
type Poller struct {
	client client.Client
	runner *run.Runner
	cfg    *config.Config

	tasksVersion atomic.Int64 // version of the last task fetched from Gitea

	pollingCtx      context.Context
	shutdownPolling context.CancelFunc

	jobsCtx      context.Context
	shutdownJobs context.CancelFunc

	done chan struct{}

	consecutiveEmpty  atomic.Int64 // count of consecutive polls with no task available
	consecutiveErrors atomic.Int64 // count of consecutive fetch errors
}

func New(cfg *config.Config, client client.Client, runner *run.Runner) *Poller {
	pollingCtx, shutdownPolling := context.WithCancel(context.Background())
	jobsCtx, shutdownJobs := context.WithCancel(context.Background())

	return &Poller{
		client: client,
		runner: runner,
		cfg:    cfg,

		pollingCtx:      pollingCtx,
		shutdownPolling: shutdownPolling,

		jobsCtx:      jobsCtx,
		shutdownJobs: shutdownJobs,

		done: make(chan struct{}),
	}
}

func (p *Poller) Poll() {
	wg := &sync.WaitGroup{}
	for i := 0; i < p.cfg.Runner.Capacity; i++ {
		wg.Add(1)
		go p.poll(wg)
	}
	wg.Wait()

	// signal that we have shut down
	close(p.done)
}

func (p *Poller) PollOnce() {
	p.pollOnce()

	// signal that we're done
	close(p.done)
}

func (p *Poller) Shutdown(ctx context.Context) error {
	p.shutdownPolling()

	select {
	// graceful shutdown completed successfully
	case <-p.done:
		return nil

	// our timeout for shutting down ran out
	case <-ctx.Done():
		// When both the timeout fires and the graceful shutdown
		// completes successfully, this branch of the select may
		// fire. Do a non-blocking check here against the graceful
		// shutdown status to avoid returning an error if we don't need to.
		select {
		case <-p.done:
			return nil
		default:
		}

		// force a shutdown of all running jobs
		p.shutdownJobs()

		// wait for running jobs to report their status to Gitea
		<-p.done

		return ctx.Err()
	}
}
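
// Typical caller pattern (illustrative, not part of this file): run Poll in a
// goroutine, then hand Shutdown a bounded context so in-flight jobs get a
// grace period before being forced down:
//
//	go p.Poll()
//	// ... on SIGTERM:
//	shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	_ = p.Shutdown(shutdownCtx)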

func (p *Poller) poll(wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		p.pollOnce()

		select {
		case <-p.pollingCtx.Done():
			return
		default:
			continue
		}
	}
}

// calculateInterval returns the polling interval with exponential backoff based on
// consecutive empty or error responses. The interval starts at FetchInterval and
// doubles with each consecutive empty/error response, capped at FetchIntervalMax.
func (p *Poller) calculateInterval() time.Duration {
	base := p.cfg.Runner.FetchInterval
	maxInterval := p.cfg.Runner.FetchIntervalMax

	n := max(p.consecutiveEmpty.Load(), p.consecutiveErrors.Load())
	if n <= 1 {
		return base
	}

	// Capped exponential backoff: base * 2^(n-1), max shift=5 so the multiplier is <= 32.
	shift := min(n-1, 5)
	interval := base * time.Duration(int64(1)<<shift)
	return min(interval, maxInterval)
}
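
// Worked example (illustrative values, not defaults from this file): with
// FetchInterval=2s and FetchIntervalMax=30s, consecutive empty polls yield
// n=1 -> 2s, n=2 -> 4s, n=3 -> 8s, n=4 -> 16s, n=5 -> 32s capped to 30s,
// and the interval stays at the 30s cap until a task arrives and resets the counters.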

// addJitter adds +/- 20% random jitter to the given duration to avoid a thundering herd.
func addJitter(d time.Duration) time.Duration {
	if d <= 0 {
		return d
	}
	// jitter range: [-20%, +20%] of d
	jitterRange := int64(d) * 2 / 5 // 40% total range
	if jitterRange <= 0 {
		return d
	}
	jitter := rand.Int64N(jitterRange) - jitterRange/2
	return d + time.Duration(jitter)
}
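
// Worked example (illustrative): for d=4s, jitterRange is 1.6s and jitter is
// drawn from [-0.8s, +0.8s), so the returned interval lies in [3.2s, 4.8s).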

func (p *Poller) pollOnce() {
	for {
		interval := addJitter(p.calculateInterval())
		timer := time.NewTimer(interval)
		select {
		case <-timer.C:
		case <-p.pollingCtx.Done():
			timer.Stop()
			return
		}

		task, ok := p.fetchTask(p.pollingCtx)
		if !ok {
			continue
		}

		// Got a task: reset the backoff counters for fast subsequent polling.
		p.consecutiveEmpty.Store(0)
		p.consecutiveErrors.Store(0)

		p.runTaskWithRecover(p.jobsCtx, task)
		return
	}
}

func (p *Poller) runTaskWithRecover(ctx context.Context, task *runnerv1.Task) {
	defer func() {
		if r := recover(); r != nil {
			err := fmt.Errorf("panic: %v", r)
			log.WithError(err).Error("panic in runTaskWithRecover")
		}
	}()

	if err := p.runner.Run(ctx, task); err != nil {
		log.WithError(err).Error("failed to run task")
	}
}
func (p *Poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
	reqCtx, cancel := context.WithTimeout(ctx, p.cfg.Runner.FetchTimeout)
	defer cancel()

	// Load the version value that was in the cache when the request was sent.
	v := p.tasksVersion.Load()
	resp, err := p.client.FetchTask(reqCtx, connect.NewRequest(&runnerv1.FetchTaskRequest{
		TasksVersion: v,
	}))
	if errors.Is(err, context.DeadlineExceeded) {
		err = nil
	}
	if err != nil {
		log.WithError(err).Error("failed to fetch task")
		p.consecutiveErrors.Add(1)
		return nil, false
	}

	// Successful response: reset the error counter.
	p.consecutiveErrors.Store(0)

	if resp == nil || resp.Msg == nil {
		p.consecutiveEmpty.Add(1)
		return nil, false
	}

	if resp.Msg.TasksVersion > v {
		p.tasksVersion.CompareAndSwap(v, resp.Msg.TasksVersion)
	}

	if resp.Msg.Task == nil {
		p.consecutiveEmpty.Add(1)
		return nil, false
	}

	// Got a task: set tasksVersion to zero to force a db query in the next request.
	p.tasksVersion.CompareAndSwap(resp.Msg.TasksVersion, 0)

	return resp.Msg.Task, true
}