Previously, capacity=N spawned N independent polling goroutines, each making FetchTask RPCs to the Gitea server concurrently. This caused unnecessary connection load on the server proportional to the runner's capacity setting.

Replace the N-goroutine model with a single polling loop that uses a buffered channel as a semaphore to control concurrent task execution. The poller acquires a capacity slot before fetching; when at capacity, it blocks without issuing RPCs. Fetched tasks are dispatched to independent goroutines that release their slot on completion.

Also fix a pre-existing bug in Shutdown() where the timeout branch used a blocking receive on p.done instead of a non-blocking select, which prevented shutdownJobs() from ever being called on timeout.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
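The channel-as-semaphore pattern described above, sketched in Go. This is an illustration only, not the runner's actual code: pollLoop, Task, fetchTask, and runTask are hypothetical stand-ins for the real types and methods.

// Minimal sketch of the single-poller model: a buffered channel acts as a
// counting semaphore, one buffer slot per unit of capacity. Not the actual
// act_runner implementation.
package pollsketch

import "context"

type Task struct{}

// Stubs standing in for the runner's real fetch and execution methods.
func fetchTask(ctx context.Context) (*Task, bool) { return nil, false }
func runTask(ctx context.Context, t *Task)        {}

func pollLoop(ctx context.Context, capacity int) {
	slots := make(chan struct{}, capacity)
	for {
		// Acquire a capacity slot before fetching. When all slots are held
		// by running tasks, this blocks without issuing any FetchTask RPC.
		select {
		case <-ctx.Done():
			return
		case slots <- struct{}{}:
		}

		task, ok := fetchTask(ctx) // the single poller: one fetch at a time
		if !ok {
			<-slots // nothing fetched: release the slot and loop
			continue
		}

		// Dispatch to an independent goroutine that releases its slot
		// when the task completes.
		go func() {
			defer func() { <-slots }()
			runTask(ctx, task)
		}()
	}
}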
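The Shutdown() fix, likewise sketched. Only p.done and shutdownJobs() are named in the commit message; the Poller stand-in and the overall shape of Shutdown() are assumptions for illustration.

// Sketch of the Shutdown() timeout bug and its fix; structure assumed.
package pollsketch

import "time"

type Poller struct{ done chan struct{} }

func (p *Poller) shutdownJobs() {} // stand-in

func (p *Poller) Shutdown(timeout time.Duration) {
	select {
	case <-p.done:
		// Graceful path: the polling loop exited before the deadline.
	case <-time.After(timeout):
		// The old code did a blocking `<-p.done` here; on a genuine timeout
		// the poller has not exited, so that receive never returned and
		// shutdownJobs() was unreachable. A non-blocking select observes
		// p.done without waiting and falls through either way:
		select {
		case <-p.done: // poller finished while the timer was firing
		default: // poller still running; don't wait for it
		}
		p.shutdownJobs()
	}
}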
108 lines · 3.6 KiB · Go
// Copyright 2026 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package poll

import (
	"context"
	"errors"
	"testing"
	"time"

	"gitea.com/gitea/act_runner/internal/pkg/client/mocks"
	"gitea.com/gitea/act_runner/internal/pkg/config"

	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
	connect_go "connectrpc.com/connect"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// TestPoller_WorkerStateCounters verifies that workerState correctly tracks
// consecutive empty responses independently per state instance, and that
// fetchTask increments only the relevant counter.
func TestPoller_WorkerStateCounters(t *testing.T) {
	client := mocks.NewClient(t)
	client.On("FetchTask", mock.Anything, mock.Anything).Return(
		func(_ context.Context, _ *connect_go.Request[runnerv1.FetchTaskRequest]) (*connect_go.Response[runnerv1.FetchTaskResponse], error) {
			// Always return an empty response.
			return connect_go.NewResponse(&runnerv1.FetchTaskResponse{}), nil
		},
	)

	cfg, err := config.LoadDefault("")
	require.NoError(t, err)
	p := &Poller{client: client, cfg: cfg}

	ctx := context.Background()
	s1 := &workerState{}
	s2 := &workerState{}

	// Each worker independently observes one empty response.
	_, ok := p.fetchTask(ctx, s1)
	require.False(t, ok)
	_, ok = p.fetchTask(ctx, s2)
	require.False(t, ok)

	assert.Equal(t, int64(1), s1.consecutiveEmpty, "worker 1 should only count its own empty response")
	assert.Equal(t, int64(1), s2.consecutiveEmpty, "worker 2 should only count its own empty response")

	// Worker 1 sees a second empty; worker 2 stays at 1.
	_, ok = p.fetchTask(ctx, s1)
	require.False(t, ok)
	assert.Equal(t, int64(2), s1.consecutiveEmpty)
	assert.Equal(t, int64(1), s2.consecutiveEmpty, "worker 2's counter must not be affected by worker 1's empty fetches")
}

// TestPoller_FetchErrorIncrementsErrorsOnly verifies that a fetch error
// increments only the per-worker error counter, not the empty counter.
func TestPoller_FetchErrorIncrementsErrorsOnly(t *testing.T) {
	client := mocks.NewClient(t)
	client.On("FetchTask", mock.Anything, mock.Anything).Return(
		func(_ context.Context, _ *connect_go.Request[runnerv1.FetchTaskRequest]) (*connect_go.Response[runnerv1.FetchTaskResponse], error) {
			return nil, errors.New("network unreachable")
		},
	)

	cfg, err := config.LoadDefault("")
	require.NoError(t, err)
	p := &Poller{client: client, cfg: cfg}

	s := &workerState{}
	_, ok := p.fetchTask(context.Background(), s)
	require.False(t, ok)
	assert.Equal(t, int64(1), s.consecutiveErrors)
	assert.Equal(t, int64(0), s.consecutiveEmpty)
}

// TestPoller_CalculateInterval verifies the exponential backoff math is
// correctly driven by the workerState counters.
func TestPoller_CalculateInterval(t *testing.T) {
	cfg, err := config.LoadDefault("")
	require.NoError(t, err)
	cfg.Runner.FetchInterval = 2 * time.Second
	cfg.Runner.FetchIntervalMax = 60 * time.Second
	p := &Poller{cfg: cfg}

	cases := []struct {
		name         string
		empty, errs  int64
		wantInterval time.Duration
	}{
		{"first poll, no backoff", 0, 0, 2 * time.Second},
		{"single empty, still base", 1, 0, 2 * time.Second},
		{"two empties, doubled", 2, 0, 4 * time.Second},
		{"five empties, capped path", 5, 0, 32 * time.Second},
		{"many empties, capped at max", 20, 0, 60 * time.Second},
		{"errors drive backoff too", 0, 3, 8 * time.Second},
		{"max(empty, errors) wins", 2, 4, 16 * time.Second},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			s := &workerState{consecutiveEmpty: tc.empty, consecutiveErrors: tc.errs}
			assert.Equal(t, tc.wantInterval, p.calculateInterval(s))
		})
	}
}
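For reference, the backoff behavior the table above encodes: the fetch interval doubles once per consecutive empty (or errored) fetch beyond the first, driven by the larger of the two counters, and is capped at FetchIntervalMax. A sketch inferred from the test cases, not the package's actual calculateInterval:

// Backoff consistent with the test table above; inferred from the cases.
package pollsketch

import "time"

func backoff(base, maxInterval time.Duration, empty, errs int64) time.Duration {
	// The larger of the two consecutive-failure counters drives the backoff.
	n := empty
	if errs > n {
		n = errs
	}
	// Double once per count beyond the first: n <= 1 stays at the base
	// interval, n = 2 doubles it, and so on, capped at maxInterval.
	interval := base
	for i := int64(1); i < n && interval < maxInterval; i++ {
		interval *= 2
	}
	if interval > maxInterval {
		interval = maxInterval
	}
	return interval
}

With a 2s base and a 60s cap this reproduces every row: max(2, 4) = 4 yields 2s · 2³ = 16s, and 20 consecutive empties saturate at the 60s cap.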