Mirror of https://gitea.com/gitea/act_runner.git (synced 2026-05-08 16:23:23 +02:00)

Compare commits
4 Commits: v0.4.1 ... 7031b3507d
| Author | SHA1 | Date |
|---|---|---|
| | 7031b3507d | |
| | fc4eef3e0d | |
| | cce2dd9b9b | |
| | ec07b8c00b | |
1 .gitignore (vendored)
@@ -2,6 +2,7 @@
.env
.runner
coverage.txt
/gitea-vet
/config.yaml

# Jetbrains
@@ -13,7 +13,6 @@ linters:
- forbidigo
- gocheckcompilerdirectives
- gocritic
- goheader
- govet
- ineffassign
- mirror
@@ -86,11 +85,6 @@ linters:
enable:
- nilness
- unusedwrite
goheader:
values:
regexp:
HEADER: 'Copyright \d{4} The Gitea Authors\. All rights reserved\.(\nCopyright [^\n]+)*\nSPDX-License-Identifier: MIT'
template: '{{ HEADER }}'
exclusions:
generated: lax
presets:
@@ -107,16 +101,9 @@ issues:
max-same-issues: 0
formatters:
enable:
- gci
- gofmt
- gofumpt
settings:
gci:
custom-order: true
sections:
- standard
- prefix(gitea.com/gitea/act_runner)
- blank
- default
gofumpt:
extra-rules: true
exclusions:
32 Makefile
@@ -1,5 +1,6 @@
DIST := dist
EXECUTABLE := act_runner
GOFMT ?= gofumpt -l
DIST_DIRS := $(DIST)/binaries $(DIST)/release
GO ?= go
SHASUM ?= shasum -a 256
@@ -11,6 +12,7 @@ GXZ_PAGAGE ?= github.com/ulikunitz/xz/cmd/gxz@v0.5.10
LINUX_ARCHS ?= linux/amd64,linux/arm64
DARWIN_ARCHS ?= darwin-12/amd64,darwin-12/arm64
WINDOWS_ARCHS ?= windows/amd64
GO_FMT_FILES := $(shell find . -type f -name "*.go" ! -name "generated.*")
GOFILES := $(shell find . -type f -name "*.go" -o -name "go.mod" ! -name "generated.*")

DOCKER_IMAGE ?= gitea/act_runner
@@ -18,7 +20,7 @@ DOCKER_TAG ?= nightly
DOCKER_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)
DOCKER_ROOTLESS_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)-dind-rootless

GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.11.4
GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.10.1
GOVULNCHECK_PACKAGE ?= golang.org/x/vuln/cmd/govulncheck@v1

ifneq ($(shell uname), Darwin)
@@ -66,14 +68,19 @@ else
endif
endif

GO_PACKAGES_TO_VET ?= $(filter-out gitea.com/gitea/act_runner/internal/pkg/client/mocks,$(shell $(GO) list ./...))


TAGS ?=
LDFLAGS ?= -X "gitea.com/gitea/act_runner/internal/pkg/ver.version=v$(RELASE_VERSION)"

all: build

.PHONY: fmt
fmt:
$(GO) run $(GOLANGCI_LINT_PACKAGE) fmt
@hash gofumpt > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
$(GO) install mvdan.cc/gofumpt@latest; \
fi
$(GOFMT) -w $(GO_FMT_FILES)

.PHONY: go-check
go-check:
@@ -86,20 +93,23 @@ go-check:
fi

.PHONY: fmt-check
fmt-check: fmt
@diff=$$(git diff --color=always); \
fmt-check:
@hash gofumpt > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
$(GO) install mvdan.cc/gofumpt@latest; \
fi
@diff=$$($(GOFMT) -d $(GO_FMT_FILES)); \
if [ -n "$$diff" ]; then \
echo "Please run 'make fmt' and commit the result:"; \
printf "%s" "$${diff}"; \
echo "$${diff}"; \
exit 1; \
fi
fi;

.PHONY: deps-tools
deps-tools: ## install tool dependencies
$(GO) install $(GOVULNCHECK_PACKAGE)

.PHONY: lint
lint: lint-go
lint: lint-go vet

.PHONY: lint-go
lint-go: ## lint go files
@@ -129,6 +139,12 @@ tidy-check: tidy
test: fmt-check security-check
@$(GO) test -race -v -cover -coverprofile coverage.txt ./... && echo "\n==>\033[32m Ok\033[m\n" || exit 1

.PHONY: vet
vet:
@echo "Running go vet..."
@$(GO) build code.gitea.io/gitea-vet
@$(GO) vet -vettool=gitea-vet $(GO_PACKAGES_TO_VET)

install: $(GOFILES)
$(GO) install -v -tags '$(TAGS)' -ldflags '$(EXTLDFLAGS)-s -w $(LDFLAGS)'
11 build.go (new file)
@@ -0,0 +1,11 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

//go:build vendor

package main

import (
// for vet
_ "code.gitea.io/gitea-vet"
)
10 go.mod
@@ -4,6 +4,7 @@ go 1.26.0

require (
code.gitea.io/actions-proto-go v0.4.1
code.gitea.io/gitea-vet v0.2.3
connectrpc.com/connect v1.19.1
github.com/avast/retry-go/v4 v4.7.0
github.com/docker/docker v25.0.13+incompatible
@@ -21,8 +22,6 @@ require (
gotest.tools/v3 v3.5.2
)

require github.com/prometheus/client_golang v1.23.2

require (
cyphar.com/go-pathrs v0.2.3 // indirect
dario.cat/mergo v1.0.2 // indirect
@@ -30,7 +29,6 @@ require (
github.com/Masterminds/semver v1.5.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/ProtonMail/go-crypto v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bmatcuk/doublestar/v4 v4.10.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/clipperhouse/uax29/v2 v2.7.0 // indirect
@@ -77,16 +75,12 @@ require (
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opencontainers/selinux v1.13.1 // indirect
github.com/pjbgf/sha1cd v0.5.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/rhysd/actionlint v1.7.11 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sergi/go-diff v1.4.0 // indirect
@@ -104,11 +98,11 @@ require (
go.opentelemetry.io/otel v1.40.0 // indirect
go.opentelemetry.io/otel/metric v1.40.0 // indirect
go.opentelemetry.io/otel/trace v1.40.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
golang.org/x/crypto v0.48.0 // indirect
golang.org/x/net v0.50.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.41.0 // indirect
golang.org/x/tools v0.42.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/grpc v1.67.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
26 go.sum
@@ -1,5 +1,7 @@
|
||||
code.gitea.io/actions-proto-go v0.4.1 h1:l0EYhjsgpUe/1VABo2eK7zcoNX2W44WOnb0MSLrKfls=
|
||||
code.gitea.io/actions-proto-go v0.4.1/go.mod h1:mn7Wkqz6JbnTOHQpot3yDeHx+O5C9EGhMEE+htvHBas=
|
||||
code.gitea.io/gitea-vet v0.2.3 h1:gdFmm6WOTM65rE8FUBTRzeQZYzXePKSSB1+r574hWwI=
|
||||
code.gitea.io/gitea-vet v0.2.3/go.mod h1:zcNbT/aJEmivCAhfmkHOlT645KNOf9W2KnkLgFjGGfE=
|
||||
connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
|
||||
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
|
||||
cyphar.com/go-pathrs v0.2.3 h1:0pH8gep37wB0BgaXrEaN1OtZhUMeS7VvaejSr6i822o=
|
||||
@@ -27,8 +29,6 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/avast/retry-go/v4 v4.7.0 h1:yjDs35SlGvKwRNSykujfjdMxMhMQQM0TnIjJaHB+Zio=
|
||||
github.com/avast/retry-go/v4 v4.7.0/go.mod h1:ZMPDa3sY2bKgpLtap9JRUgk2yTAba7cgiFhqxY2Sg6Q=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bmatcuk/doublestar/v4 v4.10.0 h1:zU9WiOla1YA122oLM6i4EXvGW62DvKZVxIe6TYWexEs=
|
||||
github.com/bmatcuk/doublestar/v4 v4.10.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
@@ -129,8 +129,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
@@ -155,8 +153,6 @@ github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
|
||||
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
|
||||
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
@@ -171,14 +167,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
|
||||
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/rhysd/actionlint v1.7.11 h1:m+aSuCpCIClS8X02xMG4Z8s87fCHPsAtYkAoWGQZgEE=
|
||||
github.com/rhysd/actionlint v1.7.11/go.mod h1:8n50YougV9+50niD7oxgDTZ1KbN/ZnKiQ2xpLFeVhsI=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
@@ -222,6 +210,7 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
@@ -248,10 +237,6 @@ go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZY
|
||||
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
go.yaml.in/yaml/v4 v4.0.0-rc.3 h1:3h1fjsh1CTAPjW7q/EMe+C8shx5d8ctzZTrLcs/j8Go=
|
||||
go.yaml.in/yaml/v4 v4.0.0-rc.3/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0=
|
||||
@@ -265,6 +250,8 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
|
||||
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
@@ -303,8 +290,11 @@ golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
|
||||
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -8,10 +8,10 @@ import (
"fmt"
"os"

"github.com/spf13/cobra"

"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/ver"

"github.com/spf13/cobra"
)

func Execute(ctx context.Context) {

@@ -16,19 +16,18 @@ import (
"strings"
"time"

"connectrpc.com/connect"
"github.com/mattn/go-isatty"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"

"gitea.com/gitea/act_runner/internal/app/poll"
"gitea.com/gitea/act_runner/internal/app/run"
"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/envcheck"
"gitea.com/gitea/act_runner/internal/pkg/labels"
"gitea.com/gitea/act_runner/internal/pkg/metrics"
"gitea.com/gitea/act_runner/internal/pkg/ver"

"connectrpc.com/connect"
"github.com/mattn/go-isatty"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

func runDaemon(ctx context.Context, daemArgs *daemonArgs, configFile *string) func(cmd *cobra.Command, args []string) error {
@@ -150,15 +149,6 @@ func runDaemon(ctx context.Context, daemArgs *daemonArgs, configFile *string) fu
resp.Msg.Runner.Name, resp.Msg.Runner.Version, resp.Msg.Runner.Labels)
}

if cfg.Metrics.Enabled {
metrics.Init()
metrics.RunnerInfo.WithLabelValues(ver.Version(), resp.Msg.Runner.Name).Set(1)
metrics.RunnerCapacity.Set(float64(cfg.Runner.Capacity))
metrics.RegisterUptimeFunc(time.Now())
metrics.RegisterRunningJobsFunc(runner.RunningCount, cfg.Runner.Capacity)
metrics.StartServer(ctx, cfg.Metrics.Addr)
}

poller := poll.New(cfg, cli, runner)

if daemArgs.Once || reg.Ephemeral {

@@ -14,17 +14,17 @@ import (
"strings"
"time"

"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/labels"
"gitea.com/gitea/act_runner/internal/pkg/ver"

pingv1 "code.gitea.io/actions-proto-go/ping/v1"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"connectrpc.com/connect"
"github.com/mattn/go-isatty"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"

"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/labels"
"gitea.com/gitea/act_runner/internal/pkg/ver"
)

// runRegister registers a runner to the server
@@ -12,24 +12,18 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"gitea.com/gitea/act_runner/internal/pkg/client"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/config"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/metrics"
|
||||
|
||||
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
|
||||
"connectrpc.com/connect"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// TaskRunner abstracts task execution so the poller can be tested
|
||||
// without a real runner.
|
||||
type TaskRunner interface {
|
||||
Run(ctx context.Context, task *runnerv1.Task) error
|
||||
}
|
||||
"gitea.com/gitea/act_runner/internal/app/run"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/client"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/config"
|
||||
)
|
||||
|
||||
type Poller struct {
|
||||
client client.Client
|
||||
runner TaskRunner
|
||||
runner *run.Runner
|
||||
cfg *config.Config
|
||||
tasksVersion atomic.Int64 // tasksVersion used to store the version of the last task fetched from the Gitea.
|
||||
|
||||
@@ -40,21 +34,12 @@ type Poller struct {
|
||||
shutdownJobs context.CancelFunc
|
||||
|
||||
done chan struct{}
|
||||
|
||||
consecutiveEmpty atomic.Int64 // count of consecutive polls with no task available
|
||||
consecutiveErrors atomic.Int64 // count of consecutive fetch errors
|
||||
}
|
||||
|
||||
// workerState holds the single poller's backoff state. Consecutive empty or
|
||||
// error responses drive exponential backoff; a successful task fetch resets
|
||||
// both counters so the next poll fires immediately.
|
||||
type workerState struct {
|
||||
consecutiveEmpty int64
|
||||
consecutiveErrors int64
|
||||
// lastBackoff is the last interval reported to the PollBackoffSeconds gauge;
|
||||
// used to suppress redundant no-op Set calls when the backoff plateaus
|
||||
// (e.g. at FetchIntervalMax).
|
||||
lastBackoff time.Duration
|
||||
}
|
||||
|
||||
func New(cfg *config.Config, client client.Client, runner TaskRunner) *Poller {
|
||||
func New(cfg *config.Config, client client.Client, runner *run.Runner) *Poller {
|
||||
pollingCtx, shutdownPolling := context.WithCancel(context.Background())
|
||||
|
||||
jobsCtx, shutdownJobs := context.WithCancel(context.Background())
|
||||
@@ -77,57 +62,22 @@ func New(cfg *config.Config, client client.Client, runner TaskRunner) *Poller {
|
||||
}
|
||||
|
||||
func (p *Poller) Poll() {
|
||||
sem := make(chan struct{}, p.cfg.Runner.Capacity)
|
||||
wg := &sync.WaitGroup{}
|
||||
s := &workerState{}
|
||||
|
||||
defer func() {
|
||||
wg.Wait()
|
||||
close(p.done)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case sem <- struct{}{}:
|
||||
case <-p.pollingCtx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
task, ok := p.fetchTask(p.pollingCtx, s)
|
||||
if !ok {
|
||||
<-sem
|
||||
if !p.waitBackoff(s) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
s.resetBackoff()
|
||||
|
||||
for i := 0; i < p.cfg.Runner.Capacity; i++ {
|
||||
wg.Add(1)
|
||||
go func(t *runnerv1.Task) {
|
||||
defer wg.Done()
|
||||
defer func() { <-sem }()
|
||||
p.runTaskWithRecover(p.jobsCtx, t)
|
||||
}(task)
|
||||
go p.poll(wg)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// signal that we shutdown
|
||||
close(p.done)
|
||||
}
|
||||
|
||||
func (p *Poller) PollOnce() {
|
||||
defer close(p.done)
|
||||
s := &workerState{}
|
||||
for {
|
||||
task, ok := p.fetchTask(p.pollingCtx, s)
|
||||
if !ok {
|
||||
if !p.waitBackoff(s) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
s.resetBackoff()
|
||||
p.runTaskWithRecover(p.jobsCtx, task)
|
||||
return
|
||||
}
|
||||
p.pollOnce()
|
||||
|
||||
// signal that we're done
|
||||
close(p.done)
|
||||
}
|
||||
|
||||
func (p *Poller) Shutdown(ctx context.Context) error {
|
||||
@@ -140,13 +90,13 @@ func (p *Poller) Shutdown(ctx context.Context) error {
|
||||
|
||||
// our timeout for shutting down ran out
|
||||
case <-ctx.Done():
|
||||
// Both the timeout and the graceful shutdown may fire
|
||||
// simultaneously. Do a non-blocking check to avoid forcing
|
||||
// a shutdown when graceful already completed.
|
||||
select {
|
||||
case <-p.done:
|
||||
// when both the timeout fires and the graceful shutdown
|
||||
// completed succsfully, this branch of the select may
|
||||
// fire. Do a non-blocking check here against the graceful
|
||||
// shutdown status to avoid sending an error if we don't need to.
|
||||
_, ok := <-p.done
|
||||
if !ok {
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
// force a shutdown of all running jobs
|
||||
@@ -159,38 +109,28 @@ func (p *Poller) Shutdown(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *workerState) resetBackoff() {
|
||||
s.consecutiveEmpty = 0
|
||||
s.consecutiveErrors = 0
|
||||
s.lastBackoff = 0
|
||||
}
|
||||
func (p *Poller) poll(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
p.pollOnce()
|
||||
|
||||
// waitBackoff sleeps for the current backoff interval (with jitter).
|
||||
// Returns false if the polling context was cancelled during the wait.
|
||||
func (p *Poller) waitBackoff(s *workerState) bool {
|
||||
base := p.calculateInterval(s)
|
||||
if base != s.lastBackoff {
|
||||
metrics.PollBackoffSeconds.Set(base.Seconds())
|
||||
s.lastBackoff = base
|
||||
}
|
||||
timer := time.NewTimer(addJitter(base))
|
||||
select {
|
||||
case <-timer.C:
|
||||
return true
|
||||
case <-p.pollingCtx.Done():
|
||||
timer.Stop()
|
||||
return false
|
||||
select {
|
||||
case <-p.pollingCtx.Done():
|
||||
return
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// calculateInterval returns the polling interval with exponential backoff based on
|
||||
// consecutive empty or error responses. The interval starts at FetchInterval and
|
||||
// doubles with each consecutive empty/error, capped at FetchIntervalMax.
|
||||
func (p *Poller) calculateInterval(s *workerState) time.Duration {
|
||||
func (p *Poller) calculateInterval() time.Duration {
|
||||
base := p.cfg.Runner.FetchInterval
|
||||
maxInterval := p.cfg.Runner.FetchIntervalMax
|
||||
|
||||
n := max(s.consecutiveEmpty, s.consecutiveErrors)
|
||||
n := max(p.consecutiveEmpty.Load(), p.consecutiveErrors.Load())
|
||||
if n <= 1 {
|
||||
return base
|
||||
}
|
||||
@@ -215,6 +155,30 @@ func addJitter(d time.Duration) time.Duration {
|
||||
return d + time.Duration(jitter)
|
||||
}
|
||||
|
||||
func (p *Poller) pollOnce() {
|
||||
for {
|
||||
task, ok := p.fetchTask(p.pollingCtx)
|
||||
if !ok {
|
||||
interval := addJitter(p.calculateInterval())
|
||||
timer := time.NewTimer(interval)
|
||||
select {
|
||||
case <-timer.C:
|
||||
case <-p.pollingCtx.Done():
|
||||
timer.Stop()
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Got a task — reset backoff counters for fast subsequent polling.
|
||||
p.consecutiveEmpty.Store(0)
|
||||
p.consecutiveErrors.Store(0)
|
||||
|
||||
p.runTaskWithRecover(p.jobsCtx, task)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Poller) runTaskWithRecover(ctx context.Context, task *runnerv1.Task) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
@@ -228,42 +192,29 @@ func (p *Poller) runTaskWithRecover(ctx context.Context, task *runnerv1.Task) {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Poller) fetchTask(ctx context.Context, s *workerState) (*runnerv1.Task, bool) {
|
||||
func (p *Poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
|
||||
reqCtx, cancel := context.WithTimeout(ctx, p.cfg.Runner.FetchTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Load the version value that was in the cache when the request was sent.
|
||||
v := p.tasksVersion.Load()
|
||||
start := time.Now()
|
||||
resp, err := p.client.FetchTask(reqCtx, connect.NewRequest(&runnerv1.FetchTaskRequest{
|
||||
TasksVersion: v,
|
||||
}))
|
||||
|
||||
// DeadlineExceeded is the designed idle path for a long-poll: the server
|
||||
// found no work within FetchTimeout. Treat it as an empty response and do
|
||||
// not record the duration — the timeout value would swamp the histogram.
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
s.consecutiveEmpty++
|
||||
s.consecutiveErrors = 0 // timeout is a healthy idle response
|
||||
metrics.PollFetchTotal.WithLabelValues(metrics.LabelResultEmpty).Inc()
|
||||
return nil, false
|
||||
err = nil
|
||||
}
|
||||
metrics.PollFetchDuration.Observe(time.Since(start).Seconds())
|
||||
|
||||
if err != nil {
|
||||
log.WithError(err).Error("failed to fetch task")
|
||||
s.consecutiveErrors++
|
||||
metrics.PollFetchTotal.WithLabelValues(metrics.LabelResultError).Inc()
|
||||
metrics.ClientErrors.WithLabelValues(metrics.LabelMethodFetchTask).Inc()
|
||||
p.consecutiveErrors.Add(1)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Successful response — reset error counter.
|
||||
s.consecutiveErrors = 0
|
||||
p.consecutiveErrors.Store(0)
|
||||
|
||||
if resp == nil || resp.Msg == nil {
|
||||
s.consecutiveEmpty++
|
||||
metrics.PollFetchTotal.WithLabelValues(metrics.LabelResultEmpty).Inc()
|
||||
p.consecutiveEmpty.Add(1)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
@@ -272,14 +223,12 @@ func (p *Poller) fetchTask(ctx context.Context, s *workerState) (*runnerv1.Task,
|
||||
}
|
||||
|
||||
if resp.Msg.Task == nil {
|
||||
s.consecutiveEmpty++
|
||||
metrics.PollFetchTotal.WithLabelValues(metrics.LabelResultEmpty).Inc()
|
||||
p.consecutiveEmpty.Add(1)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// got a task, set `tasksVersion` to zero to force query db in next request.
|
||||
// got a task, set `tasksVersion` to zero to focre query db in next request.
|
||||
p.tasksVersion.CompareAndSwap(resp.Msg.TasksVersion, 0)
|
||||
|
||||
metrics.PollFetchTotal.WithLabelValues(metrics.LabelResultTask).Inc()
|
||||
return resp.Msg.Task, true
|
||||
}
|
||||
|
||||
@@ -1,260 +0,0 @@
|
||||
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package poll
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"gitea.com/gitea/act_runner/internal/pkg/client/mocks"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/config"
|
||||
|
||||
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
|
||||
connect_go "connectrpc.com/connect"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestPoller_WorkerStateCounters verifies that workerState correctly tracks
|
||||
// consecutive empty responses independently per state instance, and that
|
||||
// fetchTask increments only the relevant counter.
|
||||
func TestPoller_WorkerStateCounters(t *testing.T) {
|
||||
client := mocks.NewClient(t)
|
||||
client.On("FetchTask", mock.Anything, mock.Anything).Return(
|
||||
func(_ context.Context, _ *connect_go.Request[runnerv1.FetchTaskRequest]) (*connect_go.Response[runnerv1.FetchTaskResponse], error) {
|
||||
// Always return an empty response.
|
||||
return connect_go.NewResponse(&runnerv1.FetchTaskResponse{}), nil
|
||||
},
|
||||
)
|
||||
|
||||
cfg, err := config.LoadDefault("")
|
||||
require.NoError(t, err)
|
||||
p := &Poller{client: client, cfg: cfg}
|
||||
|
||||
ctx := context.Background()
|
||||
s1 := &workerState{}
|
||||
s2 := &workerState{}
|
||||
|
||||
// Each worker independently observes one empty response.
|
||||
_, ok := p.fetchTask(ctx, s1)
|
||||
require.False(t, ok)
|
||||
_, ok = p.fetchTask(ctx, s2)
|
||||
require.False(t, ok)
|
||||
|
||||
assert.Equal(t, int64(1), s1.consecutiveEmpty, "worker 1 should only count its own empty response")
|
||||
assert.Equal(t, int64(1), s2.consecutiveEmpty, "worker 2 should only count its own empty response")
|
||||
|
||||
// Worker 1 sees a second empty; worker 2 stays at 1.
|
||||
_, ok = p.fetchTask(ctx, s1)
|
||||
require.False(t, ok)
|
||||
assert.Equal(t, int64(2), s1.consecutiveEmpty)
|
||||
assert.Equal(t, int64(1), s2.consecutiveEmpty, "worker 2's counter must not be affected by worker 1's empty fetches")
|
||||
}
|
||||
|
||||
// TestPoller_FetchErrorIncrementsErrorsOnly verifies that a fetch error
|
||||
// increments only the per-worker error counter, not the empty counter.
|
||||
func TestPoller_FetchErrorIncrementsErrorsOnly(t *testing.T) {
|
||||
client := mocks.NewClient(t)
|
||||
client.On("FetchTask", mock.Anything, mock.Anything).Return(
|
||||
func(_ context.Context, _ *connect_go.Request[runnerv1.FetchTaskRequest]) (*connect_go.Response[runnerv1.FetchTaskResponse], error) {
|
||||
return nil, errors.New("network unreachable")
|
||||
},
|
||||
)
|
||||
|
||||
cfg, err := config.LoadDefault("")
|
||||
require.NoError(t, err)
|
||||
p := &Poller{client: client, cfg: cfg}
|
||||
|
||||
s := &workerState{}
|
||||
_, ok := p.fetchTask(context.Background(), s)
|
||||
require.False(t, ok)
|
||||
assert.Equal(t, int64(1), s.consecutiveErrors)
|
||||
assert.Equal(t, int64(0), s.consecutiveEmpty)
|
||||
}
|
||||
|
||||
// TestPoller_CalculateInterval verifies the exponential backoff math is
|
||||
// correctly driven by the workerState counters.
|
||||
func TestPoller_CalculateInterval(t *testing.T) {
|
||||
cfg, err := config.LoadDefault("")
|
||||
require.NoError(t, err)
|
||||
cfg.Runner.FetchInterval = 2 * time.Second
|
||||
cfg.Runner.FetchIntervalMax = 60 * time.Second
|
||||
p := &Poller{cfg: cfg}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
empty, errs int64
|
||||
wantInterval time.Duration
|
||||
}{
|
||||
{"first poll, no backoff", 0, 0, 2 * time.Second},
|
||||
{"single empty, still base", 1, 0, 2 * time.Second},
|
||||
{"two empties, doubled", 2, 0, 4 * time.Second},
|
||||
{"five empties, capped path", 5, 0, 32 * time.Second},
|
||||
{"many empties, capped at max", 20, 0, 60 * time.Second},
|
||||
{"errors drive backoff too", 0, 3, 8 * time.Second},
|
||||
{"max(empty, errors) wins", 2, 4, 16 * time.Second},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
s := &workerState{consecutiveEmpty: tc.empty, consecutiveErrors: tc.errs}
|
||||
assert.Equal(t, tc.wantInterval, p.calculateInterval(s))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// atomicMax atomically updates target to max(target, val).
|
||||
func atomicMax(target *atomic.Int64, val int64) {
|
||||
for {
|
||||
old := target.Load()
|
||||
if val <= old || target.CompareAndSwap(old, val) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type mockRunner struct {
|
||||
delay time.Duration
|
||||
running atomic.Int64
|
||||
maxConcurrent atomic.Int64
|
||||
totalCompleted atomic.Int64
|
||||
}
|
||||
|
||||
func (m *mockRunner) Run(ctx context.Context, _ *runnerv1.Task) error {
|
||||
atomicMax(&m.maxConcurrent, m.running.Add(1))
|
||||
select {
|
||||
case <-time.After(m.delay):
|
||||
case <-ctx.Done():
|
||||
}
|
||||
m.running.Add(-1)
|
||||
m.totalCompleted.Add(1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// TestPoller_ConcurrencyLimitedByCapacity verifies that with capacity=3 and
|
||||
// 6 available tasks, at most 3 tasks run concurrently, and FetchTask is
|
||||
// never called concurrently (single poller).
|
||||
func TestPoller_ConcurrencyLimitedByCapacity(t *testing.T) {
|
||||
const (
|
||||
capacity = 3
|
||||
totalTasks = 6
|
||||
taskDelay = 50 * time.Millisecond
|
||||
)
|
||||
|
||||
var (
|
||||
tasksReturned atomic.Int64
|
||||
fetchConcur atomic.Int64
|
||||
maxFetchConcur atomic.Int64
|
||||
)
|
||||
|
||||
cli := mocks.NewClient(t)
|
||||
cli.On("FetchTask", mock.Anything, mock.Anything).Return(
|
||||
func(_ context.Context, _ *connect_go.Request[runnerv1.FetchTaskRequest]) (*connect_go.Response[runnerv1.FetchTaskResponse], error) {
|
||||
atomicMax(&maxFetchConcur, fetchConcur.Add(1))
|
||||
defer fetchConcur.Add(-1)
|
||||
|
||||
n := tasksReturned.Add(1)
|
||||
if n <= totalTasks {
|
||||
return connect_go.NewResponse(&runnerv1.FetchTaskResponse{
|
||||
Task: &runnerv1.Task{Id: n},
|
||||
}), nil
|
||||
}
|
||||
return connect_go.NewResponse(&runnerv1.FetchTaskResponse{}), nil
|
||||
},
|
||||
)
|
||||
|
||||
runner := &mockRunner{delay: taskDelay}
|
||||
|
||||
cfg, err := config.LoadDefault("")
|
||||
require.NoError(t, err)
|
||||
cfg.Runner.Capacity = capacity
|
||||
cfg.Runner.FetchInterval = 10 * time.Millisecond
|
||||
cfg.Runner.FetchIntervalMax = 10 * time.Millisecond
|
||||
|
||||
poller := New(cfg, cli, runner)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Go(poller.Poll)
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return runner.totalCompleted.Load() >= totalTasks
|
||||
}, 2*time.Second, 10*time.Millisecond, "all tasks should complete")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
err = poller.Shutdown(ctx)
|
||||
require.NoError(t, err)
|
||||
wg.Wait()
|
||||
|
||||
assert.LessOrEqual(t, runner.maxConcurrent.Load(), int64(capacity),
|
||||
"concurrent running tasks must not exceed capacity")
|
||||
assert.GreaterOrEqual(t, runner.maxConcurrent.Load(), int64(2),
|
||||
"with 6 tasks and capacity 3, at least 2 should overlap")
|
||||
assert.Equal(t, int64(1), maxFetchConcur.Load(),
|
||||
"FetchTask must never be called concurrently (single poller)")
|
||||
assert.Equal(t, int64(totalTasks), runner.totalCompleted.Load(),
|
||||
"all tasks should have been executed")
|
||||
}
|
||||
|
||||
// TestPoller_ShutdownForcesJobsOnTimeout locks in the fix for a
|
||||
// pre-existing bug where Shutdown's timeout branch used a blocking
|
||||
// `<-p.done` receive, leaving p.shutdownJobs() unreachable. With a
|
||||
// task parked on jobsCtx and a Shutdown deadline shorter than the
|
||||
// task's natural completion, Shutdown must force-cancel via
|
||||
// shutdownJobs() and return ctx.Err() promptly — not block until the
|
||||
// task would have finished on its own.
|
||||
func TestPoller_ShutdownForcesJobsOnTimeout(t *testing.T) {
|
||||
var served atomic.Bool
|
||||
cli := mocks.NewClient(t)
|
||||
cli.On("FetchTask", mock.Anything, mock.Anything).Return(
|
||||
func(_ context.Context, _ *connect_go.Request[runnerv1.FetchTaskRequest]) (*connect_go.Response[runnerv1.FetchTaskResponse], error) {
|
||||
if served.CompareAndSwap(false, true) {
|
||||
return connect_go.NewResponse(&runnerv1.FetchTaskResponse{
|
||||
Task: &runnerv1.Task{Id: 1},
|
||||
}), nil
|
||||
}
|
||||
return connect_go.NewResponse(&runnerv1.FetchTaskResponse{}), nil
|
||||
},
|
||||
)
|
||||
|
||||
// delay >> Shutdown timeout: Run only returns when jobsCtx is
|
||||
// cancelled by shutdownJobs().
|
||||
runner := &mockRunner{delay: 30 * time.Second}
|
||||
|
||||
cfg, err := config.LoadDefault("")
|
||||
require.NoError(t, err)
|
||||
cfg.Runner.Capacity = 1
|
||||
cfg.Runner.FetchInterval = 10 * time.Millisecond
|
||||
cfg.Runner.FetchIntervalMax = 10 * time.Millisecond
|
||||
|
||||
poller := New(cfg, cli, runner)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Go(poller.Poll)
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return runner.running.Load() == 1
|
||||
}, time.Second, 10*time.Millisecond, "task should start running")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
|
||||
defer cancel()
|
||||
start := time.Now()
|
||||
err = poller.Shutdown(ctx)
|
||||
elapsed := time.Since(start)
|
||||
|
||||
require.ErrorIs(t, err, context.DeadlineExceeded)
|
||||
// With the fix, Shutdown returns shortly after the deadline once
|
||||
// the forced job unwinds. Without the fix, the blocking <-p.done
|
||||
// would hang for the full 30s mockRunner delay.
|
||||
assert.Less(t, elapsed, 5*time.Second,
|
||||
"Shutdown must not block on the parked task; shutdownJobs() must run on timeout")
|
||||
|
||||
wg.Wait()
|
||||
assert.Equal(t, int64(1), runner.totalCompleted.Load(),
|
||||
"the parked task must be cancelled and unwound")
|
||||
}
|
||||
@@ -12,16 +12,8 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"gitea.com/gitea/act_runner/internal/pkg/client"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/config"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/labels"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/metrics"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/report"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/ver"
|
||||
|
||||
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
|
||||
"connectrpc.com/connect"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
@@ -30,6 +22,12 @@ import (
|
||||
"github.com/nektos/act/pkg/model"
|
||||
"github.com/nektos/act/pkg/runner"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"gitea.com/gitea/act_runner/internal/pkg/client"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/config"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/labels"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/report"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/ver"
|
||||
)
|
||||
|
||||
// Runner runs the pipeline.
|
||||
@@ -43,7 +41,6 @@ type Runner struct {
|
||||
envs map[string]string
|
||||
|
||||
runningTasks sync.Map
|
||||
runningCount atomic.Int64
|
||||
}
|
||||
|
||||
func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client) *Runner {
|
||||
@@ -99,25 +96,16 @@ func (r *Runner) Run(ctx context.Context, task *runnerv1.Task) error {
|
||||
r.runningTasks.Store(task.Id, struct{}{})
|
||||
defer r.runningTasks.Delete(task.Id)
|
||||
|
||||
r.runningCount.Add(1)
|
||||
|
||||
start := time.Now()
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, r.cfg.Runner.Timeout)
|
||||
defer cancel()
|
||||
reporter := report.NewReporter(ctx, cancel, r.client, task, r.cfg)
|
||||
var runErr error
|
||||
defer func() {
|
||||
r.runningCount.Add(-1)
|
||||
|
||||
lastWords := ""
|
||||
if runErr != nil {
|
||||
lastWords = runErr.Error()
|
||||
}
|
||||
_ = reporter.Close(lastWords)
|
||||
|
||||
metrics.JobDuration.Observe(time.Since(start).Seconds())
|
||||
metrics.JobsTotal.WithLabelValues(metrics.ResultToStatusLabel(reporter.Result())).Inc()
|
||||
}()
|
||||
reporter.RunDaemon()
|
||||
runErr = r.run(ctx, task, reporter)
|
||||
@@ -278,10 +266,6 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
|
||||
return execErr
|
||||
}
|
||||
|
||||
func (r *Runner) RunningCount() int64 {
|
||||
return r.runningCount.Load()
|
||||
}
|
||||
|
||||
func (r *Runner) Declare(ctx context.Context, labels []string) (*connect.Response[runnerv1.DeclareResponse], error) {
|
||||
return r.client.Declare(ctx, connect.NewRequest(&runnerv1.DeclareRequest{
|
||||
Version: ver.Version(),
|
||||
|
||||
@@ -132,12 +132,3 @@ host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:

metrics:
# Enable the Prometheus metrics endpoint.
# When enabled, metrics are served at http://<addr>/metrics and a liveness check at /healthz.
enabled: false
# The address for the metrics HTTP server to listen on.
# Defaults to localhost only. Set to ":9101" to allow external access,
# but ensure the port is firewall-protected as there is no authentication.
addr: "127.0.0.1:9101"
@@ -70,12 +70,6 @@ type Host struct {
WorkdirParent string `yaml:"workdir_parent"` // WorkdirParent specifies the parent directory for the host's working directory.
}

// Metrics represents the configuration for the Prometheus metrics endpoint.
type Metrics struct {
Enabled bool `yaml:"enabled"` // Enabled indicates whether the metrics endpoint is exposed.
Addr string `yaml:"addr"` // Addr specifies the listen address for the metrics HTTP server (e.g., ":9101").
}

// Config represents the overall configuration.
type Config struct {
Log Log `yaml:"log"` // Log represents the configuration for logging.
@@ -83,7 +77,6 @@ type Config struct {
Cache Cache `yaml:"cache"` // Cache represents the configuration for caching.
Container Container `yaml:"container"` // Container represents the configuration for the container.
Host Host `yaml:"host"` // Host represents the configuration for the host.
Metrics Metrics `yaml:"metrics"` // Metrics represents the configuration for the Prometheus metrics endpoint.
}

// LoadDefault returns the default configuration.
@@ -164,9 +157,6 @@ func LoadDefault(file string) (*Config, error) {
if cfg.Runner.StateReportInterval <= 0 {
cfg.Runner.StateReportInterval = 5 * time.Second
}
if cfg.Metrics.Addr == "" {
cfg.Metrics.Addr = "127.0.0.1:9101"
}

// Validate and fix invalid config combinations to prevent confusing behavior.
if cfg.Runner.FetchIntervalMax < cfg.Runner.FetchInterval {
@@ -1,216 +0,0 @@
|
||||
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
)
|
||||
|
||||
// Namespace is the Prometheus namespace for all act_runner metrics.
|
||||
const Namespace = "act_runner"
|
||||
|
||||
// Label value constants for Prometheus metrics.
|
||||
// Using constants prevents typos from silently creating new time-series.
|
||||
//
|
||||
// LabelResult* values are used on metrics with label key "result" (RPC outcomes).
|
||||
// LabelStatus* values are used on metrics with label key "status" (job outcomes).
|
||||
const (
|
||||
LabelResultTask = "task"
|
||||
LabelResultEmpty = "empty"
|
||||
LabelResultError = "error"
|
||||
LabelResultSuccess = "success"
|
||||
|
||||
LabelMethodFetchTask = "FetchTask"
|
||||
LabelMethodUpdateLog = "UpdateLog"
|
||||
LabelMethodUpdateTask = "UpdateTask"
|
||||
|
||||
LabelStatusSuccess = "success"
|
||||
LabelStatusFailure = "failure"
|
||||
LabelStatusCancelled = "cancelled"
|
||||
LabelStatusSkipped = "skipped"
|
||||
LabelStatusUnknown = "unknown"
|
||||
)
|
||||
|
||||
// rpcDurationBuckets covers the expected latency range for short-running
|
||||
// UpdateLog / UpdateTask RPCs. FetchTask uses its own buckets (it has a 10s tail).
|
||||
var rpcDurationBuckets = []float64{0.01, 0.05, 0.1, 0.25, 0.5, 1, 2, 5}
|
||||
|
||||
// ResultToStatusLabel maps a runnerv1.Result to the "status" label value used on job metrics.
|
||||
func ResultToStatusLabel(r runnerv1.Result) string {
|
||||
switch r {
|
||||
case runnerv1.Result_RESULT_SUCCESS:
|
||||
return LabelStatusSuccess
|
||||
case runnerv1.Result_RESULT_FAILURE:
|
||||
return LabelStatusFailure
|
||||
case runnerv1.Result_RESULT_CANCELLED:
|
||||
return LabelStatusCancelled
|
||||
case runnerv1.Result_RESULT_SKIPPED:
|
||||
return LabelStatusSkipped
|
||||
default:
|
||||
return LabelStatusUnknown
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
RunnerInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: Namespace,
|
||||
Name: "info",
|
||||
Help: "Runner metadata. Always 1. Labels carry version and name.",
|
||||
}, []string{"version", "name"})
|
||||
|
||||
RunnerCapacity = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: Namespace,
|
||||
Name: "capacity",
|
||||
Help: "Configured maximum concurrent jobs.",
|
||||
})
|
||||
|
||||
PollFetchTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "poll",
|
||||
Name: "fetch_total",
|
||||
Help: "Total number of FetchTask RPCs by result (task, empty, error).",
|
||||
}, []string{"result"})
|
||||
|
||||
PollFetchDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "poll",
|
||||
Name: "fetch_duration_seconds",
|
||||
Help: "Latency of FetchTask RPCs, excluding expected long-poll timeouts.",
|
||||
Buckets: []float64{0.01, 0.05, 0.1, 0.25, 0.5, 1, 2, 5, 10},
|
||||
})
|
||||
|
||||
PollBackoffSeconds = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "poll",
|
||||
Name: "backoff_seconds",
|
||||
Help: "Last observed polling backoff interval in seconds.",
|
||||
})
|
||||
|
||||
JobsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "job",
|
||||
Name: "total",
|
||||
Help: "Total jobs processed by status (success, failure, cancelled, skipped, unknown).",
|
||||
}, []string{"status"})
|
||||
|
||||
JobDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "job",
|
||||
Name: "duration_seconds",
|
||||
Help: "Duration of job execution from start to finish.",
|
||||
Buckets: prometheus.ExponentialBuckets(1, 2, 14), // 1s to ~4.5h
|
||||
})
|
||||
|
||||
ReportLogTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "report",
|
||||
Name: "log_total",
|
||||
Help: "Total UpdateLog RPCs by result (success, error).",
|
||||
}, []string{"result"})
|
||||
|
||||
ReportLogDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "report",
|
||||
Name: "log_duration_seconds",
|
||||
Help: "Latency of UpdateLog RPCs.",
|
||||
Buckets: rpcDurationBuckets,
|
||||
})
|
||||
|
||||
ReportStateTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "report",
|
||||
Name: "state_total",
|
||||
Help: "Total UpdateTask (state) RPCs by result (success, error).",
|
||||
}, []string{"result"})
|
||||
|
||||
ReportStateDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "report",
|
||||
Name: "state_duration_seconds",
|
||||
Help: "Latency of UpdateTask RPCs.",
|
||||
Buckets: rpcDurationBuckets,
|
||||
})
|
||||
|
||||
ReportLogBufferRows = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "report",
|
||||
Name: "log_buffer_rows",
|
||||
Help: "Current number of buffered log rows awaiting send.",
|
||||
})
|
||||
|
||||
ClientErrors = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "client",
|
||||
Name: "errors_total",
|
||||
Help: "Total client RPC errors by method.",
|
||||
}, []string{"method"})
|
||||
)
|
||||
|
||||
// Registry is the custom Prometheus registry used by the runner.
|
||||
var Registry = prometheus.NewRegistry()
|
||||
|
||||
var initOnce sync.Once
|
||||
|
||||
// Init registers all static metrics and the standard Go/process collectors.
|
||||
// Safe to call multiple times; only the first call has effect.
|
||||
func Init() {
|
||||
initOnce.Do(func() {
|
||||
Registry.MustRegister(
|
||||
collectors.NewGoCollector(),
|
||||
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
|
||||
RunnerInfo, RunnerCapacity,
|
||||
PollFetchTotal, PollFetchDuration, PollBackoffSeconds,
|
||||
JobsTotal, JobDuration,
|
||||
ReportLogTotal, ReportLogDuration,
|
||||
ReportStateTotal, ReportStateDuration, ReportLogBufferRows,
|
||||
ClientErrors,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
// RegisterUptimeFunc registers a GaugeFunc that reports seconds since startTime.
|
||||
func RegisterUptimeFunc(startTime time.Time) {
|
||||
Registry.MustRegister(prometheus.NewGaugeFunc(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: Namespace,
|
||||
Name: "uptime_seconds",
|
||||
Help: "Seconds since the runner daemon started.",
|
||||
},
|
||||
func() float64 { return time.Since(startTime).Seconds() },
|
||||
))
|
||||
}
|
||||
|
||||
// RegisterRunningJobsFunc registers GaugeFuncs for the running job count and
|
||||
// capacity utilisation ratio, evaluated lazily at Prometheus scrape time.
|
||||
func RegisterRunningJobsFunc(countFn func() int64, capacity int) {
|
||||
capF := float64(capacity)
|
||||
Registry.MustRegister(prometheus.NewGaugeFunc(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "job",
|
||||
Name: "running",
|
||||
Help: "Number of jobs currently executing.",
|
||||
},
|
||||
func() float64 { return float64(countFn()) },
|
||||
))
|
||||
Registry.MustRegister(prometheus.NewGaugeFunc(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: Namespace,
|
||||
Subsystem: "job",
|
||||
Name: "capacity_utilization_ratio",
|
||||
Help: "Ratio of running jobs to configured capacity (0.0-1.0).",
|
||||
},
|
||||
func() float64 {
|
||||
if capF <= 0 {
|
||||
return 0
|
||||
}
|
||||
return float64(countFn()) / capF
|
||||
},
|
||||
))
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// StartServer starts an HTTP server that serves Prometheus metrics on /metrics
|
||||
// and a liveness check on /healthz. The server shuts down when ctx is cancelled.
|
||||
// Call Init() before StartServer to register metrics with the Registry.
|
||||
func StartServer(ctx context.Context, addr string) {
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.HandlerFor(Registry, promhttp.HandlerOpts{}))
|
||||
mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte("ok"))
|
||||
})
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: addr,
|
||||
Handler: mux,
|
||||
ReadHeaderTimeout: 5 * time.Second,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
IdleTimeout: 60 * time.Second,
|
||||
}
|
||||
|
||||
go func() {
|
||||
log.Infof("metrics server listening on %s", addr)
|
||||
if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
log.WithError(err).Error("metrics server failed")
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
shutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
if err := srv.Shutdown(shutCtx); err != nil {
|
||||
log.WithError(err).Warn("metrics server shutdown error")
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -12,16 +12,15 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gitea.com/gitea/act_runner/internal/pkg/client"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/config"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/metrics"
|
||||
|
||||
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
|
||||
"connectrpc.com/connect"
|
||||
"github.com/avast/retry-go/v4"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
"gitea.com/gitea/act_runner/internal/pkg/client"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/config"
|
||||
)
|
||||
|
||||
type Reporter struct {
|
||||
@@ -37,11 +36,6 @@ type Reporter struct {
|
||||
logReplacer *strings.Replacer
|
||||
oldnew []string
|
||||
|
||||
// lastLogBufferRows is the last value written to the ReportLogBufferRows
|
||||
// gauge; guarded by clientM (the same lock held around each ReportLog call)
|
||||
// so the gauge skips no-op Set calls when the buffer size is unchanged.
|
||||
lastLogBufferRows int
|
||||
|
||||
state *runnerv1.TaskState
|
||||
stateChanged bool
|
||||
stateMu sync.RWMutex
|
||||
@@ -99,13 +93,6 @@ func NewReporter(ctx context.Context, cancel context.CancelFunc, client client.C
|
||||
return rv
|
||||
}
|
||||
|
||||
// Result returns the final job result. Safe to call after Close() returns.
|
||||
func (r *Reporter) Result() runnerv1.Result {
|
||||
r.stateMu.RLock()
|
||||
defer r.stateMu.RUnlock()
|
||||
return r.state.Result
|
||||
}
|
||||
|
||||
func (r *Reporter) ResetSteps(l int) {
|
||||
r.stateMu.Lock()
|
||||
defer r.stateMu.Unlock()
|
||||
@@ -434,20 +421,15 @@ func (r *Reporter) ReportLog(noMore bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
resp, err := r.client.UpdateLog(r.ctx, connect.NewRequest(&runnerv1.UpdateLogRequest{
|
||||
TaskId: r.state.Id,
|
||||
Index: int64(r.logOffset),
|
||||
Rows: rows,
|
||||
NoMore: noMore,
|
||||
}))
|
||||
metrics.ReportLogDuration.Observe(time.Since(start).Seconds())
|
||||
if err != nil {
|
||||
metrics.ReportLogTotal.WithLabelValues(metrics.LabelResultError).Inc()
|
||||
metrics.ClientErrors.WithLabelValues(metrics.LabelMethodUpdateLog).Inc()
|
||||
return err
|
||||
}
|
||||
metrics.ReportLogTotal.WithLabelValues(metrics.LabelResultSuccess).Inc()
|
||||
|
||||
ack := int(resp.Msg.AckIndex)
|
||||
if ack < r.logOffset {
|
||||
@@ -458,12 +440,7 @@ func (r *Reporter) ReportLog(noMore bool) error {
|
||||
r.logRows = r.logRows[ack-r.logOffset:]
|
||||
submitted := r.logOffset + len(rows)
|
||||
r.logOffset = ack
|
||||
remaining := len(r.logRows)
|
||||
r.stateMu.Unlock()
|
||||
if remaining != r.lastLogBufferRows {
|
||||
metrics.ReportLogBufferRows.Set(float64(remaining))
|
||||
r.lastLogBufferRows = remaining
|
||||
}
|
||||
|
||||
if noMore && ack < submitted {
|
||||
return errors.New("not all logs are submitted")
|
||||
@@ -487,36 +464,34 @@ func (r *Reporter) ReportState(reportResult bool) error {
|
||||
return true
|
||||
})
|
||||
|
||||
// Consume stateChanged atomically with the snapshot; restored on error
|
||||
// below so a concurrent Fire() during UpdateTask isn't silently lost.
|
||||
r.stateMu.Lock()
|
||||
if !reportResult && !r.stateChanged && len(outputs) == 0 {
|
||||
r.stateMu.Unlock()
|
||||
r.stateMu.RLock()
|
||||
changed := r.stateChanged
|
||||
r.stateMu.RUnlock()
|
||||
|
||||
// Early return avoids the expensive proto.Clone on the common no-op path.
|
||||
if !reportResult && !changed && len(outputs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.stateMu.RLock()
|
||||
state := proto.Clone(r.state).(*runnerv1.TaskState)
|
||||
r.stateChanged = false
|
||||
r.stateMu.Unlock()
|
||||
r.stateMu.RUnlock()
|
||||
|
||||
if !reportResult {
|
||||
state.Result = runnerv1.Result_RESULT_UNSPECIFIED
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
resp, err := r.client.UpdateTask(r.ctx, connect.NewRequest(&runnerv1.UpdateTaskRequest{
|
||||
State: state,
|
||||
Outputs: outputs,
|
||||
}))
|
||||
metrics.ReportStateDuration.Observe(time.Since(start).Seconds())
|
||||
if err != nil {
|
||||
metrics.ReportStateTotal.WithLabelValues(metrics.LabelResultError).Inc()
|
||||
metrics.ClientErrors.WithLabelValues(metrics.LabelMethodUpdateTask).Inc()
|
||||
r.stateMu.Lock()
|
||||
r.stateChanged = true
|
||||
r.stateMu.Unlock()
|
||||
return err
|
||||
}
|
||||
metrics.ReportStateTotal.WithLabelValues(metrics.LabelResultSuccess).Inc()
|
||||
|
||||
r.stateMu.Lock()
|
||||
r.stateChanged = false
|
||||
r.stateMu.Unlock()
|
||||
|
||||
for _, k := range resp.Msg.SentOutputs {
|
||||
r.outputs.Store(k, struct{}{})
|
||||
|
||||
@@ -12,9 +12,6 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"gitea.com/gitea/act_runner/internal/pkg/client/mocks"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/config"
|
||||
|
||||
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
|
||||
connect_go "connectrpc.com/connect"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -23,6 +20,9 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
"gitea.com/gitea/act_runner/internal/pkg/client/mocks"
|
||||
"gitea.com/gitea/act_runner/internal/pkg/config"
|
||||
)
|
||||
|
||||
func TestReporter_parseLogRow(t *testing.T) {
|
||||
@@ -442,112 +442,6 @@ func TestReporter_BatchSizeFlush(t *testing.T) {
|
||||
"batch size threshold should have triggered immediate flush")
|
||||
}
|
||||
|
||||
// TestReporter_StateChangedNotLostDuringReport asserts that a Fire() arriving
|
||||
// mid-UpdateTask re-dirties the flag so the change is picked up by the next report.
|
||||
func TestReporter_StateChangedNotLostDuringReport(t *testing.T) {
|
||||
var updateTaskCalls atomic.Int64
|
||||
inFlight := make(chan struct{})
|
||||
release := make(chan struct{})
|
||||
|
||||
client := mocks.NewClient(t)
|
||||
client.On("UpdateTask", mock.Anything, mock.Anything).Return(
|
||||
func(_ context.Context, _ *connect_go.Request[runnerv1.UpdateTaskRequest]) (*connect_go.Response[runnerv1.UpdateTaskResponse], error) {
|
||||
n := updateTaskCalls.Add(1)
|
||||
if n == 1 {
|
||||
// Signal that the first UpdateTask is in flight, then block until released.
|
||||
close(inFlight)
|
||||
<-release
|
||||
}
|
||||
return connect_go.NewResponse(&runnerv1.UpdateTaskResponse{}), nil
|
||||
},
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
taskCtx, err := structpb.NewStruct(map[string]any{})
|
||||
require.NoError(t, err)
|
||||
cfg, _ := config.LoadDefault("")
|
||||
reporter := NewReporter(ctx, cancel, client, &runnerv1.Task{Context: taskCtx}, cfg)
|
||||
reporter.ResetSteps(2)
|
||||
|
||||
// Mark stateChanged=true so the first ReportState proceeds to UpdateTask.
|
||||
reporter.stateMu.Lock()
|
||||
reporter.stateChanged = true
|
||||
reporter.stateMu.Unlock()
|
||||
|
||||
// Kick off the first ReportState in a goroutine — it will block in UpdateTask.
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
done <- reporter.ReportState(false)
|
||||
}()
|
||||
|
||||
// Wait until UpdateTask is in flight (snapshot taken, flag consumed).
|
||||
<-inFlight
|
||||
|
||||
// Concurrent Fire() modifies state — must re-flip stateChanged so the
|
||||
// change is not lost when the in-flight ReportState finishes.
|
||||
require.NoError(t, reporter.Fire(&log.Entry{
|
||||
Message: "step starts",
|
||||
Data: log.Fields{"stage": "Main", "stepNumber": 1, "raw_output": true},
|
||||
}))
|
||||
|
||||
// Release the in-flight UpdateTask and wait for it to return.
|
||||
close(release)
|
||||
require.NoError(t, <-done)
|
||||
|
||||
// stateChanged must still be true so the next ReportState picks up the
|
||||
// concurrent Fire()'s change instead of skipping via the early-return path.
|
||||
reporter.stateMu.RLock()
|
||||
changed := reporter.stateChanged
|
||||
reporter.stateMu.RUnlock()
|
||||
assert.True(t, changed, "stateChanged must remain true after a concurrent Fire() during in-flight ReportState")
|
||||
|
||||
// And the next ReportState must actually send a second UpdateTask.
|
||||
require.NoError(t, reporter.ReportState(false))
|
||||
assert.Equal(t, int64(2), updateTaskCalls.Load(), "concurrent Fire() change must trigger a second UpdateTask, not be silently lost")
|
||||
}
|
||||
|
||||
// TestReporter_StateChangedRestoredOnError verifies that when UpdateTask fails,
|
||||
// the dirty flag is restored so the snapshotted change isn't silently lost.
|
||||
func TestReporter_StateChangedRestoredOnError(t *testing.T) {
|
||||
var updateTaskCalls atomic.Int64
|
||||
|
||||
client := mocks.NewClient(t)
|
||||
client.On("UpdateTask", mock.Anything, mock.Anything).Return(
|
||||
func(_ context.Context, _ *connect_go.Request[runnerv1.UpdateTaskRequest]) (*connect_go.Response[runnerv1.UpdateTaskResponse], error) {
|
||||
n := updateTaskCalls.Add(1)
|
||||
if n == 1 {
|
||||
return nil, errors.New("transient network error")
|
||||
}
|
||||
return connect_go.NewResponse(&runnerv1.UpdateTaskResponse{}), nil
|
||||
},
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
taskCtx, err := structpb.NewStruct(map[string]any{})
|
||||
require.NoError(t, err)
|
||||
cfg, _ := config.LoadDefault("")
|
||||
reporter := NewReporter(ctx, cancel, client, &runnerv1.Task{Context: taskCtx}, cfg)
|
||||
reporter.ResetSteps(1)
|
||||
|
||||
reporter.stateMu.Lock()
|
||||
reporter.stateChanged = true
|
||||
reporter.stateMu.Unlock()
|
||||
|
||||
// First ReportState fails — flag must be restored to true.
|
||||
require.Error(t, reporter.ReportState(false))
|
||||
|
||||
reporter.stateMu.RLock()
|
||||
changed := reporter.stateChanged
|
||||
reporter.stateMu.RUnlock()
|
||||
assert.True(t, changed, "stateChanged must be restored to true after UpdateTask error so the change is retried")
|
||||
|
||||
// The next ReportState should still issue a request because the flag was restored.
|
||||
require.NoError(t, reporter.ReportState(false))
|
||||
assert.Equal(t, int64(2), updateTaskCalls.Load())
|
||||
}
|
||||
|
||||
// TestReporter_StateNotifyFlush verifies that step transitions trigger
|
||||
// an immediate state flush via the stateNotify channel.
|