2 Commits

Author          SHA1        Message                   Date
Garet Halliday  47c8d37a8a  sort imports              2024-09-11 12:24:25 -06:00
Garet Halliday  d1a6a360e4  add once flag to daemon   2024-09-10 12:26:09 -06:00
10 changed files with 64 additions and 19 deletions

View File

@@ -17,9 +17,10 @@ jobs:
with:
go-version-file: "go.mod"
- name: goreleaser
-uses: goreleaser/goreleaser-action@v6
+uses: goreleaser/goreleaser-action@v5
with:
distribution: goreleaser-pro
version: latest
args: release --nightly
env:
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}

View File

@@ -23,9 +23,10 @@ jobs:
passphrase: ${{ secrets.PASSPHRASE }}
fingerprint: CC64B1DB67ABBEECAB24B6455FC346329753F4B0
- name: goreleaser
-uses: goreleaser/goreleaser-action@v6
+uses: goreleaser/goreleaser-action@v5
with:
distribution: goreleaser-pro
version: latest
args: release
env:
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}

View File

@@ -1,5 +1,3 @@
-version: 2
before:
hooks:
- go mod tidy
@@ -83,7 +81,7 @@ blobs:
provider: s3
bucket: "{{ .Env.S3_BUCKET }}"
region: "{{ .Env.S3_REGION }}"
-directory: "act_runner/{{.Version}}"
+folder: "act_runner/{{.Version}}"
extra_files:
- glob: ./**.xz
- glob: ./**.sha256
@@ -99,10 +97,10 @@ checksum:
- glob: ./**.xz
snapshot:
-version_template: "{{ .Branch }}-devel"
+name_template: "{{ .Branch }}-devel"
nightly:
-version_template: "nightly"
+name_template: "nightly"
gitea_urls:
api: https://gitea.com/api/v1

View File

@@ -1,4 +1,4 @@
-FROM golang:1.23-alpine AS builder
+FROM golang:1.21-alpine3.18 as builder
# Do not remove `git` here, it is required for getting runner version when executing `make build`
RUN apk add --no-cache make git
@@ -7,7 +7,7 @@ WORKDIR /opt/src/act_runner
RUN make clean && make build
-FROM alpine
+FROM alpine:3.18
RUN apk add --no-cache git bash tini
COPY --from=builder /opt/src/act_runner/act_runner /usr/local/bin/act_runner

View File

@@ -1,4 +1,4 @@
-FROM golang:1.23-alpine AS builder
+FROM golang:1.21-alpine3.18 as builder
# Do not remove `git` here, it is required for getting runner version when executing `make build`
RUN apk add --no-cache make git

go.mod (4 changed lines)
View File

@@ -1,6 +1,6 @@
module gitea.com/gitea/act_runner
-go 1.23
+go 1.22
require (
code.gitea.io/actions-proto-go v0.4.0
@@ -98,4 +98,4 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
)
-replace github.com/nektos/act => gitea.com/gitea/act v0.261.2
+replace github.com/nektos/act => gitea.com/gitea/act v0.261.1

go.sum (4 changed lines)
View File

@@ -6,8 +6,8 @@ connectrpc.com/connect v1.16.2 h1:ybd6y+ls7GOlb7Bh5C8+ghA6SvCBajHwxssO2CGFjqE=
connectrpc.com/connect v1.16.2/go.mod h1:n2kgwskMHXC+lVqb18wngEpF95ldBHXjZYJussz5FRc=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
-gitea.com/gitea/act v0.261.2 h1:yAhxlt11gpRmF7CeVsVjgLg1Ph0xxroJ/l2fWaYyl84=
-gitea.com/gitea/act v0.261.2/go.mod h1:Pg5C9kQY1CEA3QjthjhlrqOC/QOT5NyWNjOjRHw23Ok=
+gitea.com/gitea/act v0.261.1 h1:iACWLc/k8wct9fCF2WdYKqn2Hxx6NjW9zbOP79HF4H4=
+gitea.com/gitea/act v0.261.1/go.mod h1:Pg5C9kQY1CEA3QjthjhlrqOC/QOT5NyWNjOjRHw23Ok=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=

View File

@@ -42,12 +42,14 @@ func Execute(ctx context.Context) {
rootCmd.AddCommand(registerCmd)
// ./act_runner daemon
+var daemArgs daemonArgs
daemonCmd := &cobra.Command{
Use: "daemon",
Short: "Run as a runner daemon",
-Args: cobra.MaximumNArgs(1),
-RunE: runDaemon(ctx, &configFile),
+Args: cobra.MaximumNArgs(0),
+RunE: runDaemon(ctx, &daemArgs, &configFile),
}
+daemonCmd.Flags().BoolVar(&daemArgs.Once, "once", false, "Run one job then exit")
rootCmd.AddCommand(daemonCmd)
// ./act_runner exec
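
Note: the net effect of the hunk above is a new boolean flag on the daemon subcommand, registered as "once" with default false and help text "Run one job then exit", and carried into runDaemon through the new daemonArgs struct. Assuming the binary is invoked the way the comments in this file suggest, a single-job run would be started roughly as:

    ./act_runner daemon --once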

View File

@@ -28,7 +28,7 @@ import (
"gitea.com/gitea/act_runner/internal/pkg/ver"
)
-func runDaemon(ctx context.Context, configFile *string) func(cmd *cobra.Command, args []string) error {
+func runDaemon(ctx context.Context, daemArgs *daemonArgs, configFile *string) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
cfg, err := config.LoadDefault(*configFile)
if err != nil {
@@ -122,9 +122,24 @@ func runDaemon(ctx context.Context, configFile *string) func(cmd *cobra.Command,
poller := poll.New(cfg, cli, runner)
-go poller.Poll()
+if daemArgs.Once {
+done := make(chan struct{})
+go func() {
+defer close(done)
+poller.PollOnce()
+}()
+// shutdown when we complete a job or cancel is requested
+select {
+case <-ctx.Done():
+case <-done:
+}
+} else {
+go poller.Poll()
+<-ctx.Done()
+}
-<-ctx.Done()
log.Infof("runner: %s shutdown initiated, waiting %s for running jobs to complete before shutting down", resp.Msg.Runner.Name, cfg.Runner.ShutdownTimeout)
ctx, cancel := context.WithTimeout(context.Background(), cfg.Runner.ShutdownTimeout)
@@ -134,10 +149,15 @@ func runDaemon(ctx context.Context, configFile *string) func(cmd *cobra.Command,
if err != nil {
log.Warnf("runner: %s cancelled in progress jobs during shutdown", resp.Msg.Runner.Name)
}
return nil
}
}
+type daemonArgs struct {
+Once bool
+}
// initLogging setup the global logrus logger.
func initLogging(cfg *config.Config) {
isTerm := isatty.IsTerminal(os.Stdout.Fd())
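
Note: the --once branch added above relies on a common Go shutdown pattern: run the single poll in its own goroutine, close a done channel when it returns, and select on that channel against ctx.Done() so that either job completion or cancellation ends the daemon. A minimal, self-contained sketch of just that pattern (job is a placeholder for poller.PollOnce, not the runner's actual code):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // runOnce mirrors the shape of the --once branch in runDaemon: the single
    // job runs in its own goroutine, closes done when it finishes, and the
    // caller shuts down on whichever comes first, completion or cancellation.
    func runOnce(ctx context.Context, job func()) {
        done := make(chan struct{})
        go func() {
            defer close(done)
            job() // stands in for poller.PollOnce()
        }()

        select {
        case <-ctx.Done():
            fmt.Println("cancelled before the job finished")
        case <-done:
            fmt.Println("job finished, shutting down")
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        runOnce(ctx, func() { time.Sleep(100 * time.Millisecond) })
    }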

View File

@@ -70,6 +70,15 @@ func (p *Poller) Poll() {
close(p.done)
}
+func (p *Poller) PollOnce() {
+limiter := rate.NewLimiter(rate.Every(p.cfg.Runner.FetchInterval), 1)
+p.pollOnce(limiter)
+// signal that we're done
+close(p.done)
+}
func (p *Poller) Shutdown(ctx context.Context) error {
p.shutdownPolling()
@@ -101,6 +110,19 @@ func (p *Poller) Shutdown(ctx context.Context) error {
func (p *Poller) poll(wg *sync.WaitGroup, limiter *rate.Limiter) {
defer wg.Done()
for {
+p.pollOnce(limiter)
+select {
+case <-p.pollingCtx.Done():
+return
+default:
+continue
+}
+}
+}
+func (p *Poller) pollOnce(limiter *rate.Limiter) {
+for {
if err := limiter.Wait(p.pollingCtx); err != nil {
if p.pollingCtx.Err() != nil {
@@ -114,6 +136,7 @@ func (p *Poller) poll(wg *sync.WaitGroup, limiter *rate.Limiter) {
}
p.runTaskWithRecover(p.jobsCtx, task)
+return
}
}
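
Note: after this refactor, poll is a loop that calls pollOnce and then checks pollingCtx for cancellation, while pollOnce blocks on the rate limiter, fetches at most one task, runs it, and returns, which is what lets PollOnce handle exactly one job. A rough sketch of that shape using golang.org/x/time/rate, with fetch and handle as hypothetical stand-ins for the runner's client call and task execution:

    package main

    import (
        "context"
        "fmt"
        "time"

        "golang.org/x/time/rate"
    )

    // fetchOnce mirrors the shape of pollOnce: block on the rate limiter, try
    // to fetch a task, and return as soon as one task has been handled.
    func fetchOnce(ctx context.Context, limiter *rate.Limiter, fetch func() (string, bool), handle func(string)) {
        for {
            if err := limiter.Wait(ctx); err != nil {
                return // context cancelled, give up
            }
            task, ok := fetch()
            if !ok {
                continue // nothing to do yet, wait for the next tick
            }
            handle(task)
            return // return after one task, as in the refactored pollOnce
        }
    }

    func main() {
        limiter := rate.NewLimiter(rate.Every(200*time.Millisecond), 1)
        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()

        calls := 0
        fetchOnce(ctx, limiter,
            func() (string, bool) { calls++; return "job-1", calls >= 3 }, // no task on the first two polls
            func(t string) { fmt.Println("running", t) },
        )
    }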