mirror of
https://gitea.com/gitea/act_runner.git
synced 2026-05-08 16:23:23 +02:00
Compare commits
175 Commits
44da20bd14
...
renovate/g
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1e71a8f891 | ||
|
|
dfeb463904 | ||
|
|
c36198ab55 | ||
|
|
594c9ade7c | ||
|
|
2a4d56c650 | ||
|
|
a22119cf88 | ||
|
|
b68ecf2580 | ||
|
|
d1434237c2 | ||
|
|
35c65e2b14 | ||
|
|
0060993e76 | ||
|
|
c45a4e6d32 | ||
|
|
68d9fc45c9 | ||
|
|
b1c873a66b | ||
|
|
1d6e7879c8 | ||
|
|
13dc9386fe | ||
|
|
8e6b3be96a | ||
|
|
e5e53c732e | ||
|
|
2516573592 | ||
|
|
35834bf817 | ||
|
|
11a5dc8936 | ||
|
|
f09fafcb0a | ||
|
|
801e5cf4d5 | ||
|
|
3f05040438 | ||
|
|
59d90bff26 | ||
|
|
5edc4ba550 | ||
|
|
547a0ff297 | ||
|
|
f2b4dbf05f | ||
|
|
bad4239d18 | ||
|
|
589db33e70 | ||
|
|
1032f857a1 | ||
|
|
e56b984c04 | ||
|
|
fa5334eb24 | ||
|
|
7c6f1261d4 | ||
|
|
fbd6316928 | ||
|
|
ade5b8202e | ||
|
|
a31f3962c0 | ||
|
|
04244fc3f7 | ||
|
|
cb58492678 | ||
|
|
9faadad0ce | ||
|
|
352096c5bf | ||
|
|
b5c50bb3ab | ||
|
|
8af9a2b47a | ||
|
|
fab2d6ae04 | ||
|
|
15dd63a839 | ||
|
|
9aafec169b | ||
|
|
f923badec7 | ||
|
|
48944e136c | ||
|
|
40dcee0991 | ||
|
|
f33e5a6245 | ||
|
|
f2d545565f | ||
|
|
90c1275f0e | ||
|
|
3232358e71 | ||
|
|
2e98baa34a | ||
|
|
505907eb2a | ||
|
|
9933ea0d92 | ||
|
|
5dd5436169 | ||
|
|
28740d7788 | ||
|
|
ddf9159a8f | ||
|
|
43e6958fa3 | ||
|
|
c0f19d9a26 | ||
|
|
495185446f | ||
|
|
3a07d231a0 | ||
|
|
5417d3ac67 | ||
|
|
f56fd693ee | ||
|
|
34f68b3c18 | ||
|
|
ac6e4b7517 | ||
|
|
91852faf93 | ||
|
|
39509e9ad0 | ||
|
|
9924aea786 | ||
|
|
65c232c4a5 | ||
|
|
5da4954b65 | ||
|
|
ec091ad269 | ||
|
|
1656206765 | ||
|
|
6cdf1e5788 | ||
|
|
ab381649da | ||
|
|
38e7e9e939 | ||
|
|
2ab806053c | ||
|
|
6a090f67e5 | ||
|
|
517d11c671 | ||
|
|
e1b1e81124 | ||
|
|
64876e3696 | ||
|
|
3fa1dba92b | ||
|
|
9725f60394 | ||
|
|
a79d81989f | ||
|
|
655f578563 | ||
|
|
0054a45d1b | ||
|
|
79a7577c15 | ||
|
|
a28ebf0a48 | ||
|
|
2b860ce371 | ||
|
|
3a9e7d18de | ||
|
|
b4edc952d9 | ||
|
|
f1213213d8 | ||
|
|
15045b4fc0 | ||
|
|
67918333fa | ||
|
|
c93462e19f | ||
|
|
f3264cac20 | ||
|
|
4699c3b689 | ||
|
|
22d91e3ac3 | ||
|
|
cdc6d4bc6a | ||
|
|
2069b04779 | ||
|
|
3813f40cba | ||
|
|
eb19987893 | ||
|
|
545802b97b | ||
|
|
515c2c429d | ||
|
|
a165e17878 | ||
|
|
56e103b4ba | ||
|
|
422cbdf446 | ||
|
|
8c56bd3aa5 | ||
|
|
a94498b482 | ||
|
|
fe76a035ad | ||
|
|
6ce5c93cc8 | ||
|
|
92b4d73376 | ||
|
|
183bb7af1b | ||
|
|
a72822b3f8 | ||
|
|
9283cfc9b1 | ||
|
|
27846050ae | ||
|
|
ed9b6643ca | ||
|
|
a94a01bff2 | ||
|
|
229dbaf153 | ||
|
|
a18648ee73 | ||
|
|
518d8c96f3 | ||
|
|
0c1f2edb99 | ||
|
|
721857e4a0 | ||
|
|
6b1010ad07 | ||
|
|
e12252a43a | ||
|
|
8609522aa4 | ||
|
|
6a876c4f99 | ||
|
|
de529139af | ||
|
|
d3a56cdb69 | ||
|
|
9bdddf18e0 | ||
|
|
ac1ba34518 | ||
|
|
5c4a96bcb7 | ||
|
|
62abf4fe11 | ||
|
|
cfedc518ca | ||
|
|
5e76853b55 | ||
|
|
2eb4de02ee | ||
|
|
342ad6a51a | ||
|
|
568f053723 | ||
|
|
8f12a6c947 | ||
|
|
83fb85f702 | ||
|
|
3daf313205 | ||
|
|
7c5400d75b | ||
|
|
929ea6df75 | ||
|
|
f6a8a0e643 | ||
|
|
556fd20aed | ||
|
|
a8298365fe | ||
|
|
1dda0aec69 | ||
|
|
49e204166d | ||
|
|
a36b003f7a | ||
|
|
0671d16694 | ||
|
|
881dbdb81b | ||
|
|
1252e551b8 | ||
|
|
c614d8b96c | ||
|
|
84b6649b8b | ||
|
|
dca7801682 | ||
|
|
4b99ed8916 | ||
|
|
e46ede1b17 | ||
|
|
1ba076d321 | ||
|
|
0efa2d5e63 | ||
|
|
0a37a03f2e | ||
|
|
88cce47022 | ||
|
|
7920109e89 | ||
|
|
4cacc14d22 | ||
|
|
c6b8548d35 | ||
|
|
64cae197a4 | ||
|
|
7fb84a54a8 | ||
|
|
70cc6c017b | ||
|
|
d7e9ea75fc | ||
|
|
b9c20dcaa4 | ||
|
|
97629ae8af | ||
|
|
b9a9812ad9 | ||
|
|
113c3e98fb | ||
|
|
7815eec33b | ||
|
|
c051090583 | ||
|
|
0fa1fe0310 |
@@ -1,6 +0,0 @@
|
|||||||
[codespell]
|
|
||||||
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
|
|
||||||
skip = .git*,go.sum,package-lock.json,*.min.*,.codespellrc,testdata,./pkg/runner/hashfiles/index.js
|
|
||||||
check-hidden = true
|
|
||||||
ignore-regex = .*Te\{0\}st.*
|
|
||||||
# ignore-words-list =
|
|
||||||
52
.dockerignore
Normal file
52
.dockerignore
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# IntelliJ
|
||||||
|
.idea
|
||||||
|
# Goland's output filename can not be set manually
|
||||||
|
/go_build_*
|
||||||
|
|
||||||
|
# MS VSCode
|
||||||
|
.vscode
|
||||||
|
__debug_bin*
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
*.exe
|
||||||
|
*.test
|
||||||
|
*.prof
|
||||||
|
|
||||||
|
*coverage.out
|
||||||
|
coverage.all
|
||||||
|
coverage.txt
|
||||||
|
cpu.out
|
||||||
|
|
||||||
|
*.db
|
||||||
|
*.log
|
||||||
|
|
||||||
|
/gitea-runner
|
||||||
|
/debug
|
||||||
|
|
||||||
|
/bin
|
||||||
|
/dist
|
||||||
|
/.env
|
||||||
|
/.runner
|
||||||
|
/config.yaml
|
||||||
|
/Dockerfile
|
||||||
|
.DS_Store
|
||||||
@@ -12,5 +12,8 @@ insert_final_newline = true
|
|||||||
[*.{go}]
|
[*.{go}]
|
||||||
indent_style = tab
|
indent_style = tab
|
||||||
|
|
||||||
|
[go.*]
|
||||||
|
indent_style = tab
|
||||||
|
|
||||||
[Makefile]
|
[Makefile]
|
||||||
indent_style = tab
|
indent_style = tab
|
||||||
|
|||||||
@@ -1,154 +0,0 @@
|
|||||||
name: checks
|
|
||||||
on: [pull_request, workflow_dispatch]
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
cancel-in-progress: true
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
|
|
||||||
env:
|
|
||||||
ACT_OWNER: ${{ github.repository_owner }}
|
|
||||||
ACT_REPOSITORY: ${{ github.repository }}
|
|
||||||
CGO_ENABLED: 0
|
|
||||||
NO_QEMU: 1
|
|
||||||
NO_EXTERNAL_IP: 1
|
|
||||||
DOOD: 1
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
lint:
|
|
||||||
name: lint
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
- uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- uses: golangci/golangci-lint-action@v8.0.0
|
|
||||||
- uses: megalinter/megalinter/flavors/go@v9.1.0
|
|
||||||
env:
|
|
||||||
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
VALIDATE_ALL_CODEBASE: false
|
|
||||||
GITHUB_STATUS_REPORTER: ${{ !env.ACT }}
|
|
||||||
GITHUB_COMMENT_REPORTER: ${{ !env.ACT }}
|
|
||||||
|
|
||||||
test-linux:
|
|
||||||
name: test-linux
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 2
|
|
||||||
- name: Cleanup Docker Engine
|
|
||||||
run: |
|
|
||||||
docker ps -a --format '{{ if eq (truncate .Names 4) "act-" }}
|
|
||||||
{{ .ID }}
|
|
||||||
{{end}}' | xargs -r docker rm -f || :
|
|
||||||
docker volume ls --format '{{ if eq (truncate .Name 4) "act-" }}
|
|
||||||
{{ .Name }}
|
|
||||||
{{ end }}' | xargs -r docker volume rm -f || :
|
|
||||||
docker images --format '{{ if eq (truncate .Repository 4) "act-" }}
|
|
||||||
{{ .ID }}
|
|
||||||
{{ end }}' | xargs -r docker rmi -f || :
|
|
||||||
docker images -q | xargs -r docker rmi || :
|
|
||||||
- name: Set up QEMU
|
|
||||||
if: '!env.NO_QEMU'
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
- uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- uses: actions/cache@v4
|
|
||||||
if: ${{ !env.ACT }}
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
- name: Install gotestfmt
|
|
||||||
run: go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@v2.5.0
|
|
||||||
# Regressions by Gitea Actions CI Migration
|
|
||||||
# GITHUB_REPOSITORY contains the server url
|
|
||||||
# ACTIONS_RUNTIME_URL provided to every step, act does not override
|
|
||||||
- name: Run Tests
|
|
||||||
run: |
|
|
||||||
unset ACTIONS_RUNTIME_URL
|
|
||||||
unset ACTIONS_RESULTS_URL
|
|
||||||
unset ACTIONS_RUNTIME_TOKEN
|
|
||||||
export GITHUB_REPOSITORY="${GITHUB_REPOSITORY#${SERVER_URL%/}/}"
|
|
||||||
export ACT_REPOSITORY="${GITHUB_REPOSITORY#${SERVER_URL%/}/}"
|
|
||||||
export ACT_OWNER="${ACT_OWNER#${SERVER_URL%/}/}"
|
|
||||||
env
|
|
||||||
go test -json -v -cover -coverpkg=./... -coverprofile=coverage.txt -covermode=atomic -timeout 20m ./... | gotestfmt -hide successful-packages,empty-packages 2>&1
|
|
||||||
env:
|
|
||||||
SERVER_URL: ${{ github.server_url }}
|
|
||||||
- name: Run act from cli
|
|
||||||
run: go run main.go exec -i node:16-buster-slim -C ./pkg/runner/testdata/ -W ./basic/push.yml
|
|
||||||
- name: Run act from cli without docker support
|
|
||||||
run: go run -tags WITHOUT_DOCKER main.go exec -i "-self-hosted" -C ./pkg/runner/testdata/ -W ./local-action-js/push.yml
|
|
||||||
|
|
||||||
snapshot:
|
|
||||||
name: snapshot
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- uses: actions/cache@v4
|
|
||||||
if: ${{ !env.ACT }}
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
- name: GoReleaser
|
|
||||||
id: goreleaser
|
|
||||||
uses: goreleaser/goreleaser-action@v6
|
|
||||||
with:
|
|
||||||
version: v2
|
|
||||||
args: release --snapshot --clean
|
|
||||||
- name: Setup Node
|
|
||||||
continue-on-error: true
|
|
||||||
uses: actions/setup-node@v6
|
|
||||||
with:
|
|
||||||
node-version: 20
|
|
||||||
- name: Install @actions/artifact@2.1.0
|
|
||||||
continue-on-error: true
|
|
||||||
run: npm install @actions/artifact@2.1.0
|
|
||||||
- name: Upload All
|
|
||||||
uses: actions/github-script@v8
|
|
||||||
continue-on-error: true
|
|
||||||
with:
|
|
||||||
script: |
|
|
||||||
// We do not use features depending on GITHUB_API_URL so we can hardcode it to avoid the GHES no support error
|
|
||||||
process.env["GITHUB_SERVER_URL"] = "https://github.com";
|
|
||||||
const {DefaultArtifactClient} = require('@actions/artifact');
|
|
||||||
const aartifact = new DefaultArtifactClient();
|
|
||||||
var artifacts = JSON.parse(process.env.ARTIFACTS);
|
|
||||||
for(var artifact of artifacts) {
|
|
||||||
if(artifact.type === "Binary") {
|
|
||||||
const {id, size} = await aartifact.uploadArtifact(
|
|
||||||
// name of the artifact
|
|
||||||
`${artifact.name}-${artifact.target}`,
|
|
||||||
// files to include (supports absolute and relative paths)
|
|
||||||
[artifact.path],
|
|
||||||
process.cwd(),
|
|
||||||
{
|
|
||||||
// optional: how long to retain the artifact
|
|
||||||
// if unspecified, defaults to repository/org retention settings (the limit of this value)
|
|
||||||
retentionDays: 10
|
|
||||||
}
|
|
||||||
);
|
|
||||||
console.log(`Created artifact with id: ${id} (bytes: ${size}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
env:
|
|
||||||
ARTIFACTS: ${{ steps.goreleaser.outputs.artifacts }}
|
|
||||||
- name: Chocolatey
|
|
||||||
uses: ./.github/actions/choco
|
|
||||||
with:
|
|
||||||
version: v0.0.0-pr
|
|
||||||
@@ -69,7 +69,7 @@ jobs:
|
|||||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
|
|
||||||
- name: Echo the tag
|
- name: Echo the tag
|
||||||
run: echo "${{ env.DOCKER_ORG }}/act_runner:nightly${{ matrix.variant.tag_suffix }}"
|
run: echo "${{ env.DOCKER_ORG }}/runner:nightly${{ matrix.variant.tag_suffix }}"
|
||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v6
|
||||||
@@ -82,4 +82,4 @@ jobs:
|
|||||||
linux/arm64
|
linux/arm64
|
||||||
push: true
|
push: true
|
||||||
tags: |
|
tags: |
|
||||||
${{ env.DOCKER_ORG }}/act_runner:nightly${{ matrix.variant.tag_suffix }}
|
${{ env.DOCKER_ORG }}/runner:nightly${{ matrix.variant.tag_suffix }}
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ jobs:
|
|||||||
go-version-file: "go.mod"
|
go-version-file: "go.mod"
|
||||||
- name: Import GPG key
|
- name: Import GPG key
|
||||||
id: import_gpg
|
id: import_gpg
|
||||||
uses: crazy-max/ghaction-import-gpg@v6
|
uses: crazy-max/ghaction-import-gpg@v7
|
||||||
with:
|
with:
|
||||||
gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }}
|
gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }}
|
||||||
passphrase: ${{ secrets.PASSPHRASE }}
|
passphrase: ${{ secrets.PASSPHRASE }}
|
||||||
@@ -39,6 +39,15 @@ jobs:
|
|||||||
GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }}
|
GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }}
|
||||||
release-image:
|
release-image:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
variant:
|
||||||
|
- target: basic
|
||||||
|
tag_suffix: ""
|
||||||
|
- target: dind
|
||||||
|
tag_suffix: "-dind"
|
||||||
|
- target: dind-rootless
|
||||||
|
tag_suffix: "-dind-rootless"
|
||||||
container:
|
container:
|
||||||
image: catthehacker/ubuntu:act-latest
|
image: catthehacker/ubuntu:act-latest
|
||||||
env:
|
env:
|
||||||
@@ -62,50 +71,28 @@ jobs:
|
|||||||
username: ${{ secrets.DOCKER_USERNAME }}
|
username: ${{ secrets.DOCKER_USERNAME }}
|
||||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
|
|
||||||
- name: Get Meta
|
- name: "Docker meta"
|
||||||
id: meta
|
id: docker_meta
|
||||||
run: |
|
uses: https://github.com/docker/metadata-action@v5
|
||||||
echo REPO_NAME=$(echo ${GITHUB_REPOSITORY} | awk -F"/" '{print $2}') >> $GITHUB_OUTPUT
|
with:
|
||||||
echo REPO_VERSION=${GITHUB_REF_NAME#v} >> $GITHUB_OUTPUT
|
images: |
|
||||||
|
${{ env.DOCKER_ORG }}/runner
|
||||||
|
tags: |
|
||||||
|
type=semver,pattern={{major}}.{{minor}}.{{patch}}
|
||||||
|
type=semver,pattern={{major}}.{{minor}}
|
||||||
|
type=semver,pattern={{major}}
|
||||||
|
flavor: |
|
||||||
|
latest=true
|
||||||
|
suffix=${{ matrix.variant.tag_suffix }},onlatest=true
|
||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
file: ./Dockerfile
|
file: ./Dockerfile
|
||||||
target: basic
|
target: ${{ matrix.variant.target }}
|
||||||
platforms: |
|
platforms: |
|
||||||
linux/amd64
|
linux/amd64
|
||||||
linux/arm64
|
linux/arm64
|
||||||
push: true
|
push: true
|
||||||
tags: |
|
tags: ${{ steps.docker_meta.outputs.tags }}
|
||||||
${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ steps.meta.outputs.REPO_VERSION }}
|
|
||||||
${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ env.DOCKER_LATEST }}
|
|
||||||
|
|
||||||
- name: Build and push dind
|
|
||||||
uses: docker/build-push-action@v6
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
file: ./Dockerfile
|
|
||||||
target: dind
|
|
||||||
platforms: |
|
|
||||||
linux/amd64
|
|
||||||
linux/arm64
|
|
||||||
push: true
|
|
||||||
tags: |
|
|
||||||
${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ steps.meta.outputs.REPO_VERSION }}-dind
|
|
||||||
${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ env.DOCKER_LATEST }}-dind
|
|
||||||
|
|
||||||
- name: Build and push dind-rootless
|
|
||||||
uses: docker/build-push-action@v6
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
file: ./Dockerfile
|
|
||||||
target: dind-rootless
|
|
||||||
platforms: |
|
|
||||||
linux/amd64
|
|
||||||
linux/arm64
|
|
||||||
push: true
|
|
||||||
tags: |
|
|
||||||
${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ steps.meta.outputs.REPO_VERSION }}-dind-rootless
|
|
||||||
${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ env.DOCKER_LATEST }}-dind-rootless
|
|
||||||
|
|||||||
@@ -1,72 +0,0 @@
|
|||||||
name: release
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
tags:
|
|
||||||
- v*
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
release:
|
|
||||||
# TODO use environment to scope secrets
|
|
||||||
name: release
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
- uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- uses: actions/cache@v4
|
|
||||||
if: ${{ !env.ACT }}
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
- name: GoReleaser
|
|
||||||
uses: goreleaser/goreleaser-action@v6
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
args: release --clean -f ./.goreleaser.yml -f ./.goreleaser.gitea.yml
|
|
||||||
env:
|
|
||||||
GITEA_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN || github.token }}
|
|
||||||
- name: Winget
|
|
||||||
uses: vedantmgoyal2009/winget-releaser@v2
|
|
||||||
with:
|
|
||||||
identifier: nektos.act
|
|
||||||
installers-regex: '_Windows_\w+\.zip$'
|
|
||||||
token: ${{ secrets.WINGET_TOKEN }}
|
|
||||||
if: env.ENABLED
|
|
||||||
env:
|
|
||||||
ENABLED: ${{ secrets.WINGET_TOKEN && '1' || '' }}
|
|
||||||
- name: Chocolatey
|
|
||||||
uses: ./.github/actions/choco
|
|
||||||
with:
|
|
||||||
version: ${{ github.ref }}
|
|
||||||
apiKey: ${{ secrets.CHOCO_APIKEY }}
|
|
||||||
push: true
|
|
||||||
if: env.ENABLED
|
|
||||||
env:
|
|
||||||
ENABLED: ${{ secrets.CHOCO_APIKEY && '1' || '' }}
|
|
||||||
# TODO use ssh deployment key
|
|
||||||
- name: GitHub CLI extension
|
|
||||||
uses: actions/github-script@v8
|
|
||||||
with:
|
|
||||||
github-token: ${{ secrets.CLI_GITHUB_TOKEN || secrets.GORELEASER_GITHUB_TOKEN }}
|
|
||||||
script: |
|
|
||||||
const mainRef = (await github.rest.git.getRef({
|
|
||||||
owner: context.repo.owner,
|
|
||||||
repo: 'gh-act',
|
|
||||||
ref: 'heads/main',
|
|
||||||
})).data;
|
|
||||||
console.log(mainRef);
|
|
||||||
github.rest.git.createRef({
|
|
||||||
owner: 'nektos',
|
|
||||||
repo: 'gh-act',
|
|
||||||
ref: context.ref,
|
|
||||||
sha: mainRef.object.sha,
|
|
||||||
});
|
|
||||||
if: env.ENABLED
|
|
||||||
env:
|
|
||||||
ENABLED: ${{ (secrets.CLI_GITHUB_TOKEN || secrets.GORELEASER_GITHUB_TOKEN) && '1' || '' }}
|
|
||||||
@@ -1,12 +1,9 @@
|
|||||||
name: checks
|
name: checks
|
||||||
on:
|
on:
|
||||||
- push
|
push:
|
||||||
- pull_request
|
branches:
|
||||||
|
- main
|
||||||
env:
|
pull_request:
|
||||||
DOOD: 1
|
|
||||||
NO_QEMU: 1
|
|
||||||
NO_EXTERNAL_IP: 1
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
lint:
|
lint:
|
||||||
@@ -22,13 +19,4 @@ jobs:
|
|||||||
- name: build
|
- name: build
|
||||||
run: make build
|
run: make build
|
||||||
- name: test
|
- name: test
|
||||||
run: |
|
run: make test
|
||||||
unset ACTIONS_RUNTIME_URL
|
|
||||||
unset ACTIONS_RESULTS_URL
|
|
||||||
unset ACTIONS_RUNTIME_TOKEN
|
|
||||||
export GITHUB_REPOSITORY="${GITHUB_REPOSITORY#${SERVER_URL%/}/}"
|
|
||||||
export ACT_REPOSITORY="${GITHUB_REPOSITORY}"
|
|
||||||
export ACT_OWNER="${ACT_REPOSITORY%%/*}"
|
|
||||||
make test
|
|
||||||
env:
|
|
||||||
SERVER_URL: ${{ github.server_url }}
|
|
||||||
88
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
88
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
@@ -1,88 +0,0 @@
|
|||||||
name: Bug report
|
|
||||||
description: Use this template for reporting bugs/issues.
|
|
||||||
labels:
|
|
||||||
- 'kind/bug'
|
|
||||||
body:
|
|
||||||
- type: markdown
|
|
||||||
attributes:
|
|
||||||
value: |
|
|
||||||
Thanks for taking the time to fill out this bug report!
|
|
||||||
- type: textarea
|
|
||||||
id: act-debug
|
|
||||||
attributes:
|
|
||||||
label: Bug report info
|
|
||||||
render: plain text
|
|
||||||
description: |
|
|
||||||
Output of `act --bug-report`
|
|
||||||
placeholder: |
|
|
||||||
act --bug-report
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: act-command
|
|
||||||
attributes:
|
|
||||||
label: Command used with act
|
|
||||||
description: |
|
|
||||||
Please paste your whole command
|
|
||||||
placeholder: |
|
|
||||||
act -P ubuntu-latest=node:12 -v -d ...
|
|
||||||
render: sh
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: what-happened
|
|
||||||
attributes:
|
|
||||||
label: Describe issue
|
|
||||||
description: |
|
|
||||||
Also tell us what did you expect to happen?
|
|
||||||
placeholder: |
|
|
||||||
Describe issue
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: input
|
|
||||||
id: repo
|
|
||||||
attributes:
|
|
||||||
label: Link to GitHub repository
|
|
||||||
description: |
|
|
||||||
Provide link to GitHub repository, you can skip it if the repository is private or you don't have it on GitHub, otherwise please provide it as it might help us troubleshoot problem
|
|
||||||
placeholder: |
|
|
||||||
https://github.com/nektos/act
|
|
||||||
validations:
|
|
||||||
required: false
|
|
||||||
- type: textarea
|
|
||||||
id: workflow
|
|
||||||
attributes:
|
|
||||||
label: Workflow content
|
|
||||||
description: |
|
|
||||||
Please paste your **whole** workflow here
|
|
||||||
placeholder: |
|
|
||||||
name: My workflow
|
|
||||||
on: ['push', 'schedule']
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
env:
|
|
||||||
KEY: VAL
|
|
||||||
[...]
|
|
||||||
render: yml
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: logs
|
|
||||||
attributes:
|
|
||||||
label: Relevant log output
|
|
||||||
description: |
|
|
||||||
Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. Please verify that the log output doesn't contain any sensitive data.
|
|
||||||
render: sh
|
|
||||||
placeholder: |
|
|
||||||
Use `act -v` for verbose output
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: additional-info
|
|
||||||
attributes:
|
|
||||||
label: Additional information
|
|
||||||
placeholder: |
|
|
||||||
Additional information that doesn't fit elsewhere
|
|
||||||
validations:
|
|
||||||
required: false
|
|
||||||
8
.github/ISSUE_TEMPLATE/config.yml
vendored
8
.github/ISSUE_TEMPLATE/config.yml
vendored
@@ -1,8 +0,0 @@
|
|||||||
blank_issues_enabled: true
|
|
||||||
contact_links:
|
|
||||||
- name: Start a discussion
|
|
||||||
url: https://github.com/actions-oss/act-cli/discussions/new
|
|
||||||
about: You can ask for help here!
|
|
||||||
- name: Want to contribute to act?
|
|
||||||
url: https://github.com/actions-oss/act-cli/blob/main/CONTRIBUTING.md
|
|
||||||
about: Be sure to read contributing guidelines!
|
|
||||||
28
.github/ISSUE_TEMPLATE/feature_template.yml
vendored
28
.github/ISSUE_TEMPLATE/feature_template.yml
vendored
@@ -1,28 +0,0 @@
|
|||||||
name: Feature request
|
|
||||||
description: Use this template for requesting a feature/enhancement.
|
|
||||||
labels:
|
|
||||||
- 'kind/feature-request'
|
|
||||||
body:
|
|
||||||
- type: markdown
|
|
||||||
attributes:
|
|
||||||
value: |
|
|
||||||
Please note that incompatibility with GitHub Actions should be opened as a bug report, not a new feature.
|
|
||||||
- type: input
|
|
||||||
id: act-version
|
|
||||||
attributes:
|
|
||||||
label: Act version
|
|
||||||
description: |
|
|
||||||
What version of `act` are you using? Version can be obtained via `act --version`
|
|
||||||
If you've built it from source, please provide commit hash
|
|
||||||
placeholder: |
|
|
||||||
act --version
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: feature
|
|
||||||
attributes:
|
|
||||||
label: Feature description
|
|
||||||
description: Describe feature that you would like to see
|
|
||||||
placeholder: ...
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
20
.github/actions/choco/Dockerfile
vendored
20
.github/actions/choco/Dockerfile
vendored
@@ -1,20 +0,0 @@
|
|||||||
FROM alpine:3.21
|
|
||||||
|
|
||||||
ARG CHOCOVERSION=1.1.0
|
|
||||||
|
|
||||||
RUN apk add --no-cache bash ca-certificates git \
|
|
||||||
&& apk --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/community add mono mono-dev \
|
|
||||||
&& cert-sync /etc/ssl/certs/ca-certificates.crt \
|
|
||||||
&& wget "https://github.com/chocolatey/choco/archive/${CHOCOVERSION}.tar.gz" -O- | tar -xzf - \
|
|
||||||
&& cd choco-"${CHOCOVERSION}" \
|
|
||||||
&& chmod +x build.sh zip.sh \
|
|
||||||
&& ./build.sh -v \
|
|
||||||
&& mv ./code_drop/chocolatey/console /opt/chocolatey \
|
|
||||||
&& mkdir -p /opt/chocolatey/lib \
|
|
||||||
&& rm -rf /choco-"${CHOCOVERSION}" \
|
|
||||||
&& apk del mono-dev \
|
|
||||||
&& rm -rf /var/cache/apk/*
|
|
||||||
|
|
||||||
ENV ChocolateyInstall=/opt/chocolatey
|
|
||||||
COPY entrypoint.sh /entrypoint.sh
|
|
||||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
||||||
16
.github/actions/choco/action.yml
vendored
16
.github/actions/choco/action.yml
vendored
@@ -1,16 +0,0 @@
|
|||||||
name: 'Chocolatey Packager'
|
|
||||||
description: 'Create the choco package and push it'
|
|
||||||
inputs:
|
|
||||||
version:
|
|
||||||
description: 'Version of package'
|
|
||||||
required: false
|
|
||||||
apiKey:
|
|
||||||
description: 'API Key for chocolately'
|
|
||||||
required: false
|
|
||||||
push:
|
|
||||||
description: 'Option for if package is going to be pushed'
|
|
||||||
required: false
|
|
||||||
default: 'false'
|
|
||||||
runs:
|
|
||||||
using: 'docker'
|
|
||||||
image: 'Dockerfile'
|
|
||||||
31
.github/actions/choco/entrypoint.sh
vendored
31
.github/actions/choco/entrypoint.sh
vendored
@@ -1,31 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
function choco {
|
|
||||||
mono /opt/chocolatey/choco.exe "$@" --allow-unofficial --nocolor
|
|
||||||
}
|
|
||||||
|
|
||||||
function get_version {
|
|
||||||
local version=${INPUT_VERSION:-$(git describe --tags)}
|
|
||||||
version=(${version//[!0-9.-]/})
|
|
||||||
local version_parts=(${version//-/ })
|
|
||||||
version=${version_parts[0]}
|
|
||||||
if [ ${#version_parts[@]} -gt 1 ]; then
|
|
||||||
version=${version_parts}.${version_parts[1]}
|
|
||||||
fi
|
|
||||||
echo "$version"
|
|
||||||
}
|
|
||||||
|
|
||||||
## Determine the version to pack
|
|
||||||
VERSION=$(get_version)
|
|
||||||
echo "Packing version ${VERSION} of act"
|
|
||||||
rm -f act-cli.*.nupkg
|
|
||||||
mkdir -p tools
|
|
||||||
cp LICENSE tools/LICENSE.txt
|
|
||||||
cp VERIFICATION tools/VERIFICATION.txt
|
|
||||||
cp dist/act-cli_windows_amd64*/act.exe tools/
|
|
||||||
choco pack act-cli.nuspec --version ${VERSION}
|
|
||||||
if [[ "$INPUT_PUSH" == "true" ]]; then
|
|
||||||
choco push act-cli.${VERSION}.nupkg --api-key ${INPUT_APIKEY} -s https://push.chocolatey.org/ --timeout 180
|
|
||||||
fi
|
|
||||||
23
.github/dependabot.yml
vendored
23
.github/dependabot.yml
vendored
@@ -1,23 +0,0 @@
|
|||||||
# To get started with Dependabot version updates, you'll need to specify which
|
|
||||||
# package ecosystems to update and where the package manifests are located.
|
|
||||||
# Please see the documentation for all configuration options:
|
|
||||||
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
|
|
||||||
|
|
||||||
version: 2
|
|
||||||
updates:
|
|
||||||
- package-ecosystem: 'github-actions'
|
|
||||||
directory: '/'
|
|
||||||
schedule:
|
|
||||||
interval: 'monthly'
|
|
||||||
groups:
|
|
||||||
dependencies:
|
|
||||||
patterns:
|
|
||||||
- '*'
|
|
||||||
- package-ecosystem: 'gomod'
|
|
||||||
directory: '/'
|
|
||||||
schedule:
|
|
||||||
interval: 'monthly'
|
|
||||||
groups:
|
|
||||||
dependencies:
|
|
||||||
patterns:
|
|
||||||
- '*'
|
|
||||||
1
.github/workflows/.gitignore
vendored
1
.github/workflows/.gitignore
vendored
@@ -1 +0,0 @@
|
|||||||
test-*.yml
|
|
||||||
151
.github/workflows/checks.yml
vendored
151
.github/workflows/checks.yml
vendored
@@ -1,151 +0,0 @@
|
|||||||
name: checks
|
|
||||||
on: [pull_request, workflow_dispatch]
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
cancel-in-progress: true
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
|
|
||||||
env:
|
|
||||||
ACT_OWNER: ${{ github.repository_owner }}
|
|
||||||
ACT_REPOSITORY: ${{ github.repository }}
|
|
||||||
CGO_ENABLED: 0
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
lint:
|
|
||||||
name: lint
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
- uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- uses: golangci/golangci-lint-action@v8.0.0
|
|
||||||
with:
|
|
||||||
version: v2.1.6
|
|
||||||
- uses: megalinter/megalinter/flavors/go@v9.1.0
|
|
||||||
env:
|
|
||||||
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
VALIDATE_ALL_CODEBASE: false
|
|
||||||
GITHUB_STATUS_REPORTER: ${{ !env.ACT }}
|
|
||||||
GITHUB_COMMENT_REPORTER: ${{ !env.ACT }}
|
|
||||||
|
|
||||||
test-linux:
|
|
||||||
name: test-linux
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 2
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
- uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- uses: actions/cache@v4
|
|
||||||
if: ${{ !env.ACT }}
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
- name: Install gotestfmt
|
|
||||||
run: go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@v2.5.0
|
|
||||||
- name: Run Tests
|
|
||||||
run: go test -json -v -cover -coverpkg=./... -coverprofile=coverage.txt -covermode=atomic -timeout 20m ./... | gotestfmt -hide successful-packages,empty-packages 2>&1
|
|
||||||
- name: Run act from cli
|
|
||||||
run: go run main.go -P ubuntu-latest=node:16-buster-slim -C ./pkg/runner/testdata/ -W ./basic/push.yml
|
|
||||||
- name: Run act from cli without docker support
|
|
||||||
run: go run -tags WITHOUT_DOCKER main.go -P ubuntu-latest=-self-hosted -C ./pkg/runner/testdata/ -W ./local-action-js/push.yml
|
|
||||||
- name: Upload Codecov report
|
|
||||||
uses: codecov/codecov-action@v5
|
|
||||||
with:
|
|
||||||
files: coverage.txt
|
|
||||||
fail_ci_if_error: true # optional (default = false)
|
|
||||||
token: ${{ secrets.CODECOV_TOKEN }}
|
|
||||||
|
|
||||||
test-host:
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
os:
|
|
||||||
- windows-latest
|
|
||||||
- macos-latest
|
|
||||||
name: test-host-${{matrix.os}}
|
|
||||||
runs-on: ${{matrix.os}}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 2
|
|
||||||
- uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- name: Install gotestfmt
|
|
||||||
run: go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@v2.5.0
|
|
||||||
- name: Run Tests
|
|
||||||
run: go test -v -cover -coverpkg=./... -coverprofile=coverage.txt -covermode=atomic -timeout 20m -run ^TestRunEventHostEnvironment$ ./...
|
|
||||||
shell: bash
|
|
||||||
|
|
||||||
|
|
||||||
snapshot:
|
|
||||||
name: snapshot
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- uses: actions/cache@v4
|
|
||||||
if: ${{ !env.ACT }}
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
- name: GoReleaser
|
|
||||||
id: goreleaser
|
|
||||||
uses: goreleaser/goreleaser-action@v6
|
|
||||||
with:
|
|
||||||
version: v2
|
|
||||||
args: release --snapshot --clean
|
|
||||||
- name: Setup Node
|
|
||||||
uses: actions/setup-node@v6
|
|
||||||
with:
|
|
||||||
node-version: 20
|
|
||||||
- name: Install @actions/artifact
|
|
||||||
run: npm install @actions/artifact
|
|
||||||
- name: Upload All
|
|
||||||
uses: actions/github-script@v8
|
|
||||||
with:
|
|
||||||
script: |
|
|
||||||
const {DefaultArtifactClient} = require('@actions/artifact');
|
|
||||||
const aartifact = new DefaultArtifactClient();
|
|
||||||
var artifacts = JSON.parse(process.env.ARTIFACTS);
|
|
||||||
for(var artifact of artifacts) {
|
|
||||||
if(artifact.type === "Binary") {
|
|
||||||
const {id, size} = await aartifact.uploadArtifact(
|
|
||||||
// name of the artifact
|
|
||||||
`${artifact.name}-${artifact.target}`,
|
|
||||||
// files to include (supports absolute and relative paths)
|
|
||||||
[artifact.path],
|
|
||||||
process.cwd(),
|
|
||||||
{
|
|
||||||
// optional: how long to retain the artifact
|
|
||||||
// if unspecified, defaults to repository/org retention settings (the limit of this value)
|
|
||||||
retentionDays: 10
|
|
||||||
}
|
|
||||||
);
|
|
||||||
console.log(`Created artifact with id: ${id} (bytes: ${size}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
env:
|
|
||||||
ARTIFACTS: ${{ steps.goreleaser.outputs.artifacts }}
|
|
||||||
- name: Chocolatey
|
|
||||||
uses: ./.github/actions/choco
|
|
||||||
with:
|
|
||||||
version: v0.0.0-pr
|
|
||||||
23
.github/workflows/codespell.yml
vendored
23
.github/workflows/codespell.yml
vendored
@@ -1,23 +0,0 @@
|
|||||||
# Codespell configuration is within .codespellrc
|
|
||||||
---
|
|
||||||
name: Codespell
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: [master]
|
|
||||||
pull_request:
|
|
||||||
branches: [master]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
codespell:
|
|
||||||
name: Check for spelling errors
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v5
|
|
||||||
- name: Codespell
|
|
||||||
uses: codespell-project/actions-codespell@v2
|
|
||||||
30
.github/workflows/promote.yml
vendored
30
.github/workflows/promote.yml
vendored
@@ -1,30 +0,0 @@
|
|||||||
name: promote
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: '0 2 1 * *'
|
|
||||||
workflow_dispatch: {}
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
release:
|
|
||||||
if: vars.ENABLE_PROMOTE || github.event_name != 'schedule'
|
|
||||||
name: promote
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
ref: master
|
|
||||||
token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
|
|
||||||
- uses: fregante/setup-git-user@v2
|
|
||||||
- uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- uses: actions/cache@v4
|
|
||||||
if: ${{ !env.ACT }}
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
- run: make promote
|
|
||||||
72
.github/workflows/release.yml
vendored
72
.github/workflows/release.yml
vendored
@@ -1,72 +0,0 @@
|
|||||||
name: release
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
tags:
|
|
||||||
- v*
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
release:
|
|
||||||
# TODO use environment to scope secrets
|
|
||||||
name: release
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
- uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- uses: actions/cache@v4
|
|
||||||
if: ${{ !env.ACT }}
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
- name: GoReleaser
|
|
||||||
uses: goreleaser/goreleaser-action@v6
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
args: release --clean
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN || github.token }}
|
|
||||||
- name: Winget
|
|
||||||
uses: vedantmgoyal2009/winget-releaser@v2
|
|
||||||
with:
|
|
||||||
identifier: nektos.act
|
|
||||||
installers-regex: '_Windows_\w+\.zip$'
|
|
||||||
token: ${{ secrets.WINGET_TOKEN }}
|
|
||||||
if: env.ENABLED
|
|
||||||
env:
|
|
||||||
ENABLED: ${{ secrets.WINGET_TOKEN && '1' || '' }}
|
|
||||||
- name: Chocolatey
|
|
||||||
uses: ./.github/actions/choco
|
|
||||||
with:
|
|
||||||
version: ${{ github.ref }}
|
|
||||||
apiKey: ${{ secrets.CHOCO_APIKEY }}
|
|
||||||
push: true
|
|
||||||
if: env.ENABLED
|
|
||||||
env:
|
|
||||||
ENABLED: ${{ secrets.CHOCO_APIKEY && '1' || '' }}
|
|
||||||
# TODO use ssh deployment key
|
|
||||||
- name: GitHub CLI extension
|
|
||||||
uses: actions/github-script@v8
|
|
||||||
with:
|
|
||||||
github-token: ${{ secrets.CLI_GITHUB_TOKEN || secrets.GORELEASER_GITHUB_TOKEN }}
|
|
||||||
script: |
|
|
||||||
const mainRef = (await github.rest.git.getRef({
|
|
||||||
owner: context.repo.owner,
|
|
||||||
repo: 'gh-act',
|
|
||||||
ref: 'heads/main',
|
|
||||||
})).data;
|
|
||||||
console.log(mainRef);
|
|
||||||
github.rest.git.createRef({
|
|
||||||
owner: 'nektos',
|
|
||||||
repo: 'gh-act',
|
|
||||||
ref: context.ref,
|
|
||||||
sha: mainRef.object.sha,
|
|
||||||
});
|
|
||||||
if: env.ENABLED
|
|
||||||
env:
|
|
||||||
ENABLED: ${{ (secrets.CLI_GITHUB_TOKEN || secrets.GORELEASER_GITHUB_TOKEN) && '1' || '' }}
|
|
||||||
23
.github/workflows/stale.yml
vendored
23
.github/workflows/stale.yml
vendored
@@ -1,23 +0,0 @@
|
|||||||
name: 'Close stale issues'
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: '0 0 * * *'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
stale:
|
|
||||||
name: Stale
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/stale@v10
|
|
||||||
with:
|
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
stale-issue-message: 'Issue is stale and will be closed in 14 days unless there is new activity'
|
|
||||||
stale-pr-message: 'PR is stale and will be closed in 14 days unless there is new activity'
|
|
||||||
stale-issue-label: 'stale'
|
|
||||||
exempt-issue-labels: 'stale-exempt,kind/feature-request'
|
|
||||||
stale-pr-label: 'stale'
|
|
||||||
exempt-pr-labels: 'stale-exempt'
|
|
||||||
remove-stale-when-updated: 'True'
|
|
||||||
operations-per-run: 500
|
|
||||||
days-before-stale: 180
|
|
||||||
days-before-close: 14
|
|
||||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,8 +1,7 @@
|
|||||||
/act_runner
|
/gitea-runner
|
||||||
.env
|
.env
|
||||||
.runner
|
.runner
|
||||||
coverage.txt
|
coverage.txt
|
||||||
/gitea-vet
|
|
||||||
/config.yaml
|
/config.yaml
|
||||||
|
|
||||||
# Jetbrains
|
# Jetbrains
|
||||||
|
|||||||
@@ -1,2 +0,0 @@
|
|||||||
b910a42edfab7a02b08a52ecef203fd419725642:pkg/container/testdata/docker-pull-options/config.json:generic-api-key:4
|
|
||||||
710a3ac94c3dc0eaf680d417c87f37f92b4887f4:pkg/container/docker_pull_test.go:generic-api-key:45
|
|
||||||
@@ -13,6 +13,7 @@ linters:
|
|||||||
- forbidigo
|
- forbidigo
|
||||||
- gocheckcompilerdirectives
|
- gocheckcompilerdirectives
|
||||||
- gocritic
|
- gocritic
|
||||||
|
- goheader
|
||||||
- govet
|
- govet
|
||||||
- ineffassign
|
- ineffassign
|
||||||
- mirror
|
- mirror
|
||||||
@@ -35,23 +36,61 @@ linters:
|
|||||||
rules:
|
rules:
|
||||||
main:
|
main:
|
||||||
deny:
|
deny:
|
||||||
|
- pkg: io/ioutil
|
||||||
|
desc: use os or io instead
|
||||||
|
- pkg: golang.org/x/exp
|
||||||
|
desc: it's experimental and unreliable
|
||||||
- pkg: github.com/pkg/errors
|
- pkg: github.com/pkg/errors
|
||||||
desc: Please use "errors" package from standard library
|
desc: use builtin errors package instead
|
||||||
- pkg: gotest.tools/v3
|
nolintlint:
|
||||||
desc: Please keep tests unified using only github.com/stretchr/testify
|
allow-unused: false
|
||||||
- pkg: log
|
require-explanation: true
|
||||||
desc: Please keep logging unified using only github.com/sirupsen/logrus
|
require-specific: true
|
||||||
gocritic:
|
gocritic:
|
||||||
|
enabled-checks:
|
||||||
|
- equalFold
|
||||||
disabled-checks:
|
disabled-checks:
|
||||||
- ifElseChain
|
- ifElseChain
|
||||||
gocyclo:
|
revive:
|
||||||
min-complexity: 20
|
severity: error
|
||||||
importas:
|
rules:
|
||||||
alias:
|
- name: blank-imports
|
||||||
- pkg: github.com/sirupsen/logrus
|
- name: constant-logical-expr
|
||||||
alias: log
|
- name: context-as-argument
|
||||||
- pkg: github.com/stretchr/testify/assert
|
- name: context-keys-type
|
||||||
alias: assert
|
- name: dot-imports
|
||||||
|
- name: empty-lines
|
||||||
|
- name: error-return
|
||||||
|
- name: error-strings
|
||||||
|
- name: exported
|
||||||
|
- name: identical-branches
|
||||||
|
- name: if-return
|
||||||
|
- name: increment-decrement
|
||||||
|
- name: modifies-value-receiver
|
||||||
|
- name: package-comments
|
||||||
|
- name: redefines-builtin-id
|
||||||
|
- name: superfluous-else
|
||||||
|
- name: time-naming
|
||||||
|
- name: unexported-return
|
||||||
|
- name: var-declaration
|
||||||
|
- name: var-naming
|
||||||
|
staticcheck:
|
||||||
|
checks:
|
||||||
|
- all
|
||||||
|
- -ST1005
|
||||||
|
usetesting:
|
||||||
|
os-temp-dir: true
|
||||||
|
perfsprint:
|
||||||
|
concat-loop: false
|
||||||
|
govet:
|
||||||
|
enable:
|
||||||
|
- nilness
|
||||||
|
- unusedwrite
|
||||||
|
goheader:
|
||||||
|
values:
|
||||||
|
regexp:
|
||||||
|
HEADER: 'Copyright \d{4} The Gitea Authors\. All rights reserved\.(\nCopyright [^\n]+)*\nSPDX-License-Identifier: MIT'
|
||||||
|
template: '{{ HEADER }}'
|
||||||
exclusions:
|
exclusions:
|
||||||
generated: lax
|
generated: lax
|
||||||
presets:
|
presets:
|
||||||
@@ -60,23 +99,27 @@ linters:
|
|||||||
- legacy
|
- legacy
|
||||||
- std-error-handling
|
- std-error-handling
|
||||||
rules:
|
rules:
|
||||||
- linters: [revive]
|
- linters:
|
||||||
text: avoid meaningless package names
|
- forbidigo
|
||||||
paths:
|
path: cmd
|
||||||
- report
|
|
||||||
- third_party$
|
|
||||||
- builtin$
|
|
||||||
- examples$
|
|
||||||
issues:
|
issues:
|
||||||
max-issues-per-linter: 0
|
max-issues-per-linter: 0
|
||||||
max-same-issues: 0
|
max-same-issues: 0
|
||||||
formatters:
|
formatters:
|
||||||
enable:
|
enable:
|
||||||
- goimports
|
- gci
|
||||||
|
- gofumpt
|
||||||
|
settings:
|
||||||
|
gci:
|
||||||
|
custom-order: true
|
||||||
|
sections:
|
||||||
|
- standard
|
||||||
|
- prefix(gitea.com/gitea/runner)
|
||||||
|
- blank
|
||||||
|
- default
|
||||||
|
gofumpt:
|
||||||
|
extra-rules: true
|
||||||
exclusions:
|
exclusions:
|
||||||
generated: lax
|
generated: lax
|
||||||
paths:
|
run:
|
||||||
- report
|
timeout: 10m
|
||||||
- third_party$
|
|
||||||
- builtin$
|
|
||||||
- examples$
|
|
||||||
|
|||||||
@@ -1,3 +0,0 @@
|
|||||||
gitea_urls:
|
|
||||||
api: https://gitea.com/api/v1/
|
|
||||||
download: https://gitea.com/
|
|
||||||
@@ -1,5 +1,7 @@
|
|||||||
version: 2
|
version: 2
|
||||||
|
|
||||||
|
project_name: gitea-runner
|
||||||
|
|
||||||
before:
|
before:
|
||||||
hooks:
|
hooks:
|
||||||
- go mod tidy
|
- go mod tidy
|
||||||
@@ -63,7 +65,7 @@ builds:
|
|||||||
flags:
|
flags:
|
||||||
- -trimpath
|
- -trimpath
|
||||||
ldflags:
|
ldflags:
|
||||||
- -s -w -X gitea.com/gitea/act_runner/internal/pkg/ver.version={{ .Summary }}
|
- -s -w -X gitea.com/gitea/runner/internal/pkg/ver.version={{ .Summary }}
|
||||||
binary: >-
|
binary: >-
|
||||||
{{ .ProjectName }}-
|
{{ .ProjectName }}-
|
||||||
{{- .Version }}-
|
{{- .Version }}-
|
||||||
@@ -86,7 +88,7 @@ blobs:
|
|||||||
provider: s3
|
provider: s3
|
||||||
bucket: "{{ .Env.S3_BUCKET }}"
|
bucket: "{{ .Env.S3_BUCKET }}"
|
||||||
region: "{{ .Env.S3_REGION }}"
|
region: "{{ .Env.S3_REGION }}"
|
||||||
directory: "act_runner/{{.Version}}"
|
directory: "gitea-runner/{{.Version}}"
|
||||||
extra_files:
|
extra_files:
|
||||||
- glob: ./**.xz
|
- glob: ./**.xz
|
||||||
- glob: ./**.sha256
|
- glob: ./**.sha256
|
||||||
|
|||||||
@@ -1,54 +0,0 @@
|
|||||||
version: 2
|
|
||||||
before:
|
|
||||||
hooks:
|
|
||||||
- go mod tidy
|
|
||||||
builds:
|
|
||||||
- env:
|
|
||||||
- CGO_ENABLED=0
|
|
||||||
goos:
|
|
||||||
- darwin
|
|
||||||
- linux
|
|
||||||
- windows
|
|
||||||
goarch:
|
|
||||||
- amd64
|
|
||||||
- '386'
|
|
||||||
- arm64
|
|
||||||
- arm
|
|
||||||
- riscv64
|
|
||||||
goarm:
|
|
||||||
- '6'
|
|
||||||
- '7'
|
|
||||||
ignore:
|
|
||||||
- goos: windows
|
|
||||||
goarch: arm
|
|
||||||
binary: act
|
|
||||||
checksum:
|
|
||||||
name_template: 'checksums.txt'
|
|
||||||
archives:
|
|
||||||
- name_template: >-
|
|
||||||
{{ .ProjectName }}_
|
|
||||||
{{- title .Os }}_
|
|
||||||
{{- if eq .Arch "amd64" }}x86_64
|
|
||||||
{{- else if eq .Arch "386" }}i386
|
|
||||||
{{- else }}{{ .Arch }}{{ end }}
|
|
||||||
{{- if .Arm }}v{{ .Arm }}{{ end }}
|
|
||||||
format_overrides:
|
|
||||||
- goos: windows
|
|
||||||
formats:
|
|
||||||
- zip
|
|
||||||
changelog:
|
|
||||||
groups:
|
|
||||||
- title: 'New Features'
|
|
||||||
regexp: "^.*feat[(\\w)]*:+.*$"
|
|
||||||
order: 0
|
|
||||||
- title: 'Bug fixes'
|
|
||||||
regexp: "^.*fix[(\\w)]*:+.*$"
|
|
||||||
order: 1
|
|
||||||
- title: 'Documentation updates'
|
|
||||||
regexp: "^.*docs[(\\w)]*:+.*$"
|
|
||||||
order: 2
|
|
||||||
- title: 'Other'
|
|
||||||
order: 999
|
|
||||||
release:
|
|
||||||
prerelease: auto
|
|
||||||
mode: append
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
# Default state for all rules
|
|
||||||
default: true
|
|
||||||
|
|
||||||
# MD013/line-length - Line length
|
|
||||||
MD013:
|
|
||||||
line_length: 1024
|
|
||||||
|
|
||||||
# MD033/no-inline-html - Inline HTML
|
|
||||||
MD033: false
|
|
||||||
|
|
||||||
# MD041/first-line-heading/first-line-h1 - First line in a file should be a top-level heading
|
|
||||||
MD041: false
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
---
|
|
||||||
APPLY_FIXES: none
|
|
||||||
DISABLE:
|
|
||||||
- ACTION
|
|
||||||
- BASH
|
|
||||||
- COPYPASTE
|
|
||||||
- DOCKERFILE
|
|
||||||
- GO
|
|
||||||
- JAVASCRIPT
|
|
||||||
- SPELL
|
|
||||||
DISABLE_LINTERS:
|
|
||||||
- YAML_YAMLLINT
|
|
||||||
- MARKDOWN_MARKDOWN_TABLE_FORMATTER
|
|
||||||
- MARKDOWN_MARKDOWN_LINK_CHECK
|
|
||||||
- REPOSITORY_CHECKOV
|
|
||||||
- REPOSITORY_TRIVY
|
|
||||||
FILTER_REGEX_EXCLUDE: (.*testdata/*|install.sh|pkg/container/docker_cli.go|pkg/container/DOCKER_LICENSE|VERSION)
|
|
||||||
MARKDOWN_MARKDOWNLINT_CONFIG_FILE: .markdownlint.yml
|
|
||||||
PARALLEL: false
|
|
||||||
PRINT_ALPACA: false
|
|
||||||
98
.mergify.yml
98
.mergify.yml
@@ -1,98 +0,0 @@
|
|||||||
|
|
||||||
pull_request_rules:
|
|
||||||
- name: warn on conflicts
|
|
||||||
conditions:
|
|
||||||
- -draft
|
|
||||||
- -closed
|
|
||||||
- -merged
|
|
||||||
- conflict
|
|
||||||
actions:
|
|
||||||
comment:
|
|
||||||
message: '@{{author}} this pull request is now in conflict 😩'
|
|
||||||
label:
|
|
||||||
add:
|
|
||||||
- conflict
|
|
||||||
- name: remove conflict label if not needed
|
|
||||||
conditions:
|
|
||||||
- -conflict
|
|
||||||
actions:
|
|
||||||
label:
|
|
||||||
remove:
|
|
||||||
- conflict
|
|
||||||
- name: warn on needs-work
|
|
||||||
conditions:
|
|
||||||
- -draft
|
|
||||||
- -closed
|
|
||||||
- -merged
|
|
||||||
- or:
|
|
||||||
- check-failure=lint
|
|
||||||
- check-failure=test-linux
|
|
||||||
- check-failure=codecov/patch
|
|
||||||
- check-failure=codecov/project
|
|
||||||
- check-failure=snapshot
|
|
||||||
actions:
|
|
||||||
comment:
|
|
||||||
message: '@{{author}} this pull request has failed checks 🛠'
|
|
||||||
label:
|
|
||||||
add:
|
|
||||||
- needs-work
|
|
||||||
- name: remove needs-work label if not needed
|
|
||||||
conditions:
|
|
||||||
- check-success=lint
|
|
||||||
- check-success=test-linux
|
|
||||||
- check-success=codecov/patch
|
|
||||||
- check-success=codecov/project
|
|
||||||
- check-success=snapshot
|
|
||||||
actions:
|
|
||||||
label:
|
|
||||||
remove:
|
|
||||||
- needs-work
|
|
||||||
- name: Automatic maintainer assignment
|
|
||||||
conditions:
|
|
||||||
- '-approved-reviews-by=@nektos/act-maintainers'
|
|
||||||
- -draft
|
|
||||||
- -merged
|
|
||||||
- -closed
|
|
||||||
- -conflict
|
|
||||||
- check-success=lint
|
|
||||||
- check-success=test-linux
|
|
||||||
- check-success=codecov/patch
|
|
||||||
- check-success=codecov/project
|
|
||||||
- check-success=snapshot
|
|
||||||
actions:
|
|
||||||
request_reviews:
|
|
||||||
teams:
|
|
||||||
- '@nektos/act-maintainers'
|
|
||||||
- name: Automatic merge on approval
|
|
||||||
conditions: []
|
|
||||||
actions:
|
|
||||||
queue:
|
|
||||||
queue_rules:
|
|
||||||
- name: default
|
|
||||||
queue_conditions:
|
|
||||||
- '#changes-requested-reviews-by=0'
|
|
||||||
- or:
|
|
||||||
- 'approved-reviews-by=@nektos/act-committers'
|
|
||||||
- 'author~=^dependabot(|-preview)\[bot\]$'
|
|
||||||
- and:
|
|
||||||
- 'approved-reviews-by=@nektos/act-maintainers'
|
|
||||||
- '#approved-reviews-by>=2'
|
|
||||||
- and:
|
|
||||||
- 'author=@nektos/act-maintainers'
|
|
||||||
- 'approved-reviews-by=@nektos/act-maintainers'
|
|
||||||
- '#approved-reviews-by>=1'
|
|
||||||
- -draft
|
|
||||||
- -merged
|
|
||||||
- -closed
|
|
||||||
- check-success=lint
|
|
||||||
- check-success=test-linux
|
|
||||||
- check-success=codecov/patch
|
|
||||||
- check-success=codecov/project
|
|
||||||
- check-success=snapshot
|
|
||||||
merge_conditions:
|
|
||||||
- check-success=lint
|
|
||||||
- check-success=test-linux
|
|
||||||
- check-success=codecov/patch
|
|
||||||
- check-success=codecov/project
|
|
||||||
- check-success=snapshot
|
|
||||||
merge_method: squash
|
|
||||||
@@ -1,2 +0,0 @@
|
|||||||
**/testdata
|
|
||||||
pkg/runner/res
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
overrides:
|
|
||||||
- files: '*.yml'
|
|
||||||
options:
|
|
||||||
singleQuote: true
|
|
||||||
- files: '*.json'
|
|
||||||
options:
|
|
||||||
singleQuote: false
|
|
||||||
9
.vscode/extensions.json
vendored
9
.vscode/extensions.json
vendored
@@ -1,9 +0,0 @@
|
|||||||
{
|
|
||||||
"recommendations": [
|
|
||||||
"editorconfig.editorconfig",
|
|
||||||
"golang.go",
|
|
||||||
"davidanson.vscode-markdownlint",
|
|
||||||
"esbenp.prettier-vscode",
|
|
||||||
"redhat.vscode-yaml"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
14
.vscode/settings.json
vendored
14
.vscode/settings.json
vendored
@@ -1,14 +0,0 @@
|
|||||||
{
|
|
||||||
"go.lintTool": "golangci-lint",
|
|
||||||
"go.lintFlags": ["--fix"],
|
|
||||||
"go.testTimeout": "300s",
|
|
||||||
"[json]": {
|
|
||||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
|
||||||
},
|
|
||||||
"[markdown]": {
|
|
||||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
|
||||||
},
|
|
||||||
"[yaml]": {
|
|
||||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
10
AGENTS.md
Normal file
10
AGENTS.md
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
- Use `make help` to find available development targets
|
||||||
|
- Run `make fmt` to format `.go` files, and run `make lint-go` to lint them
|
||||||
|
- Run `make tidy` after any `go.mod` changes
|
||||||
|
- Run single go unit tests with `go test -run '^TestName$' ./modulepath/`
|
||||||
|
- Add the current year into the copyright header of new `.go` files
|
||||||
|
- Ensure no trailing whitespace in edited files
|
||||||
|
- Never force-push, amend, or squash unless asked. Use new commits and normal push for pull request updates
|
||||||
|
- Preserve existing code comments, do not remove or rewrite comments that are still relevant
|
||||||
|
- Include authorship attribution in issue and pull request comments
|
||||||
|
- Add `Co-Authored-By` lines to all commits, indicating name and model used
|
||||||
@@ -1 +0,0 @@
|
|||||||
* @nektos/act-maintainers
|
|
||||||
@@ -1,69 +0,0 @@
|
|||||||
# Contributing to Act
|
|
||||||
|
|
||||||
Help wanted! We'd love your contributions to Act. Please review the following guidelines before contributing. Also, feel free to propose changes to these guidelines by updating this file and submitting a pull request.
|
|
||||||
|
|
||||||
- [I have a question...](#questions)
|
|
||||||
- [I found a bug...](#bugs)
|
|
||||||
- [I have a feature request...](#features)
|
|
||||||
- [I have a contribution to share...](#process)
|
|
||||||
|
|
||||||
## <a id="questions"></a> Have a Question?
|
|
||||||
|
|
||||||
Please don't open a GitHub issue for questions about how to use `act`, as the goal is to use issues for managing bugs and feature requests. Issues that are related to general support will be closed and redirected to our gitter room.
|
|
||||||
|
|
||||||
For all support related questions, please ask the question in discussions: [actions-oss/act-cli](https://github.com/actions-oss/act-cli/discussions).
|
|
||||||
|
|
||||||
## <a id="bugs"></a> Found a Bug?
|
|
||||||
|
|
||||||
If you've identified a bug in `act`, please [submit an issue](#issue) to our GitHub repo: [actions-oss/act-cli](https://github.com/actions-oss/act-cli/issues/new). Please also feel free to submit a [Pull Request](#pr) with a fix for the bug!
|
|
||||||
|
|
||||||
## <a id="features"></a> Have a Feature Request?
|
|
||||||
|
|
||||||
All feature requests should start with [submitting an issue](#issue) documenting the user story and acceptance criteria. Again, feel free to submit a [Pull Request](#pr) with a proposed implementation of the feature.
|
|
||||||
|
|
||||||
## <a id="process"></a> Ready to Contribute
|
|
||||||
|
|
||||||
### <a id="issue"></a> Create an issue
|
|
||||||
|
|
||||||
Before submitting a new issue, please search the issues to make sure there isn't a similar issue doesn't already exist.
|
|
||||||
|
|
||||||
Assuming no existing issues exist, please ensure you include required information when submitting the issue to ensure we can quickly reproduce your issue.
|
|
||||||
|
|
||||||
We may have additional questions and will communicate through the GitHub issue, so please respond back to our questions to help reproduce and resolve the issue as quickly as possible.
|
|
||||||
|
|
||||||
New issues can be created with in our [GitHub repo](https://github.com/actions-oss/act-cli/issues/new).
|
|
||||||
|
|
||||||
### <a id="pr"></a>Pull Requests
|
|
||||||
|
|
||||||
Pull requests should target the `master` branch. Please also reference the issue from the description of the pull request using [special keyword syntax](https://help.github.com/articles/closing-issues-via-commit-messages/) to auto close the issue when the PR is merged. For example, include the phrase `fixes #14` in the PR description to have issue #14 auto close. Please send documentation updates for the [act user guide](https://actions-oss.github.io/act-docs/) to [actions-oss/act-docs](https://github.com/actions-oss/act-docs).
|
|
||||||
|
|
||||||
### <a id="style"></a> Styleguide
|
|
||||||
|
|
||||||
When submitting code, please make every effort to follow existing conventions and style in order to keep the code as readable as possible. Here are a few points to keep in mind:
|
|
||||||
|
|
||||||
- Please run `go fmt ./...` before committing to ensure code aligns with go standards.
|
|
||||||
- We use [`golangci-lint`](https://golangci-lint.run/) for linting Go code, run `golangci-lint run --fix` before submitting PR. Editors such as Visual Studio Code or JetBrains IntelliJ; with Go support plugin will offer `golangci-lint` automatically.
|
|
||||||
- There are additional linters and formatters for files such as Markdown documents or YAML/JSON:
|
|
||||||
- Please refer to the [Makefile](Makefile) or [`lint` job in our workflow](.github/workflows/checks.yml) to see how to those linters/formatters work.
|
|
||||||
- You can lint codebase by running `go run main.go -j lint --env RUN_LOCAL=true` or `act -j lint --env RUN_LOCAL=true`
|
|
||||||
- In `Makefile`, there are tools that require `npx` which is shipped with `nodejs`.
|
|
||||||
- Our `Makefile` exports `GITHUB_TOKEN` from `~/.config/github/token`, you have been warned.
|
|
||||||
- You can run `make pr` to cleanup dependencies, format/lint code and run tests.
|
|
||||||
- All dependencies must be defined in the `go.mod` file.
|
|
||||||
- Advanced IDEs and code editors (like VSCode) will take care of that, but to be sure, run `go mod tidy` to validate dependencies.
|
|
||||||
- For details on the approved style, check out [Effective Go](https://golang.org/doc/effective_go.html).
|
|
||||||
- Before running tests, please be aware that they are multi-architecture so for them to not fail, you need to run `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` before ([more info available in #765](https://github.com/nektos/act/issues/765)).
|
|
||||||
|
|
||||||
Also, consider the original design principles:
|
|
||||||
|
|
||||||
- **Polyglot** - There will be no prescribed language or framework for developing the microservices. The only requirement will be that the service will be run inside a container and exposed via an HTTP endpoint.
|
|
||||||
- **Cloud Provider** - At this point, the tool will assume AWS for the cloud provider and will not be written in a cloud agnostic manner. However, this does not preclude refactoring to add support for other providers at a later time.
|
|
||||||
- **Declarative** - All resource administration will be handled in a declarative vs. imperative manner. A file will be used to declared the desired state of the resources and the tool will simply assert the actual state matches the desired state. The tool will accomplish this by generating CloudFormation templates.
|
|
||||||
- **Stateless** - The tool will not maintain its own state. Rather, it will rely on the CloudFormation stacks to determine the state of the platform.
|
|
||||||
- **Secure** - All security will be managed by AWS IAM credentials. No additional authentication or authorization mechanisms will be introduced.
|
|
||||||
|
|
||||||
### License
|
|
||||||
|
|
||||||
By contributing your code, you agree to license your contribution under the terms of the [MIT License](LICENSE).
|
|
||||||
|
|
||||||
All files are released with the MIT license.
|
|
||||||
14
Dockerfile
14
Dockerfile
@@ -9,19 +9,19 @@ RUN apk add --no-cache make git
|
|||||||
ARG GOPROXY
|
ARG GOPROXY
|
||||||
ENV GOPROXY=${GOPROXY:-}
|
ENV GOPROXY=${GOPROXY:-}
|
||||||
|
|
||||||
COPY . /opt/src/act_runner
|
COPY . /opt/src/runner
|
||||||
WORKDIR /opt/src/act_runner
|
WORKDIR /opt/src/runner
|
||||||
|
|
||||||
RUN make clean && make build
|
RUN make clean && make build
|
||||||
|
|
||||||
### DIND VARIANT
|
### DIND VARIANT
|
||||||
#
|
#
|
||||||
#
|
#
|
||||||
FROM docker:28-dind AS dind
|
FROM docker:29-dind AS dind
|
||||||
|
|
||||||
RUN apk add --no-cache s6 bash git tzdata
|
RUN apk add --no-cache s6 bash git tzdata
|
||||||
|
|
||||||
COPY --from=builder /opt/src/act_runner/act_runner /usr/local/bin/act_runner
|
COPY --from=builder /opt/src/runner/gitea-runner /usr/local/bin/gitea-runner
|
||||||
COPY scripts/run.sh /usr/local/bin/run.sh
|
COPY scripts/run.sh /usr/local/bin/run.sh
|
||||||
COPY scripts/s6 /etc/s6
|
COPY scripts/s6 /etc/s6
|
||||||
|
|
||||||
@@ -32,12 +32,12 @@ ENTRYPOINT ["s6-svscan","/etc/s6"]
|
|||||||
### DIND-ROOTLESS VARIANT
|
### DIND-ROOTLESS VARIANT
|
||||||
#
|
#
|
||||||
#
|
#
|
||||||
FROM docker:28-dind-rootless AS dind-rootless
|
FROM docker:29-dind-rootless AS dind-rootless
|
||||||
|
|
||||||
USER root
|
USER root
|
||||||
RUN apk add --no-cache s6 bash git tzdata
|
RUN apk add --no-cache s6 bash git tzdata
|
||||||
|
|
||||||
COPY --from=builder /opt/src/act_runner/act_runner /usr/local/bin/act_runner
|
COPY --from=builder /opt/src/runner/gitea-runner /usr/local/bin/gitea-runner
|
||||||
COPY scripts/run.sh /usr/local/bin/run.sh
|
COPY scripts/run.sh /usr/local/bin/run.sh
|
||||||
COPY scripts/s6 /etc/s6
|
COPY scripts/s6 /etc/s6
|
||||||
|
|
||||||
@@ -56,7 +56,7 @@ ENTRYPOINT ["s6-svscan","/etc/s6"]
|
|||||||
FROM alpine AS basic
|
FROM alpine AS basic
|
||||||
RUN apk add --no-cache tini bash git tzdata
|
RUN apk add --no-cache tini bash git tzdata
|
||||||
|
|
||||||
COPY --from=builder /opt/src/act_runner/act_runner /usr/local/bin/act_runner
|
COPY --from=builder /opt/src/runner/gitea-runner /usr/local/bin/gitea-runner
|
||||||
COPY scripts/run.sh /usr/local/bin/run.sh
|
COPY scripts/run.sh /usr/local/bin/run.sh
|
||||||
|
|
||||||
VOLUME /data
|
VOLUME /data
|
||||||
|
|||||||
106
Makefile
106
Makefile
@@ -1,32 +1,30 @@
|
|||||||
DIST := dist
|
DIST := dist
|
||||||
EXECUTABLE := act_runner
|
EXECUTABLE := gitea-runner
|
||||||
GOFMT ?= gofumpt -l
|
|
||||||
DIST_DIRS := $(DIST)/binaries $(DIST)/release
|
DIST_DIRS := $(DIST)/binaries $(DIST)/release
|
||||||
GO ?= go
|
GO ?= go
|
||||||
SHASUM ?= shasum -a 256
|
SHASUM ?= shasum -a 256
|
||||||
HAS_GO = $(shell hash $(GO) > /dev/null 2>&1 && echo "GO" || echo "NOGO" )
|
HAS_GO = $(shell hash $(GO) > /dev/null 2>&1 && echo "GO" || echo "NOGO" )
|
||||||
XGO_PACKAGE ?= src.techknowlogick.com/xgo@latest
|
XGO_PACKAGE ?= src.techknowlogick.com/xgo@latest
|
||||||
XGO_VERSION := go-1.26.x
|
XGO_VERSION := go-1.26.x
|
||||||
GXZ_PAGAGE ?= github.com/ulikunitz/xz/cmd/gxz@v0.5.10
|
GXZ_PACKAGE ?= github.com/ulikunitz/xz/cmd/gxz@v0.5.10
|
||||||
|
|
||||||
LINUX_ARCHS ?= linux/amd64,linux/arm64
|
LINUX_ARCHS ?= linux/amd64,linux/arm64
|
||||||
DARWIN_ARCHS ?= darwin-12/amd64,darwin-12/arm64
|
DARWIN_ARCHS ?= darwin-12/amd64,darwin-12/arm64
|
||||||
WINDOWS_ARCHS ?= windows/amd64
|
WINDOWS_ARCHS ?= windows/amd64
|
||||||
GO_FMT_FILES := $(shell find . -type f -name "*.go" ! -name "generated.*")
|
|
||||||
GOFILES := $(shell find . -type f -name "*.go" -o -name "go.mod" ! -name "generated.*")
|
GOFILES := $(shell find . -type f -name "*.go" -o -name "go.mod" ! -name "generated.*")
|
||||||
|
|
||||||
DOCKER_IMAGE ?= gitea/act_runner
|
DOCKER_IMAGE ?= gitea/runner
|
||||||
DOCKER_TAG ?= nightly
|
DOCKER_TAG ?= nightly
|
||||||
DOCKER_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)
|
DOCKER_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)
|
||||||
DOCKER_ROOTLESS_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)-dind-rootless
|
DOCKER_ROOTLESS_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)-dind-rootless
|
||||||
|
|
||||||
GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.10.1
|
GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.11.4
|
||||||
GOVULNCHECK_PACKAGE ?= golang.org/x/vuln/cmd/govulncheck@v1
|
GOVULNCHECK_PACKAGE ?= golang.org/x/vuln/cmd/govulncheck@v1
|
||||||
|
|
||||||
ifneq ($(shell uname), Darwin)
|
STATIC ?=
|
||||||
EXTLDFLAGS = -extldflags "-static" $(null)
|
EXTLDFLAGS ?=
|
||||||
else
|
ifneq ($(STATIC),)
|
||||||
EXTLDFLAGS =
|
EXTLDFLAGS = -extldflags "-static"
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifeq ($(HAS_GO), GO)
|
ifeq ($(HAS_GO), GO)
|
||||||
@@ -68,19 +66,19 @@ else
|
|||||||
endif
|
endif
|
||||||
endif
|
endif
|
||||||
|
|
||||||
GO_PACKAGES_TO_VET ?= $(filter-out gitea.com/gitea/act_runner/cmd gitea.com/gitea/act_runner/internal/app/act-cli gitea.com/gitea/act_runner/internal/eval/functions gitea.com/gitea/act_runner/internal/eval/v2 gitea.com/gitea/act_runner/internal/expr gitea.com/gitea/act_runner/internal/model gitea.com/gitea/act_runner/internal/templateeval gitea.com/gitea/act_runner/pkg/artifactcache gitea.com/gitea/act_runner/pkg/artifacts gitea.com/gitea/act_runner/pkg/common gitea.com/gitea/act_runner/pkg/common/git gitea.com/gitea/act_runner/pkg/container gitea.com/gitea/act_runner/pkg/exprparser gitea.com/gitea/act_runner/pkg/filecollector gitea.com/gitea/act_runner/pkg/gh gitea.com/gitea/act_runner/pkg/model gitea.com/gitea/act_runner/pkg/runner gitea.com/gitea/act_runner/pkg/schema gitea.com/gitea/act_runner/pkg/tart gitea.com/gitea/act_runner/pkg/workflowpattern gitea.com/gitea/act_runner/pkg/lookpath gitea.com/gitea/act_runner/internal/pkg/client/mocks,$(shell $(GO) list ./...))
|
|
||||||
|
|
||||||
|
|
||||||
TAGS ?=
|
TAGS ?=
|
||||||
LDFLAGS ?= -X "gitea.com/gitea/act_runner/internal/pkg/ver.version=v$(RELASE_VERSION)"
|
LDFLAGS ?= -X "gitea.com/gitea/runner/internal/pkg/ver.version=v$(RELASE_VERSION)"
|
||||||
|
|
||||||
|
.PHONY: all
|
||||||
all: build
|
all: build
|
||||||
|
|
||||||
fmt:
|
.PHONY: help
|
||||||
@hash gofumpt > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
|
help: Makefile ## print Makefile help information.
|
||||||
$(GO) install mvdan.cc/gofumpt@latest; \
|
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m[TARGETS] default target: build\033[0m\n\n\033[35mTargets:\033[0m\n"} /^[0-9A-Za-z._-]+:.*?##/ { printf " \033[36m%-45s\033[0m %s\n", $$1, $$2 }' Makefile
|
||||||
fi
|
|
||||||
$(GOFMT) -w $(GO_FMT_FILES)
|
.PHONY: fmt
|
||||||
|
fmt: ## format the Go code
|
||||||
|
$(GO) run $(GOLANGCI_LINT_PACKAGE) fmt
|
||||||
|
|
||||||
.PHONY: go-check
|
.PHONY: go-check
|
||||||
go-check:
|
go-check:
|
||||||
@@ -88,28 +86,29 @@ go-check:
|
|||||||
$(eval MIN_GO_VERSION := $(shell printf "%03d%03d" $(shell echo '$(MIN_GO_VERSION_STR)' | tr '.' ' ')))
|
$(eval MIN_GO_VERSION := $(shell printf "%03d%03d" $(shell echo '$(MIN_GO_VERSION_STR)' | tr '.' ' ')))
|
||||||
$(eval GO_VERSION := $(shell printf "%03d%03d" $(shell $(GO) version | grep -Eo '[0-9]+\.[0-9]+' | tr '.' ' ');))
|
$(eval GO_VERSION := $(shell printf "%03d%03d" $(shell $(GO) version | grep -Eo '[0-9]+\.[0-9]+' | tr '.' ' ');))
|
||||||
@if [ "$(GO_VERSION)" -lt "$(MIN_GO_VERSION)" ]; then \
|
@if [ "$(GO_VERSION)" -lt "$(MIN_GO_VERSION)" ]; then \
|
||||||
echo "Act Runner requires Go $(MIN_GO_VERSION_STR) or greater to build. You can get it at https://go.dev/dl/"; \
|
echo "Gitea Runner requires Go $(MIN_GO_VERSION_STR) or greater to build. You can get it at https://go.dev/dl/"; \
|
||||||
exit 1; \
|
exit 1; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
.PHONY: fmt-check
|
.PHONY: fmt-check
|
||||||
fmt-check:
|
fmt-check: fmt
|
||||||
@hash gofumpt > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
|
@diff=$$(git diff --color=always); \
|
||||||
$(GO) install mvdan.cc/gofumpt@latest; \
|
|
||||||
fi
|
|
||||||
@diff=$$($(GOFMT) -d $(GO_FMT_FILES)); \
|
|
||||||
if [ -n "$$diff" ]; then \
|
if [ -n "$$diff" ]; then \
|
||||||
echo "Please run 'make fmt' and commit the result:"; \
|
echo "Please run 'make fmt' and commit the result:"; \
|
||||||
echo "$${diff}"; \
|
printf "%s" "$${diff}"; \
|
||||||
exit 1; \
|
exit 1; \
|
||||||
fi;
|
fi
|
||||||
|
|
||||||
.PHONY: deps-tools
|
.PHONY: deps-tools
|
||||||
deps-tools: ## install tool dependencies
|
deps-tools: ## install tool dependencies
|
||||||
$(GO) install $(GOVULNCHECK_PACKAGE)
|
$(GO) install $(GOLANGCI_LINT_PACKAGE) & \
|
||||||
|
$(GO) install $(GXZ_PACKAGE) & \
|
||||||
|
$(GO) install $(XGO_PACKAGE) & \
|
||||||
|
$(GO) install $(GOVULNCHECK_PACKAGE) & \
|
||||||
|
wait
|
||||||
|
|
||||||
.PHONY: lint
|
.PHONY: lint
|
||||||
lint: lint-go vet
|
lint: lint-go ## lint everything
|
||||||
|
|
||||||
.PHONY: lint-go
|
.PHONY: lint-go
|
||||||
lint-go: ## lint go files
|
lint-go: ## lint go files
|
||||||
@@ -124,64 +123,59 @@ security-check: deps-tools
|
|||||||
GOEXPERIMENT= $(GO) run $(GOVULNCHECK_PACKAGE) -show color ./... || true
|
GOEXPERIMENT= $(GO) run $(GOVULNCHECK_PACKAGE) -show color ./... || true
|
||||||
|
|
||||||
.PHONY: tidy
|
.PHONY: tidy
|
||||||
tidy:
|
tidy: ## run go mod tidy
|
||||||
$(GO) mod tidy
|
$(GO) mod tidy
|
||||||
|
|
||||||
.PHONY: tidy-check
|
.PHONY: tidy-check
|
||||||
tidy-check: tidy
|
tidy-check: tidy
|
||||||
@diff=$$(git diff -- go.mod go.sum); \
|
@diff=$$(git diff --color=always -- go.mod go.sum); \
|
||||||
if [ -n "$$diff" ]; then \
|
if [ -n "$$diff" ]; then \
|
||||||
echo "Please run 'make tidy' and commit the result:"; \
|
echo "Please run 'make tidy' and commit the result:"; \
|
||||||
echo "$${diff}"; \
|
printf "%s" "$${diff}"; \
|
||||||
exit 1; \
|
exit 1; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
test: fmt-check security-check
|
.PHONY: test
|
||||||
@$(GO) test -v -cover -coverprofile coverage.txt ./... && echo "\n==>\033[32m Ok\033[m\n" || exit 1
|
test: fmt-check security-check ## test everything
|
||||||
|
@$(GO) test -race -short -v -cover -coverprofile coverage.txt ./... && echo "\n==>\033[32m Ok\033[m\n" || exit 1
|
||||||
|
|
||||||
.PHONY: vet
|
.PHONY: install
|
||||||
vet:
|
install: $(GOFILES) ## install the runner binary via `go install`
|
||||||
@echo "Running go vet..."
|
$(GO) install -v -tags '$(TAGS)' -ldflags '-s -w $(EXTLDFLAGS) $(LDFLAGS)'
|
||||||
@$(GO) build code.gitea.io/gitea-vet
|
|
||||||
@$(GO) vet -vettool=gitea-vet $(GO_PACKAGES_TO_VET)
|
|
||||||
|
|
||||||
install: $(GOFILES)
|
.PHONY: build
|
||||||
$(GO) install -v -tags '$(TAGS)' -ldflags '$(EXTLDFLAGS)-s -w $(LDFLAGS)'
|
build: go-check $(EXECUTABLE) ## build the runner binary
|
||||||
|
|
||||||
build: go-check $(EXECUTABLE)
|
|
||||||
|
|
||||||
$(EXECUTABLE): $(GOFILES)
|
$(EXECUTABLE): $(GOFILES)
|
||||||
$(GO) build -v -tags '$(TAGS)' -ldflags '$(EXTLDFLAGS)-s -w $(LDFLAGS)' -o $@
|
$(GO) build -v -tags '$(TAGS)' -ldflags '-s -w $(EXTLDFLAGS) $(LDFLAGS)' -o $@
|
||||||
|
|
||||||
.PHONY: deps-backend
|
.PHONY: deps-backend
|
||||||
deps-backend:
|
deps-backend: ## install backend dependencies
|
||||||
$(GO) mod download
|
$(GO) mod download
|
||||||
$(GO) install $(GXZ_PAGAGE)
|
|
||||||
$(GO) install $(XGO_PACKAGE)
|
|
||||||
|
|
||||||
.PHONY: release
|
.PHONY: release
|
||||||
release: release-windows release-linux release-darwin release-copy release-compress release-check
|
release: release-windows release-linux release-darwin release-copy release-compress release-check ## build release artifacts
|
||||||
|
|
||||||
$(DIST_DIRS):
|
$(DIST_DIRS):
|
||||||
mkdir -p $(DIST_DIRS)
|
mkdir -p $(DIST_DIRS)
|
||||||
|
|
||||||
.PHONY: release-windows
|
.PHONY: release-windows
|
||||||
release-windows: | $(DIST_DIRS)
|
release-windows: | $(DIST_DIRS)
|
||||||
CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) run $(XGO_PACKAGE) -go $(XGO_VERSION) -buildmode exe -dest $(DIST)/binaries -tags 'netgo osusergo $(TAGS)' -ldflags '-linkmode external -extldflags "-static" $(LDFLAGS)' -targets '$(WINDOWS_ARCHS)' -out $(EXECUTABLE)-$(VERSION) .
|
CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) run $(XGO_PACKAGE) -go $(XGO_VERSION) -buildmode exe -dest $(DIST)/binaries -tags 'netgo osusergo $(TAGS)' -ldflags '-s -w -linkmode external -extldflags "-static" $(LDFLAGS)' -targets '$(WINDOWS_ARCHS)' -out $(EXECUTABLE)-$(VERSION) .
|
||||||
ifeq ($(CI),true)
|
ifeq ($(CI),true)
|
||||||
cp -r /build/* $(DIST)/binaries/
|
cp -r /build/* $(DIST)/binaries/
|
||||||
endif
|
endif
|
||||||
|
|
||||||
.PHONY: release-linux
|
.PHONY: release-linux
|
||||||
release-linux: | $(DIST_DIRS)
|
release-linux: | $(DIST_DIRS)
|
||||||
CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) run $(XGO_PACKAGE) -go $(XGO_VERSION) -dest $(DIST)/binaries -tags 'netgo osusergo $(TAGS)' -ldflags '-linkmode external -extldflags "-static" $(LDFLAGS)' -targets '$(LINUX_ARCHS)' -out $(EXECUTABLE)-$(VERSION) .
|
CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) run $(XGO_PACKAGE) -go $(XGO_VERSION) -dest $(DIST)/binaries -tags 'netgo osusergo $(TAGS)' -ldflags '-s -w -linkmode external -extldflags "-static" $(LDFLAGS)' -targets '$(LINUX_ARCHS)' -out $(EXECUTABLE)-$(VERSION) .
|
||||||
ifeq ($(CI),true)
|
ifeq ($(CI),true)
|
||||||
cp -r /build/* $(DIST)/binaries/
|
cp -r /build/* $(DIST)/binaries/
|
||||||
endif
|
endif
|
||||||
|
|
||||||
.PHONY: release-darwin
|
.PHONY: release-darwin
|
||||||
release-darwin: | $(DIST_DIRS)
|
release-darwin: | $(DIST_DIRS)
|
||||||
CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) run $(XGO_PACKAGE) -go $(XGO_VERSION) -dest $(DIST)/binaries -tags 'netgo osusergo $(TAGS)' -ldflags '$(LDFLAGS)' -targets '$(DARWIN_ARCHS)' -out $(EXECUTABLE)-$(VERSION) .
|
CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) run $(XGO_PACKAGE) -go $(XGO_VERSION) -dest $(DIST)/binaries -tags 'netgo osusergo $(TAGS)' -ldflags '-s -w $(LDFLAGS)' -targets '$(DARWIN_ARCHS)' -out $(EXECUTABLE)-$(VERSION) .
|
||||||
ifeq ($(CI),true)
|
ifeq ($(CI),true)
|
||||||
cp -r /build/* $(DIST)/binaries/
|
cp -r /build/* $(DIST)/binaries/
|
||||||
endif
|
endif
|
||||||
@@ -196,18 +190,20 @@ release-check: | $(DIST_DIRS)
|
|||||||
|
|
||||||
.PHONY: release-compress
|
.PHONY: release-compress
|
||||||
release-compress: | $(DIST_DIRS)
|
release-compress: | $(DIST_DIRS)
|
||||||
cd $(DIST)/release/; for file in `find . -type f -name "*"`; do echo "compressing $${file}" && $(GO) run $(GXZ_PAGAGE) -k -9 $${file}; done;
|
cd $(DIST)/release/; for file in `find . -type f -name "*"`; do echo "compressing $${file}" && $(GO) run $(GXZ_PACKAGE) -k -9 $${file}; done;
|
||||||
|
|
||||||
.PHONY: docker
|
.PHONY: docker
|
||||||
docker:
|
docker: ## build the docker image
|
||||||
if ! docker buildx version >/dev/null 2>&1; then \
|
if ! docker buildx version >/dev/null 2>&1; then \
|
||||||
ARG_DISABLE_CONTENT_TRUST=--disable-content-trust=false; \
|
ARG_DISABLE_CONTENT_TRUST=--disable-content-trust=false; \
|
||||||
fi; \
|
fi; \
|
||||||
docker build $${ARG_DISABLE_CONTENT_TRUST} -t $(DOCKER_REF) .
|
docker build $${ARG_DISABLE_CONTENT_TRUST} -t $(DOCKER_REF) .
|
||||||
|
|
||||||
clean:
|
.PHONY: clean
|
||||||
|
clean: ## delete binary and coverage files
|
||||||
$(GO) clean -x -i ./...
|
$(GO) clean -x -i ./...
|
||||||
rm -rf coverage.txt $(EXECUTABLE) $(DIST)
|
rm -rf coverage.txt $(EXECUTABLE) $(DIST)
|
||||||
|
|
||||||
version:
|
.PHONY: version
|
||||||
|
version: ## print the version
|
||||||
@echo $(VERSION)
|
@echo $(VERSION)
|
||||||
|
|||||||
69
README.md
69
README.md
@@ -1,6 +1,4 @@
|
|||||||
# act runner
|
# Gitea Runner
|
||||||
|
|
||||||
Act runner is a runner for Gitea based on [Gitea fork](https://gitea.com/gitea/act) of [act](https://github.com/nektos/act).
|
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
@@ -10,7 +8,7 @@ Docker Engine Community version is required for docker mode. To install Docker C
|
|||||||
|
|
||||||
### Download pre-built binary
|
### Download pre-built binary
|
||||||
|
|
||||||
Visit [here](https://dl.gitea.com/act_runner/) and download the right version for your platform.
|
Visit [here](https://dl.gitea.com/gitea-runner/) and download the right version for your platform.
|
||||||
|
|
||||||
### Build from source
|
### Build from source
|
||||||
|
|
||||||
@@ -26,8 +24,8 @@ make docker
|
|||||||
|
|
||||||
## Quickstart
|
## Quickstart
|
||||||
|
|
||||||
Actions are disabled by default, so you need to add the following to the configuration file of your Gitea instance to enable it:
|
Actions are disabled by default, so you need to add the following to the configuration file of your Gitea instance to enable it:
|
||||||
|
|
||||||
```ini
|
```ini
|
||||||
[actions]
|
[actions]
|
||||||
ENABLED=true
|
ENABLED=true
|
||||||
@@ -36,7 +34,7 @@ ENABLED=true
|
|||||||
### Register
|
### Register
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./act_runner register
|
./gitea-runner register
|
||||||
```
|
```
|
||||||
|
|
||||||
And you will be asked to input:
|
And you will be asked to input:
|
||||||
@@ -68,7 +66,7 @@ INFO Runner registered successfully.
|
|||||||
You can also register with command line arguments.
|
You can also register with command line arguments.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./act_runner register --instance http://192.168.8.8:3000 --token <my_runner_token> --no-interactive
|
./gitea-runner register --instance http://192.168.8.8:3000 --token <my_runner_token> --no-interactive
|
||||||
```
|
```
|
||||||
|
|
||||||
If the registry succeed, it will run immediately. Next time, you could run the runner directly.
|
If the registry succeed, it will run immediately. Next time, you could run the runner directly.
|
||||||
@@ -76,32 +74,69 @@ If the registry succeed, it will run immediately. Next time, you could run the r
|
|||||||
### Run
|
### Run
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./act_runner daemon
|
./gitea-runner daemon
|
||||||
```
|
```
|
||||||
|
|
||||||
### Run with docker
|
### Run with docker
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker run -e GITEA_INSTANCE_URL=https://your_gitea.com -e GITEA_RUNNER_REGISTRATION_TOKEN=<your_token> -v /var/run/docker.sock:/var/run/docker.sock --name my_runner gitea/act_runner:nightly
|
docker run -e GITEA_INSTANCE_URL=https://your_gitea.com -e GITEA_RUNNER_REGISTRATION_TOKEN=<your_token> -v /var/run/docker.sock:/var/run/docker.sock --name my_runner gitea/runner:nightly
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Mount a volume on `/data` if you want the registration file and optional config to survive container recreation (see [scripts/run.sh](scripts/run.sh)).
|
||||||
|
|
||||||
### Configuration
|
### Configuration
|
||||||
|
|
||||||
You can also configure the runner with a configuration file.
|
The runner is configured with a YAML file. Generate a starting point (this matches what ships in the tree):
|
||||||
The configuration file is a YAML file, you can generate a sample configuration file with `./act_runner generate-config`.
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./act_runner generate-config > config.yaml
|
./gitea-runner generate-config > config.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
You can specify the configuration file path with `-c`/`--config` argument.
|
Pass it with `-c` / `--config` on any command that loads configuration (`register`, `daemon`, `cache-server`):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./act_runner -c config.yaml register # register with config file
|
./gitea-runner -c config.yaml register
|
||||||
./act_runner -c config.yaml daemon # run with config file
|
./gitea-runner -c config.yaml daemon
|
||||||
|
./gitea-runner -c config.yaml cache-server
|
||||||
```
|
```
|
||||||
|
|
||||||
You can read the latest version of the configuration file online at [config.example.yaml](internal/pkg/config/config.example.yaml).
|
Every option is described in [config.example.yaml](internal/pkg/config/config.example.yaml) (the same content `generate-config` prints).
|
||||||
|
|
||||||
|
#### Without a config file
|
||||||
|
|
||||||
|
If you omit `-c`, built-in defaults apply (same as an empty YAML document). A small set of **deprecated** environment variables can still override parts of that default config, but **only when no `-c` path was given**; they are ignored if you use a config file:
|
||||||
|
|
||||||
|
| Variable | Effect |
|
||||||
|
| --- | --- |
|
||||||
|
| `GITEA_DEBUG` | If true, sets log level to `debug` |
|
||||||
|
| `GITEA_TRACE` | If true, sets log level to `trace` |
|
||||||
|
| `GITEA_RUNNER_CAPACITY` | Concurrent jobs (integer) |
|
||||||
|
| `GITEA_RUNNER_FILE` | Registration state file path (default `.runner`) |
|
||||||
|
| `GITEA_RUNNER_ENVIRON` | Extra job env vars as comma-separated `KEY:VALUE` pairs |
|
||||||
|
| `GITEA_RUNNER_ENV_FILE` | Path to an env file merged into job env (same idea as `runner.env_file` in YAML) |
|
||||||
|
|
||||||
|
Prefer a YAML file for all settings.
|
||||||
|
|
||||||
|
#### Registration vs config labels
|
||||||
|
|
||||||
|
If `runner.labels` is set in the YAML file, those labels are used during `register` and the `--labels` CLI flag is ignored.
|
||||||
|
|
||||||
|
#### External cache (`actions/cache`)
|
||||||
|
|
||||||
|
If `cache.external_server` is set, you must set `cache.external_secret` to the same value on this runner and on the standalone cache server. Run the server with `gitea-runner cache-server` using a config that defines `cache.external_secret` (and matching `cache.dir` / host / port as needed). Flags `--dir`, `--host`, and `--port` on `cache-server` override the file.
|
||||||
|
|
||||||
|
#### Official Docker image
|
||||||
|
|
||||||
|
Besides `GITEA_INSTANCE_URL` and `GITEA_RUNNER_REGISTRATION_TOKEN`, the image entrypoint supports optional variables such as `CONFIG_FILE` (passed through as `-c`), `GITEA_RUNNER_LABELS`, `GITEA_RUNNER_EPHEMERAL`, `GITEA_RUNNER_ONCE`, `GITEA_RUNNER_NAME`, `GITEA_MAX_REG_ATTEMPTS`, `RUNNER_STATE_FILE`, and `GITEA_RUNNER_REGISTRATION_TOKEN_FILE`. See [scripts/run.sh](scripts/run.sh) for exact behavior.
|
||||||
|
|
||||||
|
For a fuller container-oriented walkthrough, see [examples/docker](examples/docker/README.md).
|
||||||
|
|
||||||
|
When `container.bind_workdir` is enabled, stale task workspace directories can be cleaned while the runner is idle:
|
||||||
|
- directories older than `runner.workdir_cleanup_age` are removed (default: `24h`; set `0` to disable)
|
||||||
|
- cleanup runs every `runner.idle_cleanup_interval` (default: `10m`; set `0` to disable)
|
||||||
|
- only purely numeric subdirectories under `container.workdir_parent` are treated as task workspaces and may be removed
|
||||||
|
- cleanup assumes `container.workdir_parent` is not shared across multiple runners
|
||||||
|
|
||||||
### Example Deployments
|
### Example Deployments
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +0,0 @@
|
|||||||
VERIFICATION
|
|
||||||
Verification is intended to assist the Chocolatey moderators and community
|
|
||||||
in verifying that this package's contents are trustworthy.
|
|
||||||
|
|
||||||
Checksums: https://github.com/nektos/act/releases, in the checksums.txt file
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
<?xml version="1.0" encoding="utf-8"?>
|
|
||||||
<!-- Do not remove this test for UTF-8: if “Ω” doesn’t appear as greek uppercase omega letter enclosed in quotation marks, you should use an editor that supports UTF-8, not this one. -->
|
|
||||||
<package xmlns="http://schemas.microsoft.com/packaging/2015/06/nuspec.xsd">
|
|
||||||
<metadata>
|
|
||||||
<id>act-cli</id>
|
|
||||||
<version>0.0.0</version>
|
|
||||||
<packageSourceUrl>https://github.com/nektos/act</packageSourceUrl>
|
|
||||||
<owners>nektos</owners>
|
|
||||||
<title>act (GitHub Actions CLI)</title>
|
|
||||||
<authors>nektos</authors>
|
|
||||||
<projectUrl>https://github.com/nektos/act</projectUrl>
|
|
||||||
<iconUrl>https://raw.githubusercontent.com/wiki/nektos/act/img/logo-150.png</iconUrl>
|
|
||||||
<copyright>Nektos</copyright>
|
|
||||||
<licenseUrl>https://raw.githubusercontent.com/nektos/act/master/LICENSE</licenseUrl>
|
|
||||||
<requireLicenseAcceptance>true</requireLicenseAcceptance>
|
|
||||||
<projectSourceUrl>https://github.com/nektos/act</projectSourceUrl>
|
|
||||||
<docsUrl>https://raw.githubusercontent.com/nektos/act/master/README.md</docsUrl>
|
|
||||||
<bugTrackerUrl>https://github.com/nektos/act/issues</bugTrackerUrl>
|
|
||||||
<tags>act github-actions actions golang ci devops</tags>
|
|
||||||
<summary>Run your GitHub Actions locally 🚀</summary>
|
|
||||||
<description>Run your GitHub Actions locally 🚀</description>
|
|
||||||
</metadata>
|
|
||||||
<files>
|
|
||||||
<file src="tools/**" target="tools" />
|
|
||||||
</files>
|
|
||||||
</package>
|
|
||||||
22
act/LICENSE
Normal file
22
act/LICENSE
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2022 The Gitea Authors
|
||||||
|
Copyright (c) 2019
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2023 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
// Package artifactcache provides a cache handler for the runner.
|
// Package artifactcache provides a cache handler for the runner.
|
||||||
//
|
//
|
||||||
// Inspired by https://github.com/sp-ricard-valverde/github-act-cache-server
|
// Inspired by https://github.com/sp-ricard-valverde/github-act-cache-server
|
||||||
884
act/artifactcache/handler.go
Normal file
884
act/artifactcache/handler.go
Normal file
@@ -0,0 +1,884 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2023 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
package artifactcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"gitea.com/gitea/runner/act/common"
|
||||||
|
|
||||||
|
"github.com/julienschmidt/httprouter"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/timshannon/bolthold"
|
||||||
|
"go.etcd.io/bbolt"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
apiPath = "/_apis/artifactcache"
|
||||||
|
internalPath = "/_internal"
|
||||||
|
|
||||||
|
// artifactURLTTL bounds how long a signed artifactLocation URL stays valid.
|
||||||
|
// Short enough that a leaked URL is near-worthless; long enough to let the
|
||||||
|
// @actions/cache client download a big blob that was returned from /cache.
|
||||||
|
artifactURLTTL = 10 * time.Minute
|
||||||
|
)
|
||||||
|
|
||||||
|
type credKey struct{}
|
||||||
|
|
||||||
|
// JobCredential ties a per-job bearer token (ACTIONS_RUNTIME_TOKEN) to the
|
||||||
|
// repository that owns it. Every cache entry is stamped with Repo on
|
||||||
|
// reserve/commit and checked on read/write so one repo can never observe or
|
||||||
|
// poison another repo's cache, even from inside a container that reaches the
|
||||||
|
// cache server over the docker bridge network.
|
||||||
|
type JobCredential struct {
|
||||||
|
Repo string
|
||||||
|
}
|
||||||
|
|
||||||
|
// credEntry holds a registered job's credential along with an active
|
||||||
|
// registration count. RegisterJob is reference-counted so that if two tasks
|
||||||
|
// briefly share an ACTIONS_RUNTIME_TOKEN — e.g. a runner that retries a task
|
||||||
|
// after a crash before the old registration is revoked — the first task's
|
||||||
|
// revoker does not cut the second task's auth out from under it.
|
||||||
|
type credEntry struct {
|
||||||
|
cred JobCredential
|
||||||
|
refs int
|
||||||
|
}
|
||||||
|
|
||||||
|
type Handler struct {
|
||||||
|
dir string
|
||||||
|
storage *Storage
|
||||||
|
router *httprouter.Router
|
||||||
|
listener net.Listener
|
||||||
|
server *http.Server
|
||||||
|
logger logrus.FieldLogger
|
||||||
|
|
||||||
|
gcing atomic.Bool
|
||||||
|
gcAt time.Time
|
||||||
|
|
||||||
|
outboundIP string
|
||||||
|
|
||||||
|
// internalSecret guards /_internal/{register,revoke}. When set, a remote
|
||||||
|
// runner can use these endpoints to pre-register per-job
|
||||||
|
// ACTIONS_RUNTIME_TOKENs against this server, enabling the same
|
||||||
|
// per-job auth and repo scoping as the embedded handler over the
|
||||||
|
// network. Empty disables the control-plane entirely.
|
||||||
|
internalSecret string
|
||||||
|
|
||||||
|
// secret signs short-lived artifact download URLs. The @actions/cache
|
||||||
|
// toolkit does not send Authorization on the download request, so blob
|
||||||
|
// GETs authenticate via a per-URL HMAC signature with expiry rather than
|
||||||
|
// via the bearer token used for management endpoints.
|
||||||
|
secret []byte
|
||||||
|
|
||||||
|
credMu sync.RWMutex
|
||||||
|
creds map[string]*credEntry
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartHandler opens the on-disk cache store and starts the HTTP server.
|
||||||
|
//
|
||||||
|
// internalSecret, when non-empty, enables a control-plane API at
|
||||||
|
// /_internal/{register,revoke} that lets a remote runner pre-register the
|
||||||
|
// per-job ACTIONS_RUNTIME_TOKENs it expects this server to honor. The
|
||||||
|
// embedded in-process handler leaves it empty and registers tokens via the
|
||||||
|
// in-process RegisterJob method directly.
|
||||||
|
func StartHandler(dir, outboundIP string, port uint16, internalSecret string, logger logrus.FieldLogger) (*Handler, error) {
|
||||||
|
h := &Handler{
|
||||||
|
creds: make(map[string]*credEntry),
|
||||||
|
internalSecret: internalSecret,
|
||||||
|
}
|
||||||
|
|
||||||
|
if logger == nil {
|
||||||
|
discard := logrus.New()
|
||||||
|
discard.Out = io.Discard
|
||||||
|
logger = discard
|
||||||
|
}
|
||||||
|
logger = logger.WithField("module", "artifactcache")
|
||||||
|
h.logger = logger
|
||||||
|
|
||||||
|
if dir == "" {
|
||||||
|
home, err := os.UserHomeDir()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
dir = filepath.Join(home, ".cache", "actcache")
|
||||||
|
}
|
||||||
|
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
h.dir = dir
|
||||||
|
|
||||||
|
storage, err := NewStorage(filepath.Join(dir, "cache"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
h.storage = storage
|
||||||
|
|
||||||
|
if outboundIP != "" {
|
||||||
|
h.outboundIP = outboundIP
|
||||||
|
} else if ip := common.GetOutboundIP(); ip == nil {
|
||||||
|
return nil, errors.New("unable to determine outbound IP address")
|
||||||
|
} else {
|
||||||
|
h.outboundIP = ip.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
secret, err := loadOrCreateSecret(dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
h.secret = secret
|
||||||
|
|
||||||
|
router := httprouter.New()
|
||||||
|
router.GET(apiPath+"/cache", h.bearerAuth(h.find))
|
||||||
|
router.POST(apiPath+"/caches", h.bearerAuth(h.reserve))
|
||||||
|
router.PATCH(apiPath+"/caches/:id", h.bearerAuth(h.upload))
|
||||||
|
router.POST(apiPath+"/caches/:id", h.bearerAuth(h.commit))
|
||||||
|
router.POST(apiPath+"/clean", h.bearerAuth(h.clean))
|
||||||
|
// Artifact GET is signed via query-string HMAC because @actions/cache
|
||||||
|
// does not attach Authorization when downloading archiveLocation.
|
||||||
|
router.GET(apiPath+"/artifacts/:id", h.signedURLAuth(h.get))
|
||||||
|
// Control-plane: a remote runner registers/revokes per-job tokens so the
|
||||||
|
// cache API can authenticate them. Always wired so the routes exist; the
|
||||||
|
// handlers themselves 401 when internalSecret is unset.
|
||||||
|
router.POST(internalPath+"/register", h.internalAuth(h.internalRegister))
|
||||||
|
router.POST(internalPath+"/revoke", h.internalAuth(h.internalRevoke))
|
||||||
|
|
||||||
|
h.router = router
|
||||||
|
|
||||||
|
h.gcCache()
|
||||||
|
|
||||||
|
// Listen on all interfaces. Binding to outboundIP only would give no real
|
||||||
|
// security benefit (it is the LAN/internet-facing address either way) and
|
||||||
|
// can break Docker Desktop variants where the host's outbound IP is not
|
||||||
|
// routable from inside the container network. Authentication is enforced
|
||||||
|
// by the bearer middleware and per-repo scoping, not by reachability.
|
||||||
|
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
server := &http.Server{
|
||||||
|
ReadHeaderTimeout: 2 * time.Second,
|
||||||
|
Handler: router,
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
if err := server.Serve(listener); err != nil && errors.Is(err, net.ErrClosed) {
|
||||||
|
logger.Errorf("http serve: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
h.listener = listener
|
||||||
|
h.server = server
|
||||||
|
|
||||||
|
return h, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) ExternalURL() string {
|
||||||
|
// TODO: make the external url configurable if necessary
|
||||||
|
return fmt.Sprintf("http://%s:%d",
|
||||||
|
h.outboundIP,
|
||||||
|
h.listener.Addr().(*net.TCPAddr).Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterJob makes token a valid bearer credential for cache requests from
|
||||||
|
// the given repository and returns a function that removes it. The runner
|
||||||
|
// calls this at job start and defers the returned func so that the credential
|
||||||
|
// is only accepted while the job is running.
|
||||||
|
//
|
||||||
|
// Registrations are reference-counted: if a token is already registered, the
|
||||||
|
// existing repo is kept and the refcount is incremented. The entry is
|
||||||
|
// removed only when every revoker returned by RegisterJob has been called.
|
||||||
|
// This keeps a stray re-registration from silently revoking a live job.
|
||||||
|
func (h *Handler) RegisterJob(token, repo string) func() {
|
||||||
|
if h == nil || token == "" {
|
||||||
|
return func() {}
|
||||||
|
}
|
||||||
|
h.credMu.Lock()
|
||||||
|
if existing, ok := h.creds[token]; ok {
|
||||||
|
existing.refs++
|
||||||
|
} else {
|
||||||
|
h.creds[token] = &credEntry{
|
||||||
|
cred: JobCredential{Repo: repo},
|
||||||
|
refs: 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h.credMu.Unlock()
|
||||||
|
return func() {
|
||||||
|
h.credMu.Lock()
|
||||||
|
if entry, ok := h.creds[token]; ok {
|
||||||
|
entry.refs--
|
||||||
|
if entry.refs <= 0 {
|
||||||
|
delete(h.creds, token)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h.credMu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RevokeJob explicitly revokes one registration of token, mirroring one call
|
||||||
|
// of the closure returned by RegisterJob. Used by the control-plane endpoint
|
||||||
|
// so a remote runner can revoke without holding the closure.
|
||||||
|
func (h *Handler) RevokeJob(token string) {
|
||||||
|
if h == nil || token == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.credMu.Lock()
|
||||||
|
if entry, ok := h.creds[token]; ok {
|
||||||
|
entry.refs--
|
||||||
|
if entry.refs <= 0 {
|
||||||
|
delete(h.creds, token)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h.credMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) lookupCredential(token string) (JobCredential, bool) {
|
||||||
|
h.credMu.RLock()
|
||||||
|
entry, ok := h.creds[token]
|
||||||
|
h.credMu.RUnlock()
|
||||||
|
if !ok {
|
||||||
|
return JobCredential{}, false
|
||||||
|
}
|
||||||
|
return entry.cred, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadOrCreateSecret returns the 32-byte HMAC signing key for artifact URLs,
|
||||||
|
// persisted in dir/.secret so signed URLs handed out before a restart stay
|
||||||
|
// valid across the restart and so the standalone cache-server can be pointed
|
||||||
|
// at by config.Cache.ExternalServer without the URL rotating.
|
||||||
|
func loadOrCreateSecret(dir string) ([]byte, error) {
|
||||||
|
path := filepath.Join(dir, ".secret")
|
||||||
|
if data, err := os.ReadFile(path); err == nil {
|
||||||
|
if secret, err := hex.DecodeString(strings.TrimSpace(string(data))); err == nil && len(secret) >= 32 {
|
||||||
|
return secret, nil
|
||||||
|
}
|
||||||
|
} else if !os.IsNotExist(err) {
|
||||||
|
return nil, fmt.Errorf("read cache secret: %w", err)
|
||||||
|
}
|
||||||
|
secret := make([]byte, 32)
|
||||||
|
if _, err := rand.Read(secret); err != nil {
|
||||||
|
return nil, fmt.Errorf("generate cache secret: %w", err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(path, []byte(hex.EncodeToString(secret)), 0o600); err != nil {
|
||||||
|
return nil, fmt.Errorf("write cache secret: %w", err)
|
||||||
|
}
|
||||||
|
return secret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) Close() error {
|
||||||
|
if h == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var retErr error
|
||||||
|
if h.server != nil {
|
||||||
|
err := h.server.Close()
|
||||||
|
if err != nil {
|
||||||
|
retErr = err
|
||||||
|
}
|
||||||
|
h.server = nil
|
||||||
|
}
|
||||||
|
if h.listener != nil {
|
||||||
|
err := h.listener.Close()
|
||||||
|
if errors.Is(err, net.ErrClosed) {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
retErr = err
|
||||||
|
}
|
||||||
|
h.listener = nil
|
||||||
|
}
|
||||||
|
return retErr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) openDB() (*bolthold.Store, error) {
|
||||||
|
return bolthold.Open(filepath.Join(h.dir, "bolt.db"), 0o644, &bolthold.Options{
|
||||||
|
Encoder: json.Marshal,
|
||||||
|
Decoder: json.Unmarshal,
|
||||||
|
Options: &bbolt.Options{
|
||||||
|
Timeout: 5 * time.Second,
|
||||||
|
NoGrowSync: bbolt.DefaultOptions.NoGrowSync,
|
||||||
|
FreelistType: bbolt.DefaultOptions.FreelistType,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// GET /_apis/artifactcache/cache
// find answers a cache probe: it resolves the best matching completed entry
// for the requesting repository and, on a hit, returns a signed download URL.
func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	cred := credFromContext(r.Context())
	keys := strings.Split(r.URL.Query().Get("keys"), ",")
	// cache keys are case insensitive
	for i, key := range keys {
		keys[i] = strings.ToLower(key)
	}
	version := r.URL.Query().Get("version")

	db, err := h.openDB()
	if err != nil {
		h.responseJSON(w, r, 500, err)
		return
	}
	defer db.Close()

	cache, err := findCache(db, cred.Repo, keys, version)
	if err != nil {
		h.responseJSON(w, r, 500, err)
		return
	}
	// findCache returns (nil, nil) for "no match": report 204 (cache miss).
	if cache == nil {
		h.responseJSON(w, r, 204)
		return
	}

	// The DB record can outlive the archive on disk; drop the stale record
	// and report a miss rather than handing out a dead download link.
	if ok, err := h.storage.Exist(cache.ID); err != nil {
		h.responseJSON(w, r, 500, err)
		return
	} else if !ok {
		_ = db.Delete(cache.ID, cache)
		h.responseJSON(w, r, 204)
		return
	}
	h.responseJSON(w, r, 200, map[string]any{
		"result":          "hit",
		"archiveLocation": h.signedArtifactURL(cache.ID, time.Now().Add(artifactURLTTL)),
		"cacheKey":        cache.Key,
	})
}
|
||||||
|
|
||||||
|
// POST /_apis/artifactcache/caches
// reserve creates a new, incomplete cache entry for the requested key/version
// and returns its numeric ID, which the client uses for the subsequent
// upload and commit calls. The entry is scoped to the caller's repository.
func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	cred := credFromContext(r.Context())
	api := &Request{}
	if err := json.NewDecoder(r.Body).Decode(api); err != nil {
		h.responseJSON(w, r, 400, err)
		return
	}
	// cache keys are case insensitive
	api.Key = strings.ToLower(api.Key)

	cache := api.ToCache()
	// Bind the entry to the requesting repository so other repos cannot
	// read or overwrite it.
	cache.Repo = cred.Repo
	db, err := h.openDB()
	if err != nil {
		h.responseJSON(w, r, 500, err)
		return
	}
	defer db.Close()

	now := time.Now().Unix()
	cache.CreatedAt = now
	cache.UsedAt = now
	if err := insertCache(db, cache); err != nil {
		h.responseJSON(w, r, 500, err)
		return
	}
	h.responseJSON(w, r, 200, map[string]any{
		"cacheId": cache.ID,
	})
}
|
||||||
|
|
||||||
|
// PATCH /_apis/artifactcache/caches/:id
|
||||||
|
func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||||
|
cred := credFromContext(r.Context())
|
||||||
|
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
h.responseJSON(w, r, 400, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cache := &Cache{}
|
||||||
|
db, err := h.openDB()
|
||||||
|
if err != nil {
|
||||||
|
h.responseJSON(w, r, 500, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer db.Close()
|
||||||
|
if err := db.Get(id, cache); err != nil {
|
||||||
|
if errors.Is(err, bolthold.ErrNotFound) {
|
||||||
|
h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.responseJSON(w, r, 500, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if cache.Repo != cred.Repo {
|
||||||
|
h.responseJSON(w, r, 403, fmt.Errorf("cache %d: forbidden", id))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if cache.Complete {
|
||||||
|
h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
db.Close()
|
||||||
|
start, _, err := parseContentRange(r.Header.Get("Content-Range"))
|
||||||
|
if err != nil {
|
||||||
|
h.responseJSON(w, r, 400, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.storage.Write(cache.ID, start, r.Body); err != nil {
|
||||||
|
h.responseJSON(w, r, 500, err)
|
||||||
|
}
|
||||||
|
h.useCache(id)
|
||||||
|
h.responseJSON(w, r, 200)
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /_apis/artifactcache/caches/:id
// commit finalizes a reserved cache entry after all chunks have been
// uploaded: it validates ownership, asks storage to assemble the archive,
// and marks the DB record complete so find() can return it.
func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	cred := credFromContext(r.Context())
	id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
	if err != nil {
		h.responseJSON(w, r, 400, err)
		return
	}

	cache := &Cache{}
	db, err := h.openDB()
	if err != nil {
		h.responseJSON(w, r, 500, err)
		return
	}
	defer db.Close()
	if err := db.Get(id, cache); err != nil {
		if errors.Is(err, bolthold.ErrNotFound) {
			h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
			return
		}
		h.responseJSON(w, r, 500, err)
		return
	}

	// Only the repository that reserved this entry may commit it.
	if cache.Repo != cred.Repo {
		h.responseJSON(w, r, 403, fmt.Errorf("cache %d: forbidden", id))
		return
	}

	if cache.Complete {
		h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
		return
	}

	// Release the bolt file lock while storage assembles the archive, which
	// can take a while. The defer above re-closes this first handle,
	// assumed harmless — TODO confirm bolthold tolerates double Close.
	db.Close()

	size, err := h.storage.Commit(cache.ID, cache.Size)
	if err != nil {
		h.responseJSON(w, r, 500, err)
		return
	}
	// write real size back to cache, it may be different from the current value when the request doesn't specify it.
	cache.Size = size

	// Reopen the DB to persist the completion flag; the defer closes this
	// second handle.
	db, err = h.openDB()
	if err != nil {
		h.responseJSON(w, r, 500, err)
		return
	}
	defer db.Close()

	cache.Complete = true
	if err := db.Update(cache.ID, cache); err != nil {
		h.responseJSON(w, r, 500, err)
		return
	}

	h.responseJSON(w, r, 200)
}
|
||||||
|
|
||||||
|
// GET /_apis/artifactcache/artifacts/:id
|
||||||
|
// Authenticated via signed URL (see signedURLAuth), not bearer, because the
|
||||||
|
// @actions/cache toolkit downloads archiveLocation without Authorization.
|
||||||
|
// Repository scoping is already enforced at find() time; the signature binds
|
||||||
|
// the URL to the specific cache ID and an expiry.
|
||||||
|
func (h *Handler) get(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||||
|
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
h.responseJSON(w, r, 400, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.useCache(id)
|
||||||
|
h.storage.Serve(w, r, uint64(id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /_apis/artifactcache/clean
// clean acknowledges the toolkit's cleanup request without deleting
// anything; real cleanup happens asynchronously via gcCache.
func (h *Handler) clean(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	// TODO: don't support force deleting cache entries
	// see: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries

	h.responseJSON(w, r, 200)
}
|
||||||
|
|
||||||
|
// bearerAuth resolves ACTIONS_RUNTIME_TOKEN against the set of currently
// registered jobs. A match attaches the job's JobCredential to the request
// context; a miss returns 401 before the handler body runs.
func (h *Handler) bearerAuth(handler httprouter.Handle) httprouter.Handle {
	return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
		h.logger.Debugf("%s %s", r.Method, r.URL.Path)
		token := bearerToken(r)
		if token == "" {
			h.responseJSON(w, r, http.StatusUnauthorized, errors.New("missing bearer token"))
			return
		}
		cred, ok := h.lookupCredential(token)
		if !ok {
			h.responseJSON(w, r, http.StatusUnauthorized, errors.New("unknown bearer token"))
			return
		}
		// Expose the credential to the wrapped handler via the context.
		ctx := context.WithValue(r.Context(), credKey{}, cred)
		handler(w, r.WithContext(ctx), params)
		// Opportunistic GC after serving; gcCache self-throttles via
		// h.gcing and h.gcAt.
		go h.gcCache()
	}
}
|
||||||
|
|
||||||
|
// signedURLAuth authenticates artifact downloads via the exp/sig query
// parameters produced by signedArtifactURL: it checks the expiry, then
// verifies the HMAC over (cache ID, exp) in constant time before invoking
// the wrapped handler.
func (h *Handler) signedURLAuth(handler httprouter.Handle) httprouter.Handle {
	return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
		h.logger.Debugf("%s %s", r.Method, r.URL.Path)
		id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
		if err != nil {
			h.responseJSON(w, r, 400, err)
			return
		}
		expStr := r.URL.Query().Get("exp")
		sig := r.URL.Query().Get("sig")
		if expStr == "" || sig == "" {
			h.responseJSON(w, r, http.StatusUnauthorized, errors.New("missing signature"))
			return
		}
		exp, err := strconv.ParseInt(expStr, 10, 64)
		if err != nil {
			h.responseJSON(w, r, http.StatusUnauthorized, errors.New("invalid expiry"))
			return
		}
		if time.Now().Unix() > exp {
			h.responseJSON(w, r, http.StatusUnauthorized, errors.New("signature expired"))
			return
		}
		// exp is covered by the signature, so a tampered expiry fails the
		// comparison below.
		expected := h.computeSignature(id, exp)
		if !hmac.Equal([]byte(sig), []byte(expected)) {
			h.responseJSON(w, r, http.StatusUnauthorized, errors.New("bad signature"))
			return
		}
		handler(w, r, params)
		// Opportunistic GC after serving; gcCache self-throttles.
		go h.gcCache()
	}
}
|
||||||
|
|
||||||
|
// internalAuth gates the control-plane endpoints. The bearer must
|
||||||
|
// constant-time-equal the configured internalSecret. If the secret is empty,
|
||||||
|
// the control-plane is disabled and every request gets 404 — which matches
|
||||||
|
// the upstream nektos/act behavior of "the route does not exist".
|
||||||
|
func (h *Handler) internalAuth(handler httprouter.Handle) httprouter.Handle {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||||
|
if h.internalSecret == "" {
|
||||||
|
http.NotFound(w, r)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
token := bearerToken(r)
|
||||||
|
if token == "" || !hmac.Equal([]byte(token), []byte(h.internalSecret)) {
|
||||||
|
h.responseJSON(w, r, http.StatusUnauthorized, errors.New("internal: bad secret"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
handler(w, r, params)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// internalRegisterBody is the JSON payload for POST /_internal/register.
type internalRegisterBody struct {
	// Token is the job's runtime token to register (required).
	Token string `json:"token"`
	// Repo scopes the registered credential to one repository.
	Repo string `json:"repo"`
}
|
||||||
|
|
||||||
|
// internalRevokeBody is the JSON payload for POST /_internal/revoke.
type internalRevokeBody struct {
	// Token identifies the registered credential to revoke (required).
	Token string `json:"token"`
}
|
||||||
|
|
||||||
|
// POST /_internal/register
|
||||||
|
func (h *Handler) internalRegister(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||||
|
var body internalRegisterBody
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
|
||||||
|
h.responseJSON(w, r, http.StatusBadRequest, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if body.Token == "" {
|
||||||
|
h.responseJSON(w, r, http.StatusBadRequest, errors.New("token is required"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.RegisterJob(body.Token, body.Repo)
|
||||||
|
h.responseJSON(w, r, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /_internal/revoke
|
||||||
|
func (h *Handler) internalRevoke(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||||
|
var body internalRevokeBody
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
|
||||||
|
h.responseJSON(w, r, http.StatusBadRequest, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if body.Token == "" {
|
||||||
|
h.responseJSON(w, r, http.StatusBadRequest, errors.New("token is required"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.RevokeJob(body.Token)
|
||||||
|
h.responseJSON(w, r, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func bearerToken(r *http.Request) string {
|
||||||
|
auth := r.Header.Get("Authorization")
|
||||||
|
const prefix = "Bearer "
|
||||||
|
if len(auth) > len(prefix) && strings.EqualFold(auth[:len(prefix)], prefix) {
|
||||||
|
return auth[len(prefix):]
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func credFromContext(ctx context.Context) JobCredential {
|
||||||
|
if cred, ok := ctx.Value(credKey{}).(JobCredential); ok {
|
||||||
|
return cred
|
||||||
|
}
|
||||||
|
return JobCredential{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) computeSignature(cacheID, exp int64) string {
|
||||||
|
mac := hmac.New(sha256.New, h.secret)
|
||||||
|
fmt.Fprintf(mac, "%d:%d", cacheID, exp)
|
||||||
|
return hex.EncodeToString(mac.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) signedArtifactURL(cacheID uint64, exp time.Time) string {
|
||||||
|
expUnix := exp.Unix()
|
||||||
|
sig := h.computeSignature(int64(cacheID), expUnix)
|
||||||
|
q := url.Values{}
|
||||||
|
q.Set("exp", strconv.FormatInt(expUnix, 10))
|
||||||
|
q.Set("sig", sig)
|
||||||
|
return fmt.Sprintf("%s%s/artifacts/%d?%s", h.ExternalURL(), apiPath, cacheID, q.Encode())
|
||||||
|
}
|
||||||
|
|
||||||
|
// findCache returns the best completed cache entry in repo matching any of
// keys (tried in order) at the given version. An exact key match beats
// prefix matches; within a match the most recently created entry wins.
// if not found, return (nil, nil) instead of an error.
func findCache(db *bolthold.Store, repo string, keys []string, version string) (*Cache, error) {
	cache := &Cache{}
	for _, prefix := range keys {
		// if a key in the list matches exactly, don't return partial matches
		// (err == nil means an exact hit; any error other than NotFound is
		// fatal, so both cases enter this branch).
		if err := db.FindOne(cache,
			bolthold.Where("Repo").Eq(repo).
				And("Key").Eq(prefix).
				And("Version").Eq(version).
				And("Complete").Eq(true).
				SortBy("CreatedAt").Reverse()); err == nil || !errors.Is(err, bolthold.ErrNotFound) {
			if err != nil {
				return nil, fmt.Errorf("find cache: %w", err)
			}
			return cache, nil
		}
		// No exact hit: fall back to a prefix match on the key.
		prefixPattern := "^" + regexp.QuoteMeta(prefix)
		re, err := regexp.Compile(prefixPattern)
		if err != nil {
			// QuoteMeta output should always compile; skip the key if not.
			continue
		}
		if err := db.FindOne(cache,
			bolthold.Where("Repo").Eq(repo).
				And("Key").RegExp(re).
				And("Version").Eq(version).
				And("Complete").Eq(true).
				SortBy("CreatedAt").Reverse()); err != nil {
			if errors.Is(err, bolthold.ErrNotFound) {
				continue
			}
			return nil, fmt.Errorf("find cache: %w", err)
		}
		return cache, nil
	}
	return nil, nil //nolint:nilnil // pre-existing issue from nektos/act
}
|
||||||
|
|
||||||
|
func insertCache(db *bolthold.Store, cache *Cache) error {
|
||||||
|
if err := db.Insert(bolthold.NextSequence(), cache); err != nil {
|
||||||
|
return fmt.Errorf("insert cache: %w", err)
|
||||||
|
}
|
||||||
|
// write back id to db
|
||||||
|
if err := db.Update(cache.ID, cache); err != nil {
|
||||||
|
return fmt.Errorf("write back id to db: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) useCache(id int64) {
|
||||||
|
db, err := h.openDB()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer db.Close()
|
||||||
|
cache := &Cache{}
|
||||||
|
if err := db.Get(id, cache); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
cache.UsedAt = time.Now().Unix()
|
||||||
|
_ = db.Update(cache.ID, cache)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retention windows used by gcCache.
const (
	keepUsed   = 30 * 24 * time.Hour // entries older than this (CreatedAt) are removed outright
	keepUnused = 7 * 24 * time.Hour  // entries not used (UsedAt) within this window are removed
	keepTemp   = 5 * time.Minute     // incomplete (reserved, never committed) entries live this long
	keepOld    = 5 * time.Minute     // superseded duplicates used within this window survive dedup
)
|
||||||
|
|
||||||
|
func (h *Handler) gcCache() {
|
||||||
|
if h.gcing.Load() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !h.gcing.CompareAndSwap(false, true) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer h.gcing.Store(false)
|
||||||
|
|
||||||
|
if time.Since(h.gcAt) < time.Hour {
|
||||||
|
h.logger.Debugf("skip gc: %v", h.gcAt.String())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.gcAt = time.Now()
|
||||||
|
h.logger.Debugf("gc: %v", h.gcAt.String())
|
||||||
|
|
||||||
|
db, err := h.openDB()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
// Remove the caches which are not completed for a while, they are most likely to be broken.
|
||||||
|
var caches []*Cache
|
||||||
|
if err := db.Find(&caches, bolthold.
|
||||||
|
Where("UsedAt").Lt(time.Now().Add(-keepTemp).Unix()).
|
||||||
|
And("Complete").Eq(false),
|
||||||
|
); err != nil {
|
||||||
|
h.logger.Warnf("find caches: %v", err)
|
||||||
|
} else {
|
||||||
|
for _, cache := range caches {
|
||||||
|
h.storage.Remove(cache.ID)
|
||||||
|
if err := db.Delete(cache.ID, cache); err != nil {
|
||||||
|
h.logger.Warnf("delete cache: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
h.logger.Infof("deleted cache: %+v", cache)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the old caches which have not been used recently.
|
||||||
|
caches = caches[:0]
|
||||||
|
if err := db.Find(&caches, bolthold.
|
||||||
|
Where("UsedAt").Lt(time.Now().Add(-keepUnused).Unix()),
|
||||||
|
); err != nil {
|
||||||
|
h.logger.Warnf("find caches: %v", err)
|
||||||
|
} else {
|
||||||
|
for _, cache := range caches {
|
||||||
|
h.storage.Remove(cache.ID)
|
||||||
|
if err := db.Delete(cache.ID, cache); err != nil {
|
||||||
|
h.logger.Warnf("delete cache: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
h.logger.Infof("deleted cache: %+v", cache)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the old caches which are too old.
|
||||||
|
caches = caches[:0]
|
||||||
|
if err := db.Find(&caches, bolthold.
|
||||||
|
Where("CreatedAt").Lt(time.Now().Add(-keepUsed).Unix()),
|
||||||
|
); err != nil {
|
||||||
|
h.logger.Warnf("find caches: %v", err)
|
||||||
|
} else {
|
||||||
|
for _, cache := range caches {
|
||||||
|
h.storage.Remove(cache.ID)
|
||||||
|
if err := db.Delete(cache.ID, cache); err != nil {
|
||||||
|
h.logger.Warnf("delete cache: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
h.logger.Infof("deleted cache: %+v", cache)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the old caches with the same key and version within the same
|
||||||
|
// repository, keep the latest one. Aggregation must include Repo so two
|
||||||
|
// repos that happen to share a (key, version) do not evict each other —
|
||||||
|
// otherwise per-repo scoping holds for reads but one repo can age
|
||||||
|
// another out after keepOld.
|
||||||
|
// Also keep the olds which have been used recently for a while in case of the cache is still in use.
|
||||||
|
if results, err := db.FindAggregate(
|
||||||
|
&Cache{},
|
||||||
|
bolthold.Where("Complete").Eq(true),
|
||||||
|
"Repo", "Key", "Version",
|
||||||
|
); err != nil {
|
||||||
|
h.logger.Warnf("find aggregate caches: %v", err)
|
||||||
|
} else {
|
||||||
|
for _, result := range results {
|
||||||
|
if result.Count() <= 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
result.Sort("CreatedAt")
|
||||||
|
caches = caches[:0]
|
||||||
|
result.Reduction(&caches)
|
||||||
|
for _, cache := range caches[:len(caches)-1] {
|
||||||
|
if time.Since(time.Unix(cache.UsedAt, 0)) < keepOld {
|
||||||
|
// Keep it since it has been used recently, even if it's old.
|
||||||
|
// Or it could break downloading in process.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
h.storage.Remove(cache.ID)
|
||||||
|
if err := db.Delete(cache.ID, cache); err != nil {
|
||||||
|
h.logger.Warnf("delete cache: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
h.logger.Infof("deleted cache: %+v", cache)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) responseJSON(w http.ResponseWriter, r *http.Request, code int, v ...any) {
|
||||||
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||||
|
var data []byte
|
||||||
|
if len(v) == 0 || v[0] == nil {
|
||||||
|
data, _ = json.Marshal(struct{}{})
|
||||||
|
} else if err, ok := v[0].(error); ok {
|
||||||
|
h.logger.Errorf("%v %v: %v", r.Method, r.URL.Path, err)
|
||||||
|
data, _ = json.Marshal(map[string]any{
|
||||||
|
"error": err.Error(),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
data, _ = json.Marshal(v[0])
|
||||||
|
}
|
||||||
|
w.WriteHeader(code)
|
||||||
|
_, _ = w.Write(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseContentRange extracts the start and stop offsets from a header of
// the form "bytes <start>-<stop>/<total>"; only this shape is supported and
// the total is ignored.
func parseContentRange(s string) (int64, int64, error) {
	// support the format like "bytes 11-22/*" only
	rangePart, _, _ := strings.Cut(strings.TrimPrefix(s, "bytes "), "/")
	startStr, stopStr, _ := strings.Cut(rangePart, "-")

	start, err := strconv.ParseInt(startStr, 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("parse %q: %w", rangePart, err)
	}
	stop, err := strconv.ParseInt(stopStr, 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("parse %q: %w", rangePart, err)
	}
	return start, stop, nil
}
|
||||||
1185
act/artifactcache/handler_test.go
Normal file
1185
act/artifactcache/handler_test.go
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2023 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package artifactcache
|
package artifactcache
|
||||||
|
|
||||||
type Request struct {
|
type Request struct {
|
||||||
@@ -25,6 +29,7 @@ func (c *Request) ToCache() *Cache {
|
|||||||
|
|
||||||
type Cache struct {
|
type Cache struct {
|
||||||
ID uint64 `json:"id" boltholdKey:"ID"`
|
ID uint64 `json:"id" boltholdKey:"ID"`
|
||||||
|
Repo string `json:"repo" boltholdIndex:"Repo"`
|
||||||
Key string `json:"key" boltholdIndex:"Key"`
|
Key string `json:"key" boltholdIndex:"Key"`
|
||||||
Version string `json:"version" boltholdIndex:"Version"`
|
Version string `json:"version" boltholdIndex:"Version"`
|
||||||
Size int64 `json:"cacheSize"`
|
Size int64 `json:"cacheSize"`
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2023 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package artifactcache
|
package artifactcache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2021 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package artifacts
|
package artifacts
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -13,9 +17,9 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/julienschmidt/httprouter"
|
"gitea.com/gitea/runner/act/common"
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
"github.com/julienschmidt/httprouter"
|
||||||
)
|
)
|
||||||
|
|
||||||
type FileContainerResourceURL struct {
|
type FileContainerResourceURL struct {
|
||||||
@@ -86,7 +90,7 @@ func (fwfs readWriteFSImpl) OpenAppendable(name string) (WritableFile, error) {
|
|||||||
|
|
||||||
var gzipExtension = ".gz__"
|
var gzipExtension = ".gz__"
|
||||||
|
|
||||||
func safeResolve(baseDir string, relPath string) string {
|
func safeResolve(baseDir, relPath string) string {
|
||||||
return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath)))
|
return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath)))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -132,11 +136,11 @@ func uploads(router *httprouter.Router, baseDir string, fsys WriteFS) {
|
|||||||
|
|
||||||
writer, ok := file.(io.Writer)
|
writer, ok := file.(io.Writer)
|
||||||
if !ok {
|
if !ok {
|
||||||
panic(errors.New("file is not writable"))
|
panic(errors.New("File is not writable"))
|
||||||
}
|
}
|
||||||
|
|
||||||
if req.Body == nil {
|
if req.Body == nil {
|
||||||
panic(errors.New("no body given"))
|
panic(errors.New("No body given"))
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = io.Copy(writer, req.Body)
|
_, err = io.Copy(writer, req.Body)
|
||||||
@@ -157,7 +161,7 @@ func uploads(router *httprouter.Router, baseDir string, fsys WriteFS) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
router.PATCH("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
|
router.PATCH("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
|
||||||
json, err := json.Marshal(ResponseMessage{
|
json, err := json.Marshal(ResponseMessage{
|
||||||
Message: "success",
|
Message: "success",
|
||||||
})
|
})
|
||||||
@@ -211,7 +215,7 @@ func downloads(router *httprouter.Router, baseDir string, fsys fs.FS) {
|
|||||||
safePath := safeResolve(baseDir, filepath.Join(container, itemPath))
|
safePath := safeResolve(baseDir, filepath.Join(container, itemPath))
|
||||||
|
|
||||||
var files []ContainerItem
|
var files []ContainerItem
|
||||||
err := fs.WalkDir(fsys, safePath, func(path string, entry fs.DirEntry, _ error) error {
|
err := fs.WalkDir(fsys, safePath, func(path string, entry fs.DirEntry, err error) error {
|
||||||
if !entry.IsDir() {
|
if !entry.IsDir() {
|
||||||
rel, err := filepath.Rel(safePath, path)
|
rel, err := filepath.Rel(safePath, path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -250,7 +254,7 @@ func downloads(router *httprouter.Router, baseDir string, fsys fs.FS) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
router.GET("/artifact/*path", func(w http.ResponseWriter, _ *http.Request, params httprouter.Params) {
|
router.GET("/artifact/*path", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
|
||||||
path := params.ByName("path")[1:]
|
path := params.ByName("path")[1:]
|
||||||
|
|
||||||
safePath := safeResolve(baseDir, path)
|
safePath := safeResolve(baseDir, path)
|
||||||
@@ -272,7 +276,7 @@ func downloads(router *httprouter.Router, baseDir string, fsys fs.FS) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func Serve(ctx context.Context, artifactPath string, addr string, port string) context.CancelFunc {
|
func Serve(ctx context.Context, artifactPath, addr, port string) context.CancelFunc {
|
||||||
serverContext, cancel := context.WithCancel(ctx)
|
serverContext, cancel := context.WithCancel(ctx)
|
||||||
logger := common.Logger(serverContext)
|
logger := common.Logger(serverContext)
|
||||||
|
|
||||||
@@ -286,7 +290,6 @@ func Serve(ctx context.Context, artifactPath string, addr string, port string) c
|
|||||||
fsys := readWriteFSImpl{}
|
fsys := readWriteFSImpl{}
|
||||||
uploads(router, artifactPath, fsys)
|
uploads(router, artifactPath, fsys)
|
||||||
downloads(router, artifactPath, fsys)
|
downloads(router, artifactPath, fsys)
|
||||||
RoutesV4(router, artifactPath, fsys, fsys)
|
|
||||||
|
|
||||||
server := &http.Server{
|
server := &http.Server{
|
||||||
Addr: fmt.Sprintf("%s:%s", addr, port),
|
Addr: fmt.Sprintf("%s:%s", addr, port),
|
||||||
@@ -307,7 +310,7 @@ func Serve(ctx context.Context, artifactPath string, addr string, port string) c
|
|||||||
<-serverContext.Done()
|
<-serverContext.Done()
|
||||||
|
|
||||||
if err := server.Shutdown(ctx); err != nil {
|
if err := server.Shutdown(ctx); err != nil {
|
||||||
logger.Errorf("failed shutdown gracefully - force shutdown: %v", err)
|
logger.Errorf("Failed shutdown gracefully - force shutdown: %v", err)
|
||||||
server.Close()
|
server.Close()
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2021 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package artifacts
|
package artifacts
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -13,13 +17,12 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"testing/fstest"
|
"testing/fstest"
|
||||||
|
|
||||||
|
"gitea.com/gitea/runner/act/model"
|
||||||
|
"gitea.com/gitea/runner/act/runner"
|
||||||
|
|
||||||
"github.com/julienschmidt/httprouter"
|
"github.com/julienschmidt/httprouter"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/model"
|
|
||||||
"gitea.com/gitea/act_runner/pkg/runner"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type writableMapFile struct {
|
type writableMapFile struct {
|
||||||
@@ -250,9 +253,6 @@ func TestArtifactFlow(t *testing.T) {
|
|||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("skipping integration test")
|
t.Skip("skipping integration test")
|
||||||
}
|
}
|
||||||
if _, ok := os.LookupEnv("NO_EXTERNAL_IP"); ok {
|
|
||||||
t.Skip("skipping test because QEMU is disabled")
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
@@ -266,7 +266,6 @@ func TestArtifactFlow(t *testing.T) {
|
|||||||
tables := []TestJobFileInfo{
|
tables := []TestJobFileInfo{
|
||||||
{"testdata", "upload-and-download", "push", "", platforms, ""},
|
{"testdata", "upload-and-download", "push", "", platforms, ""},
|
||||||
{"testdata", "GHSL-2023-004", "push", "", platforms, ""},
|
{"testdata", "GHSL-2023-004", "push", "", platforms, ""},
|
||||||
{"testdata", "v4", "push", "", platforms, ""},
|
|
||||||
}
|
}
|
||||||
log.SetLevel(log.DebugLevel)
|
log.SetLevel(log.DebugLevel)
|
||||||
|
|
||||||
@@ -277,14 +276,14 @@ func TestArtifactFlow(t *testing.T) {
|
|||||||
|
|
||||||
func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
|
func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
|
||||||
t.Run(tjfi.workflowPath, func(t *testing.T) {
|
t.Run(tjfi.workflowPath, func(t *testing.T) {
|
||||||
t.Logf("::group::%s\n", tjfi.workflowPath)
|
fmt.Printf("::group::%s\n", tjfi.workflowPath) //nolint:forbidigo // pre-existing issue from nektos/act
|
||||||
|
|
||||||
if err := os.RemoveAll(artifactsPath); err != nil {
|
if err := os.RemoveAll(artifactsPath); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
workdir, err := filepath.Abs(tjfi.workdir)
|
workdir, err := filepath.Abs(tjfi.workdir)
|
||||||
require.NoError(t, err, workdir)
|
assert.NoError(t, err, workdir) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
fullWorkflowPath := filepath.Join(workdir, tjfi.workflowPath)
|
fullWorkflowPath := filepath.Join(workdir, tjfi.workflowPath)
|
||||||
runnerConfig := &runner.Config{
|
runnerConfig := &runner.Config{
|
||||||
Workdir: workdir,
|
Workdir: workdir,
|
||||||
@@ -300,28 +299,30 @@ func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
runner, err := runner.New(runnerConfig)
|
runner, err := runner.New(runnerConfig)
|
||||||
require.NoError(t, err, tjfi.workflowPath)
|
assert.NoError(t, err, tjfi.workflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
planner, err := model.NewWorkflowPlanner(fullWorkflowPath, model.PlannerConfig{})
|
planner, err := model.NewWorkflowPlanner(fullWorkflowPath, true)
|
||||||
require.NoError(t, err, fullWorkflowPath)
|
assert.NoError(t, err, fullWorkflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
plan, err := planner.PlanEvent(tjfi.eventName)
|
plan, err := planner.PlanEvent(tjfi.eventName)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = runner.NewPlanExecutor(plan)(ctx)
|
err = runner.NewPlanExecutor(plan)(ctx)
|
||||||
if tjfi.errorMessage == "" {
|
if tjfi.errorMessage == "" {
|
||||||
require.NoError(t, err, fullWorkflowPath)
|
assert.NoError(t, err, fullWorkflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
} else {
|
} else {
|
||||||
require.Error(t, err, tjfi.errorMessage)
|
assert.Error(t, err, tjfi.errorMessage) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Nil(t, plan)
|
assert.Nil(t, plan)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Log("::endgroup::")
|
fmt.Println("::endgroup::") //nolint:forbidigo // pre-existing issue from nektos/act
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMkdirFsImplSafeResolve(t *testing.T) {
|
func TestMkdirFsImplSafeResolve(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
baseDir := "/foo/bar"
|
baseDir := "/foo/bar"
|
||||||
|
|
||||||
tests := map[string]struct {
|
tests := map[string]struct {
|
||||||
@@ -339,7 +340,6 @@ func TestMkdirFsImplSafeResolve(t *testing.T) {
|
|||||||
|
|
||||||
for name, tc := range tests {
|
for name, tc := range tests {
|
||||||
t.Run(name, func(t *testing.T) {
|
t.Run(name, func(t *testing.T) {
|
||||||
assert := assert.New(t)
|
|
||||||
assert.Equal(tc.want, safeResolve(baseDir, tc.input))
|
assert.Equal(tc.want, safeResolve(baseDir, tc.input))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package common
|
package common
|
||||||
|
|
||||||
// CartesianProduct takes map of lists and returns list of unique tuples
|
// CartesianProduct takes map of lists and returns list of unique tuples
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2020 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -1,9 +1,13 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"runtime/debug"
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
@@ -53,7 +57,7 @@ func NewDebugExecutor(format string, args ...any) Executor {
|
|||||||
// NewPipelineExecutor creates a new executor from a series of other executors
|
// NewPipelineExecutor creates a new executor from a series of other executors
|
||||||
func NewPipelineExecutor(executors ...Executor) Executor {
|
func NewPipelineExecutor(executors ...Executor) Executor {
|
||||||
if len(executors) == 0 {
|
if len(executors) == 0 {
|
||||||
return func(_ context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -69,7 +73,7 @@ func NewPipelineExecutor(executors ...Executor) Executor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewConditionalExecutor creates a new executor based on conditions
|
// NewConditionalExecutor creates a new executor based on conditions
|
||||||
func NewConditionalExecutor(conditional Conditional, trueExecutor Executor, falseExecutor Executor) Executor {
|
func NewConditionalExecutor(conditional Conditional, trueExecutor, falseExecutor Executor) Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
if conditional(ctx) {
|
if conditional(ctx) {
|
||||||
if trueExecutor != nil {
|
if trueExecutor != nil {
|
||||||
@@ -86,7 +90,7 @@ func NewConditionalExecutor(conditional Conditional, trueExecutor Executor, fals
|
|||||||
|
|
||||||
// NewErrorExecutor creates a new executor that always errors out
|
// NewErrorExecutor creates a new executor that always errors out
|
||||||
func NewErrorExecutor(err error) Executor {
|
func NewErrorExecutor(err error) Executor {
|
||||||
return func(_ context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -102,12 +106,30 @@ func NewParallelExecutor(parallel int, executors ...Executor) Executor {
|
|||||||
parallel = 1
|
parallel = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
log.Infof("NewParallelExecutor: Creating %d workers for %d executors", parallel, len(executors))
|
||||||
|
|
||||||
for i := 0; i < parallel; i++ {
|
for i := 0; i < parallel; i++ {
|
||||||
go func(work <-chan Executor, errs chan<- error) {
|
go func(workerID int, work <-chan Executor, errs chan<- error) {
|
||||||
|
log.Debugf("Worker %d started", workerID)
|
||||||
|
taskCount := 0
|
||||||
for executor := range work {
|
for executor := range work {
|
||||||
errs <- executor(ctx)
|
taskCount++
|
||||||
|
log.Debugf("Worker %d executing task %d", workerID, taskCount)
|
||||||
|
// Recover from panics in executors to avoid crashing the worker
|
||||||
|
// goroutine which would leave the runner process hung.
|
||||||
|
// https://gitea.com/gitea/runner/issues/371
|
||||||
|
errs <- func() (err error) {
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
log.Errorf("panic in executor: %v\n%s", r, debug.Stack())
|
||||||
|
err = fmt.Errorf("panic: %v", r)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return executor(ctx)
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
}(work, errs)
|
log.Debugf("Worker %d finished (%d tasks executed)", workerID, taskCount)
|
||||||
|
}(i, work, errs)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := range executors {
|
for i := range executors {
|
||||||
@@ -131,31 +153,6 @@ func NewParallelExecutor(parallel int, executors ...Executor) Executor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFieldExecutor(name string, value any, exec Executor) Executor {
|
|
||||||
return func(ctx context.Context) error {
|
|
||||||
return exec(WithLogger(ctx, Logger(ctx).WithField(name, value)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Then runs another executor if this executor succeeds
|
|
||||||
func (e Executor) ThenError(then func(ctx context.Context, err error) error) Executor {
|
|
||||||
return func(ctx context.Context) error {
|
|
||||||
err := e(ctx)
|
|
||||||
if err != nil {
|
|
||||||
switch err.(type) {
|
|
||||||
case Warning:
|
|
||||||
Logger(ctx).Warning(err.Error())
|
|
||||||
default:
|
|
||||||
return then(ctx, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ctx.Err() != nil {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
return then(ctx, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Then runs another executor if this executor succeeds
|
// Then runs another executor if this executor succeeds
|
||||||
func (e Executor) Then(then Executor) Executor {
|
func (e Executor) Then(then Executor) Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
@@ -175,25 +172,6 @@ func (e Executor) Then(then Executor) Executor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Then runs another executor if this executor succeeds
|
|
||||||
func (e Executor) OnError(then Executor) Executor {
|
|
||||||
return func(ctx context.Context) error {
|
|
||||||
err := e(ctx)
|
|
||||||
if err != nil {
|
|
||||||
switch err.(type) {
|
|
||||||
case Warning:
|
|
||||||
Logger(ctx).Warning(err.Error())
|
|
||||||
default:
|
|
||||||
return errors.Join(err, then(ctx))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ctx.Err() != nil {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If only runs this executor if conditional is true
|
// If only runs this executor if conditional is true
|
||||||
func (e Executor) If(conditional Conditional) Executor {
|
func (e Executor) If(conditional Conditional) Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
@@ -216,22 +194,20 @@ func (e Executor) IfNot(conditional Conditional) Executor {
|
|||||||
|
|
||||||
// IfBool only runs this executor if conditional is true
|
// IfBool only runs this executor if conditional is true
|
||||||
func (e Executor) IfBool(conditional bool) Executor {
|
func (e Executor) IfBool(conditional bool) Executor {
|
||||||
return e.If(func(_ context.Context) bool {
|
return e.If(func(ctx context.Context) bool {
|
||||||
return conditional
|
return conditional
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finally adds an executor to run after other executor
|
// Finally adds an executor to run after other executor
|
||||||
func (e Executor) Finally(finally Executor) Executor {
|
func (e Executor) Finally(finally Executor) Executor {
|
||||||
return func(ctx context.Context) (err error) {
|
return func(ctx context.Context) error {
|
||||||
defer func() {
|
err := e(ctx)
|
||||||
err2 := finally(ctx)
|
err2 := finally(ctx)
|
||||||
if err2 != nil {
|
if err2 != nil {
|
||||||
err = fmt.Errorf("error occurred running finally: %v (original error: %v)", err2, err)
|
return fmt.Errorf("Error occurred running finally: %v (original error: %v)", err2, err)
|
||||||
}
|
}
|
||||||
}()
|
return err
|
||||||
err = e(ctx)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
89
act/common/executor_max_parallel_test.go
Normal file
89
act/common/executor_max_parallel_test.go
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Simple fast test that verifies max-parallel: 2 limits concurrency
|
||||||
|
func TestMaxParallel2Quick(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
var currentRunning atomic.Int32
|
||||||
|
var maxSimultaneous atomic.Int32
|
||||||
|
|
||||||
|
executors := make([]Executor, 4)
|
||||||
|
for i := range 4 {
|
||||||
|
executors[i] = func(ctx context.Context) error {
|
||||||
|
current := currentRunning.Add(1)
|
||||||
|
|
||||||
|
// Update max if needed
|
||||||
|
for {
|
||||||
|
maxValue := maxSimultaneous.Load()
|
||||||
|
if current <= maxValue || maxSimultaneous.CompareAndSwap(maxValue, current) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
currentRunning.Add(-1)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err := NewParallelExecutor(2, executors...)(ctx)
|
||||||
|
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
assert.LessOrEqual(t, maxSimultaneous.Load(), int32(2),
|
||||||
|
"Should not exceed max-parallel: 2")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that verifies max-parallel: 1 enforces sequential execution
|
||||||
|
func TestMaxParallel1Sequential(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
var currentRunning atomic.Int32
|
||||||
|
var maxSimultaneous atomic.Int32
|
||||||
|
var executionOrder []int
|
||||||
|
var orderMutex sync.Mutex
|
||||||
|
|
||||||
|
executors := make([]Executor, 5)
|
||||||
|
for i := range 5 {
|
||||||
|
taskID := i
|
||||||
|
executors[i] = func(ctx context.Context) error {
|
||||||
|
current := currentRunning.Add(1)
|
||||||
|
|
||||||
|
// Track execution order
|
||||||
|
orderMutex.Lock()
|
||||||
|
executionOrder = append(executionOrder, taskID)
|
||||||
|
orderMutex.Unlock()
|
||||||
|
|
||||||
|
// Update max if needed
|
||||||
|
for {
|
||||||
|
maxValue := maxSimultaneous.Load()
|
||||||
|
if current <= maxValue || maxSimultaneous.CompareAndSwap(maxValue, current) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
currentRunning.Add(-1)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err := NewParallelExecutor(1, executors...)(ctx)
|
||||||
|
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
assert.Equal(t, int32(1), maxSimultaneous.Load(),
|
||||||
|
"max-parallel: 1 should only run 1 task at a time")
|
||||||
|
assert.Len(t, executionOrder, 5, "All 5 tasks should have executed")
|
||||||
|
}
|
||||||
283
act/common/executor_parallel_advanced_test.go
Normal file
283
act/common/executor_parallel_advanced_test.go
Normal file
@@ -0,0 +1,283 @@
|
|||||||
|
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestMaxParallelJobExecution tests actual job execution with max-parallel
|
||||||
|
func TestMaxParallelJobExecution(t *testing.T) {
|
||||||
|
t.Run("MaxParallel=1 Sequential", func(t *testing.T) {
|
||||||
|
var currentRunning atomic.Int32
|
||||||
|
var maxConcurrent int32
|
||||||
|
var executionOrder []int
|
||||||
|
var mu sync.Mutex
|
||||||
|
|
||||||
|
executors := make([]Executor, 5)
|
||||||
|
for i := range 5 {
|
||||||
|
taskID := i
|
||||||
|
executors[i] = func(ctx context.Context) error {
|
||||||
|
current := currentRunning.Add(1)
|
||||||
|
|
||||||
|
// Track max concurrent
|
||||||
|
for {
|
||||||
|
maxValue := atomic.LoadInt32(&maxConcurrent)
|
||||||
|
if current <= maxValue || atomic.CompareAndSwapInt32(&maxConcurrent, maxValue, current) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
mu.Lock()
|
||||||
|
executionOrder = append(executionOrder, taskID)
|
||||||
|
mu.Unlock()
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
currentRunning.Add(-1)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
err := NewParallelExecutor(1, executors...)(ctx)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
assert.Equal(t, int32(1), maxConcurrent, "Should never exceed 1 concurrent execution")
|
||||||
|
assert.Len(t, executionOrder, 5, "All tasks should execute")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("MaxParallel=3 Limited", func(t *testing.T) {
|
||||||
|
var currentRunning atomic.Int32
|
||||||
|
var maxConcurrent int32
|
||||||
|
|
||||||
|
executors := make([]Executor, 10)
|
||||||
|
for i := range 10 {
|
||||||
|
executors[i] = func(ctx context.Context) error {
|
||||||
|
current := currentRunning.Add(1)
|
||||||
|
|
||||||
|
for {
|
||||||
|
maxValue := atomic.LoadInt32(&maxConcurrent)
|
||||||
|
if current <= maxValue || atomic.CompareAndSwapInt32(&maxConcurrent, maxValue, current) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
currentRunning.Add(-1)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
err := NewParallelExecutor(3, executors...)(ctx)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
assert.LessOrEqual(t, int(maxConcurrent), 3, "Should never exceed 3 concurrent executions")
|
||||||
|
assert.GreaterOrEqual(t, int(maxConcurrent), 1, "Should have at least 1 concurrent execution")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("MaxParallel=0 Uses1Worker", func(t *testing.T) {
|
||||||
|
var maxConcurrent int32
|
||||||
|
var currentRunning atomic.Int32
|
||||||
|
|
||||||
|
executors := make([]Executor, 5)
|
||||||
|
for i := range 5 {
|
||||||
|
executors[i] = func(ctx context.Context) error {
|
||||||
|
current := currentRunning.Add(1)
|
||||||
|
|
||||||
|
for {
|
||||||
|
maxValue := atomic.LoadInt32(&maxConcurrent)
|
||||||
|
if current <= maxValue || atomic.CompareAndSwapInt32(&maxConcurrent, maxValue, current) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
currentRunning.Add(-1)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
// When maxParallel is 0 or negative, it defaults to 1
|
||||||
|
err := NewParallelExecutor(0, executors...)(ctx)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
assert.Equal(t, int32(1), maxConcurrent, "Should use 1 worker when max-parallel is 0")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestMaxParallelWithErrors tests error handling with max-parallel
|
||||||
|
func TestMaxParallelWithErrors(t *testing.T) {
|
||||||
|
t.Run("OneTaskFailsOthersContinue", func(t *testing.T) {
|
||||||
|
var successCount int32
|
||||||
|
|
||||||
|
executors := make([]Executor, 5)
|
||||||
|
for i := range 5 {
|
||||||
|
taskID := i
|
||||||
|
executors[i] = func(ctx context.Context) error {
|
||||||
|
if taskID == 2 {
|
||||||
|
return assert.AnError
|
||||||
|
}
|
||||||
|
atomic.AddInt32(&successCount, 1)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
err := NewParallelExecutor(2, executors...)(ctx)
|
||||||
|
|
||||||
|
// Should return the error from task 2
|
||||||
|
assert.Error(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
// Other tasks should still execute
|
||||||
|
assert.Equal(t, int32(4), successCount, "4 tasks should succeed")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ContextCancellation", func(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
|
var startedCount int32
|
||||||
|
executors := make([]Executor, 10)
|
||||||
|
for i := range 10 {
|
||||||
|
executors[i] = func(ctx context.Context) error {
|
||||||
|
atomic.AddInt32(&startedCount, 1)
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cancel after a short delay
|
||||||
|
go func() {
|
||||||
|
time.Sleep(30 * time.Millisecond)
|
||||||
|
cancel()
|
||||||
|
}()
|
||||||
|
|
||||||
|
err := NewParallelExecutor(3, executors...)(ctx)
|
||||||
|
assert.Error(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
assert.ErrorIs(t, err, context.Canceled) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
// Not all tasks should start due to cancellation (but timing may vary)
|
||||||
|
// Just verify cancellation occurred
|
||||||
|
t.Logf("Started %d tasks before cancellation", startedCount)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestMaxParallelPerformance tests performance characteristics
|
||||||
|
func TestMaxParallelPerformance(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping performance test in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("ParallelFasterThanSequential", func(t *testing.T) {
|
||||||
|
executors := make([]Executor, 10)
|
||||||
|
for i := range 10 {
|
||||||
|
executors[i] = func(ctx context.Context) error {
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// Sequential (max-parallel=1)
|
||||||
|
start := time.Now()
|
||||||
|
err := NewParallelExecutor(1, executors...)(ctx)
|
||||||
|
sequentialDuration := time.Since(start)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
// Parallel (max-parallel=5)
|
||||||
|
start = time.Now()
|
||||||
|
err = NewParallelExecutor(5, executors...)(ctx)
|
||||||
|
parallelDuration := time.Since(start)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
// Parallel should be significantly faster
|
||||||
|
assert.Less(t, parallelDuration, sequentialDuration/2,
|
||||||
|
"Parallel execution should be at least 2x faster")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("OptimalWorkerCount", func(t *testing.T) {
|
||||||
|
executors := make([]Executor, 20)
|
||||||
|
for i := range 20 {
|
||||||
|
executors[i] = func(ctx context.Context) error {
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// Test with different worker counts
|
||||||
|
workerCounts := []int{1, 2, 5, 10, 20}
|
||||||
|
durations := make(map[int]time.Duration)
|
||||||
|
|
||||||
|
for _, count := range workerCounts {
|
||||||
|
start := time.Now()
|
||||||
|
err := NewParallelExecutor(count, executors...)(ctx)
|
||||||
|
durations[count] = time.Since(start)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
}
|
||||||
|
|
||||||
|
// More workers should generally be faster (up to a point)
|
||||||
|
assert.Less(t, durations[5], durations[1], "5 workers should be faster than 1")
|
||||||
|
assert.Less(t, durations[10], durations[2], "10 workers should be faster than 2")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestMaxParallelResourceSharing tests resource sharing scenarios
|
||||||
|
func TestMaxParallelResourceSharing(t *testing.T) {
|
||||||
|
t.Run("SharedResourceWithMutex", func(t *testing.T) {
|
||||||
|
var sharedCounter int
|
||||||
|
var mu sync.Mutex
|
||||||
|
|
||||||
|
executors := make([]Executor, 100)
|
||||||
|
for i := range 100 {
|
||||||
|
executors[i] = func(ctx context.Context) error {
|
||||||
|
mu.Lock()
|
||||||
|
sharedCounter++
|
||||||
|
mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
err := NewParallelExecutor(10, executors...)(ctx)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
assert.Equal(t, 100, sharedCounter, "All tasks should increment counter")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ChannelCommunication", func(t *testing.T) {
|
||||||
|
resultChan := make(chan int, 50)
|
||||||
|
|
||||||
|
executors := make([]Executor, 50)
|
||||||
|
for i := range 50 {
|
||||||
|
taskID := i
|
||||||
|
executors[i] = func(ctx context.Context) error {
|
||||||
|
resultChan <- taskID
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
err := NewParallelExecutor(5, executors...)(ctx)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
close(resultChan)
|
||||||
|
|
||||||
|
results := make(map[int]bool)
|
||||||
|
for result := range resultChan {
|
||||||
|
results[result] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Len(t, results, 50, "All task IDs should be received")
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -8,7 +12,6 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestNewWorkflow(t *testing.T) {
|
func TestNewWorkflow(t *testing.T) {
|
||||||
@@ -18,24 +21,24 @@ func TestNewWorkflow(t *testing.T) {
|
|||||||
|
|
||||||
// empty
|
// empty
|
||||||
emptyWorkflow := NewPipelineExecutor()
|
emptyWorkflow := NewPipelineExecutor()
|
||||||
require.NoError(t, emptyWorkflow(ctx))
|
assert.NoError(emptyWorkflow(ctx)) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// error case
|
// error case
|
||||||
errorWorkflow := NewErrorExecutor(errors.New("test error"))
|
errorWorkflow := NewErrorExecutor(errors.New("test error"))
|
||||||
require.Error(t, errorWorkflow(ctx))
|
assert.Error(errorWorkflow(ctx)) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// multiple success case
|
// multiple success case
|
||||||
runcount := 0
|
runcount := 0
|
||||||
successWorkflow := NewPipelineExecutor(
|
successWorkflow := NewPipelineExecutor(
|
||||||
func(_ context.Context) error {
|
func(ctx context.Context) error {
|
||||||
runcount++
|
runcount++
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
func(_ context.Context) error {
|
func(ctx context.Context) error {
|
||||||
runcount++
|
runcount++
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
require.NoError(t, successWorkflow(ctx))
|
assert.NoError(successWorkflow(ctx)) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(2, runcount)
|
assert.Equal(2, runcount)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -47,31 +50,31 @@ func TestNewConditionalExecutor(t *testing.T) {
|
|||||||
trueCount := 0
|
trueCount := 0
|
||||||
falseCount := 0
|
falseCount := 0
|
||||||
|
|
||||||
err := NewConditionalExecutor(func(_ context.Context) bool {
|
err := NewConditionalExecutor(func(ctx context.Context) bool {
|
||||||
return false
|
return false
|
||||||
}, func(_ context.Context) error {
|
}, func(ctx context.Context) error {
|
||||||
trueCount++
|
trueCount++
|
||||||
return nil
|
return nil
|
||||||
}, func(_ context.Context) error {
|
}, func(ctx context.Context) error {
|
||||||
falseCount++
|
falseCount++
|
||||||
return nil
|
return nil
|
||||||
})(ctx)
|
})(ctx)
|
||||||
|
|
||||||
require.NoError(t, err)
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(0, trueCount)
|
assert.Equal(0, trueCount)
|
||||||
assert.Equal(1, falseCount)
|
assert.Equal(1, falseCount)
|
||||||
|
|
||||||
err = NewConditionalExecutor(func(_ context.Context) bool {
|
err = NewConditionalExecutor(func(ctx context.Context) bool {
|
||||||
return true
|
return true
|
||||||
}, func(_ context.Context) error {
|
}, func(ctx context.Context) error {
|
||||||
trueCount++
|
trueCount++
|
||||||
return nil
|
return nil
|
||||||
}, func(_ context.Context) error {
|
}, func(ctx context.Context) error {
|
||||||
falseCount++
|
falseCount++
|
||||||
return nil
|
return nil
|
||||||
})(ctx)
|
})(ctx)
|
||||||
|
|
||||||
require.NoError(t, err)
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(1, trueCount)
|
assert.Equal(1, trueCount)
|
||||||
assert.Equal(1, falseCount)
|
assert.Equal(1, falseCount)
|
||||||
}
|
}
|
||||||
@@ -81,16 +84,14 @@ func TestNewParallelExecutor(t *testing.T) {
|
|||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
var count atomic.Int32
|
var count, activeCount, maxCount atomic.Int32
|
||||||
var activeCount atomic.Int32
|
emptyWorkflow := NewPipelineExecutor(func(ctx context.Context) error {
|
||||||
var maxCount atomic.Int32
|
|
||||||
emptyWorkflow := NewPipelineExecutor(func(_ context.Context) error {
|
|
||||||
count.Add(1)
|
count.Add(1)
|
||||||
|
|
||||||
cur := activeCount.Add(1)
|
active := activeCount.Add(1)
|
||||||
for {
|
for {
|
||||||
old := maxCount.Load()
|
m := maxCount.Load()
|
||||||
if cur <= old || maxCount.CompareAndSwap(old, cur) {
|
if active <= m || maxCount.CompareAndSwap(m, active) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -104,7 +105,7 @@ func TestNewParallelExecutor(t *testing.T) {
|
|||||||
|
|
||||||
assert.Equal(int32(3), count.Load(), "should run all 3 executors")
|
assert.Equal(int32(3), count.Load(), "should run all 3 executors")
|
||||||
assert.Equal(int32(2), maxCount.Load(), "should run at most 2 executors in parallel")
|
assert.Equal(int32(2), maxCount.Load(), "should run at most 2 executors in parallel")
|
||||||
require.NoError(t, err)
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// Reset to test running the executor with 0 parallelism
|
// Reset to test running the executor with 0 parallelism
|
||||||
count.Store(0)
|
count.Store(0)
|
||||||
@@ -115,7 +116,7 @@ func TestNewParallelExecutor(t *testing.T) {
|
|||||||
|
|
||||||
assert.Equal(int32(3), count.Load(), "should run all 3 executors")
|
assert.Equal(int32(3), count.Load(), "should run all 3 executors")
|
||||||
assert.Equal(int32(1), maxCount.Load(), "should run at most 1 executors in parallel")
|
assert.Equal(int32(1), maxCount.Load(), "should run at most 1 executors in parallel")
|
||||||
require.NoError(t, errSingle)
|
assert.NoError(errSingle)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewParallelExecutorFailed(t *testing.T) {
|
func TestNewParallelExecutorFailed(t *testing.T) {
|
||||||
@@ -125,13 +126,13 @@ func TestNewParallelExecutorFailed(t *testing.T) {
|
|||||||
cancel()
|
cancel()
|
||||||
|
|
||||||
count := 0
|
count := 0
|
||||||
errorWorkflow := NewPipelineExecutor(func(_ context.Context) error {
|
errorWorkflow := NewPipelineExecutor(func(ctx context.Context) error {
|
||||||
count++
|
count++
|
||||||
return errors.New("fake error")
|
return errors.New("fake error")
|
||||||
})
|
})
|
||||||
err := NewParallelExecutor(1, errorWorkflow)(ctx)
|
err := NewParallelExecutor(1, errorWorkflow)(ctx)
|
||||||
assert.Equal(1, count)
|
assert.Equal(1, count)
|
||||||
assert.ErrorIs(err, context.Canceled)
|
assert.ErrorIs(context.Canceled, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewParallelExecutorCanceled(t *testing.T) {
|
func TestNewParallelExecutorCanceled(t *testing.T) {
|
||||||
@@ -140,16 +141,18 @@ func TestNewParallelExecutorCanceled(t *testing.T) {
|
|||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
cancel()
|
cancel()
|
||||||
|
|
||||||
|
errExpected := errors.New("fake error")
|
||||||
|
|
||||||
var count atomic.Int32
|
var count atomic.Int32
|
||||||
successWorkflow := NewPipelineExecutor(func(_ context.Context) error {
|
successWorkflow := NewPipelineExecutor(func(ctx context.Context) error {
|
||||||
count.Add(1)
|
count.Add(1)
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
errorWorkflow := NewPipelineExecutor(func(_ context.Context) error {
|
errorWorkflow := NewPipelineExecutor(func(ctx context.Context) error {
|
||||||
count.Add(1)
|
count.Add(1)
|
||||||
return errors.New("fake error")
|
return errExpected
|
||||||
})
|
})
|
||||||
err := NewParallelExecutor(3, errorWorkflow, successWorkflow, successWorkflow)(ctx)
|
err := NewParallelExecutor(3, errorWorkflow, successWorkflow, successWorkflow)(ctx)
|
||||||
assert.Equal(int32(3), count.Load())
|
assert.Equal(int32(3), count.Load())
|
||||||
assert.ErrorIs(err, context.Canceled)
|
assert.Error(errExpected, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
}
|
}
|
||||||
@@ -1,14 +1,17 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// CopyFile copy file
|
// CopyFile copy file
|
||||||
func CopyFile(source string, dest string) (err error) {
|
func CopyFile(source, dest string) (err error) {
|
||||||
sourcefile, err := os.Open(source)
|
sourcefile, err := os.Open(source)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -31,11 +34,11 @@ func CopyFile(source string, dest string) (err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// CopyDir recursive copy of directory
|
// CopyDir recursive copy of directory
|
||||||
func CopyDir(source string, dest string) (err error) {
|
func CopyDir(source, dest string) (err error) {
|
||||||
// get properties of source dir
|
// get properties of source dir
|
||||||
sourceinfo, err := os.Stat(source)
|
sourceinfo, err := os.Stat(source)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -60,13 +63,13 @@ func CopyDir(source string, dest string) (err error) {
|
|||||||
// create sub-directories - recursively
|
// create sub-directories - recursively
|
||||||
err = CopyDir(sourcefilepointer, destinationfilepointer)
|
err = CopyDir(sourcefilepointer, destinationfilepointer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err)
|
fmt.Println(err) //nolint:forbidigo // pre-existing issue from nektos/act
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// perform copy
|
// perform copy
|
||||||
err = CopyFile(sourcefilepointer, destinationfilepointer)
|
err = CopyFile(sourcefilepointer, destinationfilepointer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err)
|
fmt.Println(err) //nolint:forbidigo // pre-existing issue from nektos/act
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2022 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package git
|
package git
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -11,6 +15,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
"gitea.com/gitea/runner/act/common"
|
||||||
|
|
||||||
"github.com/go-git/go-git/v5"
|
"github.com/go-git/go-git/v5"
|
||||||
"github.com/go-git/go-git/v5/config"
|
"github.com/go-git/go-git/v5/config"
|
||||||
"github.com/go-git/go-git/v5/plumbing"
|
"github.com/go-git/go-git/v5/plumbing"
|
||||||
@@ -18,8 +24,6 @@ import (
|
|||||||
"github.com/go-git/go-git/v5/plumbing/transport/http"
|
"github.com/go-git/go-git/v5/plumbing/transport/http"
|
||||||
"github.com/mattn/go-isatty"
|
"github.com/mattn/go-isatty"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -28,12 +32,21 @@ var (
|
|||||||
githubHTTPRegex = regexp.MustCompile(`^https?://.*github.com.*/(.+)/(.+?)(?:.git)?$`)
|
githubHTTPRegex = regexp.MustCompile(`^https?://.*github.com.*/(.+)/(.+?)(?:.git)?$`)
|
||||||
githubSSHRegex = regexp.MustCompile(`github.com[:/](.+)/(.+?)(?:.git)?$`)
|
githubSSHRegex = regexp.MustCompile(`github.com[:/](.+)/(.+?)(?:.git)?$`)
|
||||||
|
|
||||||
cloneLock sync.Mutex
|
cloneLocks sync.Map // key: clone target directory; value: *sync.Mutex
|
||||||
|
|
||||||
ErrShortRef = errors.New("short SHA references are not supported")
|
ErrShortRef = errors.New("short SHA references are not supported")
|
||||||
ErrNoRepo = errors.New("unable to find git repo")
|
ErrNoRepo = errors.New("unable to find git repo")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// acquireCloneLock returns an unlock function after locking the per-directory mutex for dir.
|
||||||
|
// Only concurrent operations targeting the same directory are erialized; clones into different directories run in parallel.
|
||||||
|
func acquireCloneLock(dir string) func() {
|
||||||
|
v, _ := cloneLocks.LoadOrStore(dir, &sync.Mutex{})
|
||||||
|
mu := v.(*sync.Mutex)
|
||||||
|
mu.Lock()
|
||||||
|
return mu.Unlock
|
||||||
|
}
|
||||||
|
|
||||||
type Error struct {
|
type Error struct {
|
||||||
err error
|
err error
|
||||||
commit string
|
commit string
|
||||||
@@ -52,7 +65,7 @@ func (e *Error) Commit() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FindGitRevision get the current git revision
|
// FindGitRevision get the current git revision
|
||||||
func FindGitRevision(ctx context.Context, file string) (shortSha string, sha string, err error) {
|
func FindGitRevision(ctx context.Context, file string) (shortSha, sha string, err error) {
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
|
|
||||||
gitDir, err := git.PlainOpenWithOptions(
|
gitDir, err := git.PlainOpenWithOptions(
|
||||||
@@ -73,7 +86,7 @@ func FindGitRevision(ctx context.Context, file string) (shortSha string, sha str
|
|||||||
}
|
}
|
||||||
|
|
||||||
if head.Hash().IsZero() {
|
if head.Hash().IsZero() {
|
||||||
return "", "", errors.New("head sha1 could not be resolved")
|
return "", "", errors.New("HEAD sha1 could not be resolved")
|
||||||
}
|
}
|
||||||
|
|
||||||
hash := head.Hash().String()
|
hash := head.Hash().String()
|
||||||
@@ -124,7 +137,7 @@ func FindGitRef(ctx context.Context, file string) (string, error) {
|
|||||||
* it means we checked out a branch
|
* it means we checked out a branch
|
||||||
*
|
*
|
||||||
* If a branches matches first we must continue and check all tags (all references)
|
* If a branches matches first we must continue and check all tags (all references)
|
||||||
* in case we match with a tag later in the iteration
|
* in case we match with a tag later in the interation
|
||||||
*/
|
*/
|
||||||
if r.Hash().String() == ref {
|
if r.Hash().String() == ref {
|
||||||
if r.Name().IsTag() {
|
if r.Name().IsTag() {
|
||||||
@@ -167,8 +180,8 @@ func FindGithubRepo(ctx context.Context, file, githubInstance, remoteName string
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
_, slug := findGitSlug(url, githubInstance)
|
_, slug, err := findGitSlug(url, githubInstance)
|
||||||
return slug, nil
|
return slug, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func findGitRemoteURL(_ context.Context, file, remoteName string) (string, error) {
|
func findGitRemoteURL(_ context.Context, file, remoteName string) (string, error) {
|
||||||
@@ -195,38 +208,25 @@ func findGitRemoteURL(_ context.Context, file, remoteName string) (string, error
|
|||||||
return remote.Config().URLs[0], nil
|
return remote.Config().URLs[0], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type findStringSubmatcher interface {
|
func findGitSlug(url, githubInstance string) (string, string, error) { //nolint:unparam // pre-existing issue from nektos/act
|
||||||
FindStringSubmatch(string) []string
|
if matches := codeCommitHTTPRegex.FindStringSubmatch(url); matches != nil {
|
||||||
}
|
return "CodeCommit", matches[2], nil
|
||||||
|
} else if matches := codeCommitSSHRegex.FindStringSubmatch(url); matches != nil {
|
||||||
func matchesRegex(url string, matchers ...findStringSubmatcher) []string {
|
return "CodeCommit", matches[2], nil
|
||||||
for _, regex := range matchers {
|
} else if matches := githubHTTPRegex.FindStringSubmatch(url); matches != nil {
|
||||||
if matches := regex.FindStringSubmatch(url); matches != nil {
|
return "GitHub", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil
|
||||||
return matches
|
} else if matches := githubSSHRegex.FindStringSubmatch(url); matches != nil {
|
||||||
|
return "GitHub", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil
|
||||||
|
} else if githubInstance != "github.com" {
|
||||||
|
gheHTTPRegex := regexp.MustCompile(fmt.Sprintf(`^https?://%s/(.+)/(.+?)(?:.git)?$`, githubInstance))
|
||||||
|
gheSSHRegex := regexp.MustCompile(githubInstance + "[:/](.+)/(.+?)(?:.git)?$")
|
||||||
|
if matches := gheHTTPRegex.FindStringSubmatch(url); matches != nil {
|
||||||
|
return "GitHubEnterprise", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil
|
||||||
|
} else if matches := gheSSHRegex.FindStringSubmatch(url); matches != nil {
|
||||||
|
return "GitHubEnterprise", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return "", url, nil
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO deprecate and remove githubInstance parameter
|
|
||||||
func findGitSlug(url string, _ /* githubInstance */ string) (string, string) {
|
|
||||||
if matches := matchesRegex(url, codeCommitHTTPRegex, codeCommitSSHRegex); matches != nil {
|
|
||||||
return "CodeCommit", matches[2]
|
|
||||||
}
|
|
||||||
|
|
||||||
if matches := matchesRegex(url, githubHTTPRegex, githubSSHRegex); matches != nil {
|
|
||||||
return "GitHub", fmt.Sprintf("%s/%s", matches[1], matches[2])
|
|
||||||
}
|
|
||||||
|
|
||||||
if matches := matchesRegex(url,
|
|
||||||
regexp.MustCompile(`^https?://(?:[^/]+)/([^/]+)/([^/]+)(?:.git)?$`),
|
|
||||||
regexp.MustCompile(`([^/]+)[:/]([^/]+)/([^/]+)(?:.git)?$`),
|
|
||||||
); matches != nil {
|
|
||||||
return "GitHubEnterprise", fmt.Sprintf("%s/%s", matches[1], matches[2])
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", url
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewGitCloneExecutorInput the input for the NewGitCloneExecutor
|
// NewGitCloneExecutorInput the input for the NewGitCloneExecutor
|
||||||
@@ -236,19 +236,13 @@ type NewGitCloneExecutorInput struct {
|
|||||||
Dir string
|
Dir string
|
||||||
Token string
|
Token string
|
||||||
OfflineMode bool
|
OfflineMode bool
|
||||||
|
|
||||||
|
// For Gitea
|
||||||
|
InsecureSkipTLS bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// CloneIfRequired ...
|
// CloneIfRequired ...
|
||||||
func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input NewGitCloneExecutorInput, logger log.FieldLogger) (*git.Repository, error) {
|
func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input NewGitCloneExecutorInput, logger log.FieldLogger) (*git.Repository, error) {
|
||||||
// If the remote URL has changed, remove the directory and clone again.
|
|
||||||
if r, err := git.PlainOpen(input.Dir); err == nil {
|
|
||||||
if remote, err := r.Remote("origin"); err == nil {
|
|
||||||
if len(remote.Config().URLs) > 0 && remote.Config().URLs[0] != input.URL {
|
|
||||||
_ = os.RemoveAll(input.Dir)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
r, err := git.PlainOpen(input.Dir)
|
r, err := git.PlainOpen(input.Dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
var progressWriter io.Writer
|
var progressWriter io.Writer
|
||||||
@@ -258,7 +252,7 @@ func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input
|
|||||||
} else if lgr, ok := logger.(*log.Logger); ok {
|
} else if lgr, ok := logger.(*log.Logger); ok {
|
||||||
progressWriter = lgr.WriterLevel(log.DebugLevel)
|
progressWriter = lgr.WriterLevel(log.DebugLevel)
|
||||||
} else {
|
} else {
|
||||||
log.Errorf("unable to get writer from logger (type=%T)", logger)
|
log.Errorf("Unable to get writer from logger (type=%T)", logger)
|
||||||
progressWriter = os.Stdout
|
progressWriter = os.Stdout
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -266,6 +260,8 @@ func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input
|
|||||||
cloneOptions := git.CloneOptions{
|
cloneOptions := git.CloneOptions{
|
||||||
URL: input.URL,
|
URL: input.URL,
|
||||||
Progress: progressWriter,
|
Progress: progressWriter,
|
||||||
|
|
||||||
|
InsecureSkipTLS: input.InsecureSkipTLS, // For Gitea
|
||||||
}
|
}
|
||||||
if input.Token != "" {
|
if input.Token != "" {
|
||||||
cloneOptions.Auth = &http.BasicAuth{
|
cloneOptions.Auth = &http.BasicAuth{
|
||||||
@@ -276,7 +272,7 @@ func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input
|
|||||||
|
|
||||||
r, err = git.PlainCloneContext(ctx, input.Dir, false, &cloneOptions)
|
r, err = git.PlainCloneContext(ctx, input.Dir, false, &cloneOptions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorf("unable to clone %v %s: %v", input.URL, refName, err)
|
logger.Errorf("Unable to clone %v %s: %v", input.URL, refName, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -290,6 +286,7 @@ func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input
|
|||||||
|
|
||||||
func gitOptions(token string) (fetchOptions git.FetchOptions, pullOptions git.PullOptions) {
|
func gitOptions(token string) (fetchOptions git.FetchOptions, pullOptions git.PullOptions) {
|
||||||
fetchOptions.RefSpecs = []config.RefSpec{"refs/*:refs/*", "HEAD:refs/heads/HEAD"}
|
fetchOptions.RefSpecs = []config.RefSpec{"refs/*:refs/*", "HEAD:refs/heads/HEAD"}
|
||||||
|
fetchOptions.Force = true
|
||||||
pullOptions.Force = true
|
pullOptions.Force = true
|
||||||
|
|
||||||
if token != "" {
|
if token != "" {
|
||||||
@@ -305,16 +302,13 @@ func gitOptions(token string) (fetchOptions git.FetchOptions, pullOptions git.Pu
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGitCloneExecutor creates an executor to clone git repos
|
// NewGitCloneExecutor creates an executor to clone git repos
|
||||||
//
|
|
||||||
//nolint:gocyclo
|
|
||||||
func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
logger.Infof(" \u2601 git clone '%s' # ref=%s", input.URL, input.Ref)
|
logger.Infof(" \u2601 git clone '%s' # ref=%s", input.URL, input.Ref)
|
||||||
logger.Debugf(" cloning %s to %s", input.URL, input.Dir)
|
logger.Debugf(" cloning %s to %s", input.URL, input.Dir)
|
||||||
|
|
||||||
cloneLock.Lock()
|
defer acquireCloneLock(input.Dir)()
|
||||||
defer cloneLock.Unlock()
|
|
||||||
|
|
||||||
refName := plumbing.ReferenceName("refs/heads/" + input.Ref)
|
refName := plumbing.ReferenceName("refs/heads/" + input.Ref)
|
||||||
r, err := CloneIfRequired(ctx, refName, input, logger)
|
r, err := CloneIfRequired(ctx, refName, input, logger)
|
||||||
@@ -327,6 +321,11 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
|||||||
// fetch latest changes
|
// fetch latest changes
|
||||||
fetchOptions, pullOptions := gitOptions(input.Token)
|
fetchOptions, pullOptions := gitOptions(input.Token)
|
||||||
|
|
||||||
|
if input.InsecureSkipTLS { // For Gitea
|
||||||
|
fetchOptions.InsecureSkipTLS = true
|
||||||
|
pullOptions.InsecureSkipTLS = true
|
||||||
|
}
|
||||||
|
|
||||||
if !isOfflineMode {
|
if !isOfflineMode {
|
||||||
err = r.Fetch(&fetchOptions)
|
err = r.Fetch(&fetchOptions)
|
||||||
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
|
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
|
||||||
@@ -337,10 +336,10 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
|||||||
var hash *plumbing.Hash
|
var hash *plumbing.Hash
|
||||||
rev := plumbing.Revision(input.Ref)
|
rev := plumbing.Revision(input.Ref)
|
||||||
if hash, err = r.ResolveRevision(rev); err != nil {
|
if hash, err = r.ResolveRevision(rev); err != nil {
|
||||||
logger.Errorf("unable to resolve %s: %v", input.Ref, err)
|
logger.Errorf("Unable to resolve %s: %v", input.Ref, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if hash.String() != input.Ref && len(input.Ref) >= 4 && strings.HasPrefix(hash.String(), input.Ref) {
|
if hash.String() != input.Ref && strings.HasPrefix(hash.String(), input.Ref) {
|
||||||
return &Error{
|
return &Error{
|
||||||
err: ErrShortRef,
|
err: ErrShortRef,
|
||||||
commit: hash.String(),
|
commit: hash.String(),
|
||||||
@@ -366,7 +365,7 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if hash, err = r.ResolveRevision(rev); err != nil {
|
if hash, err = r.ResolveRevision(rev); err != nil {
|
||||||
logger.Errorf("unable to resolve %s: %v", input.Ref, err)
|
logger.Errorf("Unable to resolve %s: %v", input.Ref, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -387,7 +386,7 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
|||||||
Branch: sourceRef,
|
Branch: sourceRef,
|
||||||
Force: true,
|
Force: true,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
logger.Errorf("unable to checkout %s: %v", sourceRef, err)
|
logger.Errorf("Unable to checkout %s: %v", sourceRef, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -401,7 +400,7 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
|||||||
if hash.String() != input.Ref && refType == "branch" {
|
if hash.String() != input.Ref && refType == "branch" {
|
||||||
logger.Debugf("Provided ref is not a sha. Updating branch ref after pull")
|
logger.Debugf("Provided ref is not a sha. Updating branch ref after pull")
|
||||||
if hash, err = r.ResolveRevision(rev); err != nil {
|
if hash, err = r.ResolveRevision(rev); err != nil {
|
||||||
logger.Errorf("unable to resolve %s: %v", input.Ref, err)
|
logger.Errorf("Unable to resolve %s: %v", input.Ref, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -409,7 +408,7 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
|||||||
Hash: *hash,
|
Hash: *hash,
|
||||||
Force: true,
|
Force: true,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
logger.Errorf("unable to checkout %s: %v", *hash, err)
|
logger.Errorf("Unable to checkout %s: %v", *hash, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -417,7 +416,7 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
|||||||
Mode: git.HardReset,
|
Mode: git.HardReset,
|
||||||
Commit: *hash,
|
Commit: *hash,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
logger.Errorf("unable to reset to %s: %v", hash.String(), err)
|
logger.Errorf("Unable to reset to %s: %v", hash.String(), err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2022 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package git
|
package git
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -6,14 +10,15 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFindGitSlug(t *testing.T) {
|
func TestFindGitSlug(t *testing.T) {
|
||||||
@@ -26,20 +31,20 @@ func TestFindGitSlug(t *testing.T) {
|
|||||||
}{
|
}{
|
||||||
{"https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name", "CodeCommit", "my-repo-name"},
|
{"https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name", "CodeCommit", "my-repo-name"},
|
||||||
{"ssh://git-codecommit.us-west-2.amazonaws.com/v1/repos/my-repo", "CodeCommit", "my-repo"},
|
{"ssh://git-codecommit.us-west-2.amazonaws.com/v1/repos/my-repo", "CodeCommit", "my-repo"},
|
||||||
{"git@github.com:actions-oss/act-cli.git", "GitHub", "actions-oss/act-cli"},
|
{"git@github.com:nektos/act.git", "GitHub", "nektos/act"},
|
||||||
{"git@github.com:actions-oss/act-cli", "GitHub", "actions-oss/act-cli"},
|
{"git@github.com:nektos/act", "GitHub", "nektos/act"},
|
||||||
{"https://github.com/actions-oss/act-cli.git", "GitHub", "actions-oss/act-cli"},
|
{"https://github.com/nektos/act.git", "GitHub", "nektos/act"},
|
||||||
{"http://github.com/actions-oss/act-cli.git", "GitHub", "actions-oss/act-cli"},
|
{"http://github.com/nektos/act.git", "GitHub", "nektos/act"},
|
||||||
{"https://github.com/actions-oss/act-cli", "GitHub", "actions-oss/act-cli"},
|
{"https://github.com/nektos/act", "GitHub", "nektos/act"},
|
||||||
{"http://github.com/actions-oss/act-cli", "GitHub", "actions-oss/act-cli"},
|
{"http://github.com/nektos/act", "GitHub", "nektos/act"},
|
||||||
{"git+ssh://git@github.com/owner/repo.git", "GitHub", "owner/repo"},
|
{"git+ssh://git@github.com/owner/repo.git", "GitHub", "owner/repo"},
|
||||||
{"http://myotherrepo.com/act.git", "", "http://myotherrepo.com/act.git"},
|
{"http://myotherrepo.com/act.git", "", "http://myotherrepo.com/act.git"},
|
||||||
{"https://gitea.com/actions-oss/act-cli.git", "GitHubEnterprise", "actions-oss/act-cli.git"},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range slugTests {
|
for _, tt := range slugTests {
|
||||||
provider, slug := findGitSlug(tt.url, "github.com")
|
provider, slug, err := findGitSlug(tt.url, "github.com")
|
||||||
|
|
||||||
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(tt.provider, provider)
|
assert.Equal(tt.provider, provider)
|
||||||
assert.Equal(tt.slug, slug)
|
assert.Equal(tt.slug, slug)
|
||||||
}
|
}
|
||||||
@@ -76,23 +81,23 @@ func TestFindGitRemoteURL(t *testing.T) {
|
|||||||
basedir := testDir(t)
|
basedir := testDir(t)
|
||||||
gitConfig()
|
gitConfig()
|
||||||
err := gitCmd("init", basedir)
|
err := gitCmd("init", basedir)
|
||||||
require.NoError(t, err)
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
err = cleanGitHooks(basedir)
|
err = cleanGitHooks(basedir)
|
||||||
require.NoError(t, err)
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
remoteURL := "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name"
|
remoteURL := "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name"
|
||||||
err = gitCmd("-C", basedir, "remote", "add", "origin", remoteURL)
|
err = gitCmd("-C", basedir, "remote", "add", "origin", remoteURL)
|
||||||
require.NoError(t, err)
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
u, err := findGitRemoteURL(context.Background(), basedir, "origin")
|
u, err := findGitRemoteURL(context.Background(), basedir, "origin")
|
||||||
require.NoError(t, err)
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(remoteURL, u)
|
assert.Equal(remoteURL, u)
|
||||||
|
|
||||||
remoteURL = "git@github.com/AwesomeOwner/MyAwesomeRepo.git"
|
remoteURL = "git@github.com/AwesomeOwner/MyAwesomeRepo.git"
|
||||||
err = gitCmd("-C", basedir, "remote", "add", "upstream", remoteURL)
|
err = gitCmd("-C", basedir, "remote", "add", "upstream", remoteURL)
|
||||||
require.NoError(t, err)
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
u, err = findGitRemoteURL(context.Background(), basedir, "upstream")
|
u, err = findGitRemoteURL(context.Background(), basedir, "upstream")
|
||||||
require.NoError(t, err)
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(remoteURL, u)
|
assert.Equal(remoteURL, u)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -105,8 +110,8 @@ func TestGitFindRef(t *testing.T) {
|
|||||||
Assert func(t *testing.T, ref string, err error)
|
Assert func(t *testing.T, ref string, err error)
|
||||||
}{
|
}{
|
||||||
"new_repo": {
|
"new_repo": {
|
||||||
Prepare: func(_ *testing.T, _ string) {},
|
Prepare: func(t *testing.T, dir string) {},
|
||||||
Assert: func(t *testing.T, _ string, err error) {
|
Assert: func(t *testing.T, ref string, err error) {
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -209,15 +214,71 @@ func TestGitCloneExecutor(t *testing.T) {
|
|||||||
|
|
||||||
err := clone(context.Background())
|
err := clone(context.Background())
|
||||||
if tt.Err != nil {
|
if tt.Err != nil {
|
||||||
require.Error(t, err)
|
assert.Error(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, tt.Err, err)
|
assert.Equal(t, tt.Err, err)
|
||||||
} else {
|
} else {
|
||||||
require.NoError(t, err)
|
assert.Empty(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGitCloneExecutorNonFastForwardRef(t *testing.T) {
|
||||||
|
// Simulate the scenario where a remote ref (e.g. a GitHub PR head ref) changes
|
||||||
|
// non-fast-forward between two fetches. Before the fix, the fetch used Force=false,
|
||||||
|
// causing go-git to return ErrForceNeeded and short-circuit the checkout.
|
||||||
|
|
||||||
|
gitConfig()
|
||||||
|
|
||||||
|
// Create a bare "remote" repo with an initial commit on main and a feature branch.
|
||||||
|
remoteDir := t.TempDir()
|
||||||
|
require.NoError(t, gitCmd("init", "--bare", "--initial-branch=main", remoteDir))
|
||||||
|
|
||||||
|
// We need a working clone to push commits from.
|
||||||
|
workDir := t.TempDir()
|
||||||
|
require.NoError(t, gitCmd("clone", remoteDir, workDir))
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "checkout", "-b", "main"))
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "commit", "--allow-empty", "-m", "initial"))
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "push", "-u", "origin", "main"))
|
||||||
|
|
||||||
|
// Create a feature branch (simulates refs/pull/N/head).
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "checkout", "-b", "feature"))
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "commit", "--allow-empty", "-m", "feature-1"))
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "push", "origin", "feature"))
|
||||||
|
|
||||||
|
// First clone via the executor — should succeed and cache the repo.
|
||||||
|
cloneDir := t.TempDir()
|
||||||
|
clone := NewGitCloneExecutor(NewGitCloneExecutorInput{
|
||||||
|
URL: remoteDir,
|
||||||
|
Ref: "main",
|
||||||
|
Dir: cloneDir,
|
||||||
|
})
|
||||||
|
require.NoError(t, clone(context.Background()))
|
||||||
|
|
||||||
|
// Now force-push the feature branch to a non-fast-forward commit (simulates
|
||||||
|
// a PR rebase). This makes refs/heads/feature non-fast-forward.
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "checkout", "main"))
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "branch", "-D", "feature"))
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "checkout", "-b", "feature"))
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "commit", "--allow-empty", "-m", "feature-rewritten"))
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "push", "--force", "origin", "feature"))
|
||||||
|
|
||||||
|
// Also advance main so we can verify the clone picks up the new commit.
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "checkout", "main"))
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "commit", "--allow-empty", "-m", "second"))
|
||||||
|
require.NoError(t, gitCmd("-C", workDir, "push", "origin", "main"))
|
||||||
|
|
||||||
|
// Second clone to the same directory — before the fix this returned ErrForceNeeded
|
||||||
|
// and left the working tree at the old commit.
|
||||||
|
err := clone(context.Background())
|
||||||
|
require.NoError(t, err, "fetch with non-fast-forward refs must not fail when Force=true")
|
||||||
|
|
||||||
|
// Verify the working tree was actually updated to the latest main commit.
|
||||||
|
out, err := exec.Command("git", "-C", cloneDir, "log", "--oneline", "-1", "--format=%s").Output()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, "second", strings.TrimSpace(string(out)), "working tree should be at the latest commit")
|
||||||
|
}
|
||||||
|
|
||||||
func gitConfig() {
|
func gitConfig() {
|
||||||
if os.Getenv("GITHUB_ACTIONS") == "true" {
|
if os.Getenv("GITHUB_ACTIONS") == "true" {
|
||||||
var err error
|
var err error
|
||||||
@@ -238,37 +299,67 @@ func gitCmd(args ...string) error {
|
|||||||
err := cmd.Run()
|
err := cmd.Run()
|
||||||
if exitError, ok := err.(*exec.ExitError); ok {
|
if exitError, ok := err.(*exec.ExitError); ok {
|
||||||
if waitStatus, ok := exitError.Sys().(syscall.WaitStatus); ok {
|
if waitStatus, ok := exitError.Sys().(syscall.WaitStatus); ok {
|
||||||
return fmt.Errorf("exit error %d", waitStatus.ExitStatus())
|
return fmt.Errorf("Exit error %d", waitStatus.ExitStatus())
|
||||||
}
|
}
|
||||||
return exitError
|
return exitError
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCloneIfRequired(t *testing.T) {
|
func TestAcquireCloneLock(t *testing.T) {
|
||||||
tempDir := t.TempDir()
|
t.Run("same directory serializes", func(t *testing.T) {
|
||||||
ctx := context.Background()
|
dir := t.TempDir()
|
||||||
|
|
||||||
t.Run("clone", func(t *testing.T) {
|
unlock1 := acquireCloneLock(dir)
|
||||||
repo, err := CloneIfRequired(ctx, "refs/heads/main", NewGitCloneExecutorInput{
|
|
||||||
URL: "https://github.com/actions/checkout",
|
secondAcquired := make(chan struct{})
|
||||||
Dir: tempDir,
|
go func() {
|
||||||
}, common.Logger(ctx))
|
unlock := acquireCloneLock(dir)
|
||||||
require.NoError(t, err)
|
close(secondAcquired)
|
||||||
assert.NotNil(t, repo)
|
unlock()
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-secondAcquired:
|
||||||
|
t.Fatal("second acquire should block while first holds the lock")
|
||||||
|
case <-time.After(50 * time.Millisecond):
|
||||||
|
}
|
||||||
|
|
||||||
|
unlock1()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-secondAcquired:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("second acquire should proceed after first releases the lock")
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("clone different remote", func(t *testing.T) {
|
t.Run("different directories do not block", func(t *testing.T) {
|
||||||
repo, err := CloneIfRequired(ctx, "refs/heads/main", NewGitCloneExecutorInput{
|
dirA := t.TempDir()
|
||||||
URL: "https://github.com/actions/setup-go",
|
dirB := t.TempDir()
|
||||||
Dir: tempDir,
|
|
||||||
}, common.Logger(ctx))
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, repo)
|
|
||||||
|
|
||||||
remote, err := repo.Remote("origin")
|
unlockA := acquireCloneLock(dirA)
|
||||||
require.NoError(t, err)
|
defer unlockA()
|
||||||
require.Len(t, remote.Config().URLs, 1)
|
|
||||||
assert.Equal(t, "https://github.com/actions/setup-go", remote.Config().URLs[0])
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
unlock := acquireCloneLock(dirB)
|
||||||
|
unlock()
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("acquire on a different directory must not block")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("same directory reuses the same mutex", func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
|
||||||
|
v1, _ := cloneLocks.LoadOrStore(dir, &sync.Mutex{})
|
||||||
|
v2, _ := cloneLocks.LoadOrStore(dir, &sync.Mutex{})
|
||||||
|
require.Same(t, v1, v2)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
34
act/common/job_error.go
Normal file
34
act/common/job_error.go
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2021 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
)
|
||||||
|
|
||||||
|
type jobErrorContextKey string
|
||||||
|
|
||||||
|
const jobErrorContextKeyVal = jobErrorContextKey("job.error")
|
||||||
|
|
||||||
|
// JobError returns the job error for current context if any
|
||||||
|
func JobError(ctx context.Context) error {
|
||||||
|
val := ctx.Value(jobErrorContextKeyVal)
|
||||||
|
if val != nil {
|
||||||
|
if container, ok := val.(map[string]error); ok {
|
||||||
|
return container["error"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func SetJobError(ctx context.Context, err error) {
|
||||||
|
ctx.Value(jobErrorContextKeyVal).(map[string]error)["error"] = err
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithJobErrorContainer adds a value to the context as a container for an error
|
||||||
|
func WithJobErrorContainer(ctx context.Context) context.Context {
|
||||||
|
container := map[string]error{}
|
||||||
|
return context.WithValue(ctx, jobErrorContextKeyVal, container)
|
||||||
|
}
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2020 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -1,10 +1,13 @@
|
|||||||
|
// Copyright 2020 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestLineWriter(t *testing.T) {
|
func TestLineWriter(t *testing.T) {
|
||||||
@@ -19,7 +22,7 @@ func TestLineWriter(t *testing.T) {
|
|||||||
assert := assert.New(t)
|
assert := assert.New(t)
|
||||||
write := func(s string) {
|
write := func(s string) {
|
||||||
n, err := lineWriter.Write([]byte(s))
|
n, err := lineWriter.Write([]byte(s))
|
||||||
require.NoError(t, err)
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(len(s), n, s)
|
assert.Equal(len(s), n, s)
|
||||||
}
|
}
|
||||||
|
|
||||||
52
act/common/logger.go
Normal file
52
act/common/logger.go
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
type loggerContextKey string
|
||||||
|
|
||||||
|
const loggerContextKeyVal = loggerContextKey("logrus.FieldLogger")
|
||||||
|
|
||||||
|
// Logger returns the appropriate logger for current context
|
||||||
|
func Logger(ctx context.Context) logrus.FieldLogger {
|
||||||
|
val := ctx.Value(loggerContextKeyVal)
|
||||||
|
if val != nil {
|
||||||
|
if logger, ok := val.(logrus.FieldLogger); ok {
|
||||||
|
return logger
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return logrus.StandardLogger()
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLogger adds a value to the context for the logger
|
||||||
|
func WithLogger(ctx context.Context, logger logrus.FieldLogger) context.Context {
|
||||||
|
return context.WithValue(ctx, loggerContextKeyVal, logger)
|
||||||
|
}
|
||||||
|
|
||||||
|
type loggerHookKey string
|
||||||
|
|
||||||
|
const loggerHookKeyVal = loggerHookKey("logrus.Hook")
|
||||||
|
|
||||||
|
// LoggerHook returns the appropriate logger hook for current context
|
||||||
|
// the hook affects job logger, not global logger
|
||||||
|
func LoggerHook(ctx context.Context) logrus.Hook {
|
||||||
|
val := ctx.Value(loggerHookKeyVal)
|
||||||
|
if val != nil {
|
||||||
|
if hook, ok := val.(logrus.Hook); ok {
|
||||||
|
return hook
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLoggerHook adds a value to the context for the logger hook
|
||||||
|
func WithLoggerHook(ctx context.Context, hook logrus.Hook) context.Context {
|
||||||
|
return context.WithValue(ctx, loggerHookKeyVal, hook)
|
||||||
|
}
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2021 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -1,15 +1,26 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2023 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
"gitea.com/gitea/runner/act/common"
|
||||||
|
|
||||||
"github.com/docker/go-connections/nat"
|
"github.com/docker/go-connections/nat"
|
||||||
"golang.org/x/term"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ExitCodeError reports a non-zero process exit code from a container command.
|
||||||
|
type ExitCodeError int
|
||||||
|
|
||||||
|
func (e ExitCodeError) Error() string {
|
||||||
|
return fmt.Sprintf("Process completed with exit code %d.", int(e))
|
||||||
|
}
|
||||||
|
|
||||||
// NewContainerInput the input for the New function
|
// NewContainerInput the input for the New function
|
||||||
type NewContainerInput struct {
|
type NewContainerInput struct {
|
||||||
Image string
|
Image string
|
||||||
@@ -32,21 +43,26 @@ type NewContainerInput struct {
|
|||||||
NetworkAliases []string
|
NetworkAliases []string
|
||||||
ExposedPorts nat.PortSet
|
ExposedPorts nat.PortSet
|
||||||
PortBindings nat.PortMap
|
PortBindings nat.PortMap
|
||||||
|
|
||||||
|
// Gitea specific
|
||||||
|
AutoRemove bool
|
||||||
|
ValidVolumes []string
|
||||||
}
|
}
|
||||||
|
|
||||||
// FileEntry is a file to copy to a container
|
// FileEntry is a file to copy to a container
|
||||||
type FileEntry struct {
|
type FileEntry struct {
|
||||||
Name string
|
Name string
|
||||||
Mode uint32
|
Mode int64
|
||||||
Body string
|
Body string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Container for managing docker run containers
|
// Container for managing docker run containers
|
||||||
type Container interface {
|
type Container interface {
|
||||||
Create(capAdd []string, capDrop []string) common.Executor
|
Create(capAdd, capDrop []string) common.Executor
|
||||||
|
ConnectToNetwork(name string) common.Executor
|
||||||
Copy(destPath string, files ...*FileEntry) common.Executor
|
Copy(destPath string, files ...*FileEntry) common.Executor
|
||||||
CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error
|
CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error
|
||||||
CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor
|
CopyDir(destPath, srcPath string, useGitIgnore bool) common.Executor
|
||||||
GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error)
|
GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error)
|
||||||
Pull(forcePull bool) common.Executor
|
Pull(forcePull bool) common.Executor
|
||||||
Start(attach bool) common.Executor
|
Start(attach bool) common.Executor
|
||||||
@@ -56,7 +72,6 @@ type Container interface {
|
|||||||
Remove() common.Executor
|
Remove() common.Executor
|
||||||
Close() common.Executor
|
Close() common.Executor
|
||||||
ReplaceLogWriter(io.Writer, io.Writer) (io.Writer, io.Writer)
|
ReplaceLogWriter(io.Writer, io.Writer) (io.Writer, io.Writer)
|
||||||
GetHealth(ctx context.Context) Health
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDockerBuildExecutorInput the input for the NewDockerBuildExecutor function
|
// NewDockerBuildExecutorInput the input for the NewDockerBuildExecutor function
|
||||||
@@ -76,21 +91,3 @@ type NewDockerPullExecutorInput struct {
|
|||||||
Username string
|
Username string
|
||||||
Password string
|
Password string
|
||||||
}
|
}
|
||||||
|
|
||||||
type Health int
|
|
||||||
|
|
||||||
const (
|
|
||||||
HealthStarting Health = iota
|
|
||||||
HealthHealthy
|
|
||||||
HealthUnHealthy
|
|
||||||
)
|
|
||||||
|
|
||||||
var containerAllocateTerminal bool
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
containerAllocateTerminal = term.IsTerminal(int(os.Stdout.Fd()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func SetContainerAllocateTerminal(val bool) {
|
|
||||||
containerAllocateTerminal = val
|
|
||||||
}
|
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2021 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||||
|
|
||||||
package container
|
package container
|
||||||
@@ -6,7 +10,8 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
"gitea.com/gitea/runner/act/common"
|
||||||
|
|
||||||
"github.com/docker/cli/cli/config"
|
"github.com/docker/cli/cli/config"
|
||||||
"github.com/docker/cli/cli/config/credentials"
|
"github.com/docker/cli/cli/config/credentials"
|
||||||
"github.com/docker/docker/api/types/registry"
|
"github.com/docker/docker/api/types/registry"
|
||||||
@@ -1,21 +1,23 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||||
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/build"
|
"gitea.com/gitea/runner/act/common"
|
||||||
"github.com/moby/go-archive"
|
|
||||||
|
|
||||||
|
"github.com/docker/docker/api/types"
|
||||||
|
"github.com/docker/docker/pkg/archive"
|
||||||
|
"github.com/moby/buildkit/frontend/dockerfile/dockerignore"
|
||||||
"github.com/moby/patternmatcher"
|
"github.com/moby/patternmatcher"
|
||||||
"github.com/moby/patternmatcher/ignorefile"
|
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// NewDockerBuildExecutor function to create a run executor for the container
|
// NewDockerBuildExecutor function to create a run executor for the container
|
||||||
@@ -40,7 +42,7 @@ func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor {
|
|||||||
logger.Debugf("Building image from '%v'", input.ContextDir)
|
logger.Debugf("Building image from '%v'", input.ContextDir)
|
||||||
|
|
||||||
tags := []string{input.ImageTag}
|
tags := []string{input.ImageTag}
|
||||||
options := build.ImageBuildOptions{
|
options := types.ImageBuildOptions{
|
||||||
Tags: tags,
|
Tags: tags,
|
||||||
Remove: true,
|
Remove: true,
|
||||||
Platform: input.Platform,
|
Platform: input.Platform,
|
||||||
@@ -62,16 +64,19 @@ func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor {
|
|||||||
logger.Debugf("Creating image from context dir '%s' with tag '%s' and platform '%s'", input.ContextDir, input.ImageTag, input.Platform)
|
logger.Debugf("Creating image from context dir '%s' with tag '%s' and platform '%s'", input.ContextDir, input.ImageTag, input.Platform)
|
||||||
resp, err := cli.ImageBuild(ctx, buildContext, options)
|
resp, err := cli.ImageBuild(ctx, buildContext, options)
|
||||||
|
|
||||||
err = errors.Join(err, logDockerResponse(logger, resp.Body, err != nil))
|
err = logDockerResponse(logger, resp.Body, err != nil)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func createBuildContext(ctx context.Context, contextDir string, relDockerfile string) (io.ReadCloser, error) {
|
func createBuildContext(ctx context.Context, contextDir, relDockerfile string) (io.ReadCloser, error) {
|
||||||
common.Logger(ctx).Debugf("Creating archive for build context dir '%s' with relative dockerfile '%s'", contextDir, relDockerfile)
|
common.Logger(ctx).Debugf("Creating archive for build context dir '%s' with relative dockerfile '%s'", contextDir, relDockerfile)
|
||||||
|
|
||||||
// And canonicalize dockerfile name to a platform-independent one
|
// And canonicalize dockerfile name to a platform-independent one
|
||||||
relDockerfile = filepath.ToSlash(relDockerfile)
|
relDockerfile = archive.CanonicalTarNameForPath(relDockerfile)
|
||||||
|
|
||||||
f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
|
f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
|
||||||
if err != nil && !os.IsNotExist(err) {
|
if err != nil && !os.IsNotExist(err) {
|
||||||
@@ -81,7 +86,7 @@ func createBuildContext(ctx context.Context, contextDir string, relDockerfile st
|
|||||||
|
|
||||||
var excludes []string
|
var excludes []string
|
||||||
if err == nil {
|
if err == nil {
|
||||||
excludes, err = ignorefile.ReadAll(f)
|
excludes, err = dockerignore.ReadAll(f) //nolint:staticcheck // pre-existing issue from nektos/act
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2022 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||||
|
|
||||||
// This file is exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts.go
|
// This file is exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts.go
|
||||||
@@ -7,7 +11,7 @@
|
|||||||
// See DOCKER_LICENSE for the full license text.
|
// See DOCKER_LICENSE for the full license text.
|
||||||
//
|
//
|
||||||
|
|
||||||
//nolint:errcheck,depguard,unused
|
//nolint:errcheck,depguard,unused // verbatim copy from docker/cli with minimal changes
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -107,7 +111,7 @@ type containerOptions struct {
|
|||||||
cpusetCpus string
|
cpusetCpus string
|
||||||
cpusetMems string
|
cpusetMems string
|
||||||
blkioWeight uint16
|
blkioWeight uint16
|
||||||
ioMaxBandwidth uint64
|
ioMaxBandwidth opts.MemBytes
|
||||||
ioMaxIOps uint64
|
ioMaxIOps uint64
|
||||||
swappiness int64
|
swappiness int64
|
||||||
netMode opts.NetworkOpt
|
netMode opts.NetworkOpt
|
||||||
@@ -284,7 +288,7 @@ func addFlags(flags *pflag.FlagSet) *containerOptions {
|
|||||||
flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device")
|
flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device")
|
||||||
flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device")
|
flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device")
|
||||||
flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device")
|
flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device")
|
||||||
flags.Uint64Var(&copts.ioMaxBandwidth, "io-maxbandwidth", 0, "Maximum IO bandwidth limit for the system drive (Windows only)")
|
flags.Var(&copts.ioMaxBandwidth, "io-maxbandwidth", "Maximum IO bandwidth limit for the system drive (Windows only)")
|
||||||
flags.SetAnnotation("io-maxbandwidth", "ostype", []string{"windows"})
|
flags.SetAnnotation("io-maxbandwidth", "ostype", []string{"windows"})
|
||||||
flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)")
|
flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)")
|
||||||
flags.SetAnnotation("io-maxiops", "ostype", []string{"windows"})
|
flags.SetAnnotation("io-maxiops", "ostype", []string{"windows"})
|
||||||
@@ -320,8 +324,6 @@ type containerConfig struct {
|
|||||||
// parse parses the args for the specified command and generates a Config,
|
// parse parses the args for the specified command and generates a Config,
|
||||||
// a HostConfig and returns them with the specified command.
|
// a HostConfig and returns them with the specified command.
|
||||||
// If the specified args are not valid, it will return an error.
|
// If the specified args are not valid, it will return an error.
|
||||||
//
|
|
||||||
//nolint:gocyclo
|
|
||||||
func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*containerConfig, error) {
|
func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*containerConfig, error) {
|
||||||
var (
|
var (
|
||||||
attachStdin = copts.attach.Get("stdin")
|
attachStdin = copts.attach.Get("stdin")
|
||||||
@@ -387,7 +389,7 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
|||||||
|
|
||||||
// Can't evaluate options passed into --tmpfs until we actually mount
|
// Can't evaluate options passed into --tmpfs until we actually mount
|
||||||
tmpfs := make(map[string]string)
|
tmpfs := make(map[string]string)
|
||||||
for _, t := range copts.tmpfs.GetSlice() {
|
for _, t := range copts.tmpfs.GetAll() {
|
||||||
if arr := strings.SplitN(t, ":", 2); len(arr) > 1 {
|
if arr := strings.SplitN(t, ":", 2); len(arr) > 1 {
|
||||||
tmpfs[arr[0]] = arr[1]
|
tmpfs[arr[0]] = arr[1]
|
||||||
} else {
|
} else {
|
||||||
@@ -411,7 +413,7 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
|||||||
entrypoint = []string{""}
|
entrypoint = []string{""}
|
||||||
}
|
}
|
||||||
|
|
||||||
publishOpts := copts.publish.GetSlice()
|
publishOpts := copts.publish.GetAll()
|
||||||
var (
|
var (
|
||||||
ports map[nat.Port]struct{}
|
ports map[nat.Port]struct{}
|
||||||
portBindings map[nat.Port][]nat.PortBinding
|
portBindings map[nat.Port][]nat.PortBinding
|
||||||
@@ -429,7 +431,7 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Merge in exposed ports to the map of published ports
|
// Merge in exposed ports to the map of published ports
|
||||||
for _, e := range copts.expose.GetSlice() {
|
for _, e := range copts.expose.GetAll() {
|
||||||
if strings.Contains(e, ":") {
|
if strings.Contains(e, ":") {
|
||||||
return nil, errors.Errorf("invalid port format for --expose: %s", e)
|
return nil, errors.Errorf("invalid port format for --expose: %s", e)
|
||||||
}
|
}
|
||||||
@@ -458,7 +460,7 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
|||||||
// parsing flags, we haven't yet sent a _ping to the daemon to determine
|
// parsing flags, we haven't yet sent a _ping to the daemon to determine
|
||||||
// what operating system it is.
|
// what operating system it is.
|
||||||
deviceMappings := []container.DeviceMapping{}
|
deviceMappings := []container.DeviceMapping{}
|
||||||
for _, device := range copts.devices.GetSlice() {
|
for _, device := range copts.devices.GetAll() {
|
||||||
var (
|
var (
|
||||||
validated string
|
validated string
|
||||||
deviceMapping container.DeviceMapping
|
deviceMapping container.DeviceMapping
|
||||||
@@ -476,13 +478,13 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
|||||||
}
|
}
|
||||||
|
|
||||||
// collect all the environment variables for the container
|
// collect all the environment variables for the container
|
||||||
envVariables, err := opts.ReadKVEnvStrings(copts.envFile.GetSlice(), copts.env.GetSlice())
|
envVariables, err := opts.ReadKVEnvStrings(copts.envFile.GetAll(), copts.env.GetAll())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// collect all the labels for the container
|
// collect all the labels for the container
|
||||||
labels, err := opts.ReadKVStrings(copts.labelsFile.GetSlice(), copts.labels.GetSlice())
|
labels, err := opts.ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -512,19 +514,19 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetSlice())
|
loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetAll())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
securityOpts, err := parseSecurityOpts(copts.securityOpt.GetSlice())
|
securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
securityOpts, maskedPaths, readonlyPaths := parseSystemPaths(securityOpts)
|
securityOpts, maskedPaths, readonlyPaths := parseSystemPaths(securityOpts)
|
||||||
|
|
||||||
storageOpts, err := parseStorageOpts(copts.storageOpt.GetSlice())
|
storageOpts, err := parseStorageOpts(copts.storageOpt.GetAll())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -596,9 +598,9 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
|||||||
BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(),
|
BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(),
|
||||||
BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(),
|
BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(),
|
||||||
IOMaximumIOps: copts.ioMaxIOps,
|
IOMaximumIOps: copts.ioMaxIOps,
|
||||||
IOMaximumBandwidth: copts.ioMaxBandwidth,
|
IOMaximumBandwidth: uint64(copts.ioMaxBandwidth),
|
||||||
Ulimits: copts.ulimits.GetList(),
|
Ulimits: copts.ulimits.GetList(),
|
||||||
DeviceCgroupRules: copts.deviceCgroupRules.GetSlice(),
|
DeviceCgroupRules: copts.deviceCgroupRules.GetAll(),
|
||||||
Devices: deviceMappings,
|
Devices: deviceMappings,
|
||||||
DeviceRequests: copts.gpus.Value(),
|
DeviceRequests: copts.gpus.Value(),
|
||||||
}
|
}
|
||||||
@@ -639,7 +641,7 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
|||||||
AutoRemove: copts.autoRemove,
|
AutoRemove: copts.autoRemove,
|
||||||
Privileged: copts.privileged,
|
Privileged: copts.privileged,
|
||||||
PortBindings: portBindings,
|
PortBindings: portBindings,
|
||||||
Links: copts.links.GetSlice(),
|
Links: copts.links.GetAll(),
|
||||||
PublishAllPorts: copts.publishAll,
|
PublishAllPorts: copts.publishAll,
|
||||||
// Make sure the dns fields are never nil.
|
// Make sure the dns fields are never nil.
|
||||||
// New containers don't ever have those fields nil,
|
// New containers don't ever have those fields nil,
|
||||||
@@ -649,17 +651,17 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
|||||||
DNS: copts.dns.GetAllOrEmpty(),
|
DNS: copts.dns.GetAllOrEmpty(),
|
||||||
DNSSearch: copts.dnsSearch.GetAllOrEmpty(),
|
DNSSearch: copts.dnsSearch.GetAllOrEmpty(),
|
||||||
DNSOptions: copts.dnsOptions.GetAllOrEmpty(),
|
DNSOptions: copts.dnsOptions.GetAllOrEmpty(),
|
||||||
ExtraHosts: copts.extraHosts.GetSlice(),
|
ExtraHosts: copts.extraHosts.GetAll(),
|
||||||
VolumesFrom: copts.volumesFrom.GetSlice(),
|
VolumesFrom: copts.volumesFrom.GetAll(),
|
||||||
IpcMode: container.IpcMode(copts.ipcMode),
|
IpcMode: container.IpcMode(copts.ipcMode),
|
||||||
NetworkMode: container.NetworkMode(copts.netMode.NetworkMode()),
|
NetworkMode: container.NetworkMode(copts.netMode.NetworkMode()),
|
||||||
PidMode: pidMode,
|
PidMode: pidMode,
|
||||||
UTSMode: utsMode,
|
UTSMode: utsMode,
|
||||||
UsernsMode: usernsMode,
|
UsernsMode: usernsMode,
|
||||||
CgroupnsMode: cgroupnsMode,
|
CgroupnsMode: cgroupnsMode,
|
||||||
CapAdd: strslice.StrSlice(copts.capAdd.GetSlice()),
|
CapAdd: strslice.StrSlice(copts.capAdd.GetAll()),
|
||||||
CapDrop: strslice.StrSlice(copts.capDrop.GetSlice()),
|
CapDrop: strslice.StrSlice(copts.capDrop.GetAll()),
|
||||||
GroupAdd: copts.groupAdd.GetSlice(),
|
GroupAdd: copts.groupAdd.GetAll(),
|
||||||
RestartPolicy: restartPolicy,
|
RestartPolicy: restartPolicy,
|
||||||
SecurityOpt: securityOpts,
|
SecurityOpt: securityOpts,
|
||||||
StorageOpt: storageOpts,
|
StorageOpt: storageOpts,
|
||||||
@@ -678,7 +680,7 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
|||||||
}
|
}
|
||||||
|
|
||||||
if copts.autoRemove && !hostConfig.RestartPolicy.IsNone() {
|
if copts.autoRemove && !hostConfig.RestartPolicy.IsNone() {
|
||||||
return nil, errors.Errorf("conflicting options: --restart and --rm")
|
return nil, errors.Errorf("Conflicting options: --restart and --rm")
|
||||||
}
|
}
|
||||||
|
|
||||||
// only set this value if the user provided the flag, else it should default to nil
|
// only set this value if the user provided the flag, else it should default to nil
|
||||||
@@ -777,11 +779,11 @@ func applyContainerOptions(n *opts.NetworkAttachmentOpts, copts *containerOption
|
|||||||
}
|
}
|
||||||
if copts.aliases.Len() > 0 {
|
if copts.aliases.Len() > 0 {
|
||||||
n.Aliases = make([]string, copts.aliases.Len())
|
n.Aliases = make([]string, copts.aliases.Len())
|
||||||
copy(n.Aliases, copts.aliases.GetSlice())
|
copy(n.Aliases, copts.aliases.GetAll())
|
||||||
}
|
}
|
||||||
if copts.links.Len() > 0 {
|
if copts.links.Len() > 0 {
|
||||||
n.Links = make([]string, copts.links.Len())
|
n.Links = make([]string, copts.links.Len())
|
||||||
copy(n.Links, copts.links.GetSlice())
|
copy(n.Links, copts.links.GetAll())
|
||||||
}
|
}
|
||||||
if copts.ipv4Address != "" {
|
if copts.ipv4Address != "" {
|
||||||
n.IPv4Address = copts.ipv4Address
|
n.IPv4Address = copts.ipv4Address
|
||||||
@@ -793,7 +795,7 @@ func applyContainerOptions(n *opts.NetworkAttachmentOpts, copts *containerOption
|
|||||||
// TODO should linkLocalIPs be added to the _first_ network only, or to _all_ networks? (should this be a per-network option as well?)
|
// TODO should linkLocalIPs be added to the _first_ network only, or to _all_ networks? (should this be a per-network option as well?)
|
||||||
if copts.linkLocalIPs.Len() > 0 {
|
if copts.linkLocalIPs.Len() > 0 {
|
||||||
n.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len())
|
n.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len())
|
||||||
copy(n.LinkLocalIPs, copts.linkLocalIPs.GetSlice())
|
copy(n.LinkLocalIPs, copts.linkLocalIPs.GetAll())
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -867,7 +869,7 @@ func parseSecurityOpts(securityOpts []string) ([]string, error) {
|
|||||||
if strings.Contains(opt, ":") {
|
if strings.Contains(opt, ":") {
|
||||||
con = strings.SplitN(opt, ":", 2)
|
con = strings.SplitN(opt, ":", 2)
|
||||||
} else {
|
} else {
|
||||||
return securityOpts, errors.Errorf("invalid --security-opt: %q", opt)
|
return securityOpts, errors.Errorf("Invalid --security-opt: %q", opt)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if con[0] == "seccomp" && con[1] != "unconfined" {
|
if con[0] == "seccomp" && con[1] != "unconfined" {
|
||||||
@@ -1004,7 +1006,7 @@ func validDeviceMode(mode string) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// validateDevice validates a path for devices
|
// validateDevice validates a path for devices
|
||||||
func validateDevice(val string, serverOS string) (string, error) {
|
func validateDevice(val, serverOS string) (string, error) {
|
||||||
switch serverOS {
|
switch serverOS {
|
||||||
case "linux":
|
case "linux":
|
||||||
return validateLinuxPath(val, validDeviceMode)
|
return validateLinuxPath(val, validDeviceMode)
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2022 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
// This file is exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts_test.go with:
|
// This file is exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts_test.go with:
|
||||||
// * appended with license information
|
// * appended with license information
|
||||||
// * commented out case 'invalid-mixed-network-types' in test TestParseNetworkConfig
|
// * commented out case 'invalid-mixed-network-types' in test TestParseNetworkConfig
|
||||||
@@ -6,7 +10,7 @@
|
|||||||
// See DOCKER_LICENSE for the full license text.
|
// See DOCKER_LICENSE for the full license text.
|
||||||
//
|
//
|
||||||
|
|
||||||
//nolint:whitespace,depguard,dupl,gocritic
|
//nolint:depguard,gocritic // verbatim copy from docker/cli tests
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -23,7 +27,6 @@ import (
|
|||||||
"github.com/docker/go-connections/nat"
|
"github.com/docker/go-connections/nat"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"gotest.tools/v3/assert"
|
"gotest.tools/v3/assert"
|
||||||
is "gotest.tools/v3/assert/cmp"
|
is "gotest.tools/v3/assert/cmp"
|
||||||
"gotest.tools/v3/skip"
|
"gotest.tools/v3/skip"
|
||||||
@@ -74,21 +77,21 @@ func setupRunFlags() (*pflag.FlagSet, *containerOptions) {
|
|||||||
return flags, copts
|
return flags, copts
|
||||||
}
|
}
|
||||||
|
|
||||||
func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig) {
|
func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
config, hostConfig, networkingConfig, err := parseRun(append(strings.Split(args, " "), "ubuntu", "bash"))
|
config, hostConfig, _, err := parseRun(append(strings.Split(args, " "), "ubuntu", "bash"))
|
||||||
assert.NilError(t, err)
|
assert.NilError(t, err)
|
||||||
return config, hostConfig, networkingConfig
|
return config, hostConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseRunLinks(t *testing.T) {
|
func TestParseRunLinks(t *testing.T) {
|
||||||
if _, hostConfig, _ := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
|
if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
|
||||||
t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
|
t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
|
||||||
}
|
}
|
||||||
if _, hostConfig, _ := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
|
if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
|
||||||
t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
|
t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
|
||||||
}
|
}
|
||||||
if _, hostConfig, _ := mustParse(t, ""); len(hostConfig.Links) != 0 {
|
if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
|
||||||
t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
|
t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -137,7 +140,7 @@ func TestParseRunAttach(t *testing.T) {
|
|||||||
}
|
}
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
t.Run(tc.input, func(t *testing.T) {
|
t.Run(tc.input, func(t *testing.T) {
|
||||||
config, _, _ := mustParse(t, tc.input)
|
config, _ := mustParse(t, tc.input)
|
||||||
assert.Equal(t, config.AttachStdin, tc.expected.AttachStdin)
|
assert.Equal(t, config.AttachStdin, tc.expected.AttachStdin)
|
||||||
assert.Equal(t, config.AttachStdout, tc.expected.AttachStdout)
|
assert.Equal(t, config.AttachStdout, tc.expected.AttachStdout)
|
||||||
assert.Equal(t, config.AttachStderr, tc.expected.AttachStderr)
|
assert.Equal(t, config.AttachStderr, tc.expected.AttachStderr)
|
||||||
@@ -186,16 +189,15 @@ func TestParseRunWithInvalidArgs(t *testing.T) {
|
|||||||
flags, _ := setupRunFlags()
|
flags, _ := setupRunFlags()
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
t.Run(strings.Join(tc.args, " "), func(t *testing.T) {
|
t.Run(strings.Join(tc.args, " "), func(t *testing.T) {
|
||||||
require.Error(t, flags.Parse(tc.args), tc.error)
|
assert.Error(t, flags.Parse(tc.args), tc.error)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:gocyclo
|
|
||||||
func TestParseWithVolumes(t *testing.T) {
|
func TestParseWithVolumes(t *testing.T) {
|
||||||
// A single volume
|
// A single volume
|
||||||
arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`})
|
arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`})
|
||||||
if config, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds != nil {
|
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil {
|
||||||
t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds)
|
t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds)
|
||||||
} else if _, exists := config.Volumes[arr[0]]; !exists {
|
} else if _, exists := config.Volumes[arr[0]]; !exists {
|
||||||
t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes)
|
t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes)
|
||||||
@@ -203,7 +205,7 @@ func TestParseWithVolumes(t *testing.T) {
|
|||||||
|
|
||||||
// Two volumes
|
// Two volumes
|
||||||
arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`})
|
arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`})
|
||||||
if config, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds != nil {
|
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil {
|
||||||
t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds)
|
t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds)
|
||||||
} else if _, exists := config.Volumes[arr[0]]; !exists {
|
} else if _, exists := config.Volumes[arr[0]]; !exists {
|
||||||
t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes)
|
t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes)
|
||||||
@@ -213,13 +215,13 @@ func TestParseWithVolumes(t *testing.T) {
|
|||||||
|
|
||||||
// A single bind mount
|
// A single bind mount
|
||||||
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`})
|
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`})
|
||||||
if config, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] {
|
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] {
|
||||||
t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes)
|
t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Two bind mounts.
|
// Two bind mounts.
|
||||||
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`})
|
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`})
|
||||||
if _, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
||||||
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -228,26 +230,26 @@ func TestParseWithVolumes(t *testing.T) {
|
|||||||
arr, tryit = setupPlatformVolume(
|
arr, tryit = setupPlatformVolume(
|
||||||
[]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`},
|
[]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`},
|
||||||
[]string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`})
|
[]string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`})
|
||||||
if _, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
||||||
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Similar to previous test but with alternate modes which are only supported by Linux
|
// Similar to previous test but with alternate modes which are only supported by Linux
|
||||||
if runtime.GOOS != "windows" {
|
if runtime.GOOS != "windows" {
|
||||||
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{})
|
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{})
|
||||||
if _, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
||||||
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
||||||
}
|
}
|
||||||
|
|
||||||
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{})
|
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{})
|
||||||
if _, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
||||||
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// One bind mount and one volume
|
// One bind mount and one volume
|
||||||
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`})
|
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`})
|
||||||
if config, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] {
|
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] {
|
||||||
t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds)
|
t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds)
|
||||||
} else if _, exists := config.Volumes[arr[1]]; !exists {
|
} else if _, exists := config.Volumes[arr[1]]; !exists {
|
||||||
t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes)
|
t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes)
|
||||||
@@ -256,7 +258,7 @@ func TestParseWithVolumes(t *testing.T) {
|
|||||||
// Root to non-c: drive letter (Windows specific)
|
// Root to non-c: drive letter (Windows specific)
|
||||||
if runtime.GOOS == "windows" {
|
if runtime.GOOS == "windows" {
|
||||||
arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`})
|
arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`})
|
||||||
if config, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 {
|
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 {
|
||||||
t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0])
|
t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -266,7 +268,7 @@ func TestParseWithVolumes(t *testing.T) {
|
|||||||
// spec and a Windows style spec. Depending on the platform being unit tested,
|
// spec and a Windows style spec. Depending on the platform being unit tested,
|
||||||
// it returns one of them, along with a volume string that would be passed
|
// it returns one of them, along with a volume string that would be passed
|
||||||
// on the docker CLI (e.g. -v /bar -v /foo).
|
// on the docker CLI (e.g. -v /bar -v /foo).
|
||||||
func setupPlatformVolume(u []string, w []string) ([]string, string) {
|
func setupPlatformVolume(u, w []string) ([]string, string) {
|
||||||
var a []string
|
var a []string
|
||||||
if runtime.GOOS == "windows" {
|
if runtime.GOOS == "windows" {
|
||||||
a = w
|
a = w
|
||||||
@@ -299,9 +301,9 @@ func TestParseWithMacAddress(t *testing.T) {
|
|||||||
if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" {
|
if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" {
|
||||||
t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err)
|
t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err)
|
||||||
}
|
}
|
||||||
config, hostConfig, _ := mustParse(t, validMacAddress)
|
if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" { //nolint:staticcheck // pre-existing issue from nektos/act
|
||||||
t.Logf("MacAddress: %+v\n", hostConfig)
|
t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) //nolint:staticcheck // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, "92:d0:c6:0a:29:33", config.MacAddress) //nolint:staticcheck
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRunFlagsParseWithMemory(t *testing.T) {
|
func TestRunFlagsParseWithMemory(t *testing.T) {
|
||||||
@@ -310,7 +312,7 @@ func TestRunFlagsParseWithMemory(t *testing.T) {
|
|||||||
err := flags.Parse(args)
|
err := flags.Parse(args)
|
||||||
assert.ErrorContains(t, err, `invalid argument "invalid" for "-m, --memory" flag`)
|
assert.ErrorContains(t, err, `invalid argument "invalid" for "-m, --memory" flag`)
|
||||||
|
|
||||||
_, hostconfig, _ := mustParse(t, "--memory=1G")
|
_, hostconfig := mustParse(t, "--memory=1G")
|
||||||
assert.Check(t, is.Equal(int64(1073741824), hostconfig.Memory))
|
assert.Check(t, is.Equal(int64(1073741824), hostconfig.Memory))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -320,10 +322,10 @@ func TestParseWithMemorySwap(t *testing.T) {
|
|||||||
err := flags.Parse(args)
|
err := flags.Parse(args)
|
||||||
assert.ErrorContains(t, err, `invalid argument "invalid" for "--memory-swap" flag`)
|
assert.ErrorContains(t, err, `invalid argument "invalid" for "--memory-swap" flag`)
|
||||||
|
|
||||||
_, hostconfig, _ := mustParse(t, "--memory-swap=1G")
|
_, hostconfig := mustParse(t, "--memory-swap=1G")
|
||||||
assert.Check(t, is.Equal(int64(1073741824), hostconfig.MemorySwap))
|
assert.Check(t, is.Equal(int64(1073741824), hostconfig.MemorySwap))
|
||||||
|
|
||||||
_, hostconfig, _ = mustParse(t, "--memory-swap=-1")
|
_, hostconfig = mustParse(t, "--memory-swap=-1")
|
||||||
assert.Check(t, is.Equal(int64(-1), hostconfig.MemorySwap))
|
assert.Check(t, is.Equal(int64(-1), hostconfig.MemorySwap))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -338,14 +340,14 @@ func TestParseHostname(t *testing.T) {
|
|||||||
hostnameWithDomain := "--hostname=hostname.domainname"
|
hostnameWithDomain := "--hostname=hostname.domainname"
|
||||||
hostnameWithDomainTld := "--hostname=hostname.domainname.tld"
|
hostnameWithDomainTld := "--hostname=hostname.domainname.tld"
|
||||||
for hostname, expectedHostname := range validHostnames {
|
for hostname, expectedHostname := range validHostnames {
|
||||||
if config, _, _ := mustParse(t, "--hostname="+hostname); config.Hostname != expectedHostname {
|
if config, _ := mustParse(t, "--hostname="+hostname); config.Hostname != expectedHostname {
|
||||||
t.Fatalf("Expected the config to have 'hostname' as %q, got %q", expectedHostname, config.Hostname)
|
t.Fatalf("Expected the config to have 'hostname' as %q, got %q", expectedHostname, config.Hostname)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if config, _, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" || config.Domainname != "" {
|
if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" || config.Domainname != "" {
|
||||||
t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got %q", config.Hostname)
|
t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got %q", config.Hostname)
|
||||||
}
|
}
|
||||||
if config, _, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" || config.Domainname != "" {
|
if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" || config.Domainname != "" {
|
||||||
t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got %q", config.Hostname)
|
t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got %q", config.Hostname)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -359,14 +361,14 @@ func TestParseHostnameDomainname(t *testing.T) {
|
|||||||
"domainname-63-bytes-long-should-be-valid-and-without-any-errors": "domainname-63-bytes-long-should-be-valid-and-without-any-errors",
|
"domainname-63-bytes-long-should-be-valid-and-without-any-errors": "domainname-63-bytes-long-should-be-valid-and-without-any-errors",
|
||||||
}
|
}
|
||||||
for domainname, expectedDomainname := range validDomainnames {
|
for domainname, expectedDomainname := range validDomainnames {
|
||||||
if config, _, _ := mustParse(t, "--domainname="+domainname); config.Domainname != expectedDomainname {
|
if config, _ := mustParse(t, "--domainname="+domainname); config.Domainname != expectedDomainname {
|
||||||
t.Fatalf("Expected the config to have 'domainname' as %q, got %q", expectedDomainname, config.Domainname)
|
t.Fatalf("Expected the config to have 'domainname' as %q, got %q", expectedDomainname, config.Domainname)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if config, _, _ := mustParse(t, "--hostname=some.prefix --domainname=domainname"); config.Hostname != "some.prefix" || config.Domainname != "domainname" {
|
if config, _ := mustParse(t, "--hostname=some.prefix --domainname=domainname"); config.Hostname != "some.prefix" || config.Domainname != "domainname" {
|
||||||
t.Fatalf("Expected the config to have 'hostname' as 'some.prefix' and 'domainname' as 'domainname', got %q and %q", config.Hostname, config.Domainname)
|
t.Fatalf("Expected the config to have 'hostname' as 'some.prefix' and 'domainname' as 'domainname', got %q and %q", config.Hostname, config.Domainname)
|
||||||
}
|
}
|
||||||
if config, _, _ := mustParse(t, "--hostname=another-prefix --domainname=domainname.tld"); config.Hostname != "another-prefix" || config.Domainname != "domainname.tld" {
|
if config, _ := mustParse(t, "--hostname=another-prefix --domainname=domainname.tld"); config.Hostname != "another-prefix" || config.Domainname != "domainname.tld" {
|
||||||
t.Fatalf("Expected the config to have 'hostname' as 'another-prefix' and 'domainname' as 'domainname.tld', got %q and %q", config.Hostname, config.Domainname)
|
t.Fatalf("Expected the config to have 'hostname' as 'another-prefix' and 'domainname' as 'domainname.tld', got %q and %q", config.Hostname, config.Domainname)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -375,8 +377,6 @@ func TestParseWithExpose(t *testing.T) {
|
|||||||
invalids := map[string]string{
|
invalids := map[string]string{
|
||||||
":": "invalid port format for --expose: :",
|
":": "invalid port format for --expose: :",
|
||||||
"8080:9090": "invalid port format for --expose: 8080:9090",
|
"8080:9090": "invalid port format for --expose: 8080:9090",
|
||||||
"/tcp": "invalid range format for --expose: /tcp, error: empty string specified for ports",
|
|
||||||
"/udp": "invalid range format for --expose: /udp, error: empty string specified for ports",
|
|
||||||
"NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
|
"NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
|
||||||
"NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
|
"NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
|
||||||
"8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
|
"8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
|
||||||
@@ -604,7 +604,7 @@ func TestParseNetworkConfig(t *testing.T) {
|
|||||||
_, hConfig, nwConfig, err := parseRun(tc.flags)
|
_, hConfig, nwConfig, err := parseRun(tc.flags)
|
||||||
|
|
||||||
if tc.expectedErr != "" {
|
if tc.expectedErr != "" {
|
||||||
require.Error(t, err, tc.expectedErr)
|
assert.Error(t, err, tc.expectedErr)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -631,7 +631,7 @@ func TestParseModes(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// uts ko
|
// uts ko
|
||||||
_, _, _, err = parseRun([]string{"--uts=container:", "img", "cmd"}) //nolint:dogsled
|
_, _, _, err = parseRun([]string{"--uts=container:", "img", "cmd"})
|
||||||
assert.ErrorContains(t, err, "--uts: invalid UTS mode")
|
assert.ErrorContains(t, err, "--uts: invalid UTS mode")
|
||||||
|
|
||||||
// uts ok
|
// uts ok
|
||||||
@@ -691,8 +691,8 @@ func TestParseRestartPolicy(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestParseRestartPolicyAutoRemove(t *testing.T) {
|
func TestParseRestartPolicyAutoRemove(t *testing.T) {
|
||||||
expected := "conflicting options: --restart and --rm"
|
expected := "Conflicting options: --restart and --rm"
|
||||||
_, _, _, err := parseRun([]string{"--rm", "--restart=always", "img", "cmd"}) //nolint:dogsled
|
_, _, _, err := parseRun([]string{"--rm", "--restart=always", "img", "cmd"})
|
||||||
if err == nil || err.Error() != expected {
|
if err == nil || err.Error() != expected {
|
||||||
t.Fatalf("Expected error %v, but got none", expected)
|
t.Fatalf("Expected error %v, but got none", expected)
|
||||||
}
|
}
|
||||||
@@ -752,7 +752,7 @@ func TestParseLoggingOpts(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseEnvfileVariables(t *testing.T) {
|
func TestParseEnvfileVariables(t *testing.T) { //nolint:dupl // pre-existing issue from nektos/act
|
||||||
e := "open nonexistent: no such file or directory"
|
e := "open nonexistent: no such file or directory"
|
||||||
if runtime.GOOS == "windows" {
|
if runtime.GOOS == "windows" {
|
||||||
e = "open nonexistent: The system cannot find the file specified."
|
e = "open nonexistent: The system cannot find the file specified."
|
||||||
@@ -795,7 +795,7 @@ func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UTF16 with BOM
|
// UTF16 with BOM
|
||||||
e := "invalid env file"
|
e := "contains invalid utf8 bytes at line"
|
||||||
if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) {
|
if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) {
|
||||||
t.Fatalf("Expected an error with message '%s', got %v", e, err)
|
t.Fatalf("Expected an error with message '%s', got %v", e, err)
|
||||||
}
|
}
|
||||||
@@ -805,7 +805,7 @@ func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseLabelfileVariables(t *testing.T) {
|
func TestParseLabelfileVariables(t *testing.T) { //nolint:dupl // pre-existing issue from nektos/act
|
||||||
e := "open nonexistent: no such file or directory"
|
e := "open nonexistent: no such file or directory"
|
||||||
if runtime.GOOS == "windows" {
|
if runtime.GOOS == "windows" {
|
||||||
e = "open nonexistent: The system cannot find the file specified."
|
e = "open nonexistent: The system cannot find the file specified."
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||||
|
|
||||||
package container
|
package container
|
||||||
@@ -6,21 +10,21 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
cerrdefs "github.com/containerd/errdefs"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/image"
|
"github.com/docker/docker/client"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ImageExistsLocally returns a boolean indicating if an image with the
|
// ImageExistsLocally returns a boolean indicating if an image with the
|
||||||
// requested name, tag and architecture exists in the local docker image store
|
// requested name, tag and architecture exists in the local docker image store
|
||||||
func ImageExistsLocally(ctx context.Context, imageName string, platform string) (bool, error) {
|
func ImageExistsLocally(ctx context.Context, imageName, platform string) (bool, error) {
|
||||||
cli, err := GetDockerClient(ctx)
|
cli, err := GetDockerClient(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
defer cli.Close()
|
defer cli.Close()
|
||||||
|
|
||||||
inspectImage, err := cli.ImageInspect(ctx, imageName)
|
inspectImage, _, err := cli.ImageInspectWithRaw(ctx, imageName)
|
||||||
if cerrdefs.IsNotFound(err) {
|
if client.IsErrNotFound(err) {
|
||||||
return false, nil
|
return false, nil
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
@@ -35,21 +39,21 @@ func ImageExistsLocally(ctx context.Context, imageName string, platform string)
|
|||||||
|
|
||||||
// RemoveImage removes image from local store, the function is used to run different
|
// RemoveImage removes image from local store, the function is used to run different
|
||||||
// container image architectures
|
// container image architectures
|
||||||
func RemoveImage(ctx context.Context, imageName string, force bool, pruneChildren bool) (bool, error) {
|
func RemoveImage(ctx context.Context, imageName string, force, pruneChildren bool) (bool, error) {
|
||||||
cli, err := GetDockerClient(ctx)
|
cli, err := GetDockerClient(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
defer cli.Close()
|
defer cli.Close()
|
||||||
|
|
||||||
inspectImage, err := cli.ImageInspect(ctx, imageName)
|
inspectImage, _, err := cli.ImageInspectWithRaw(ctx, imageName)
|
||||||
if cerrdefs.IsNotFound(err) {
|
if client.IsErrNotFound(err) {
|
||||||
return false, nil
|
return false, nil
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err = cli.ImageRemove(ctx, inspectImage.ID, image.RemoveOptions{
|
if _, err = cli.ImageRemove(ctx, inspectImage.ID, types.ImageRemoveOptions{
|
||||||
Force: force,
|
Force: force,
|
||||||
PruneChildren: pruneChildren,
|
PruneChildren: pruneChildren,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
@@ -1,16 +1,18 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/image"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -27,59 +29,43 @@ func TestImageExistsLocally(t *testing.T) {
|
|||||||
|
|
||||||
// Test if image exists with specific tag
|
// Test if image exists with specific tag
|
||||||
invalidImageTag, err := ImageExistsLocally(ctx, "library/alpine:this-random-tag-will-never-exist", "linux/amd64")
|
invalidImageTag, err := ImageExistsLocally(ctx, "library/alpine:this-random-tag-will-never-exist", "linux/amd64")
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.False(t, invalidImageTag)
|
assert.False(t, invalidImageTag)
|
||||||
|
|
||||||
// Test if image exists with specific architecture (image platform)
|
// Test if image exists with specific architecture (image platform)
|
||||||
invalidImagePlatform, err := ImageExistsLocally(ctx, "alpine:latest", "windows/amd64")
|
invalidImagePlatform, err := ImageExistsLocally(ctx, "alpine:latest", "windows/amd64")
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.False(t, invalidImagePlatform)
|
assert.False(t, invalidImagePlatform)
|
||||||
|
|
||||||
// pull an image
|
// pull an image
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
cli, err := client.NewClientWithOpts(client.FromEnv)
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
cli.NegotiateAPIVersion(context.Background())
|
cli.NegotiateAPIVersion(context.Background())
|
||||||
|
|
||||||
// Chose alpine latest because it's so small
|
// Chose alpine latest because it's so small
|
||||||
// maybe we should build an image instead so that tests aren't reliable on dockerhub
|
// maybe we should build an image instead so that tests aren't reliable on dockerhub
|
||||||
readerDefault, err := cli.ImagePull(ctx, "node:16-buster-slim", image.PullOptions{
|
readerDefault, err := cli.ImagePull(ctx, "node:16-buster-slim", types.ImagePullOptions{
|
||||||
Platform: "linux/amd64",
|
Platform: "linux/amd64",
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
defer readerDefault.Close()
|
defer readerDefault.Close()
|
||||||
_, err = io.ReadAll(readerDefault)
|
_, err = io.ReadAll(readerDefault)
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
imageDefaultArchExists, err := ImageExistsLocally(ctx, "node:16-buster-slim", "linux/amd64")
|
imageDefaultArchExists, err := ImageExistsLocally(ctx, "node:16-buster-slim", "linux/amd64")
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.True(t, imageDefaultArchExists)
|
assert.True(t, imageDefaultArchExists)
|
||||||
}
|
|
||||||
|
|
||||||
func TestImageExistsLocallyQemu(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping integration test")
|
|
||||||
}
|
|
||||||
if _, ok := os.LookupEnv("NO_QEMU"); ok {
|
|
||||||
t.Skip("skipping test because QEMU is disabled")
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
// pull an image
|
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
|
||||||
require.NoError(t, err)
|
|
||||||
cli.NegotiateAPIVersion(context.Background())
|
|
||||||
|
|
||||||
// Validate if another architecture platform can be pulled
|
// Validate if another architecture platform can be pulled
|
||||||
readerArm64, err := cli.ImagePull(ctx, "node:16-buster-slim", image.PullOptions{
|
readerArm64, err := cli.ImagePull(ctx, "node:16-buster-slim", types.ImagePullOptions{
|
||||||
Platform: "linux/arm64",
|
Platform: "linux/arm64",
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
defer readerArm64.Close()
|
defer readerArm64.Close()
|
||||||
_, err = io.ReadAll(readerArm64)
|
_, err = io.ReadAll(readerArm64)
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
imageArm64Exists, err := ImageExistsLocally(ctx, "node:16-buster-slim", "linux/arm64")
|
imageArm64Exists, err := ImageExistsLocally(ctx, "node:16-buster-slim", "linux/arm64")
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.True(t, imageArm64Exists)
|
assert.True(t, imageArm64Exists)
|
||||||
}
|
}
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||||
|
|
||||||
package container
|
package container
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2023 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||||
|
|
||||||
package container
|
package container
|
||||||
@@ -5,8 +9,9 @@ package container
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
"gitea.com/gitea/runner/act/common"
|
||||||
"github.com/docker/docker/api/types/network"
|
|
||||||
|
"github.com/docker/docker/api/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
func NewDockerNetworkCreateExecutor(name string) common.Executor {
|
func NewDockerNetworkCreateExecutor(name string) common.Executor {
|
||||||
@@ -18,11 +23,12 @@ func NewDockerNetworkCreateExecutor(name string) common.Executor {
|
|||||||
defer cli.Close()
|
defer cli.Close()
|
||||||
|
|
||||||
// Only create the network if it doesn't exist
|
// Only create the network if it doesn't exist
|
||||||
networks, err := cli.NetworkList(ctx, network.ListOptions{})
|
networks, err := cli.NetworkList(ctx, types.NetworkListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
common.Logger(ctx).Debugf("%v", networks)
|
// For Gitea, reduce log noise
|
||||||
|
// common.Logger(ctx).Debugf("%v", networks)
|
||||||
for _, network := range networks {
|
for _, network := range networks {
|
||||||
if network.Name == name {
|
if network.Name == name {
|
||||||
common.Logger(ctx).Debugf("Network %v exists", name)
|
common.Logger(ctx).Debugf("Network %v exists", name)
|
||||||
@@ -30,7 +36,7 @@ func NewDockerNetworkCreateExecutor(name string) common.Executor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = cli.NetworkCreate(ctx, name, network.CreateOptions{
|
_, err = cli.NetworkCreate(ctx, name, types.NetworkCreate{
|
||||||
Driver: "bridge",
|
Driver: "bridge",
|
||||||
Scope: "local",
|
Scope: "local",
|
||||||
})
|
})
|
||||||
@@ -50,22 +56,23 @@ func NewDockerNetworkRemoveExecutor(name string) common.Executor {
|
|||||||
}
|
}
|
||||||
defer cli.Close()
|
defer cli.Close()
|
||||||
|
|
||||||
// Make sure that all network of the specified name are removed
|
// Make shure that all network of the specified name are removed
|
||||||
// cli.NetworkRemove refuses to remove a network if there are duplicates
|
// cli.NetworkRemove refuses to remove a network if there are duplicates
|
||||||
networks, err := cli.NetworkList(ctx, network.ListOptions{})
|
networks, err := cli.NetworkList(ctx, types.NetworkListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
common.Logger(ctx).Debugf("%v", networks)
|
// For Gitea, reduce log noise
|
||||||
for _, net := range networks {
|
// common.Logger(ctx).Debugf("%v", networks)
|
||||||
if net.Name == name {
|
for _, network := range networks {
|
||||||
result, err := cli.NetworkInspect(ctx, net.ID, network.InspectOptions{})
|
if network.Name == name {
|
||||||
|
result, err := cli.NetworkInspect(ctx, network.ID, types.NetworkInspectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(result.Containers) == 0 {
|
if len(result.Containers) == 0 {
|
||||||
if err = cli.NetworkRemove(ctx, net.ID); err != nil {
|
if err = cli.NetworkRemove(ctx, network.ID); err != nil {
|
||||||
common.Logger(ctx).Debugf("%v", err)
|
common.Logger(ctx).Debugf("%v", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||||
|
|
||||||
package container
|
package container
|
||||||
@@ -9,11 +13,11 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/distribution/reference"
|
"gitea.com/gitea/runner/act/common"
|
||||||
"github.com/docker/docker/api/types/image"
|
|
||||||
"github.com/docker/docker/api/types/registry"
|
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
"github.com/distribution/reference"
|
||||||
|
"github.com/docker/docker/api/types"
|
||||||
|
"github.com/docker/docker/api/types/registry"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NewDockerPullExecutor function to create a run executor for the container
|
// NewDockerPullExecutor function to create a run executor for the container
|
||||||
@@ -74,8 +78,8 @@ func NewDockerPullExecutor(input NewDockerPullExecutorInput) common.Executor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func getImagePullOptions(ctx context.Context, input NewDockerPullExecutorInput) (image.PullOptions, error) {
|
func getImagePullOptions(ctx context.Context, input NewDockerPullExecutorInput) (types.ImagePullOptions, error) {
|
||||||
imagePullOptions := image.PullOptions{
|
imagePullOptions := types.ImagePullOptions{
|
||||||
Platform: input.Platform,
|
Platform: input.Platform,
|
||||||
}
|
}
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -5,10 +9,8 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/docker/cli/cli/config"
|
"github.com/docker/cli/cli/config"
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
assert "github.com/stretchr/testify/assert"
|
assert "github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -41,15 +43,15 @@ func TestGetImagePullOptions(t *testing.T) {
|
|||||||
config.SetDir("/non-existent/docker")
|
config.SetDir("/non-existent/docker")
|
||||||
|
|
||||||
options, err := getImagePullOptions(ctx, NewDockerPullExecutorInput{})
|
options, err := getImagePullOptions(ctx, NewDockerPullExecutorInput{})
|
||||||
require.NoError(t, err, "Failed to create ImagePullOptions")
|
assert.NoError(t, err, "Failed to create ImagePullOptions") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Empty(t, options.RegistryAuth, "RegistryAuth should be empty if no username or password is set")
|
assert.Equal(t, "", options.RegistryAuth, "RegistryAuth should be empty if no username or password is set") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
options, err = getImagePullOptions(ctx, NewDockerPullExecutorInput{
|
options, err = getImagePullOptions(ctx, NewDockerPullExecutorInput{
|
||||||
Image: "",
|
Image: "",
|
||||||
Username: "username",
|
Username: "username",
|
||||||
Password: "password",
|
Password: "password",
|
||||||
})
|
})
|
||||||
require.NoError(t, err, "Failed to create ImagePullOptions")
|
assert.NoError(t, err, "Failed to create ImagePullOptions") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, "eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwicGFzc3dvcmQiOiJwYXNzd29yZCJ9", options.RegistryAuth, "Username and Password should be provided")
|
assert.Equal(t, "eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwicGFzc3dvcmQiOiJwYXNzd29yZCJ9", options.RegistryAuth, "Username and Password should be provided")
|
||||||
|
|
||||||
config.SetDir("testdata/docker-pull-options")
|
config.SetDir("testdata/docker-pull-options")
|
||||||
@@ -57,6 +59,6 @@ func TestGetImagePullOptions(t *testing.T) {
|
|||||||
options, err = getImagePullOptions(ctx, NewDockerPullExecutorInput{
|
options, err = getImagePullOptions(ctx, NewDockerPullExecutorInput{
|
||||||
Image: "nektos/act",
|
Image: "nektos/act",
|
||||||
})
|
})
|
||||||
require.NoError(t, err, "Failed to create ImagePullOptions")
|
assert.NoError(t, err, "Failed to create ImagePullOptions") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, "eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwicGFzc3dvcmQiOiJwYXNzd29yZFxuIiwic2VydmVyYWRkcmVzcyI6Imh0dHBzOi8vaW5kZXguZG9ja2VyLmlvL3YxLyJ9", options.RegistryAuth, "RegistryAuth should be taken from local docker config")
|
assert.Equal(t, "eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwicGFzc3dvcmQiOiJwYXNzd29yZFxuIiwic2VydmVyYWRkcmVzcyI6Imh0dHBzOi8vaW5kZXguZG9ja2VyLmlvL3YxLyJ9", options.RegistryAuth, "RegistryAuth should be taken from local docker config")
|
||||||
}
|
}
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||||
|
|
||||||
package container
|
package container
|
||||||
@@ -15,28 +19,29 @@ import (
|
|||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"dario.cat/mergo"
|
"gitea.com/gitea/runner/act/common"
|
||||||
|
"gitea.com/gitea/runner/act/filecollector"
|
||||||
|
|
||||||
"github.com/Masterminds/semver"
|
"github.com/Masterminds/semver"
|
||||||
|
"github.com/docker/cli/cli/compose/loader"
|
||||||
"github.com/docker/cli/cli/connhelper"
|
"github.com/docker/cli/cli/connhelper"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/container"
|
"github.com/docker/docker/api/types/container"
|
||||||
"github.com/docker/docker/api/types/mount"
|
"github.com/docker/docker/api/types/mount"
|
||||||
"github.com/docker/docker/api/types/network"
|
"github.com/docker/docker/api/types/network"
|
||||||
"github.com/docker/docker/api/types/system"
|
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
"github.com/docker/docker/pkg/stdcopy"
|
"github.com/docker/docker/pkg/stdcopy"
|
||||||
"github.com/go-git/go-billy/v5/helper/polyfill"
|
"github.com/go-git/go-billy/v5/helper/polyfill"
|
||||||
"github.com/go-git/go-billy/v5/osfs"
|
"github.com/go-git/go-billy/v5/osfs"
|
||||||
"github.com/go-git/go-git/v5/plumbing/format/gitignore"
|
"github.com/go-git/go-git/v5/plumbing/format/gitignore"
|
||||||
|
"github.com/gobwas/glob"
|
||||||
|
"github.com/imdario/mergo"
|
||||||
"github.com/joho/godotenv"
|
"github.com/joho/godotenv"
|
||||||
"github.com/kballard/go-shellquote"
|
"github.com/kballard/go-shellquote"
|
||||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
|
"golang.org/x/term"
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
|
||||||
"gitea.com/gitea/act_runner/pkg/filecollector"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// NewContainer creates a reference to a container
|
// NewContainer creates a reference to a container
|
||||||
@@ -46,6 +51,25 @@ func NewContainer(input *NewContainerInput) ExecutionsEnvironment {
|
|||||||
return cr
|
return cr
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cr *containerReference) ConnectToNetwork(name string) common.Executor {
|
||||||
|
return common.
|
||||||
|
NewDebugExecutor("%sdocker network connect %s %s", logPrefix, name, cr.input.Name).
|
||||||
|
Then(
|
||||||
|
common.NewPipelineExecutor(
|
||||||
|
cr.connect(),
|
||||||
|
cr.connectToNetwork(name, cr.input.NetworkAliases),
|
||||||
|
).IfNot(common.Dryrun),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cr *containerReference) connectToNetwork(name string, aliases []string) common.Executor {
|
||||||
|
return func(ctx context.Context) error {
|
||||||
|
return cr.cli.NetworkConnect(ctx, name, cr.input.Name, &network.EndpointSettings{
|
||||||
|
Aliases: aliases,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// supportsContainerImagePlatform returns true if the underlying Docker server
|
// supportsContainerImagePlatform returns true if the underlying Docker server
|
||||||
// API version is 1.41 and beyond
|
// API version is 1.41 and beyond
|
||||||
func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) bool {
|
func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) bool {
|
||||||
@@ -64,7 +88,7 @@ func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) b
|
|||||||
return constraint.Check(sv)
|
return constraint.Check(sv)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *containerReference) Create(capAdd []string, capDrop []string) common.Executor {
|
func (cr *containerReference) Create(capAdd, capDrop []string) common.Executor {
|
||||||
return common.
|
return common.
|
||||||
NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
|
NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
|
||||||
Then(
|
Then(
|
||||||
@@ -121,7 +145,7 @@ func (cr *containerReference) Copy(destPath string, files ...*FileEntry) common.
|
|||||||
).IfNot(common.Dryrun)
|
).IfNot(common.Dryrun)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *containerReference) CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor {
|
func (cr *containerReference) CopyDir(destPath, srcPath string, useGitIgnore bool) common.Executor {
|
||||||
return common.NewPipelineExecutor(
|
return common.NewPipelineExecutor(
|
||||||
common.NewInfoExecutor("%sdocker cp src=%s dst=%s", logPrefix, srcPath, destPath),
|
common.NewInfoExecutor("%sdocker cp src=%s dst=%s", logPrefix, srcPath, destPath),
|
||||||
cr.copyDir(destPath, srcPath, useGitIgnore),
|
cr.copyDir(destPath, srcPath, useGitIgnore),
|
||||||
@@ -137,7 +161,7 @@ func (cr *containerReference) CopyDir(destPath string, srcPath string, useGitIgn
|
|||||||
|
|
||||||
func (cr *containerReference) GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error) {
|
func (cr *containerReference) GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error) {
|
||||||
if common.Dryrun(ctx) {
|
if common.Dryrun(ctx) {
|
||||||
return nil, errors.New("dryrun is not supported in GetContainerArchive")
|
return nil, errors.New("DRYRUN is not supported in GetContainerArchive")
|
||||||
}
|
}
|
||||||
a, _, err := cr.cli.CopyFromContainer(ctx, cr.id, srcPath)
|
a, _, err := cr.cli.CopyFromContainer(ctx, cr.id, srcPath)
|
||||||
return a, err
|
return a, err
|
||||||
@@ -156,7 +180,7 @@ func (cr *containerReference) Exec(command []string, env map[string]string, user
|
|||||||
common.NewInfoExecutor("%sdocker exec cmd=[%s] user=%s workdir=%s", logPrefix, strings.Join(command, " "), user, workdir),
|
common.NewInfoExecutor("%sdocker exec cmd=[%s] user=%s workdir=%s", logPrefix, strings.Join(command, " "), user, workdir),
|
||||||
cr.connect(),
|
cr.connect(),
|
||||||
cr.find(),
|
cr.find(),
|
||||||
cr.execExt(command, env, user, workdir),
|
cr.exec(command, env, user, workdir),
|
||||||
).IfNot(common.Dryrun)
|
).IfNot(common.Dryrun)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -169,31 +193,7 @@ func (cr *containerReference) Remove() common.Executor {
|
|||||||
).IfNot(common.Dryrun)
|
).IfNot(common.Dryrun)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *containerReference) GetHealth(ctx context.Context) Health {
|
func (cr *containerReference) ReplaceLogWriter(stdout, stderr io.Writer) (io.Writer, io.Writer) {
|
||||||
resp, err := cr.cli.ContainerInspect(ctx, cr.id)
|
|
||||||
logger := common.Logger(ctx)
|
|
||||||
if err != nil {
|
|
||||||
logger.Errorf("failed to query container health %s", err)
|
|
||||||
return HealthUnHealthy
|
|
||||||
}
|
|
||||||
if resp.Config == nil || resp.Config.Healthcheck == nil || resp.State == nil || resp.State.Health == nil || len(resp.Config.Healthcheck.Test) == 1 && strings.EqualFold(resp.Config.Healthcheck.Test[0], "NONE") {
|
|
||||||
logger.Debugf("no container health check defined")
|
|
||||||
return HealthHealthy
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Infof("container health of %s (%s) is %s", cr.id, resp.Config.Image, resp.State.Health.Status)
|
|
||||||
switch resp.State.Health.Status {
|
|
||||||
case "starting":
|
|
||||||
return HealthStarting
|
|
||||||
case "healthy":
|
|
||||||
return HealthHealthy
|
|
||||||
case "unhealthy":
|
|
||||||
return HealthUnHealthy
|
|
||||||
}
|
|
||||||
return HealthUnHealthy
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cr *containerReference) ReplaceLogWriter(stdout io.Writer, stderr io.Writer) (io.Writer, io.Writer) {
|
|
||||||
out := cr.input.Stdout
|
out := cr.input.Stdout
|
||||||
err := cr.input.Stderr
|
err := cr.input.Stderr
|
||||||
|
|
||||||
@@ -237,7 +237,7 @@ func GetDockerClient(ctx context.Context) (cli client.APIClient, err error) {
|
|||||||
return cli, nil
|
return cli, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetHostInfo(ctx context.Context) (info system.Info, err error) {
|
func GetHostInfo(ctx context.Context) (info types.Info, err error) { //nolint:staticcheck // pre-existing issue from nektos/act
|
||||||
var cli client.APIClient
|
var cli client.APIClient
|
||||||
cli, err = GetDockerClient(ctx)
|
cli, err = GetDockerClient(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -290,7 +290,7 @@ func (cr *containerReference) connect() common.Executor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (cr *containerReference) Close() common.Executor {
|
func (cr *containerReference) Close() common.Executor {
|
||||||
return func(_ context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
if cr.cli != nil {
|
if cr.cli != nil {
|
||||||
err := cr.cli.Close()
|
err := cr.cli.Close()
|
||||||
cr.cli = nil
|
cr.cli = nil
|
||||||
@@ -307,7 +307,7 @@ func (cr *containerReference) find() common.Executor {
|
|||||||
if cr.id != "" {
|
if cr.id != "" {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
containers, err := cr.cli.ContainerList(ctx, container.ListOptions{
|
containers, err := cr.cli.ContainerList(ctx, types.ContainerListOptions{ //nolint:staticcheck // pre-existing issue from nektos/act
|
||||||
All: true,
|
All: true,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -335,7 +335,7 @@ func (cr *containerReference) remove() common.Executor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
err := cr.cli.ContainerRemove(ctx, cr.id, container.RemoveOptions{
|
err := cr.cli.ContainerRemove(ctx, cr.id, types.ContainerRemoveOptions{ //nolint:staticcheck // pre-existing issue from nektos/act
|
||||||
RemoveVolumes: true,
|
RemoveVolumes: true,
|
||||||
Force: true,
|
Force: true,
|
||||||
})
|
})
|
||||||
@@ -363,30 +363,50 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
|
|||||||
|
|
||||||
optionsArgs, err := shellquote.Split(input.Options)
|
optionsArgs, err := shellquote.Split(input.Options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot split container options: '%s': '%w'", input.Options, err)
|
return nil, nil, fmt.Errorf("Cannot split container options: '%s': '%w'", input.Options, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = flags.Parse(optionsArgs)
|
err = flags.Parse(optionsArgs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot parse container options: '%s': '%w'", input.Options, err)
|
return nil, nil, fmt.Errorf("Cannot parse container options: '%s': '%w'", input.Options, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FIXME: If everything is fine after gitea/act v0.260.0, remove the following comment.
|
||||||
|
// In the old fork version, the code is
|
||||||
|
// if len(copts.netMode.Value()) == 0 {
|
||||||
|
// if err = copts.netMode.Set("host"); err != nil {
|
||||||
|
// return nil, nil, fmt.Errorf("Cannot parse networkmode=host. This is an internal error and should not happen: '%w'", err)
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// And it has been commented with:
|
||||||
|
// If a service container's network is set to `host`, the container will not be able to
|
||||||
|
// connect to the specified network created for the job container and the service containers.
|
||||||
|
// So comment out the following code.
|
||||||
|
// Not the if it's necessary to comment it in the new version,
|
||||||
|
// since it's cr.input.NetworkMode now.
|
||||||
|
|
||||||
if len(copts.netMode.Value()) == 0 {
|
if len(copts.netMode.Value()) == 0 {
|
||||||
if err = copts.netMode.Set(cr.input.NetworkMode); err != nil {
|
if err = copts.netMode.Set(cr.input.NetworkMode); err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot parse networkmode=%s. This is an internal error and should not happen: '%w'", cr.input.NetworkMode, err)
|
return nil, nil, fmt.Errorf("Cannot parse networkmode=%s. This is an internal error and should not happen: '%w'", cr.input.NetworkMode, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If the `privileged` config has been disabled, `copts.privileged` need to be forced to false,
|
||||||
|
// even if the user specifies `--privileged` in the options string.
|
||||||
|
if !hostConfig.Privileged {
|
||||||
|
copts.privileged = false
|
||||||
|
}
|
||||||
|
|
||||||
containerConfig, err := parse(flags, copts, runtime.GOOS)
|
containerConfig, err := parse(flags, copts, runtime.GOOS)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot process container options: '%s': '%w'", input.Options, err)
|
return nil, nil, fmt.Errorf("Cannot process container options: '%s': '%w'", input.Options, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.Debugf("Custom container.Config from options ==> %+v", containerConfig.Config)
|
logger.Debugf("Custom container.Config from options ==> %+v", containerConfig.Config)
|
||||||
|
|
||||||
err = mergo.Merge(config, containerConfig.Config, mergo.WithOverride)
|
err = mergo.Merge(config, containerConfig.Config, mergo.WithOverride, mergo.WithAppendSlice)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot merge container.Config options: '%s': '%w'", input.Options, err)
|
return nil, nil, fmt.Errorf("Cannot merge container.Config options: '%s': '%w'", input.Options, err)
|
||||||
}
|
}
|
||||||
logger.Debugf("Merged container.Config ==> %+v", config)
|
logger.Debugf("Merged container.Config ==> %+v", config)
|
||||||
|
|
||||||
@@ -396,24 +416,29 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
|
|||||||
hostConfig.Mounts = append(hostConfig.Mounts, containerConfig.HostConfig.Mounts...)
|
hostConfig.Mounts = append(hostConfig.Mounts, containerConfig.HostConfig.Mounts...)
|
||||||
binds := hostConfig.Binds
|
binds := hostConfig.Binds
|
||||||
mounts := hostConfig.Mounts
|
mounts := hostConfig.Mounts
|
||||||
|
networkMode := hostConfig.NetworkMode
|
||||||
err = mergo.Merge(hostConfig, containerConfig.HostConfig, mergo.WithOverride)
|
err = mergo.Merge(hostConfig, containerConfig.HostConfig, mergo.WithOverride)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot merge container.HostConfig options: '%s': '%w'", input.Options, err)
|
return nil, nil, fmt.Errorf("Cannot merge container.HostConfig options: '%s': '%w'", input.Options, err)
|
||||||
}
|
}
|
||||||
hostConfig.Binds = binds
|
hostConfig.Binds = binds
|
||||||
hostConfig.Mounts = mounts
|
hostConfig.Mounts = mounts
|
||||||
|
if len(copts.netMode.Value()) > 0 {
|
||||||
|
logger.Warn("--network and --net in the options will be ignored.")
|
||||||
|
}
|
||||||
|
hostConfig.NetworkMode = networkMode
|
||||||
logger.Debugf("Merged container.HostConfig ==> %+v", hostConfig)
|
logger.Debugf("Merged container.HostConfig ==> %+v", hostConfig)
|
||||||
|
|
||||||
return config, hostConfig, nil
|
return config, hostConfig, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *containerReference) create(capAdd []string, capDrop []string) common.Executor {
|
func (cr *containerReference) create(capAdd, capDrop []string) common.Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
if cr.id != "" {
|
if cr.id != "" {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
isTerminal := containerAllocateTerminal
|
isTerminal := term.IsTerminal(int(os.Stdout.Fd()))
|
||||||
input := cr.input
|
input := cr.input
|
||||||
|
|
||||||
config := &container.Config{
|
config := &container.Config{
|
||||||
@@ -423,7 +448,8 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
|
|||||||
ExposedPorts: input.ExposedPorts,
|
ExposedPorts: input.ExposedPorts,
|
||||||
Tty: isTerminal,
|
Tty: isTerminal,
|
||||||
}
|
}
|
||||||
logger.Debugf("Common container.Config ==> %+v", config)
|
// For Gitea, reduce log noise
|
||||||
|
// logger.Debugf("Common container.Config ==> %+v", config)
|
||||||
|
|
||||||
if len(input.Cmd) != 0 {
|
if len(input.Cmd) != 0 {
|
||||||
config.Cmd = input.Cmd
|
config.Cmd = input.Cmd
|
||||||
@@ -465,16 +491,22 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
|
|||||||
Privileged: input.Privileged,
|
Privileged: input.Privileged,
|
||||||
UsernsMode: container.UsernsMode(input.UsernsMode),
|
UsernsMode: container.UsernsMode(input.UsernsMode),
|
||||||
PortBindings: input.PortBindings,
|
PortBindings: input.PortBindings,
|
||||||
|
AutoRemove: input.AutoRemove,
|
||||||
}
|
}
|
||||||
logger.Debugf("Common container.HostConfig ==> %+v", hostConfig)
|
// For Gitea, reduce log noise
|
||||||
|
// logger.Debugf("Common container.HostConfig ==> %+v", hostConfig)
|
||||||
|
|
||||||
config, hostConfig, err := cr.mergeContainerConfigs(ctx, config, hostConfig)
|
config, hostConfig, err := cr.mergeContainerConfigs(ctx, config, hostConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// For Gitea
|
||||||
|
config, hostConfig = cr.sanitizeConfig(ctx, config, hostConfig)
|
||||||
|
|
||||||
var networkingConfig *network.NetworkingConfig
|
var networkingConfig *network.NetworkingConfig
|
||||||
logger.Debugf("input.NetworkAliases ==> %v", input.NetworkAliases)
|
// For Gitea, reduce log noise
|
||||||
|
// logger.Debugf("input.NetworkAliases ==> %v", input.NetworkAliases)
|
||||||
n := hostConfig.NetworkMode
|
n := hostConfig.NetworkMode
|
||||||
// IsUserDefined and IsHost are broken on windows
|
// IsUserDefined and IsHost are broken on windows
|
||||||
if n.IsUserDefined() && n != "host" && len(input.NetworkAliases) > 0 {
|
if n.IsUserDefined() && n != "host" && len(input.NetworkAliases) > 0 {
|
||||||
@@ -506,7 +538,7 @@ func (cr *containerReference) extractFromImageEnv(env *map[string]string) common
|
|||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
|
|
||||||
inspect, err := cr.cli.ImageInspect(ctx, cr.input.Image)
|
inspect, _, err := cr.cli.ImageInspectWithRaw(ctx, cr.input.Image)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error(err)
|
logger.Error(err)
|
||||||
return fmt.Errorf("inspect image: %w", err)
|
return fmt.Errorf("inspect image: %w", err)
|
||||||
@@ -539,108 +571,78 @@ func (cr *containerReference) extractFromImageEnv(env *map[string]string) common
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *containerReference) exec(ctx context.Context, cmd []string, env map[string]string, user, workdir string) error {
|
func (cr *containerReference) exec(cmd []string, env map[string]string, user, workdir string) common.Executor {
|
||||||
logger := common.Logger(ctx)
|
|
||||||
// Fix slashes when running on Windows
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
var newCmd []string
|
|
||||||
for _, v := range cmd {
|
|
||||||
newCmd = append(newCmd, strings.ReplaceAll(v, `\`, `/`))
|
|
||||||
}
|
|
||||||
cmd = newCmd
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Debugf("Exec command '%s'", cmd)
|
|
||||||
isTerminal := containerAllocateTerminal
|
|
||||||
envList := make([]string, 0)
|
|
||||||
for k, v := range env {
|
|
||||||
envList = append(envList, fmt.Sprintf("%s=%s", k, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
var wd string
|
|
||||||
if workdir != "" {
|
|
||||||
if strings.HasPrefix(workdir, "/") {
|
|
||||||
wd = workdir
|
|
||||||
} else {
|
|
||||||
wd = fmt.Sprintf("%s/%s", cr.input.WorkingDir, workdir)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
wd = cr.input.WorkingDir
|
|
||||||
}
|
|
||||||
logger.Debugf("Working directory '%s'", wd)
|
|
||||||
|
|
||||||
idResp, err := cr.cli.ContainerExecCreate(ctx, cr.id, container.ExecOptions{
|
|
||||||
User: user,
|
|
||||||
Cmd: cmd,
|
|
||||||
WorkingDir: wd,
|
|
||||||
Env: envList,
|
|
||||||
Tty: isTerminal,
|
|
||||||
AttachStderr: true,
|
|
||||||
AttachStdout: true,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create exec: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := cr.cli.ContainerExecAttach(ctx, idResp.ID, container.ExecStartOptions{
|
|
||||||
Tty: isTerminal,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to attach to exec: %w", err)
|
|
||||||
}
|
|
||||||
defer resp.Close()
|
|
||||||
|
|
||||||
err = cr.waitForCommand(ctx, isTerminal, resp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
inspectResp, err := cr.cli.ContainerExecInspect(ctx, idResp.ID)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to inspect exec: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch inspectResp.ExitCode {
|
|
||||||
case 0:
|
|
||||||
return nil
|
|
||||||
case 127:
|
|
||||||
return fmt.Errorf("exitcode '%d': command not found, please refer to https://github.com/nektos/act/issues/107 for more information", inspectResp.ExitCode)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("exitcode '%d': failure", inspectResp.ExitCode)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
//nolint:contextcheck
|
|
||||||
func (cr *containerReference) execExt(cmd []string, env map[string]string, user, workdir string) common.Executor {
|
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
done := make(chan error)
|
// Fix slashes when running on Windows
|
||||||
go func() {
|
if runtime.GOOS == "windows" {
|
||||||
defer func() {
|
var newCmd []string
|
||||||
done <- errors.New("invalid Operation")
|
for _, v := range cmd {
|
||||||
}()
|
newCmd = append(newCmd, strings.ReplaceAll(v, `\`, `/`))
|
||||||
done <- cr.exec(ctx, cmd, env, user, workdir)
|
|
||||||
}()
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
timed, cancelTimed := context.WithTimeout(context.Background(), 2*time.Minute)
|
|
||||||
defer cancelTimed()
|
|
||||||
err := cr.cli.ContainerKill(timed, cr.id, "kill")
|
|
||||||
if err != nil {
|
|
||||||
logger.Error(err)
|
|
||||||
}
|
}
|
||||||
_ = cr.start()(timed)
|
cmd = newCmd
|
||||||
logger.Info("This step was cancelled")
|
|
||||||
return fmt.Errorf("this step was cancelled: %w", ctx.Err())
|
|
||||||
case ret := <-done:
|
|
||||||
return ret
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.Debugf("Exec command '%s'", cmd)
|
||||||
|
isTerminal := term.IsTerminal(int(os.Stdout.Fd()))
|
||||||
|
envList := make([]string, 0)
|
||||||
|
for k, v := range env {
|
||||||
|
envList = append(envList, fmt.Sprintf("%s=%s", k, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
var wd string
|
||||||
|
if workdir != "" {
|
||||||
|
if strings.HasPrefix(workdir, "/") {
|
||||||
|
wd = workdir
|
||||||
|
} else {
|
||||||
|
wd = fmt.Sprintf("%s/%s", cr.input.WorkingDir, workdir)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
wd = cr.input.WorkingDir
|
||||||
|
}
|
||||||
|
logger.Debugf("Working directory '%s'", wd)
|
||||||
|
|
||||||
|
idResp, err := cr.cli.ContainerExecCreate(ctx, cr.id, types.ExecConfig{
|
||||||
|
User: user,
|
||||||
|
Cmd: cmd,
|
||||||
|
WorkingDir: wd,
|
||||||
|
Env: envList,
|
||||||
|
Tty: isTerminal,
|
||||||
|
AttachStderr: true,
|
||||||
|
AttachStdout: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create exec: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := cr.cli.ContainerExecAttach(ctx, idResp.ID, types.ExecStartCheck{
|
||||||
|
Tty: isTerminal,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to attach to exec: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Close()
|
||||||
|
|
||||||
|
err = cr.waitForCommand(ctx, isTerminal, resp, idResp, user, workdir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
inspectResp, err := cr.cli.ContainerExecInspect(ctx, idResp.ID)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to inspect exec: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if inspectResp.ExitCode == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return ExitCodeError(inspectResp.ExitCode)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *containerReference) tryReadID(opt string, cbk func(id int)) common.Executor {
|
func (cr *containerReference) tryReadID(opt string, cbk func(id int)) common.Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
idResp, err := cr.cli.ContainerExecCreate(ctx, cr.id, container.ExecOptions{
|
idResp, err := cr.cli.ContainerExecCreate(ctx, cr.id, types.ExecConfig{
|
||||||
Cmd: []string{"id", opt},
|
Cmd: []string{"id", opt},
|
||||||
AttachStdout: true,
|
AttachStdout: true,
|
||||||
AttachStderr: true,
|
AttachStderr: true,
|
||||||
@@ -649,7 +651,7 @@ func (cr *containerReference) tryReadID(opt string, cbk func(id int)) common.Exe
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := cr.cli.ContainerExecAttach(ctx, idResp.ID, container.ExecStartOptions{})
|
resp, err := cr.cli.ContainerExecAttach(ctx, idResp.ID, types.ExecStartCheck{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -679,7 +681,7 @@ func (cr *containerReference) tryReadGID() common.Executor {
|
|||||||
return cr.tryReadID("-g", func(id int) { cr.GID = id })
|
return cr.tryReadID("-g", func(id int) { cr.GID = id })
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal bool, resp types.HijackedResponse) error {
|
func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal bool, resp types.HijackedResponse, _ types.IDResponse, _, _ string) error {
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
|
|
||||||
cmdResponse := make(chan error)
|
cmdResponse := make(chan error)
|
||||||
@@ -725,9 +727,6 @@ func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal boo
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
|
func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
|
||||||
if common.Dryrun(ctx) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Mkdir
|
// Mkdir
|
||||||
buf := &bytes.Buffer{}
|
buf := &bytes.Buffer{}
|
||||||
tw := tar.NewWriter(buf)
|
tw := tar.NewWriter(buf)
|
||||||
@@ -737,12 +736,12 @@ func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string
|
|||||||
Typeflag: tar.TypeDir,
|
Typeflag: tar.TypeDir,
|
||||||
})
|
})
|
||||||
tw.Close()
|
tw.Close()
|
||||||
err := cr.cli.CopyToContainer(ctx, cr.id, "/", buf, container.CopyToContainerOptions{})
|
err := cr.cli.CopyToContainer(ctx, cr.id, "/", buf, types.CopyToContainerOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to mkdir to copy content to container: %w", err)
|
return fmt.Errorf("failed to mkdir to copy content to container: %w", err)
|
||||||
}
|
}
|
||||||
// Copy Content
|
// Copy Content
|
||||||
err = cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, container.CopyToContainerOptions{})
|
err = cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to copy content to container: %w", err)
|
return fmt.Errorf("failed to copy content to container: %w", err)
|
||||||
}
|
}
|
||||||
@@ -753,7 +752,7 @@ func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgnore bool) common.Executor {
|
func (cr *containerReference) copyDir(dstPath, srcPath string, useGitIgnore bool) common.Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
tarFile, err := os.CreateTemp("", "act")
|
tarFile, err := os.CreateTemp("", "act")
|
||||||
@@ -816,7 +815,7 @@ func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgno
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to seek tar archive: %w", err)
|
return fmt.Errorf("failed to seek tar archive: %w", err)
|
||||||
}
|
}
|
||||||
err = cr.cli.CopyToContainer(ctx, cr.id, "/", tarFile, container.CopyToContainerOptions{})
|
err = cr.cli.CopyToContainer(ctx, cr.id, "/", tarFile, types.CopyToContainerOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to copy content to container: %w", err)
|
return fmt.Errorf("failed to copy content to container: %w", err)
|
||||||
}
|
}
|
||||||
@@ -833,7 +832,7 @@ func (cr *containerReference) copyContent(dstPath string, files ...*FileEntry) c
|
|||||||
logger.Debugf("Writing entry to tarball %s len:%d", file.Name, len(file.Body))
|
logger.Debugf("Writing entry to tarball %s len:%d", file.Name, len(file.Body))
|
||||||
hdr := &tar.Header{
|
hdr := &tar.Header{
|
||||||
Name: file.Name,
|
Name: file.Name,
|
||||||
Mode: int64(file.Mode),
|
Mode: file.Mode,
|
||||||
Size: int64(len(file.Body)),
|
Size: int64(len(file.Body)),
|
||||||
Uid: cr.UID,
|
Uid: cr.UID,
|
||||||
Gid: cr.GID,
|
Gid: cr.GID,
|
||||||
@@ -850,7 +849,7 @@ func (cr *containerReference) copyContent(dstPath string, files ...*FileEntry) c
|
|||||||
}
|
}
|
||||||
|
|
||||||
logger.Debugf("Extracting content to '%s'", dstPath)
|
logger.Debugf("Extracting content to '%s'", dstPath)
|
||||||
err := cr.cli.CopyToContainer(ctx, cr.id, dstPath, &buf, container.CopyToContainerOptions{})
|
err := cr.cli.CopyToContainer(ctx, cr.id, dstPath, &buf, types.CopyToContainerOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to copy content to container: %w", err)
|
return fmt.Errorf("failed to copy content to container: %w", err)
|
||||||
}
|
}
|
||||||
@@ -860,7 +859,7 @@ func (cr *containerReference) copyContent(dstPath string, files ...*FileEntry) c
|
|||||||
|
|
||||||
func (cr *containerReference) attach() common.Executor {
|
func (cr *containerReference) attach() common.Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
out, err := cr.cli.ContainerAttach(ctx, cr.id, container.AttachOptions{
|
out, err := cr.cli.ContainerAttach(ctx, cr.id, types.ContainerAttachOptions{ //nolint:staticcheck // pre-existing issue from nektos/act
|
||||||
Stream: true,
|
Stream: true,
|
||||||
Stdout: true,
|
Stdout: true,
|
||||||
Stderr: true,
|
Stderr: true,
|
||||||
@@ -868,7 +867,7 @@ func (cr *containerReference) attach() common.Executor {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to attach to container: %w", err)
|
return fmt.Errorf("failed to attach to container: %w", err)
|
||||||
}
|
}
|
||||||
isTerminal := containerAllocateTerminal
|
isTerminal := term.IsTerminal(int(os.Stdout.Fd()))
|
||||||
|
|
||||||
var outWriter io.Writer
|
var outWriter io.Writer
|
||||||
outWriter = cr.input.Stdout
|
outWriter = cr.input.Stdout
|
||||||
@@ -898,7 +897,7 @@ func (cr *containerReference) start() common.Executor {
|
|||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
logger.Debugf("Starting container: %v", cr.id)
|
logger.Debugf("Starting container: %v", cr.id)
|
||||||
|
|
||||||
if err := cr.cli.ContainerStart(ctx, cr.id, container.StartOptions{}); err != nil {
|
if err := cr.cli.ContainerStart(ctx, cr.id, types.ContainerStartOptions{}); err != nil { //nolint:staticcheck // pre-existing issue from nektos/act
|
||||||
return fmt.Errorf("failed to start container: %w", err)
|
return fmt.Errorf("failed to start container: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -927,6 +926,66 @@ func (cr *containerReference) wait() common.Executor {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Errorf("exit with `FAILURE`: %v", statusCode)
|
return ExitCodeError(statusCode)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// For Gitea
|
||||||
|
// sanitizeConfig remove the invalid configurations from `config` and `hostConfig`
|
||||||
|
func (cr *containerReference) sanitizeConfig(ctx context.Context, config *container.Config, hostConfig *container.HostConfig) (*container.Config, *container.HostConfig) {
|
||||||
|
logger := common.Logger(ctx)
|
||||||
|
|
||||||
|
if len(cr.input.ValidVolumes) > 0 {
|
||||||
|
globs := make([]glob.Glob, 0, len(cr.input.ValidVolumes))
|
||||||
|
for _, v := range cr.input.ValidVolumes {
|
||||||
|
if g, err := glob.Compile(v); err != nil {
|
||||||
|
logger.Errorf("create glob from %s error: %v", v, err)
|
||||||
|
} else {
|
||||||
|
globs = append(globs, g)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
isValid := func(v string) bool {
|
||||||
|
for _, g := range globs {
|
||||||
|
if g.Match(v) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// sanitize binds
|
||||||
|
sanitizedBinds := make([]string, 0, len(hostConfig.Binds))
|
||||||
|
for _, bind := range hostConfig.Binds {
|
||||||
|
parsed, err := loader.ParseVolume(bind)
|
||||||
|
if err != nil {
|
||||||
|
logger.Warnf("parse volume [%s] error: %v", bind, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if parsed.Source == "" {
|
||||||
|
// anonymous volume
|
||||||
|
sanitizedBinds = append(sanitizedBinds, bind)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if isValid(parsed.Source) {
|
||||||
|
sanitizedBinds = append(sanitizedBinds, bind)
|
||||||
|
} else {
|
||||||
|
logger.Warnf("[%s] is not a valid volume, will be ignored", parsed.Source)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
hostConfig.Binds = sanitizedBinds
|
||||||
|
// sanitize mounts
|
||||||
|
sanitizedMounts := make([]mount.Mount, 0, len(hostConfig.Mounts))
|
||||||
|
for _, mt := range hostConfig.Mounts {
|
||||||
|
if isValid(mt.Source) {
|
||||||
|
sanitizedMounts = append(sanitizedMounts, mt)
|
||||||
|
} else {
|
||||||
|
logger.Warnf("[%s] is not a valid volume, will be ignored", mt.Source)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
hostConfig.Mounts = sanitizedMounts
|
||||||
|
} else {
|
||||||
|
hostConfig.Binds = []string{}
|
||||||
|
hostConfig.Mounts = []mount.Mount{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return config, hostConfig
|
||||||
|
}
|
||||||
366
act/container/docker_run_test.go
Normal file
366
act/container/docker_run_test.go
Normal file
@@ -0,0 +1,366 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
package container
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"gitea.com/gitea/runner/act/common"
|
||||||
|
|
||||||
|
"github.com/docker/docker/api/types"
|
||||||
|
"github.com/docker/docker/api/types/container"
|
||||||
|
"github.com/docker/docker/client"
|
||||||
|
"github.com/sirupsen/logrus/hooks/test"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/mock"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDocker(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
client, err := GetDockerClient(ctx)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
defer client.Close()
|
||||||
|
|
||||||
|
dockerBuild := NewDockerBuildExecutor(NewDockerBuildExecutorInput{
|
||||||
|
ContextDir: "testdata",
|
||||||
|
ImageTag: "envmergetest",
|
||||||
|
})
|
||||||
|
|
||||||
|
err = dockerBuild(ctx)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
cr := &containerReference{
|
||||||
|
cli: client,
|
||||||
|
input: &NewContainerInput{
|
||||||
|
Image: "envmergetest",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
env := map[string]string{
|
||||||
|
"PATH": "/usr/local/bin:/usr/bin:/usr/sbin:/bin:/sbin",
|
||||||
|
"RANDOM_VAR": "WITH_VALUE",
|
||||||
|
"ANOTHER_VAR": "",
|
||||||
|
"CONFLICT_VAR": "I_EXIST_IN_MULTIPLE_PLACES",
|
||||||
|
}
|
||||||
|
|
||||||
|
envExecutor := cr.extractFromImageEnv(&env)
|
||||||
|
err = envExecutor(ctx)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
assert.Equal(t, map[string]string{
|
||||||
|
"PATH": "/usr/local/bin:/usr/bin:/usr/sbin:/bin:/sbin:/this/path/does/not/exists/anywhere:/this/either",
|
||||||
|
"RANDOM_VAR": "WITH_VALUE",
|
||||||
|
"ANOTHER_VAR": "",
|
||||||
|
"SOME_RANDOM_VAR": "",
|
||||||
|
"ANOTHER_ONE": "BUT_I_HAVE_VALUE",
|
||||||
|
"CONFLICT_VAR": "I_EXIST_IN_MULTIPLE_PLACES",
|
||||||
|
}, env)
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockDockerClient struct {
|
||||||
|
client.APIClient
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockDockerClient) ContainerExecCreate(ctx context.Context, id string, opts types.ExecConfig) (types.IDResponse, error) {
|
||||||
|
args := m.Called(ctx, id, opts)
|
||||||
|
return args.Get(0).(types.IDResponse), args.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockDockerClient) ContainerExecAttach(ctx context.Context, id string, opts types.ExecStartCheck) (types.HijackedResponse, error) {
|
||||||
|
args := m.Called(ctx, id, opts)
|
||||||
|
return args.Get(0).(types.HijackedResponse), args.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockDockerClient) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
|
||||||
|
args := m.Called(ctx, execID)
|
||||||
|
return args.Get(0).(types.ContainerExecInspect), args.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockDockerClient) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {
|
||||||
|
args := m.Called(ctx, containerID, condition)
|
||||||
|
return args.Get(0).(<-chan container.WaitResponse), args.Get(1).(<-chan error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockDockerClient) CopyToContainer(ctx context.Context, id, path string, content io.Reader, options types.CopyToContainerOptions) error {
|
||||||
|
args := m.Called(ctx, id, path, content, options)
|
||||||
|
return args.Error(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
type endlessReader struct {
|
||||||
|
io.Reader
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r endlessReader) Read(_ []byte) (n int, err error) {
|
||||||
|
return 1, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockConn struct {
|
||||||
|
net.Conn
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockConn) Write(b []byte) (n int, err error) {
|
||||||
|
args := m.Called(b)
|
||||||
|
return args.Int(0), args.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockConn) Close() (err error) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDockerExecAbort(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
|
conn := &mockConn{}
|
||||||
|
conn.On("Write", mock.AnythingOfType("[]uint8")).Return(1, nil)
|
||||||
|
|
||||||
|
client := &mockDockerClient{}
|
||||||
|
client.On("ContainerExecCreate", ctx, "123", mock.AnythingOfType("types.ExecConfig")).Return(types.IDResponse{ID: "id"}, nil)
|
||||||
|
client.On("ContainerExecAttach", ctx, "id", mock.AnythingOfType("types.ExecStartCheck")).Return(types.HijackedResponse{
|
||||||
|
Conn: conn,
|
||||||
|
Reader: bufio.NewReader(endlessReader{}),
|
||||||
|
}, nil)
|
||||||
|
|
||||||
|
cr := &containerReference{
|
||||||
|
id: "123",
|
||||||
|
cli: client,
|
||||||
|
input: &NewContainerInput{
|
||||||
|
Image: "image",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
channel := make(chan error)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
channel <- cr.exec([]string{""}, map[string]string{}, "user", "workdir")(ctx)
|
||||||
|
}()
|
||||||
|
|
||||||
|
time.Sleep(500 * time.Millisecond)
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
|
||||||
|
err := <-channel
|
||||||
|
assert.ErrorIs(t, err, context.Canceled) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
conn.AssertExpectations(t)
|
||||||
|
client.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDockerExecFailure(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
conn := &mockConn{}
|
||||||
|
|
||||||
|
client := &mockDockerClient{}
|
||||||
|
client.On("ContainerExecCreate", ctx, "123", mock.AnythingOfType("types.ExecConfig")).Return(types.IDResponse{ID: "id"}, nil)
|
||||||
|
client.On("ContainerExecAttach", ctx, "id", mock.AnythingOfType("types.ExecStartCheck")).Return(types.HijackedResponse{
|
||||||
|
Conn: conn,
|
||||||
|
Reader: bufio.NewReader(strings.NewReader("output")),
|
||||||
|
}, nil)
|
||||||
|
client.On("ContainerExecInspect", ctx, "id").Return(types.ContainerExecInspect{
|
||||||
|
ExitCode: 1,
|
||||||
|
}, nil)
|
||||||
|
|
||||||
|
cr := &containerReference{
|
||||||
|
id: "123",
|
||||||
|
cli: client,
|
||||||
|
input: &NewContainerInput{
|
||||||
|
Image: "image",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err := cr.exec([]string{""}, map[string]string{}, "user", "workdir")(ctx)
|
||||||
|
var exitErr ExitCodeError
|
||||||
|
require.ErrorAs(t, err, &exitErr)
|
||||||
|
assert.Equal(t, ExitCodeError(1), exitErr)
|
||||||
|
assert.Equal(t, "Process completed with exit code 1.", err.Error())
|
||||||
|
|
||||||
|
conn.AssertExpectations(t)
|
||||||
|
client.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDockerWaitFailure(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
statusCh := make(chan container.WaitResponse, 1)
|
||||||
|
statusCh <- container.WaitResponse{StatusCode: 2}
|
||||||
|
errCh := make(chan error, 1)
|
||||||
|
|
||||||
|
client := &mockDockerClient{}
|
||||||
|
client.On("ContainerWait", ctx, "123", container.WaitConditionNotRunning).
|
||||||
|
Return((<-chan container.WaitResponse)(statusCh), (<-chan error)(errCh))
|
||||||
|
|
||||||
|
cr := &containerReference{
|
||||||
|
id: "123",
|
||||||
|
cli: client,
|
||||||
|
input: &NewContainerInput{
|
||||||
|
Image: "image",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err := cr.wait()(ctx)
|
||||||
|
var exitErr ExitCodeError
|
||||||
|
require.ErrorAs(t, err, &exitErr)
|
||||||
|
assert.Equal(t, ExitCodeError(2), exitErr)
|
||||||
|
assert.Equal(t, "Process completed with exit code 2.", err.Error())
|
||||||
|
|
||||||
|
client.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDockerCopyTarStream(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
conn := &mockConn{}
|
||||||
|
|
||||||
|
client := &mockDockerClient{}
|
||||||
|
client.On("CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(nil)
|
||||||
|
client.On("CopyToContainer", ctx, "123", "/var/run/act", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(nil)
|
||||||
|
cr := &containerReference{
|
||||||
|
id: "123",
|
||||||
|
cli: client,
|
||||||
|
input: &NewContainerInput{
|
||||||
|
Image: "image",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_ = cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{})
|
||||||
|
|
||||||
|
conn.AssertExpectations(t)
|
||||||
|
client.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDockerCopyTarStreamErrorInCopyFiles(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
conn := &mockConn{}
|
||||||
|
|
||||||
|
merr := errors.New("Failure")
|
||||||
|
|
||||||
|
client := &mockDockerClient{}
|
||||||
|
client.On("CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(merr)
|
||||||
|
client.On("CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(merr)
|
||||||
|
cr := &containerReference{
|
||||||
|
id: "123",
|
||||||
|
cli: client,
|
||||||
|
input: &NewContainerInput{
|
||||||
|
Image: "image",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err := cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{})
|
||||||
|
assert.ErrorIs(t, err, merr) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
conn.AssertExpectations(t)
|
||||||
|
client.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDockerCopyTarStreamErrorInMkdir(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
conn := &mockConn{}
|
||||||
|
|
||||||
|
merr := errors.New("Failure")
|
||||||
|
|
||||||
|
client := &mockDockerClient{}
|
||||||
|
client.On("CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(nil)
|
||||||
|
client.On("CopyToContainer", ctx, "123", "/var/run/act", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(merr)
|
||||||
|
cr := &containerReference{
|
||||||
|
id: "123",
|
||||||
|
cli: client,
|
||||||
|
input: &NewContainerInput{
|
||||||
|
Image: "image",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err := cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{})
|
||||||
|
assert.ErrorIs(t, err, merr) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
|
conn.AssertExpectations(t)
|
||||||
|
client.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type assert containerReference implements ExecutionsEnvironment
|
||||||
|
var _ ExecutionsEnvironment = &containerReference{}
|
||||||
|
|
||||||
|
func TestCheckVolumes(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
desc string
|
||||||
|
validVolumes []string
|
||||||
|
binds []string
|
||||||
|
expectedBinds []string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
desc: "match all volumes",
|
||||||
|
validVolumes: []string{"**"},
|
||||||
|
binds: []string{
|
||||||
|
"shared_volume:/shared_volume",
|
||||||
|
"/home/test/data:/test_data",
|
||||||
|
"/etc/conf.d/base.json:/config/base.json",
|
||||||
|
"sql_data:/sql_data",
|
||||||
|
"/secrets/keys:/keys",
|
||||||
|
},
|
||||||
|
expectedBinds: []string{
|
||||||
|
"shared_volume:/shared_volume",
|
||||||
|
"/home/test/data:/test_data",
|
||||||
|
"/etc/conf.d/base.json:/config/base.json",
|
||||||
|
"sql_data:/sql_data",
|
||||||
|
"/secrets/keys:/keys",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "no volumes can be matched",
|
||||||
|
validVolumes: []string{},
|
||||||
|
binds: []string{
|
||||||
|
"shared_volume:/shared_volume",
|
||||||
|
"/home/test/data:/test_data",
|
||||||
|
"/etc/conf.d/base.json:/config/base.json",
|
||||||
|
"sql_data:/sql_data",
|
||||||
|
"/secrets/keys:/keys",
|
||||||
|
},
|
||||||
|
expectedBinds: []string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
desc: "only allowed volumes can be matched",
|
||||||
|
validVolumes: []string{
|
||||||
|
"shared_volume",
|
||||||
|
"/home/test/data",
|
||||||
|
"/etc/conf.d/*.json",
|
||||||
|
},
|
||||||
|
binds: []string{
|
||||||
|
"shared_volume:/shared_volume",
|
||||||
|
"/home/test/data:/test_data",
|
||||||
|
"/etc/conf.d/base.json:/config/base.json",
|
||||||
|
"sql_data:/sql_data",
|
||||||
|
"/secrets/keys:/keys",
|
||||||
|
},
|
||||||
|
expectedBinds: []string{
|
||||||
|
"shared_volume:/shared_volume",
|
||||||
|
"/home/test/data:/test_data",
|
||||||
|
"/etc/conf.d/base.json:/config/base.json",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.desc, func(t *testing.T) {
|
||||||
|
logger, _ := test.NewNullLogger()
|
||||||
|
ctx := common.WithLogger(context.Background(), logger)
|
||||||
|
cr := &containerReference{
|
||||||
|
input: &NewContainerInput{
|
||||||
|
ValidVolumes: tc.validVolumes,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_, hostConf := cr.sanitizeConfig(ctx, &container.Config{}, &container.HostConfig{Binds: tc.binds})
|
||||||
|
assert.Equal(t, tc.expectedBinds, hostConf.Binds)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2024 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2024 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -90,7 +94,7 @@ func GetSocketAndHost(containerSocket string) (SocketAndHost, error) {
|
|||||||
if !hasDockerHost && socketHost.Socket != "" && !isDockerHostURI(socketHost.Socket) {
|
if !hasDockerHost && socketHost.Socket != "" && !isDockerHostURI(socketHost.Socket) {
|
||||||
// Cases: 1B, 2B
|
// Cases: 1B, 2B
|
||||||
// Should we early-exit here, since there is no host nor socket to talk to?
|
// Should we early-exit here, since there is no host nor socket to talk to?
|
||||||
return SocketAndHost{}, fmt.Errorf("docker host aka DOCKER_HOST was not set, couldn't be found in the usual locations, and the container daemon socket ('%s') is invalid", socketHost.Socket)
|
return SocketAndHost{}, fmt.Errorf("DOCKER_HOST was not set, couldn't be found in the usual locations, and the container daemon socket ('%s') is invalid", socketHost.Socket)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Default to DOCKER_HOST if set
|
// Default to DOCKER_HOST if set
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2024 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2024 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -6,7 +10,6 @@ import (
|
|||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
assert "github.com/stretchr/testify/assert"
|
assert "github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -26,7 +29,7 @@ func TestGetSocketAndHostWithSocket(t *testing.T) {
|
|||||||
ret, err := GetSocketAndHost(socketURI)
|
ret, err := GetSocketAndHost(socketURI)
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{socketURI, dockerHost}, ret)
|
assert.Equal(t, SocketAndHost{socketURI, dockerHost}, ret)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -39,7 +42,7 @@ func TestGetSocketAndHostNoSocket(t *testing.T) {
|
|||||||
ret, err := GetSocketAndHost("")
|
ret, err := GetSocketAndHost("")
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{dockerHost, dockerHost}, ret)
|
assert.Equal(t, SocketAndHost{dockerHost, dockerHost}, ret)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -54,7 +57,7 @@ func TestGetSocketAndHostOnlySocket(t *testing.T) {
|
|||||||
ret, err := GetSocketAndHost(socketURI)
|
ret, err := GetSocketAndHost(socketURI)
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
require.NoError(t, err, "Expected no error from GetSocketAndHost")
|
assert.NoError(t, err, "Expected no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.True(t, defaultSocketFound, "Expected to find default socket")
|
assert.True(t, defaultSocketFound, "Expected to find default socket")
|
||||||
assert.Equal(t, socketURI, ret.Socket, "Expected socket to match common location")
|
assert.Equal(t, socketURI, ret.Socket, "Expected socket to match common location")
|
||||||
assert.Equal(t, defaultSocket, ret.Host, "Expected ret.Host to match default socket location")
|
assert.Equal(t, defaultSocket, ret.Host, "Expected ret.Host to match default socket location")
|
||||||
@@ -70,7 +73,7 @@ func TestGetSocketAndHostDontMount(t *testing.T) {
|
|||||||
ret, err := GetSocketAndHost("-")
|
ret, err := GetSocketAndHost("-")
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{"-", dockerHost}, ret)
|
assert.Equal(t, SocketAndHost{"-", dockerHost}, ret)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -85,7 +88,7 @@ func TestGetSocketAndHostNoHostNoSocket(t *testing.T) {
|
|||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
assert.True(t, found, "Expected a default socket to be found")
|
assert.True(t, found, "Expected a default socket to be found")
|
||||||
require.NoError(t, err, "Expected no error from GetSocketAndHost")
|
assert.NoError(t, err, "Expected no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{defaultSocket, defaultSocket}, ret, "Expected to match default socket location")
|
assert.Equal(t, SocketAndHost{defaultSocket, defaultSocket}, ret, "Expected to match default socket location")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -98,7 +101,7 @@ func TestGetSocketAndHostNoHostNoSocketDefaultLocation(t *testing.T) {
|
|||||||
mySocket := mySocketFile.Name()
|
mySocket := mySocketFile.Name()
|
||||||
unixSocket := "unix://" + mySocket
|
unixSocket := "unix://" + mySocket
|
||||||
defer os.RemoveAll(mySocket)
|
defer os.RemoveAll(mySocket)
|
||||||
require.NoError(t, tmpErr)
|
assert.NoError(t, tmpErr) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
os.Unsetenv("DOCKER_HOST")
|
os.Unsetenv("DOCKER_HOST")
|
||||||
|
|
||||||
CommonSocketLocations = []string{mySocket}
|
CommonSocketLocations = []string{mySocket}
|
||||||
@@ -110,7 +113,7 @@ func TestGetSocketAndHostNoHostNoSocketDefaultLocation(t *testing.T) {
|
|||||||
// Assert
|
// Assert
|
||||||
assert.Equal(t, unixSocket, defaultSocket, "Expected default socket to match common socket location")
|
assert.Equal(t, unixSocket, defaultSocket, "Expected default socket to match common socket location")
|
||||||
assert.True(t, found, "Expected default socket to be found")
|
assert.True(t, found, "Expected default socket to be found")
|
||||||
require.NoError(t, err, "Expected no error from GetSocketAndHost")
|
assert.NoError(t, err, "Expected no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{unixSocket, unixSocket}, ret, "Expected to match default socket location")
|
assert.Equal(t, SocketAndHost{unixSocket, unixSocket}, ret, "Expected to match default socket location")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -126,9 +129,9 @@ func TestGetSocketAndHostNoHostInvalidSocket(t *testing.T) {
|
|||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
assert.False(t, found, "Expected no default socket to be found")
|
assert.False(t, found, "Expected no default socket to be found")
|
||||||
assert.Empty(t, defaultSocket, "Expected no default socket to be found")
|
assert.Equal(t, "", defaultSocket, "Expected no default socket to be found") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{}, ret, "Expected to match default socket location")
|
assert.Equal(t, SocketAndHost{}, ret, "Expected to match default socket location")
|
||||||
require.Error(t, err, "Expected an error in invalid state")
|
assert.Error(t, err, "Expected an error in invalid state")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetSocketAndHostOnlySocketValidButUnusualLocation(t *testing.T) {
|
func TestGetSocketAndHostOnlySocketValidButUnusualLocation(t *testing.T) {
|
||||||
@@ -143,9 +146,9 @@ func TestGetSocketAndHostOnlySocketValidButUnusualLocation(t *testing.T) {
|
|||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
// Default socket locations
|
// Default socket locations
|
||||||
assert.Empty(t, defaultSocket, "Expect default socket location to be empty")
|
assert.Equal(t, "", defaultSocket, "Expect default socket location to be empty") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.False(t, found, "Expected no default socket to be found")
|
assert.False(t, found, "Expected no default socket to be found")
|
||||||
// Sane default
|
// Sane default
|
||||||
require.NoError(t, err, "Expect no error from GetSocketAndHost")
|
assert.NoError(t, err, "Expect no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, socketURI, ret.Host, "Expect host to default to unusual socket")
|
assert.Equal(t, socketURI, ret.Host, "Expect host to default to unusual socket")
|
||||||
}
|
}
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2023 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
//go:build WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)
|
//go:build WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)
|
||||||
|
|
||||||
package container
|
package container
|
||||||
@@ -6,20 +10,21 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
"gitea.com/gitea/runner/act/common"
|
||||||
"github.com/docker/docker/api/types/system"
|
|
||||||
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ImageExistsLocally returns a boolean indicating if an image with the
|
// ImageExistsLocally returns a boolean indicating if an image with the
|
||||||
// requested name, tag and architecture exists in the local docker image store
|
// requested name, tag and architecture exists in the local docker image store
|
||||||
func ImageExistsLocally(ctx context.Context, imageName string, platform string) (bool, error) {
|
func ImageExistsLocally(ctx context.Context, imageName, platform string) (bool, error) {
|
||||||
return false, errors.New("Unsupported Operation")
|
return false, errors.New("Unsupported Operation")
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveImage removes image from local store, the function is used to run different
|
// RemoveImage removes image from local store, the function is used to run different
|
||||||
// container image architectures
|
// container image architectures
|
||||||
func RemoveImage(ctx context.Context, imageName string, force bool, pruneChildren bool) (bool, error) {
|
func RemoveImage(ctx context.Context, imageName string, force, pruneChildren bool) (bool, error) {
|
||||||
return false, errors.New("Unsupported Operation")
|
return false, errors.New("Unsupported Operation")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -46,8 +51,8 @@ func RunnerArch(ctx context.Context) string {
|
|||||||
return runtime.GOOS
|
return runtime.GOOS
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetHostInfo(ctx context.Context) (info system.Info, err error) {
|
func GetHostInfo(ctx context.Context) (info types.Info, err error) {
|
||||||
return system.Info{}, nil
|
return types.Info{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDockerVolumeRemoveExecutor(volume string, force bool) common.Executor {
|
func NewDockerVolumeRemoveExecutor(volume string, force bool) common.Executor {
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2023 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2020 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||||
|
|
||||||
package container
|
package container
|
||||||
@@ -5,7 +9,8 @@ package container
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
"gitea.com/gitea/runner/act/common"
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/volume"
|
"github.com/docker/docker/api/types/volume"
|
||||||
)
|
)
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2022 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import "context"
|
import "context"
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2022 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -12,17 +16,19 @@ import (
|
|||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"gitea.com/gitea/runner/act/common"
|
||||||
|
"gitea.com/gitea/runner/act/filecollector"
|
||||||
|
"gitea.com/gitea/runner/act/lookpath"
|
||||||
|
|
||||||
"github.com/go-git/go-billy/v5/helper/polyfill"
|
"github.com/go-git/go-billy/v5/helper/polyfill"
|
||||||
"github.com/go-git/go-billy/v5/osfs"
|
"github.com/go-git/go-billy/v5/osfs"
|
||||||
"github.com/go-git/go-git/v5/plumbing/format/gitignore"
|
"github.com/go-git/go-git/v5/plumbing/format/gitignore"
|
||||||
"golang.org/x/term"
|
"golang.org/x/term"
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
|
||||||
"gitea.com/gitea/act_runner/pkg/filecollector"
|
|
||||||
"gitea.com/gitea/act_runner/pkg/lookpath"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type HostEnvironment struct {
|
type HostEnvironment struct {
|
||||||
@@ -30,25 +36,37 @@ type HostEnvironment struct {
|
|||||||
TmpDir string
|
TmpDir string
|
||||||
ToolCache string
|
ToolCache string
|
||||||
Workdir string
|
Workdir string
|
||||||
ActPath string
|
// BindWorkdir is true when the app runner mounts the workspace on the host and
|
||||||
CleanUp func()
|
// deletes the task directory after the job; host teardown must not remove Workdir.
|
||||||
StdOut io.Writer
|
BindWorkdir bool
|
||||||
|
ActPath string
|
||||||
|
CleanUp func()
|
||||||
|
StdOut io.Writer
|
||||||
|
|
||||||
|
mu sync.Mutex
|
||||||
|
runningPIDs map[int]struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *HostEnvironment) Create(_ []string, _ []string) common.Executor {
|
func (e *HostEnvironment) Create(_, _ []string) common.Executor {
|
||||||
return func(_ context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *HostEnvironment) ConnectToNetwork(name string) common.Executor {
|
||||||
|
return func(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *HostEnvironment) Close() common.Executor {
|
func (e *HostEnvironment) Close() common.Executor {
|
||||||
return func(_ context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *HostEnvironment) Copy(destPath string, files ...*FileEntry) common.Executor {
|
func (e *HostEnvironment) Copy(destPath string, files ...*FileEntry) common.Executor {
|
||||||
return func(_ context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
for _, f := range files {
|
for _, f := range files {
|
||||||
if err := os.MkdirAll(filepath.Dir(filepath.Join(destPath, f.Name)), 0o777); err != nil {
|
if err := os.MkdirAll(filepath.Dir(filepath.Join(destPath, f.Name)), 0o777); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -62,9 +80,6 @@ func (e *HostEnvironment) Copy(destPath string, files ...*FileEntry) common.Exec
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (e *HostEnvironment) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
|
func (e *HostEnvironment) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
|
||||||
if common.Dryrun(ctx) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if err := os.RemoveAll(destPath); err != nil {
|
if err := os.RemoveAll(destPath); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -83,7 +98,7 @@ func (e *HostEnvironment) CopyTarStream(ctx context.Context, destPath string, ta
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if ctx.Err() != nil {
|
if ctx.Err() != nil {
|
||||||
return errors.New("copyTarStream has been cancelled")
|
return errors.New("CopyTarStream has been cancelled")
|
||||||
}
|
}
|
||||||
if err := cp.WriteFile(ti.Name, ti.FileInfo(), ti.Linkname, tr); err != nil {
|
if err := cp.WriteFile(ti.Name, ti.FileInfo(), ti.Linkname, tr); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -91,7 +106,7 @@ func (e *HostEnvironment) CopyTarStream(ctx context.Context, destPath string, ta
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *HostEnvironment) CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor {
|
func (e *HostEnvironment) CopyDir(destPath, srcPath string, useGitIgnore bool) common.Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
srcPrefix := filepath.Dir(srcPath)
|
srcPrefix := filepath.Dir(srcPath)
|
||||||
@@ -172,13 +187,13 @@ func (e *HostEnvironment) GetContainerArchive(ctx context.Context, srcPath strin
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (e *HostEnvironment) Pull(_ bool) common.Executor {
|
func (e *HostEnvironment) Pull(_ bool) common.Executor {
|
||||||
return func(_ context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *HostEnvironment) Start(_ bool) common.Executor {
|
func (e *HostEnvironment) Start(_ bool) common.Executor {
|
||||||
return func(_ context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -272,7 +287,7 @@ func copyPtyOutput(writer io.Writer, ppty io.Reader, finishLog context.CancelFun
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (e *HostEnvironment) UpdateFromImageEnv(_ *map[string]string) common.Executor {
|
func (e *HostEnvironment) UpdateFromImageEnv(_ *map[string]string) common.Executor {
|
||||||
return func(_ context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -320,7 +335,7 @@ func (e *HostEnvironment) exec(ctx context.Context, command []string, cmdline st
|
|||||||
tty.Close()
|
tty.Close()
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
if containerAllocateTerminal /* allocate Terminal */ {
|
if true /* allocate Terminal */ {
|
||||||
var err error
|
var err error
|
||||||
ppty, tty, err = setupPty(cmd, cmdline)
|
ppty, tty, err = setupPty(cmd, cmdline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -337,8 +352,30 @@ func (e *HostEnvironment) exec(ctx context.Context, command []string, cmdline st
|
|||||||
if ppty != nil {
|
if ppty != nil {
|
||||||
go writeKeepAlive(ppty)
|
go writeKeepAlive(ppty)
|
||||||
}
|
}
|
||||||
err = cmd.Run()
|
// Split Start/Wait so the PID can be registered before the process can exit;
|
||||||
|
// cmd.Run() would block until exit, by which time the PID may have been reused.
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if cmd.Process != nil {
|
||||||
|
e.mu.Lock()
|
||||||
|
if e.runningPIDs == nil {
|
||||||
|
e.runningPIDs = map[int]struct{}{}
|
||||||
|
}
|
||||||
|
e.runningPIDs[cmd.Process.Pid] = struct{}{}
|
||||||
|
e.mu.Unlock()
|
||||||
|
defer func(pid int) {
|
||||||
|
e.mu.Lock()
|
||||||
|
delete(e.runningPIDs, pid)
|
||||||
|
e.mu.Unlock()
|
||||||
|
}(cmd.Process.Pid)
|
||||||
|
}
|
||||||
|
err = cmd.Wait()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
var exitErr *exec.ExitError
|
||||||
|
if errors.As(err, &exitErr) {
|
||||||
|
return ExitCodeError(exitErr.ExitCode())
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if tty != nil {
|
if tty != nil {
|
||||||
@@ -378,12 +415,83 @@ func (e *HostEnvironment) UpdateFromEnv(srcPath string, env *map[string]string)
|
|||||||
return parseEnvFile(e, srcPath, env)
|
return parseEnvFile(e, srcPath, env)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func removePathWithRetry(ctx context.Context, path string) error {
|
||||||
|
if path == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
attempts := 1
|
||||||
|
delay := time.Duration(0)
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
attempts = 5
|
||||||
|
delay = 200 * time.Millisecond
|
||||||
|
}
|
||||||
|
var lastErr error
|
||||||
|
for i := 0; i < attempts; i++ {
|
||||||
|
if i > 0 {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
case <-time.After(delay):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lastErr = os.RemoveAll(path)
|
||||||
|
if lastErr == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return lastErr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *HostEnvironment) terminateRunningProcesses(ctx context.Context) {
|
||||||
|
if runtime.GOOS != "windows" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
e.mu.Lock()
|
||||||
|
pids := make([]int, 0, len(e.runningPIDs))
|
||||||
|
for pid := range e.runningPIDs {
|
||||||
|
pids = append(pids, pid)
|
||||||
|
}
|
||||||
|
e.mu.Unlock()
|
||||||
|
|
||||||
|
if len(pids) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
logger := common.Logger(ctx)
|
||||||
|
for _, pid := range pids {
|
||||||
|
// Best-effort: forcibly terminate process tree to release file handles
|
||||||
|
// so that workspace cleanup can succeed on Windows.
|
||||||
|
cmd := exec.CommandContext(ctx, "taskkill", "/PID", strconv.Itoa(pid), "/T", "/F")
|
||||||
|
out, err := cmd.CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
logger.Debugf("taskkill failed for pid=%d: %v output=%s", pid, err, strings.TrimSpace(string(out)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (e *HostEnvironment) Remove() common.Executor {
|
func (e *HostEnvironment) Remove() common.Executor {
|
||||||
return func(_ context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
|
// Ensure any lingering child processes are ended before attempting
|
||||||
|
// to remove the workspace (Windows file locks otherwise prevent cleanup).
|
||||||
|
e.terminateRunningProcesses(ctx)
|
||||||
|
|
||||||
|
// Only removes per-job misc state. Must not remove the cache/toolcache root.
|
||||||
if e.CleanUp != nil {
|
if e.CleanUp != nil {
|
||||||
e.CleanUp()
|
e.CleanUp()
|
||||||
}
|
}
|
||||||
return os.RemoveAll(e.Path)
|
logger := common.Logger(ctx)
|
||||||
|
var errs []error
|
||||||
|
if err := removePathWithRetry(ctx, e.Path); err != nil {
|
||||||
|
logger.Warnf("failed to remove host misc state %s: %v", e.Path, err)
|
||||||
|
errs = append(errs, err)
|
||||||
|
}
|
||||||
|
if !e.BindWorkdir && e.Workdir != "" {
|
||||||
|
if err := removePathWithRetry(ctx, e.Workdir); err != nil {
|
||||||
|
logger.Warnf("failed to remove host workspace %s: %v", e.Workdir, err)
|
||||||
|
errs = append(errs, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return errors.Join(errs...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -410,9 +518,8 @@ func (*HostEnvironment) GetPathVariableName() string {
|
|||||||
return "path"
|
return "path"
|
||||||
case "windows":
|
case "windows":
|
||||||
return "Path" // Actually we need a case insensitive map
|
return "Path" // Actually we need a case insensitive map
|
||||||
default:
|
|
||||||
return "PATH"
|
|
||||||
}
|
}
|
||||||
|
return "PATH"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *HostEnvironment) DefaultPathVariable() string {
|
func (e *HostEnvironment) DefaultPathVariable() string {
|
||||||
@@ -428,7 +535,6 @@ func (*HostEnvironment) JoinPathVariable(paths ...string) string {
|
|||||||
// https://docs.github.com/en/actions/learn-github-actions/contexts#runner-context
|
// https://docs.github.com/en/actions/learn-github-actions/contexts#runner-context
|
||||||
func goArchToActionArch(arch string) string {
|
func goArchToActionArch(arch string) string {
|
||||||
archMapper := map[string]string{
|
archMapper := map[string]string{
|
||||||
"amd64": "X64",
|
|
||||||
"x86_64": "X64",
|
"x86_64": "X64",
|
||||||
"386": "X86",
|
"386": "X86",
|
||||||
"aarch64": "ARM64",
|
"aarch64": "ARM64",
|
||||||
@@ -441,9 +547,7 @@ func goArchToActionArch(arch string) string {
|
|||||||
|
|
||||||
func goOsToActionOs(os string) string {
|
func goOsToActionOs(os string) string {
|
||||||
osMapper := map[string]string{
|
osMapper := map[string]string{
|
||||||
"linux": "Linux",
|
"darwin": "macOS",
|
||||||
"windows": "Windows",
|
|
||||||
"darwin": "macOS",
|
|
||||||
}
|
}
|
||||||
if os, ok := osMapper[os]; ok {
|
if os, ok := osMapper[os]; ok {
|
||||||
return os
|
return os
|
||||||
@@ -460,11 +564,7 @@ func (e *HostEnvironment) GetRunnerContext(_ context.Context) map[string]any {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *HostEnvironment) GetHealth(_ context.Context) Health {
|
func (e *HostEnvironment) ReplaceLogWriter(stdout, _ io.Writer) (io.Writer, io.Writer) {
|
||||||
return HealthHealthy
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *HostEnvironment) ReplaceLogWriter(stdout io.Writer, _ io.Writer) (io.Writer, io.Writer) {
|
|
||||||
org := e.StdOut
|
org := e.StdOut
|
||||||
e.StdOut = stdout
|
e.StdOut = stdout
|
||||||
return org, org
|
return org, org
|
||||||
149
act/container/host_environment_test.go
Normal file
149
act/container/host_environment_test.go
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2022 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
package container
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"gitea.com/gitea/runner/act/common"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Type assert HostEnvironment implements ExecutionsEnvironment
|
||||||
|
var _ ExecutionsEnvironment = &HostEnvironment{}
|
||||||
|
|
||||||
|
func TestCopyDir(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
ctx := context.Background()
|
||||||
|
e := &HostEnvironment{
|
||||||
|
Path: filepath.Join(dir, "path"),
|
||||||
|
TmpDir: filepath.Join(dir, "tmp"),
|
||||||
|
ToolCache: filepath.Join(dir, "tool_cache"),
|
||||||
|
ActPath: filepath.Join(dir, "act_path"),
|
||||||
|
StdOut: os.Stdout,
|
||||||
|
Workdir: path.Join("testdata", "scratch"),
|
||||||
|
}
|
||||||
|
_ = os.MkdirAll(e.Path, 0o700)
|
||||||
|
_ = os.MkdirAll(e.TmpDir, 0o700)
|
||||||
|
_ = os.MkdirAll(e.ToolCache, 0o700)
|
||||||
|
_ = os.MkdirAll(e.ActPath, 0o700)
|
||||||
|
err := e.CopyDir(e.Workdir, e.Path, true)(ctx)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetContainerArchive(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
ctx := context.Background()
|
||||||
|
e := &HostEnvironment{
|
||||||
|
Path: filepath.Join(dir, "path"),
|
||||||
|
TmpDir: filepath.Join(dir, "tmp"),
|
||||||
|
ToolCache: filepath.Join(dir, "tool_cache"),
|
||||||
|
ActPath: filepath.Join(dir, "act_path"),
|
||||||
|
StdOut: os.Stdout,
|
||||||
|
Workdir: path.Join("testdata", "scratch"),
|
||||||
|
}
|
||||||
|
_ = os.MkdirAll(e.Path, 0o700)
|
||||||
|
_ = os.MkdirAll(e.TmpDir, 0o700)
|
||||||
|
_ = os.MkdirAll(e.ToolCache, 0o700)
|
||||||
|
_ = os.MkdirAll(e.ActPath, 0o700)
|
||||||
|
expectedContent := []byte("sdde/7sh")
|
||||||
|
err := os.WriteFile(filepath.Join(e.Path, "action.yml"), expectedContent, 0o600)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
archive, err := e.GetContainerArchive(ctx, e.Path)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
defer archive.Close()
|
||||||
|
reader := tar.NewReader(archive)
|
||||||
|
h, err := reader.Next()
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
assert.Equal(t, "action.yml", h.Name)
|
||||||
|
content, err := io.ReadAll(reader)
|
||||||
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
assert.Equal(t, expectedContent, content)
|
||||||
|
_, err = reader.Next()
|
||||||
|
assert.ErrorIs(t, err, io.EOF)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHostEnvironmentExecExitCode(t *testing.T) {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
t.Skip("uses POSIX shell")
|
||||||
|
}
|
||||||
|
dir := t.TempDir()
|
||||||
|
ctx := context.Background()
|
||||||
|
e := &HostEnvironment{
|
||||||
|
Path: filepath.Join(dir, "path"),
|
||||||
|
TmpDir: filepath.Join(dir, "tmp"),
|
||||||
|
ToolCache: filepath.Join(dir, "tool_cache"),
|
||||||
|
ActPath: filepath.Join(dir, "act_path"),
|
||||||
|
StdOut: io.Discard,
|
||||||
|
Workdir: filepath.Join(dir, "path"),
|
||||||
|
}
|
||||||
|
for _, p := range []string{e.Path, e.TmpDir, e.ToolCache, e.ActPath} {
|
||||||
|
assert.NoError(t, os.MkdirAll(p, 0o700)) //nolint:testifylint // test setup
|
||||||
|
}
|
||||||
|
|
||||||
|
err := e.Exec([]string{"sh", "-c", "exit 3"}, map[string]string{"PATH": os.Getenv("PATH")}, "", "")(ctx)
|
||||||
|
var exitErr ExitCodeError
|
||||||
|
require.ErrorAs(t, err, &exitErr)
|
||||||
|
assert.Equal(t, ExitCodeError(3), exitErr)
|
||||||
|
assert.Equal(t, "Process completed with exit code 3.", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHostEnvironmentRemoveCleansWorkdir(t *testing.T) {
|
||||||
|
logger := logrus.New()
|
||||||
|
ctx := common.WithLogger(context.Background(), logrus.NewEntry(logger))
|
||||||
|
base := t.TempDir()
|
||||||
|
miscRoot := filepath.Join(base, "misc")
|
||||||
|
path := filepath.Join(miscRoot, "hostexecutor")
|
||||||
|
require.NoError(t, os.MkdirAll(path, 0o700))
|
||||||
|
workdir := filepath.Join(base, "workspace", "owner", "repo")
|
||||||
|
require.NoError(t, os.MkdirAll(workdir, 0o700))
|
||||||
|
|
||||||
|
e := &HostEnvironment{
|
||||||
|
Path: path,
|
||||||
|
Workdir: workdir,
|
||||||
|
BindWorkdir: false,
|
||||||
|
CleanUp: func() {
|
||||||
|
_ = os.RemoveAll(miscRoot)
|
||||||
|
},
|
||||||
|
StdOut: os.Stdout,
|
||||||
|
}
|
||||||
|
require.NoError(t, e.Remove()(ctx))
|
||||||
|
_, err := os.Stat(workdir)
|
||||||
|
assert.ErrorIs(t, err, os.ErrNotExist)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHostEnvironmentRemoveSkipsWorkdirWhenBindWorkdir(t *testing.T) {
|
||||||
|
logger := logrus.New()
|
||||||
|
ctx := common.WithLogger(context.Background(), logrus.NewEntry(logger))
|
||||||
|
base := t.TempDir()
|
||||||
|
miscRoot := filepath.Join(base, "misc")
|
||||||
|
path := filepath.Join(miscRoot, "hostexecutor")
|
||||||
|
require.NoError(t, os.MkdirAll(path, 0o700))
|
||||||
|
workdir := filepath.Join(base, "workspace", "123", "owner", "repo")
|
||||||
|
require.NoError(t, os.MkdirAll(workdir, 0o700))
|
||||||
|
|
||||||
|
e := &HostEnvironment{
|
||||||
|
Path: path,
|
||||||
|
Workdir: workdir,
|
||||||
|
BindWorkdir: true,
|
||||||
|
CleanUp: func() {
|
||||||
|
_ = os.RemoveAll(miscRoot)
|
||||||
|
},
|
||||||
|
StdOut: os.Stdout,
|
||||||
|
}
|
||||||
|
require.NoError(t, e.Remove()(ctx))
|
||||||
|
_, err := os.Stat(workdir)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2022 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2022 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -1,3 +1,7 @@
|
|||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2022 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -8,7 +12,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/pkg/common"
|
"gitea.com/gitea/runner/act/common"
|
||||||
)
|
)
|
||||||
|
|
||||||
func parseEnvFile(e Container, srcPath string, env *map[string]string) common.Executor {
|
func parseEnvFile(e Container, srcPath string, env *map[string]string) common.Executor {
|
||||||
@@ -25,17 +29,8 @@ func parseEnvFile(e Container, srcPath string, env *map[string]string) common.Ex
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
s := bufio.NewScanner(reader)
|
s := bufio.NewScanner(reader)
|
||||||
s.Buffer(nil, 1024*1024*1024) // increase buffer to 1GB to avoid scanner buffer overflow
|
|
||||||
firstLine := true
|
|
||||||
for s.Scan() {
|
for s.Scan() {
|
||||||
line := s.Text()
|
line := s.Text()
|
||||||
if firstLine {
|
|
||||||
firstLine = false
|
|
||||||
// skip utf8 bom, powershell 5 legacy uses it for utf8
|
|
||||||
if len(line) >= 3 && line[0] == 239 && line[1] == 187 && line[2] == 191 {
|
|
||||||
line = line[3:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
singleLineEnv := strings.Index(line, "=")
|
singleLineEnv := strings.Index(line, "=")
|
||||||
multiLineEnv := strings.Index(line, "<<")
|
multiLineEnv := strings.Index(line, "<<")
|
||||||
if singleLineEnv != -1 && (multiLineEnv == -1 || singleLineEnv < multiLineEnv) {
|
if singleLineEnv != -1 && (multiLineEnv == -1 || singleLineEnv < multiLineEnv) {
|
||||||
@@ -64,6 +59,6 @@ func parseEnvFile(e Container, srcPath string, env *map[string]string) common.Ex
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
env = &localEnv
|
env = &localEnv
|
||||||
return s.Err()
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user