Fixes: https://gitea.com/gitea/runner/issues/859

Migration approach mirrors [actions-oss/act-cli#154](https://github.com/actions-oss/act-cli/pull/154).

### Dependency changes

- `github.com/docker/docker` v25.0.15 → **removed** (v29 doesn't exist as docker/docker; the project moved to moby/moby)
- `github.com/docker/cli` v25.0.7 → v29.4.3
- `github.com/docker/go-connections` v0.6.0 → v0.7.0
- `github.com/docker/docker-credential-helpers` v0.9.5 → v0.9.6
- `github.com/moby/go-archive` added at v0.2.0
- `github.com/moby/moby/api` added at v1.54.2
- `github.com/moby/moby/client` added at v0.4.1
- `github.com/moby/buildkit` removed (only used `dockerignore.ReadAll`; swapped for `moby/patternmatcher/ignorefile.ReadAll` directly)
- `github.com/containerd/errdefs` v0.3.0 → v1.0.0

### Migration

- v28: type aliases moved to their subpackages (`types.{Container,Image,Network,Exec}*` → `container`/`image`/`network`/...); deprecated APIs replaced (`ImageInspectWithRaw`, `client.IsErrNotFound`, `archive.CanonicalTarNameForPath`, `opts.ValidateMACAddress`, `ListOpts.GetAll`)
- v29: a structural client redesign. Every `cli.X(ctx, ...)` call switched to options-everywhere/Result-typed signatures, `ContainerExec*` → `Exec*`, `ContainerWait` returns a struct with `Result`/`Error` channels, `Tty` → `TTY`, `Copy*Container` takes an options struct, and `client.NewClientWithOpts` → `client.New`. `pkg/stdcopy` moved to `moby/moby/api/pkg/stdcopy`. The vendored copy of `cli/command/container/opts.go` was refreshed from cli v29 (it now uses `netip.Addr` for IPs and port-set conversion helpers). A small local `parsePlatform` helper centralises the `os/arch[/variant]` parsing previously inlined into multiple call sites; a sketch of that parsing follows these notes.

### Behaviour preservation

The migration introduced several behavioural shifts vs the v25 client; all were caught in review and reverted or fixed in follow-up commits:

- `GetDockerClient`: cli v29's `Ping(NegotiateAPIVersion: true)` returns errors that the old `NegotiateAPIVersion` silently swallowed. Restored the best-effort behaviour (warn-log and continue) so daemons with a blocked `_ping` endpoint or API < 1.40 keep working. The SSH-helper `client.New` call no longer inherits `client.FromEnv`, matching the old `NewClientWithOpts(WithHost, WithDialContext)`, so `DOCKER_API_VERSION`/`DOCKER_TLS_VERIFY` don't leak into the SSH-tunneled client.
- `parsePlatform`: malformed input now returns an explicit error instead of silently dropping to "no platform constraint" and pulling the host-default architecture. Single-segment (`"linux"`), 4+-segment (`"linux/arm/v7/extra"`), and trailing-slash (`"linux/arm/"`) inputs are all rejected.
- `LoadDockerAuthConfig`/`LoadDockerAuthConfigs`: `config.LoadDefaultConfigFile(nil)` panics on a malformed config file (it calls `fmt.Fprintln` on the nil `io.Writer`). Switched to `config.Load(config.Dir())` so load errors reach the logger and the panic path is gone. Restored the old behaviour of returning `config.Load` and `GetAuthConfig` errors to the caller (the v29 refactor had silently downgraded them to warn-only). A `reference.ParseNormalizedNamed` failure on the image string falls through to the `docker.io` default rather than aborting, since the old string-based hostname extraction was infallible.

Test assertions were also updated for two upstream error-message string shifts (`go-connections` port-range parser; `cli/opts` envfile BOM check). Unit-test coverage was added for the new `parsePlatform` helper, locking in the intentional limits (single-segment, 4+-segment, and trailing-slash platforms are rejected).
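The following is an illustrative sketch of the `os/arch[/variant]` validation described above, not a copy of the PR's code: the function name matches the description, but the exact signature and the `ocispec.Platform` return type are assumptions.

```go
package container

import (
	"fmt"
	"strings"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// parsePlatform accepts "os/arch" or "os/arch/variant" and rejects
// everything else with an explicit error: single-segment ("linux"),
// 4+-segment ("linux/arm/v7/extra"), and trailing-slash ("linux/arm/")
// inputs no longer fall through to "no platform constraint".
func parsePlatform(s string) (ocispec.Platform, error) {
	parts := strings.Split(s, "/")
	for _, part := range parts {
		if part == "" { // catches trailing slashes ("linux/arm/") and leading slashes
			return ocispec.Platform{}, fmt.Errorf("invalid platform %q: empty segment", s)
		}
	}
	switch len(parts) {
	case 2:
		return ocispec.Platform{OS: parts[0], Architecture: parts[1]}, nil
	case 3:
		return ocispec.Platform{OS: parts[0], Architecture: parts[1], Variant: parts[2]}, nil
	default: // one segment, or four or more
		return ocispec.Platform{}, fmt.Errorf("invalid platform %q: expected os/arch[/variant]", s)
	}
}
```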
---

This PR was written with the help of Claude Opus 4.7

Reviewed-on: https://gitea.com/gitea/runner/pulls/943
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: silverwind <me@silverwind.io>
Co-committed-by: silverwind <me@silverwind.io>
// Copyright 2022 The Gitea Authors. All rights reserved.
// Copyright 2020 The nektos/act Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package container

import (
	"bufio"
	"bytes"
	"context"
	"errors"
	"io"
	"net"
	"strings"
	"testing"
	"time"

	"gitea.com/gitea/runner/act/common"

	"github.com/moby/moby/api/types/container"
	mobyclient "github.com/moby/moby/client"
	"github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

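// TestDocker is an integration test: it builds the testdata image as
// "envmergetest", then verifies that extractFromImageEnv merges the image's
// ENV into the given map (PATH entries are appended; an existing value wins
// on conflict).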
func TestDocker(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	ctx := context.Background()
	client, err := GetDockerClient(ctx)
	if err != nil {
		t.Skipf("skipping integration test: %v", err)
	}
	defer client.Close()

	dockerBuild := NewDockerBuildExecutor(NewDockerBuildExecutorInput{
		ContextDir: "testdata",
		ImageTag:   "envmergetest",
	})

	err = dockerBuild(ctx)
	assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act

	cr := &containerReference{
		cli: client,
		input: &NewContainerInput{
			Image: "envmergetest",
		},
	}
	env := map[string]string{
		"PATH":         "/usr/local/bin:/usr/bin:/usr/sbin:/bin:/sbin",
		"RANDOM_VAR":   "WITH_VALUE",
		"ANOTHER_VAR":  "",
		"CONFLICT_VAR": "I_EXIST_IN_MULTIPLE_PLACES",
	}

	envExecutor := cr.extractFromImageEnv(&env)
	err = envExecutor(ctx)
	assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
	assert.Equal(t, map[string]string{
		"PATH":            "/usr/local/bin:/usr/bin:/usr/sbin:/bin:/sbin:/this/path/does/not/exists/anywhere:/this/either",
		"RANDOM_VAR":      "WITH_VALUE",
		"ANOTHER_VAR":     "",
		"SOME_RANDOM_VAR": "",
		"ANOTHER_ONE":     "BUT_I_HAVE_VALUE",
		"CONFLICT_VAR":    "I_EXIST_IN_MULTIPLE_PLACES",
	}, env)
}

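// mockDockerClient satisfies mobyclient.APIClient through the embedded
// interface while mocking only the methods these tests exercise; calling
// any unmocked method would panic on the nil embed.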
type mockDockerClient struct {
	mobyclient.APIClient
	mock.Mock
}

func (m *mockDockerClient) ExecCreate(ctx context.Context, id string, opts mobyclient.ExecCreateOptions) (mobyclient.ExecCreateResult, error) {
	args := m.Called(ctx, id, opts)
	return args.Get(0).(mobyclient.ExecCreateResult), args.Error(1)
}

func (m *mockDockerClient) ExecAttach(ctx context.Context, id string, opts mobyclient.ExecAttachOptions) (mobyclient.ExecAttachResult, error) {
	args := m.Called(ctx, id, opts)
	return args.Get(0).(mobyclient.ExecAttachResult), args.Error(1)
}

func (m *mockDockerClient) ExecInspect(ctx context.Context, execID string, opts mobyclient.ExecInspectOptions) (mobyclient.ExecInspectResult, error) {
	args := m.Called(ctx, execID, opts)
	return args.Get(0).(mobyclient.ExecInspectResult), args.Error(1)
}

func (m *mockDockerClient) ContainerWait(ctx context.Context, containerID string, opts mobyclient.ContainerWaitOptions) mobyclient.ContainerWaitResult {
	args := m.Called(ctx, containerID, opts)
	return args.Get(0).(mobyclient.ContainerWaitResult)
}

func (m *mockDockerClient) CopyToContainer(ctx context.Context, id string, options mobyclient.CopyToContainerOptions) (mobyclient.CopyToContainerResult, error) {
	args := m.Called(ctx, id, options)
	return args.Get(0).(mobyclient.CopyToContainerResult), args.Error(1)
}

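// endlessReader claims one byte read on every call and never returns io.EOF,
// so an attached exec stream stays open until its context is cancelled
// (see TestDockerExecAbort).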
type endlessReader struct {
	io.Reader
}

func (r endlessReader) Read(_ []byte) (n int, err error) {
	return 1, nil
}

type mockConn struct {
	net.Conn
	mock.Mock
}

func (m *mockConn) Write(b []byte) (n int, err error) {
	args := m.Called(b)
	return args.Int(0), args.Error(1)
}

func (m *mockConn) Close() (err error) {
	return nil
}

func TestDockerExecAbort(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())

	conn := &mockConn{}
	conn.On("Write", mock.AnythingOfType("[]uint8")).Return(1, nil)

	client := &mockDockerClient{}
	client.On("ExecCreate", ctx, "123", mock.AnythingOfType("client.ExecCreateOptions")).Return(mobyclient.ExecCreateResult{ID: "id"}, nil)
	client.On("ExecAttach", ctx, "id", mock.AnythingOfType("client.ExecAttachOptions")).Return(mobyclient.ExecAttachResult{
		HijackedResponse: mobyclient.HijackedResponse{
			Conn:   conn,
			Reader: bufio.NewReader(endlessReader{}),
		},
	}, nil)

	cr := &containerReference{
		id:  "123",
		cli: client,
		input: &NewContainerInput{
			Image: "image",
		},
	}

	channel := make(chan error)

	go func() {
		channel <- cr.exec([]string{""}, map[string]string{}, "user", "workdir")(ctx)
	}()

	time.Sleep(500 * time.Millisecond)

	cancel()

	err := <-channel
	assert.ErrorIs(t, err, context.Canceled) //nolint:testifylint // pre-existing issue from nektos/act

	conn.AssertExpectations(t)
	client.AssertExpectations(t)
}

func TestDockerExecFailure(t *testing.T) {
	ctx := context.Background()

	conn := &mockConn{}

	client := &mockDockerClient{}
	client.On("ExecCreate", ctx, "123", mock.AnythingOfType("client.ExecCreateOptions")).Return(mobyclient.ExecCreateResult{ID: "id"}, nil)
	client.On("ExecAttach", ctx, "id", mock.AnythingOfType("client.ExecAttachOptions")).Return(mobyclient.ExecAttachResult{
		HijackedResponse: mobyclient.HijackedResponse{
			Conn:   conn,
			Reader: bufio.NewReader(strings.NewReader("output")),
		},
	}, nil)
	client.On("ExecInspect", ctx, "id", mobyclient.ExecInspectOptions{}).Return(mobyclient.ExecInspectResult{
		ExitCode: 1,
	}, nil)

	cr := &containerReference{
		id:  "123",
		cli: client,
		input: &NewContainerInput{
			Image: "image",
		},
	}

	err := cr.exec([]string{""}, map[string]string{}, "user", "workdir")(ctx)
	var exitErr ExitCodeError
	require.ErrorAs(t, err, &exitErr)
	assert.Equal(t, ExitCodeError(1), exitErr)
	assert.Equal(t, "Process completed with exit code 1.", err.Error())

	conn.AssertExpectations(t)
	client.AssertExpectations(t)
}

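// TestDockerWaitFailure exercises the v29 ContainerWait shape described in
// the PR notes: a ContainerWaitResult carrying separate Result and Error
// channels instead of the old two-value (status channel, error channel)
// return.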
func TestDockerWaitFailure(t *testing.T) {
	ctx := context.Background()

	statusCh := make(chan container.WaitResponse, 1)
	statusCh <- container.WaitResponse{StatusCode: 2}
	errCh := make(chan error, 1)

	client := &mockDockerClient{}
	client.On("ContainerWait", ctx, "123", mobyclient.ContainerWaitOptions{Condition: container.WaitConditionNotRunning}).
		Return(mobyclient.ContainerWaitResult{
			Result: (<-chan container.WaitResponse)(statusCh),
			Error:  (<-chan error)(errCh),
		})

	cr := &containerReference{
		id:  "123",
		cli: client,
		input: &NewContainerInput{
			Image: "image",
		},
	}

	err := cr.wait()(ctx)
	var exitErr ExitCodeError
	require.ErrorAs(t, err, &exitErr)
	assert.Equal(t, ExitCodeError(2), exitErr)
	assert.Equal(t, "Process completed with exit code 2.", err.Error())

	client.AssertExpectations(t)
}

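// CopyTarStream issues two CopyToContainer calls, one rooted at "/" and one
// at the destination path; the three tests below cover the success path plus
// a failure of each call in turn.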
func TestDockerCopyTarStream(t *testing.T) {
	ctx := context.Background()

	client := &mockDockerClient{}
	client.On("CopyToContainer", ctx, "123", mock.MatchedBy(func(opts mobyclient.CopyToContainerOptions) bool {
		return opts.DestinationPath == "/" && opts.Content != nil
	})).Return(mobyclient.CopyToContainerResult{}, nil)
	client.On("CopyToContainer", ctx, "123", mock.MatchedBy(func(opts mobyclient.CopyToContainerOptions) bool {
		return opts.DestinationPath == "/var/run/act" && opts.Content != nil
	})).Return(mobyclient.CopyToContainerResult{}, nil)
	cr := &containerReference{
		id:  "123",
		cli: client,
		input: &NewContainerInput{
			Image: "image",
		},
	}

	_ = cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{})

	client.AssertExpectations(t)
}

func TestDockerCopyTarStreamErrorInCopyFiles(t *testing.T) {
	ctx := context.Background()

	merr := errors.New("Failure")

	client := &mockDockerClient{}
	client.On("CopyToContainer", ctx, "123", mock.MatchedBy(func(opts mobyclient.CopyToContainerOptions) bool {
		return opts.DestinationPath == "/" && opts.Content != nil
	})).Return(mobyclient.CopyToContainerResult{}, merr)
	cr := &containerReference{
		id:  "123",
		cli: client,
		input: &NewContainerInput{
			Image: "image",
		},
	}

	err := cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{})
	assert.ErrorIs(t, err, merr) //nolint:testifylint // pre-existing issue from nektos/act

	client.AssertExpectations(t)
}

func TestDockerCopyTarStreamErrorInMkdir(t *testing.T) {
	ctx := context.Background()

	merr := errors.New("Failure")

	client := &mockDockerClient{}
	client.On("CopyToContainer", ctx, "123", mock.MatchedBy(func(opts mobyclient.CopyToContainerOptions) bool {
		return opts.DestinationPath == "/" && opts.Content != nil
	})).Return(mobyclient.CopyToContainerResult{}, nil)
	client.On("CopyToContainer", ctx, "123", mock.MatchedBy(func(opts mobyclient.CopyToContainerOptions) bool {
		return opts.DestinationPath == "/var/run/act" && opts.Content != nil
	})).Return(mobyclient.CopyToContainerResult{}, merr)
	cr := &containerReference{
		id:  "123",
		cli: client,
		input: &NewContainerInput{
			Image: "image",
		},
	}

	err := cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{})
	assert.ErrorIs(t, err, merr) //nolint:testifylint // pre-existing issue from nektos/act

	client.AssertExpectations(t)
}

// Compile-time check that containerReference implements ExecutionsEnvironment
var _ ExecutionsEnvironment = &containerReference{}

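// TestCheckVolumes verifies that sanitizeConfig filters host binds against
// the ValidVolumes allowlist, which supports glob patterns such as "**" and
// "/etc/conf.d/*.json".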
func TestCheckVolumes(t *testing.T) {
	testCases := []struct {
		desc          string
		validVolumes  []string
		binds         []string
		expectedBinds []string
	}{
		{
			desc:         "match all volumes",
			validVolumes: []string{"**"},
			binds: []string{
				"shared_volume:/shared_volume",
				"/home/test/data:/test_data",
				"/etc/conf.d/base.json:/config/base.json",
				"sql_data:/sql_data",
				"/secrets/keys:/keys",
			},
			expectedBinds: []string{
				"shared_volume:/shared_volume",
				"/home/test/data:/test_data",
				"/etc/conf.d/base.json:/config/base.json",
				"sql_data:/sql_data",
				"/secrets/keys:/keys",
			},
		},
		{
			desc:         "no volumes can be matched",
			validVolumes: []string{},
			binds: []string{
				"shared_volume:/shared_volume",
				"/home/test/data:/test_data",
				"/etc/conf.d/base.json:/config/base.json",
				"sql_data:/sql_data",
				"/secrets/keys:/keys",
			},
			expectedBinds: []string{},
		},
		{
			desc: "only allowed volumes can be matched",
			validVolumes: []string{
				"shared_volume",
				"/home/test/data",
				"/etc/conf.d/*.json",
			},
			binds: []string{
				"shared_volume:/shared_volume",
				"/home/test/data:/test_data",
				"/etc/conf.d/base.json:/config/base.json",
				"sql_data:/sql_data",
				"/secrets/keys:/keys",
			},
			expectedBinds: []string{
				"shared_volume:/shared_volume",
				"/home/test/data:/test_data",
				"/etc/conf.d/base.json:/config/base.json",
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			logger, _ := test.NewNullLogger()
			ctx := common.WithLogger(context.Background(), logger)
			cr := &containerReference{
				input: &NewContainerInput{
					ValidVolumes: tc.validVolumes,
				},
			}
			_, hostConf := cr.sanitizeConfig(ctx, &container.Config{}, &container.HostConfig{Binds: tc.binds})
			assert.Equal(t, tc.expectedBinds, hostConf.Binds)
		})
	}
}