mirror of
https://gitea.com/gitea/act_runner.git
synced 2026-05-08 08:13:25 +02:00
Compare commits
14 Commits
e56b984c04
...
v0.6.1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e5e53c732e | ||
|
|
2516573592 | ||
|
|
35834bf817 | ||
|
|
11a5dc8936 | ||
|
|
f09fafcb0a | ||
|
|
801e5cf4d5 | ||
|
|
3f05040438 | ||
|
|
59d90bff26 | ||
|
|
5edc4ba550 | ||
|
|
547a0ff297 | ||
|
|
f2b4dbf05f | ||
|
|
bad4239d18 | ||
|
|
589db33e70 | ||
|
|
1032f857a1 |
@@ -5,17 +5,24 @@
|
|||||||
package artifactcache
|
package artifactcache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -28,9 +35,36 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
	// apiPath is the URL prefix for the GitHub Actions cache protocol
	// endpoints served by this handler.
	apiPath = "/_apis/artifactcache"
	// internalPath is the URL prefix for the runner-facing control-plane
	// endpoints (register/revoke of per-job tokens).
	internalPath = "/_internal"

	// artifactURLTTL bounds how long a signed artifactLocation URL stays valid.
	// Short enough that a leaked URL is near-worthless; long enough to let the
	// @actions/cache client download a big blob that was returned from /cache.
	artifactURLTTL = 10 * time.Minute
)
|
||||||
|
|
||||||
|
// credKey is the private context key under which bearerAuth stores the
// authenticated JobCredential for the lifetime of a request.
type credKey struct{}

// JobCredential ties a per-job bearer token (ACTIONS_RUNTIME_TOKEN) to the
// repository that owns it. Every cache entry is stamped with Repo on
// reserve/commit and checked on read/write so one repo can never observe or
// poison another repo's cache, even from inside a container that reaches the
// cache server over the docker bridge network.
type JobCredential struct {
	// Repo is the repository the token is scoped to.
	Repo string
}

// credEntry holds a registered job's credential along with an active
// registration count. RegisterJob is reference-counted so that if two tasks
// briefly share an ACTIONS_RUNTIME_TOKEN — e.g. a runner that retries a task
// after a crash before the old registration is revoked — the first task's
// revoker does not cut the second task's auth out from under it.
type credEntry struct {
	cred JobCredential // credential granted to the token
	refs int           // active registrations; entry is dropped at zero
}
|
||||||
|
|
||||||
type Handler struct {
|
type Handler struct {
|
||||||
dir string
|
dir string
|
||||||
storage *Storage
|
storage *Storage
|
||||||
@@ -43,10 +77,36 @@ type Handler struct {
|
|||||||
gcAt time.Time
|
gcAt time.Time
|
||||||
|
|
||||||
outboundIP string
|
outboundIP string
|
||||||
|
|
||||||
|
// internalSecret guards /_internal/{register,revoke}. When set, a remote
|
||||||
|
// runner can use these endpoints to pre-register per-job
|
||||||
|
// ACTIONS_RUNTIME_TOKENs against this server, enabling the same
|
||||||
|
// per-job auth and repo scoping as the embedded handler over the
|
||||||
|
// network. Empty disables the control-plane entirely.
|
||||||
|
internalSecret string
|
||||||
|
|
||||||
|
// secret signs short-lived artifact download URLs. The @actions/cache
|
||||||
|
// toolkit does not send Authorization on the download request, so blob
|
||||||
|
// GETs authenticate via a per-URL HMAC signature with expiry rather than
|
||||||
|
// via the bearer token used for management endpoints.
|
||||||
|
secret []byte
|
||||||
|
|
||||||
|
credMu sync.RWMutex
|
||||||
|
creds map[string]*credEntry
|
||||||
}
|
}
|
||||||
|
|
||||||
func StartHandler(dir, outboundIP string, port uint16, logger logrus.FieldLogger) (*Handler, error) {
|
// StartHandler opens the on-disk cache store and starts the HTTP server.
|
||||||
h := &Handler{}
|
//
|
||||||
|
// internalSecret, when non-empty, enables a control-plane API at
|
||||||
|
// /_internal/{register,revoke} that lets a remote runner pre-register the
|
||||||
|
// per-job ACTIONS_RUNTIME_TOKENs it expects this server to honor. The
|
||||||
|
// embedded in-process handler leaves it empty and registers tokens via the
|
||||||
|
// in-process RegisterJob method directly.
|
||||||
|
func StartHandler(dir, outboundIP string, port uint16, internalSecret string, logger logrus.FieldLogger) (*Handler, error) {
|
||||||
|
h := &Handler{
|
||||||
|
creds: make(map[string]*credEntry),
|
||||||
|
internalSecret: internalSecret,
|
||||||
|
}
|
||||||
|
|
||||||
if logger == nil {
|
if logger == nil {
|
||||||
discard := logrus.New()
|
discard := logrus.New()
|
||||||
@@ -83,19 +143,37 @@ func StartHandler(dir, outboundIP string, port uint16, logger logrus.FieldLogger
|
|||||||
h.outboundIP = ip.String()
|
h.outboundIP = ip.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
secret, err := loadOrCreateSecret(dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
h.secret = secret
|
||||||
|
|
||||||
router := httprouter.New()
|
router := httprouter.New()
|
||||||
router.GET(urlBase+"/cache", h.middleware(h.find))
|
router.GET(apiPath+"/cache", h.bearerAuth(h.find))
|
||||||
router.POST(urlBase+"/caches", h.middleware(h.reserve))
|
router.POST(apiPath+"/caches", h.bearerAuth(h.reserve))
|
||||||
router.PATCH(urlBase+"/caches/:id", h.middleware(h.upload))
|
router.PATCH(apiPath+"/caches/:id", h.bearerAuth(h.upload))
|
||||||
router.POST(urlBase+"/caches/:id", h.middleware(h.commit))
|
router.POST(apiPath+"/caches/:id", h.bearerAuth(h.commit))
|
||||||
router.GET(urlBase+"/artifacts/:id", h.middleware(h.get))
|
router.POST(apiPath+"/clean", h.bearerAuth(h.clean))
|
||||||
router.POST(urlBase+"/clean", h.middleware(h.clean))
|
// Artifact GET is signed via query-string HMAC because @actions/cache
|
||||||
|
// does not attach Authorization when downloading archiveLocation.
|
||||||
|
router.GET(apiPath+"/artifacts/:id", h.signedURLAuth(h.get))
|
||||||
|
// Control-plane: a remote runner registers/revokes per-job tokens so the
|
||||||
|
// cache API can authenticate them. Always wired so the routes exist; the
|
||||||
|
// handlers themselves 401 when internalSecret is unset.
|
||||||
|
router.POST(internalPath+"/register", h.internalAuth(h.internalRegister))
|
||||||
|
router.POST(internalPath+"/revoke", h.internalAuth(h.internalRevoke))
|
||||||
|
|
||||||
h.router = router
|
h.router = router
|
||||||
|
|
||||||
h.gcCache()
|
h.gcCache()
|
||||||
|
|
||||||
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) // listen on all interfaces
|
// Listen on all interfaces. Binding to outboundIP only would give no real
|
||||||
|
// security benefit (it is the LAN/internet-facing address either way) and
|
||||||
|
// can break Docker Desktop variants where the host's outbound IP is not
|
||||||
|
// routable from inside the container network. Authentication is enforced
|
||||||
|
// by the bearer middleware and per-repo scoping, not by reachability.
|
||||||
|
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -121,6 +199,91 @@ func (h *Handler) ExternalURL() string {
|
|||||||
h.listener.Addr().(*net.TCPAddr).Port)
|
h.listener.Addr().(*net.TCPAddr).Port)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RegisterJob makes token a valid bearer credential for cache requests from
|
||||||
|
// the given repository and returns a function that removes it. The runner
|
||||||
|
// calls this at job start and defers the returned func so that the credential
|
||||||
|
// is only accepted while the job is running.
|
||||||
|
//
|
||||||
|
// Registrations are reference-counted: if a token is already registered, the
|
||||||
|
// existing repo is kept and the refcount is incremented. The entry is
|
||||||
|
// removed only when every revoker returned by RegisterJob has been called.
|
||||||
|
// This keeps a stray re-registration from silently revoking a live job.
|
||||||
|
func (h *Handler) RegisterJob(token, repo string) func() {
|
||||||
|
if h == nil || token == "" {
|
||||||
|
return func() {}
|
||||||
|
}
|
||||||
|
h.credMu.Lock()
|
||||||
|
if existing, ok := h.creds[token]; ok {
|
||||||
|
existing.refs++
|
||||||
|
} else {
|
||||||
|
h.creds[token] = &credEntry{
|
||||||
|
cred: JobCredential{Repo: repo},
|
||||||
|
refs: 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h.credMu.Unlock()
|
||||||
|
return func() {
|
||||||
|
h.credMu.Lock()
|
||||||
|
if entry, ok := h.creds[token]; ok {
|
||||||
|
entry.refs--
|
||||||
|
if entry.refs <= 0 {
|
||||||
|
delete(h.creds, token)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h.credMu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RevokeJob explicitly revokes one registration of token, mirroring one call
|
||||||
|
// of the closure returned by RegisterJob. Used by the control-plane endpoint
|
||||||
|
// so a remote runner can revoke without holding the closure.
|
||||||
|
func (h *Handler) RevokeJob(token string) {
|
||||||
|
if h == nil || token == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.credMu.Lock()
|
||||||
|
if entry, ok := h.creds[token]; ok {
|
||||||
|
entry.refs--
|
||||||
|
if entry.refs <= 0 {
|
||||||
|
delete(h.creds, token)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h.credMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) lookupCredential(token string) (JobCredential, bool) {
|
||||||
|
h.credMu.RLock()
|
||||||
|
entry, ok := h.creds[token]
|
||||||
|
h.credMu.RUnlock()
|
||||||
|
if !ok {
|
||||||
|
return JobCredential{}, false
|
||||||
|
}
|
||||||
|
return entry.cred, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadOrCreateSecret returns the 32-byte HMAC signing key for artifact URLs,
|
||||||
|
// persisted in dir/.secret so signed URLs handed out before a restart stay
|
||||||
|
// valid across the restart and so the standalone cache-server can be pointed
|
||||||
|
// at by config.Cache.ExternalServer without the URL rotating.
|
||||||
|
func loadOrCreateSecret(dir string) ([]byte, error) {
|
||||||
|
path := filepath.Join(dir, ".secret")
|
||||||
|
if data, err := os.ReadFile(path); err == nil {
|
||||||
|
if secret, err := hex.DecodeString(strings.TrimSpace(string(data))); err == nil && len(secret) >= 32 {
|
||||||
|
return secret, nil
|
||||||
|
}
|
||||||
|
} else if !os.IsNotExist(err) {
|
||||||
|
return nil, fmt.Errorf("read cache secret: %w", err)
|
||||||
|
}
|
||||||
|
secret := make([]byte, 32)
|
||||||
|
if _, err := rand.Read(secret); err != nil {
|
||||||
|
return nil, fmt.Errorf("generate cache secret: %w", err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(path, []byte(hex.EncodeToString(secret)), 0o600); err != nil {
|
||||||
|
return nil, fmt.Errorf("write cache secret: %w", err)
|
||||||
|
}
|
||||||
|
return secret, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (h *Handler) Close() error {
|
func (h *Handler) Close() error {
|
||||||
if h == nil {
|
if h == nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -160,6 +323,7 @@ func (h *Handler) openDB() (*bolthold.Store, error) {
|
|||||||
|
|
||||||
// GET /_apis/artifactcache/cache
|
// GET /_apis/artifactcache/cache
|
||||||
func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||||
|
cred := credFromContext(r.Context())
|
||||||
keys := strings.Split(r.URL.Query().Get("keys"), ",")
|
keys := strings.Split(r.URL.Query().Get("keys"), ",")
|
||||||
// cache keys are case insensitive
|
// cache keys are case insensitive
|
||||||
for i, key := range keys {
|
for i, key := range keys {
|
||||||
@@ -174,7 +338,7 @@ func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Para
|
|||||||
}
|
}
|
||||||
defer db.Close()
|
defer db.Close()
|
||||||
|
|
||||||
cache, err := findCache(db, keys, version)
|
cache, err := findCache(db, cred.Repo, keys, version)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.responseJSON(w, r, 500, err)
|
h.responseJSON(w, r, 500, err)
|
||||||
return
|
return
|
||||||
@@ -194,13 +358,14 @@ func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Para
|
|||||||
}
|
}
|
||||||
h.responseJSON(w, r, 200, map[string]any{
|
h.responseJSON(w, r, 200, map[string]any{
|
||||||
"result": "hit",
|
"result": "hit",
|
||||||
"archiveLocation": fmt.Sprintf("%s%s/artifacts/%d", h.ExternalURL(), urlBase, cache.ID),
|
"archiveLocation": h.signedArtifactURL(cache.ID, time.Now().Add(artifactURLTTL)),
|
||||||
"cacheKey": cache.Key,
|
"cacheKey": cache.Key,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// POST /_apis/artifactcache/caches
|
// POST /_apis/artifactcache/caches
|
||||||
func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||||
|
cred := credFromContext(r.Context())
|
||||||
api := &Request{}
|
api := &Request{}
|
||||||
if err := json.NewDecoder(r.Body).Decode(api); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(api); err != nil {
|
||||||
h.responseJSON(w, r, 400, err)
|
h.responseJSON(w, r, 400, err)
|
||||||
@@ -210,6 +375,7 @@ func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.P
|
|||||||
api.Key = strings.ToLower(api.Key)
|
api.Key = strings.ToLower(api.Key)
|
||||||
|
|
||||||
cache := api.ToCache()
|
cache := api.ToCache()
|
||||||
|
cache.Repo = cred.Repo
|
||||||
db, err := h.openDB()
|
db, err := h.openDB()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.responseJSON(w, r, 500, err)
|
h.responseJSON(w, r, 500, err)
|
||||||
@@ -231,6 +397,7 @@ func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.P
|
|||||||
|
|
||||||
// PATCH /_apis/artifactcache/caches/:id
|
// PATCH /_apis/artifactcache/caches/:id
|
||||||
func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||||
|
cred := credFromContext(r.Context())
|
||||||
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.responseJSON(w, r, 400, err)
|
h.responseJSON(w, r, 400, err)
|
||||||
@@ -253,6 +420,11 @@ func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprout
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if cache.Repo != cred.Repo {
|
||||||
|
h.responseJSON(w, r, 403, fmt.Errorf("cache %d: forbidden", id))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if cache.Complete {
|
if cache.Complete {
|
||||||
h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
|
h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
|
||||||
return
|
return
|
||||||
@@ -272,6 +444,7 @@ func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprout
|
|||||||
|
|
||||||
// POST /_apis/artifactcache/caches/:id
|
// POST /_apis/artifactcache/caches/:id
|
||||||
func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||||
|
cred := credFromContext(r.Context())
|
||||||
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.responseJSON(w, r, 400, err)
|
h.responseJSON(w, r, 400, err)
|
||||||
@@ -294,6 +467,11 @@ func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprout
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if cache.Repo != cred.Repo {
|
||||||
|
h.responseJSON(w, r, 403, fmt.Errorf("cache %d: forbidden", id))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if cache.Complete {
|
if cache.Complete {
|
||||||
h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
|
h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
|
||||||
return
|
return
|
||||||
@@ -326,6 +504,10 @@ func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprout
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GET /_apis/artifactcache/artifacts/:id
|
// GET /_apis/artifactcache/artifacts/:id
|
||||||
|
// Authenticated via signed URL (see signedURLAuth), not bearer, because the
|
||||||
|
// @actions/cache toolkit downloads archiveLocation without Authorization.
|
||||||
|
// Repository scoping is already enforced at find() time; the signature binds
|
||||||
|
// the URL to the specific cache ID and an expiry.
|
||||||
func (h *Handler) get(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
func (h *Handler) get(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||||
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -344,21 +526,158 @@ func (h *Handler) clean(w http.ResponseWriter, r *http.Request, _ httprouter.Par
|
|||||||
h.responseJSON(w, r, 200)
|
h.responseJSON(w, r, 200)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *Handler) middleware(handler httprouter.Handle) httprouter.Handle {
|
// bearerAuth resolves ACTIONS_RUNTIME_TOKEN against the set of currently
|
||||||
|
// registered jobs. A match attaches the job's JobCredential to the request
|
||||||
|
// context; a miss returns 401 before the handler body runs.
|
||||||
|
func (h *Handler) bearerAuth(handler httprouter.Handle) httprouter.Handle {
|
||||||
return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||||
h.logger.Debugf("%s %s", r.Method, r.RequestURI)
|
h.logger.Debugf("%s %s", r.Method, r.URL.Path)
|
||||||
|
token := bearerToken(r)
|
||||||
|
if token == "" {
|
||||||
|
h.responseJSON(w, r, http.StatusUnauthorized, errors.New("missing bearer token"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
cred, ok := h.lookupCredential(token)
|
||||||
|
if !ok {
|
||||||
|
h.responseJSON(w, r, http.StatusUnauthorized, errors.New("unknown bearer token"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ctx := context.WithValue(r.Context(), credKey{}, cred)
|
||||||
|
handler(w, r.WithContext(ctx), params)
|
||||||
|
go h.gcCache()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) signedURLAuth(handler httprouter.Handle) httprouter.Handle {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||||
|
h.logger.Debugf("%s %s", r.Method, r.URL.Path)
|
||||||
|
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
h.responseJSON(w, r, 400, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
expStr := r.URL.Query().Get("exp")
|
||||||
|
sig := r.URL.Query().Get("sig")
|
||||||
|
if expStr == "" || sig == "" {
|
||||||
|
h.responseJSON(w, r, http.StatusUnauthorized, errors.New("missing signature"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
exp, err := strconv.ParseInt(expStr, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
h.responseJSON(w, r, http.StatusUnauthorized, errors.New("invalid expiry"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if time.Now().Unix() > exp {
|
||||||
|
h.responseJSON(w, r, http.StatusUnauthorized, errors.New("signature expired"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
expected := h.computeSignature(id, exp)
|
||||||
|
if !hmac.Equal([]byte(sig), []byte(expected)) {
|
||||||
|
h.responseJSON(w, r, http.StatusUnauthorized, errors.New("bad signature"))
|
||||||
|
return
|
||||||
|
}
|
||||||
handler(w, r, params)
|
handler(w, r, params)
|
||||||
go h.gcCache()
|
go h.gcCache()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// internalAuth gates the control-plane endpoints. The bearer must
|
||||||
|
// constant-time-equal the configured internalSecret. If the secret is empty,
|
||||||
|
// the control-plane is disabled and every request gets 404 — which matches
|
||||||
|
// the upstream nektos/act behavior of "the route does not exist".
|
||||||
|
func (h *Handler) internalAuth(handler httprouter.Handle) httprouter.Handle {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||||
|
if h.internalSecret == "" {
|
||||||
|
http.NotFound(w, r)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
token := bearerToken(r)
|
||||||
|
if token == "" || !hmac.Equal([]byte(token), []byte(h.internalSecret)) {
|
||||||
|
h.responseJSON(w, r, http.StatusUnauthorized, errors.New("internal: bad secret"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
handler(w, r, params)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// internalRegisterBody is the JSON payload for POST /_internal/register.
type internalRegisterBody struct {
	Token string `json:"token"` // per-job ACTIONS_RUNTIME_TOKEN to accept
	Repo  string `json:"repo"`  // repository scope stamped onto the token
}

// internalRevokeBody is the JSON payload for POST /_internal/revoke.
type internalRevokeBody struct {
	Token string `json:"token"` // token whose registration is released
}
|
||||||
|
|
||||||
|
// POST /_internal/register
|
||||||
|
func (h *Handler) internalRegister(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||||
|
var body internalRegisterBody
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
|
||||||
|
h.responseJSON(w, r, http.StatusBadRequest, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if body.Token == "" {
|
||||||
|
h.responseJSON(w, r, http.StatusBadRequest, errors.New("token is required"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.RegisterJob(body.Token, body.Repo)
|
||||||
|
h.responseJSON(w, r, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
// POST /_internal/revoke
|
||||||
|
func (h *Handler) internalRevoke(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||||
|
var body internalRevokeBody
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
|
||||||
|
h.responseJSON(w, r, http.StatusBadRequest, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if body.Token == "" {
|
||||||
|
h.responseJSON(w, r, http.StatusBadRequest, errors.New("token is required"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.RevokeJob(body.Token)
|
||||||
|
h.responseJSON(w, r, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func bearerToken(r *http.Request) string {
|
||||||
|
auth := r.Header.Get("Authorization")
|
||||||
|
const prefix = "Bearer "
|
||||||
|
if len(auth) > len(prefix) && strings.EqualFold(auth[:len(prefix)], prefix) {
|
||||||
|
return auth[len(prefix):]
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func credFromContext(ctx context.Context) JobCredential {
|
||||||
|
if cred, ok := ctx.Value(credKey{}).(JobCredential); ok {
|
||||||
|
return cred
|
||||||
|
}
|
||||||
|
return JobCredential{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) computeSignature(cacheID, exp int64) string {
|
||||||
|
mac := hmac.New(sha256.New, h.secret)
|
||||||
|
fmt.Fprintf(mac, "%d:%d", cacheID, exp)
|
||||||
|
return hex.EncodeToString(mac.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) signedArtifactURL(cacheID uint64, exp time.Time) string {
|
||||||
|
expUnix := exp.Unix()
|
||||||
|
sig := h.computeSignature(int64(cacheID), expUnix)
|
||||||
|
q := url.Values{}
|
||||||
|
q.Set("exp", strconv.FormatInt(expUnix, 10))
|
||||||
|
q.Set("sig", sig)
|
||||||
|
return fmt.Sprintf("%s%s/artifacts/%d?%s", h.ExternalURL(), apiPath, cacheID, q.Encode())
|
||||||
|
}
|
||||||
|
|
||||||
// if not found, return (nil, nil) instead of an error.
|
// if not found, return (nil, nil) instead of an error.
|
||||||
func findCache(db *bolthold.Store, keys []string, version string) (*Cache, error) {
|
func findCache(db *bolthold.Store, repo string, keys []string, version string) (*Cache, error) {
|
||||||
cache := &Cache{}
|
cache := &Cache{}
|
||||||
for _, prefix := range keys {
|
for _, prefix := range keys {
|
||||||
// if a key in the list matches exactly, don't return partial matches
|
// if a key in the list matches exactly, don't return partial matches
|
||||||
if err := db.FindOne(cache,
|
if err := db.FindOne(cache,
|
||||||
bolthold.Where("Key").Eq(prefix).
|
bolthold.Where("Repo").Eq(repo).
|
||||||
|
And("Key").Eq(prefix).
|
||||||
And("Version").Eq(version).
|
And("Version").Eq(version).
|
||||||
And("Complete").Eq(true).
|
And("Complete").Eq(true).
|
||||||
SortBy("CreatedAt").Reverse()); err == nil || !errors.Is(err, bolthold.ErrNotFound) {
|
SortBy("CreatedAt").Reverse()); err == nil || !errors.Is(err, bolthold.ErrNotFound) {
|
||||||
@@ -373,7 +692,8 @@ func findCache(db *bolthold.Store, keys []string, version string) (*Cache, error
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := db.FindOne(cache,
|
if err := db.FindOne(cache,
|
||||||
bolthold.Where("Key").RegExp(re).
|
bolthold.Where("Repo").Eq(repo).
|
||||||
|
And("Key").RegExp(re).
|
||||||
And("Version").Eq(version).
|
And("Version").Eq(version).
|
||||||
And("Complete").Eq(true).
|
And("Complete").Eq(true).
|
||||||
SortBy("CreatedAt").Reverse()); err != nil {
|
SortBy("CreatedAt").Reverse()); err != nil {
|
||||||
@@ -419,7 +739,6 @@ const (
|
|||||||
keepOld = 5 * time.Minute
|
keepOld = 5 * time.Minute
|
||||||
)
|
)
|
||||||
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func (h *Handler) gcCache() {
|
func (h *Handler) gcCache() {
|
||||||
if h.gcing.Load() {
|
if h.gcing.Load() {
|
||||||
return
|
return
|
||||||
@@ -494,12 +813,16 @@ func (h *Handler) gcCache() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove the old caches with the same key and version, keep the latest one.
|
// Remove the old caches with the same key and version within the same
|
||||||
|
// repository, keep the latest one. Aggregation must include Repo so two
|
||||||
|
// repos that happen to share a (key, version) do not evict each other —
|
||||||
|
// otherwise per-repo scoping holds for reads but one repo can age
|
||||||
|
// another out after keepOld.
|
||||||
// Also keep the olds which have been used recently for a while in case of the cache is still in use.
|
// Also keep the olds which have been used recently for a while in case of the cache is still in use.
|
||||||
if results, err := db.FindAggregate(
|
if results, err := db.FindAggregate(
|
||||||
&Cache{},
|
&Cache{},
|
||||||
bolthold.Where("Complete").Eq(true),
|
bolthold.Where("Complete").Eq(true),
|
||||||
"Key", "Version",
|
"Repo", "Key", "Version",
|
||||||
); err != nil {
|
); err != nil {
|
||||||
h.logger.Warnf("find aggregate caches: %v", err)
|
h.logger.Warnf("find aggregate caches: %v", err)
|
||||||
} else {
|
} else {
|
||||||
@@ -533,7 +856,7 @@ func (h *Handler) responseJSON(w http.ResponseWriter, r *http.Request, code int,
|
|||||||
if len(v) == 0 || v[0] == nil {
|
if len(v) == 0 || v[0] == nil {
|
||||||
data, _ = json.Marshal(struct{}{})
|
data, _ = json.Marshal(struct{}{})
|
||||||
} else if err, ok := v[0].(error); ok {
|
} else if err, ok := v[0].(error); ok {
|
||||||
h.logger.Errorf("%v %v: %v", r.Method, r.RequestURI, err)
|
h.logger.Errorf("%v %v: %v", r.Method, r.URL.Path, err)
|
||||||
data, _ = json.Marshal(map[string]any{
|
data, _ = json.Marshal(map[string]any{
|
||||||
"error": err.Error(),
|
"error": err.Error(),
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -22,12 +22,38 @@ import (
|
|||||||
"go.etcd.io/bbolt"
|
"go.etcd.io/bbolt"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// testToken is registered with the cache server in every test that needs to
// make authenticated requests; testClient then attaches it as the
// Authorization: Bearer header. testRepo is the repository scope used when
// registering it; cross-repo isolation is exercised in its own test.
const (
	testToken = "test-runtime-token"
	testRepo  = "owner/repo"
)
|
||||||
|
|
||||||
|
type bearerTransport struct{ token string }
|
||||||
|
|
||||||
|
func (b *bearerTransport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||||
|
r.Header.Set("Authorization", "Bearer "+b.token)
|
||||||
|
return http.DefaultTransport.RoundTrip(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
var testClient = &http.Client{Transport: &bearerTransport{token: testToken}}
|
||||||
|
|
||||||
|
// signArtifactURL builds a signed download URL the same way the server does;
|
||||||
|
// tests use it to reach the get handler directly without going through a
|
||||||
|
// find/cache-hit round trip.
|
||||||
|
func signArtifactURL(h *Handler, id int64) string {
|
||||||
|
return h.signedArtifactURL(uint64(id), time.Now().Add(artifactURLTTL))
|
||||||
|
}
|
||||||
|
|
||||||
func TestHandler(t *testing.T) {
|
func TestHandler(t *testing.T) {
|
||||||
dir := filepath.Join(t.TempDir(), "artifactcache")
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
handler, err := StartHandler(dir, "", 0, nil)
|
handler, err := StartHandler(dir, "", 0, "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
handler.RegisterJob(testToken, testRepo)
|
||||||
|
|
||||||
base := fmt.Sprintf("%s%s", handler.ExternalURL(), urlBase)
|
base := fmt.Sprintf("%s%s", handler.ExternalURL(), apiPath)
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
t.Run("inpect db", func(t *testing.T) {
|
t.Run("inpect db", func(t *testing.T) {
|
||||||
@@ -45,7 +71,10 @@ func TestHandler(t *testing.T) {
|
|||||||
require.NoError(t, handler.Close())
|
require.NoError(t, handler.Close())
|
||||||
assert.Nil(t, handler.server)
|
assert.Nil(t, handler.server)
|
||||||
assert.Nil(t, handler.listener)
|
assert.Nil(t, handler.listener)
|
||||||
_, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 1), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, 1), "", nil)
|
||||||
|
if err == nil {
|
||||||
|
resp.Body.Close()
|
||||||
|
}
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
})
|
})
|
||||||
}()
|
}()
|
||||||
@@ -53,8 +82,9 @@ func TestHandler(t *testing.T) {
|
|||||||
t.Run("get not exist", func(t *testing.T) {
|
t.Run("get not exist", func(t *testing.T) {
|
||||||
key := strings.ToLower(t.Name())
|
key := strings.ToLower(t.Name())
|
||||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
require.Equal(t, 204, resp.StatusCode)
|
require.Equal(t, 204, resp.StatusCode)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -68,16 +98,18 @@ func TestHandler(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("clean", func(t *testing.T) {
|
t.Run("clean", func(t *testing.T) {
|
||||||
resp, err := http.Post(base+"/clean", "", nil) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(base+"/clean", "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("reserve with bad request", func(t *testing.T) {
|
t.Run("reserve with bad request", func(t *testing.T) {
|
||||||
body := []byte(`invalid json`)
|
body := []byte(`invalid json`)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 400, resp.StatusCode)
|
assert.Equal(t, 400, resp.StatusCode)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -94,8 +126,9 @@ func TestHandler(t *testing.T) {
|
|||||||
Size: 100,
|
Size: 100,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
|
|
||||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&first))
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&first))
|
||||||
@@ -108,8 +141,9 @@ func TestHandler(t *testing.T) {
|
|||||||
Size: 100,
|
Size: 100,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
|
|
||||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&second))
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&second))
|
||||||
@@ -125,8 +159,9 @@ func TestHandler(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
req.Header.Set("Content-Range", "bytes 0-99/*")
|
req.Header.Set("Content-Range", "bytes 0-99/*")
|
||||||
resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Do(req)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 400, resp.StatusCode)
|
assert.Equal(t, 400, resp.StatusCode)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -136,8 +171,9 @@ func TestHandler(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
req.Header.Set("Content-Range", "bytes 0-99/*")
|
req.Header.Set("Content-Range", "bytes 0-99/*")
|
||||||
resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Do(req)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 400, resp.StatusCode)
|
assert.Equal(t, 400, resp.StatusCode)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -155,8 +191,9 @@ func TestHandler(t *testing.T) {
|
|||||||
Size: 100,
|
Size: 100,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
|
|
||||||
got := struct {
|
got := struct {
|
||||||
@@ -171,13 +208,15 @@ func TestHandler(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
req.Header.Set("Content-Range", "bytes 0-99/*")
|
req.Header.Set("Content-Range", "bytes 0-99/*")
|
||||||
resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Do(req)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
@@ -186,8 +225,9 @@ func TestHandler(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
req.Header.Set("Content-Range", "bytes 0-99/*")
|
req.Header.Set("Content-Range", "bytes 0-99/*")
|
||||||
resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Do(req)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 400, resp.StatusCode)
|
assert.Equal(t, 400, resp.StatusCode)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@@ -206,8 +246,9 @@ func TestHandler(t *testing.T) {
|
|||||||
Size: 100,
|
Size: 100,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
|
|
||||||
got := struct {
|
got := struct {
|
||||||
@@ -222,24 +263,27 @@ func TestHandler(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
req.Header.Set("Content-Range", "bytes xx-99/*")
|
req.Header.Set("Content-Range", "bytes xx-99/*")
|
||||||
resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Do(req)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 400, resp.StatusCode)
|
assert.Equal(t, 400, resp.StatusCode)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("commit with bad id", func(t *testing.T) {
|
t.Run("commit with bad id", func(t *testing.T) {
|
||||||
{
|
{
|
||||||
resp, err := http.Post(base+"/caches/invalid_id", "", nil) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(base+"/caches/invalid_id", "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 400, resp.StatusCode)
|
assert.Equal(t, 400, resp.StatusCode)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("commit with not exist id", func(t *testing.T) {
|
t.Run("commit with not exist id", func(t *testing.T) {
|
||||||
{
|
{
|
||||||
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 100), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, 100), "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 400, resp.StatusCode)
|
assert.Equal(t, 400, resp.StatusCode)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@@ -258,8 +302,9 @@ func TestHandler(t *testing.T) {
|
|||||||
Size: 100,
|
Size: 100,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
|
|
||||||
got := struct {
|
got := struct {
|
||||||
@@ -274,18 +319,21 @@ func TestHandler(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
req.Header.Set("Content-Range", "bytes 0-99/*")
|
req.Header.Set("Content-Range", "bytes 0-99/*")
|
||||||
resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Do(req)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 400, resp.StatusCode)
|
assert.Equal(t, 400, resp.StatusCode)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@@ -304,8 +352,9 @@ func TestHandler(t *testing.T) {
|
|||||||
Size: 100,
|
Size: 100,
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
|
|
||||||
got := struct {
|
got := struct {
|
||||||
@@ -320,32 +369,37 @@ func TestHandler(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
req.Header.Set("Content-Range", "bytes 0-59/*")
|
req.Header.Set("Content-Range", "bytes 0-59/*")
|
||||||
resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Do(req)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 500, resp.StatusCode)
|
assert.Equal(t, 500, resp.StatusCode)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("get with bad id", func(t *testing.T) {
|
t.Run("get with bad id", func(t *testing.T) {
|
||||||
resp, err := http.Get(base + "/artifacts/invalid_id") //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Get(base + "/artifacts/invalid_id")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
require.Equal(t, 400, resp.StatusCode)
|
require.Equal(t, 400, resp.StatusCode)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("get with not exist id", func(t *testing.T) {
|
t.Run("get with not exist id", func(t *testing.T) {
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Get(signArtifactURL(handler, 100))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
require.Equal(t, 404, resp.StatusCode)
|
require.Equal(t, 404, resp.StatusCode)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("get with not exist id", func(t *testing.T) {
|
t.Run("get with not exist id", func(t *testing.T) {
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Get(signArtifactURL(handler, 100))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
require.Equal(t, 404, resp.StatusCode)
|
require.Equal(t, 404, resp.StatusCode)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -375,8 +429,9 @@ func TestHandler(t *testing.T) {
|
|||||||
key + "_a",
|
key + "_a",
|
||||||
}, ",")
|
}, ",")
|
||||||
|
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
require.Equal(t, 200, resp.StatusCode)
|
require.Equal(t, 200, resp.StatusCode)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -395,8 +450,9 @@ func TestHandler(t *testing.T) {
|
|||||||
assert.Equal(t, "hit", got.Result)
|
assert.Equal(t, "hit", got.Result)
|
||||||
assert.Equal(t, keys[except], got.CacheKey)
|
assert.Equal(t, keys[except], got.CacheKey)
|
||||||
|
|
||||||
contentResp, err := http.Get(got.ArchiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act
|
contentResp, err := testClient.Get(got.ArchiveLocation)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer contentResp.Body.Close()
|
||||||
require.Equal(t, 200, contentResp.StatusCode)
|
require.Equal(t, 200, contentResp.StatusCode)
|
||||||
content, err := io.ReadAll(contentResp.Body)
|
content, err := io.ReadAll(contentResp.Body)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -413,8 +469,9 @@ func TestHandler(t *testing.T) {
|
|||||||
|
|
||||||
{
|
{
|
||||||
reqKey := key + "_aBc"
|
reqKey := key + "_aBc"
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKey, version)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKey, version))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
require.Equal(t, 200, resp.StatusCode)
|
require.Equal(t, 200, resp.StatusCode)
|
||||||
got := struct {
|
got := struct {
|
||||||
Result string `json:"result"`
|
Result string `json:"result"`
|
||||||
@@ -452,8 +509,9 @@ func TestHandler(t *testing.T) {
|
|||||||
key + "_a_b",
|
key + "_a_b",
|
||||||
}, ",")
|
}, ",")
|
||||||
|
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
require.Equal(t, 200, resp.StatusCode)
|
require.Equal(t, 200, resp.StatusCode)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -470,8 +528,9 @@ func TestHandler(t *testing.T) {
|
|||||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||||
assert.Equal(t, keys[expect], got.CacheKey)
|
assert.Equal(t, keys[expect], got.CacheKey)
|
||||||
|
|
||||||
contentResp, err := http.Get(got.ArchiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act
|
contentResp, err := testClient.Get(got.ArchiveLocation)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer contentResp.Body.Close()
|
||||||
require.Equal(t, 200, contentResp.StatusCode)
|
require.Equal(t, 200, contentResp.StatusCode)
|
||||||
content, err := io.ReadAll(contentResp.Body)
|
content, err := io.ReadAll(contentResp.Body)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -504,8 +563,9 @@ func TestHandler(t *testing.T) {
|
|||||||
key + "_a_b",
|
key + "_a_b",
|
||||||
}, ",")
|
}, ",")
|
||||||
|
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
require.Equal(t, 200, resp.StatusCode)
|
require.Equal(t, 200, resp.StatusCode)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -523,8 +583,9 @@ func TestHandler(t *testing.T) {
|
|||||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||||
assert.Equal(t, keys[expect], got.CacheKey)
|
assert.Equal(t, keys[expect], got.CacheKey)
|
||||||
|
|
||||||
contentResp, err := http.Get(got.ArchiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act
|
contentResp, err := testClient.Get(got.ArchiveLocation)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer contentResp.Body.Close()
|
||||||
require.Equal(t, 200, contentResp.StatusCode)
|
require.Equal(t, 200, contentResp.StatusCode)
|
||||||
content, err := io.ReadAll(contentResp.Body)
|
content, err := io.ReadAll(contentResp.Body)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -541,8 +602,9 @@ func uploadCacheNormally(t *testing.T, base, key, version string, content []byte
|
|||||||
Size: int64(len(content)),
|
Size: int64(len(content)),
|
||||||
})
|
})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
|
|
||||||
got := struct {
|
got := struct {
|
||||||
@@ -557,19 +619,22 @@ func uploadCacheNormally(t *testing.T, base, key, version string, content []byte
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
req.Header.Set("Content-Range", "bytes 0-99/*")
|
req.Header.Set("Content-Range", "bytes 0-99/*")
|
||||||
resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Do(req)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
assert.Equal(t, 200, resp.StatusCode)
|
assert.Equal(t, 200, resp.StatusCode)
|
||||||
}
|
}
|
||||||
var archiveLocation string
|
var archiveLocation string
|
||||||
{
|
{
|
||||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version)) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
require.Equal(t, 200, resp.StatusCode)
|
require.Equal(t, 200, resp.StatusCode)
|
||||||
got := struct {
|
got := struct {
|
||||||
Result string `json:"result"`
|
Result string `json:"result"`
|
||||||
@@ -582,8 +647,9 @@ func uploadCacheNormally(t *testing.T, base, key, version string, content []byte
|
|||||||
archiveLocation = got.ArchiveLocation
|
archiveLocation = got.ArchiveLocation
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
resp, err := http.Get(archiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act
|
resp, err := testClient.Get(archiveLocation)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
require.Equal(t, 200, resp.StatusCode)
|
require.Equal(t, 200, resp.StatusCode)
|
||||||
got, err := io.ReadAll(resp.Body)
|
got, err := io.ReadAll(resp.Body)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -593,7 +659,7 @@ func uploadCacheNormally(t *testing.T, base, key, version string, content []byte
|
|||||||
|
|
||||||
func TestHandler_gcCache(t *testing.T) {
|
func TestHandler_gcCache(t *testing.T) {
|
||||||
dir := filepath.Join(t.TempDir(), "artifactcache")
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
handler, err := StartHandler(dir, "", 0, nil)
|
handler, err := StartHandler(dir, "", 0, "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
@@ -699,3 +765,421 @@ func TestHandler_gcCache(t *testing.T) {
|
|||||||
}
|
}
|
||||||
require.NoError(t, db.Close())
|
require.NoError(t, db.Close())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestHandler_RejectsMissingBearer covers the advisory's root cause:
|
||||||
|
// unauthenticated access to management endpoints is now refused with 401.
|
||||||
|
func TestHandler_RejectsMissingBearer(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := StartHandler(dir, "", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
|
||||||
|
base := handler.ExternalURL() + apiPath
|
||||||
|
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
method string
|
||||||
|
path string
|
||||||
|
body string
|
||||||
|
}{
|
||||||
|
{"find", http.MethodGet, "/cache?keys=x&version=y", ""},
|
||||||
|
{"reserve", http.MethodPost, "/caches", "{}"},
|
||||||
|
{"upload", http.MethodPatch, "/caches/1", ""},
|
||||||
|
{"commit", http.MethodPost, "/caches/1", ""},
|
||||||
|
{"clean", http.MethodPost, "/clean", ""},
|
||||||
|
} {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
req, err := http.NewRequest(tc.method, base+tc.path, strings.NewReader(tc.body))
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHandler_RejectsUnknownBearer verifies that a bearer token is only
|
||||||
|
// accepted after RegisterJob; stale/forged tokens cannot be replayed.
|
||||||
|
func TestHandler_RejectsUnknownBearer(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := StartHandler(dir, "", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
|
||||||
|
base := handler.ExternalURL() + apiPath
|
||||||
|
|
||||||
|
req, err := http.NewRequest(http.MethodGet, base+"/cache?keys=x&version=y", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Authorization", "Bearer not-a-registered-token")
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHandler_UnregisterRevokes ensures that the function returned by
|
||||||
|
// RegisterJob invalidates the credential, so a token leaked at job time stops
|
||||||
|
// working the moment the job ends instead of living for the runner's lifetime.
|
||||||
|
func TestHandler_UnregisterRevokes(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := StartHandler(dir, "", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
|
||||||
|
unregister := handler.RegisterJob("tmp-token", testRepo)
|
||||||
|
|
||||||
|
base := handler.ExternalURL() + apiPath
|
||||||
|
req, err := http.NewRequest(http.MethodGet, base+"/cache?keys=x&version=y", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Authorization", "Bearer tmp-token")
|
||||||
|
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.NotEqual(t, http.StatusUnauthorized, resp.StatusCode)
|
||||||
|
|
||||||
|
unregister()
|
||||||
|
|
||||||
|
resp, err = http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHandler_CrossRepoIsolation addresses the intra-runner poisoning vector
|
||||||
|
// raised in GHSA-82g9-637c-2fx2: job containers can reach the cache server
|
||||||
|
// over the docker bridge, so IP allowlisting alone does not stop a malicious
|
||||||
|
// PR run from another repo. A cache entry created under repoA must be
|
||||||
|
// invisible to queries scoped to repoB.
|
||||||
|
func TestHandler_CrossRepoIsolation(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := StartHandler(dir, "", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
handler.RegisterJob("token-a", "owner/repoA")
|
||||||
|
handler.RegisterJob("token-b", "owner/repoB")
|
||||||
|
|
||||||
|
base := handler.ExternalURL() + apiPath
|
||||||
|
key := "shared-key"
|
||||||
|
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||||
|
content := []byte("repoA-payload")
|
||||||
|
|
||||||
|
clientA := &http.Client{Transport: &bearerTransport{token: "token-a"}}
|
||||||
|
clientB := &http.Client{Transport: &bearerTransport{token: "token-b"}}
|
||||||
|
|
||||||
|
// repoA reserves + uploads + commits.
|
||||||
|
reserveBody, err := json.Marshal(&Request{Key: key, Version: version, Size: int64(len(content))})
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp, err := clientA.Post(base+"/caches", "application/json", bytes.NewReader(reserveBody))
|
||||||
|
require.NoError(t, err)
|
||||||
|
var reserved struct {
|
||||||
|
CacheID uint64 `json:"cacheId"`
|
||||||
|
}
|
||||||
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&reserved))
|
||||||
|
resp.Body.Close()
|
||||||
|
require.NotZero(t, reserved.CacheID)
|
||||||
|
|
||||||
|
req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), bytes.NewReader(content))
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/*", len(content)-1))
|
||||||
|
resp, err = clientA.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
|
||||||
|
resp, err = clientA.Post(fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
|
||||||
|
// repoB with a matching key and version must NOT see repoA's cache.
|
||||||
|
resp, err = clientB.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.Equal(t, http.StatusNoContent, resp.StatusCode)
|
||||||
|
|
||||||
|
// repoA still sees its own cache.
|
||||||
|
resp, err = clientA.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
|
||||||
|
// repoB cannot upload to repoA's reserved id either (forbidden, not 401).
|
||||||
|
req, err = http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), bytes.NewReader([]byte("poison")))
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Range", "bytes 0-5/*")
|
||||||
|
resp, err = clientB.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.Equal(t, http.StatusForbidden, resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHandler_ArtifactSignature verifies that archive downloads reject
|
||||||
|
// missing / tampered / expired signatures, so a leaked archiveLocation stops
|
||||||
|
// working after artifactURLTTL even if the bearer token is still registered.
|
||||||
|
func TestHandler_ArtifactSignature(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := StartHandler(dir, "", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
handler.RegisterJob(testToken, testRepo)
|
||||||
|
|
||||||
|
base := handler.ExternalURL() + apiPath
|
||||||
|
|
||||||
|
t.Run("missing signature", func(t *testing.T) {
|
||||||
|
resp, err := testClient.Get(fmt.Sprintf("%s/artifacts/%d", base, 1))
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("tampered signature", func(t *testing.T) {
|
||||||
|
good := handler.signedArtifactURL(1, time.Now().Add(artifactURLTTL))
|
||||||
|
bad := good[:len(good)-4] + "dead"
|
||||||
|
resp, err := testClient.Get(bad)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("expired signature", func(t *testing.T) {
|
||||||
|
expired := handler.signedArtifactURL(1, time.Now().Add(-time.Second))
|
||||||
|
resp, err := testClient.Get(expired)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("signature from a different server", func(t *testing.T) {
|
||||||
|
dir2 := filepath.Join(t.TempDir(), "artifactcache2")
|
||||||
|
other, err := StartHandler(dir2, "", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer other.Close()
|
||||||
|
otherURL := other.signedArtifactURL(1, time.Now().Add(artifactURLTTL))
|
||||||
|
// Rewrite the host so the request still lands on our handler, but
|
||||||
|
// the signature was computed with a different secret.
|
||||||
|
parts := strings.SplitN(otherURL, apiPath, 2)
|
||||||
|
forged := base + parts[1]
|
||||||
|
resp, err := testClient.Get(forged)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHandler_SecretPersistsAcrossRestarts is the property that lets
|
||||||
|
// act_runner cache-server be pointed at via cfg.Cache.ExternalServer: a
|
||||||
|
// restart must not invalidate signed URLs the handler has already issued
|
||||||
|
// (within their expiry window).
|
||||||
|
func TestHandler_SecretPersistsAcrossRestarts(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
|
||||||
|
first, err := StartHandler(dir, "127.0.0.1", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
exp := time.Now().Add(artifactURLTTL).Unix()
|
||||||
|
sig := first.computeSignature(42, exp)
|
||||||
|
require.NoError(t, first.Close())
|
||||||
|
|
||||||
|
second, err := StartHandler(dir, "127.0.0.1", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer second.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, sig, second.computeSignature(42, exp))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHandler_ArtifactSignatureDownload is a happy-path round trip that
|
||||||
|
// ensures a real reserve/upload/commit/find/download flow still works after
|
||||||
|
// the auth refactor.
|
||||||
|
func TestHandler_ArtifactSignatureDownload(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := StartHandler(dir, "", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
handler.RegisterJob(testToken, testRepo)
|
||||||
|
|
||||||
|
base := handler.ExternalURL() + apiPath
|
||||||
|
key := "download-key"
|
||||||
|
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||||
|
content := []byte("hello")
|
||||||
|
uploadCacheNormally(t, base, key, version, content)
|
||||||
|
|
||||||
|
resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
var hit struct {
|
||||||
|
ArchiveLocation string `json:"archiveLocation"`
|
||||||
|
}
|
||||||
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&hit))
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
require.Contains(t, hit.ArchiveLocation, "sig=")
|
||||||
|
require.Contains(t, hit.ArchiveLocation, "exp=")
|
||||||
|
|
||||||
|
// Download without any Authorization header — the signature alone must
|
||||||
|
// be enough, because @actions/cache downloads archiveLocation unauth'd.
|
||||||
|
dl, err := http.Get(hit.ArchiveLocation)
|
||||||
|
require.NoError(t, err)
|
||||||
|
body, err := io.ReadAll(dl.Body)
|
||||||
|
dl.Body.Close()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, http.StatusOK, dl.StatusCode)
|
||||||
|
assert.Equal(t, content, body)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHandler_RegisterJob_RefCounted verifies that a duplicate RegisterJob
|
||||||
|
// for the same token does not silently revoke the first registration on the
|
||||||
|
// first revoker call. This matters if a runner ever re-registers a token
|
||||||
|
// (restart mid-task, retry), which must not kill the live job's auth.
|
||||||
|
func TestHandler_RegisterJob_RefCounted(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := StartHandler(dir, "", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
|
||||||
|
first := handler.RegisterJob("shared", testRepo)
|
||||||
|
second := handler.RegisterJob("shared", testRepo)
|
||||||
|
|
||||||
|
base := handler.ExternalURL() + apiPath
|
||||||
|
probe := func() int {
|
||||||
|
req, err := http.NewRequest(http.MethodGet, base+"/cache?keys=x&version=v", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Authorization", "Bearer shared")
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
return resp.StatusCode
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NotEqual(t, http.StatusUnauthorized, probe())
|
||||||
|
first()
|
||||||
|
assert.NotEqual(t, http.StatusUnauthorized, probe(),
|
||||||
|
"token must stay valid while another registration holds the refcount")
|
||||||
|
second()
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, probe(),
|
||||||
|
"token is revoked only after every revoker has run")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHandler_GC_PerRepoDedup ensures duplicate-pruning does not evict
|
||||||
|
// another repo's entry. Two repos reserve the same (key, version); after the
|
||||||
|
// keepOld window, GC must keep the one from each repo.
|
||||||
|
func TestHandler_GC_PerRepoDedup(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := StartHandler(dir, "", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
handler.RegisterJob("tok-a", "owner/repoA")
|
||||||
|
handler.RegisterJob("tok-b", "owner/repoB")
|
||||||
|
|
||||||
|
key := "shared-dedup-key"
|
||||||
|
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||||
|
|
||||||
|
// Seed one completed cache per repo directly via the DB, bypassing the
|
||||||
|
// HTTP round trip so we can precisely control UsedAt.
|
||||||
|
db, err := handler.openDB()
|
||||||
|
require.NoError(t, err)
|
||||||
|
now := time.Now().Unix()
|
||||||
|
stale := time.Now().Add(-keepOld - time.Minute).Unix()
|
||||||
|
a := &Cache{Repo: "owner/repoA", Key: key, Version: version, Complete: true, CreatedAt: stale, UsedAt: stale, Size: 1}
|
||||||
|
b := &Cache{Repo: "owner/repoB", Key: key, Version: version, Complete: true, CreatedAt: now, UsedAt: now, Size: 1}
|
||||||
|
require.NoError(t, insertCache(db, a))
|
||||||
|
require.NoError(t, insertCache(db, b))
|
||||||
|
// Write the backing blobs so the dedup deletion has something to remove.
|
||||||
|
require.NoError(t, handler.storage.Write(a.ID, 0, strings.NewReader("a")))
|
||||||
|
_, err = handler.storage.Commit(a.ID, 1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, handler.storage.Write(b.ID, 0, strings.NewReader("b")))
|
||||||
|
_, err = handler.storage.Commit(b.ID, 1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, db.Close())
|
||||||
|
|
||||||
|
// Force GC to run regardless of the cooldown.
|
||||||
|
handler.gcAt = time.Time{}
|
||||||
|
handler.gcCache()
|
||||||
|
|
||||||
|
db, err = handler.openDB()
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer db.Close()
|
||||||
|
var after []Cache
|
||||||
|
require.NoError(t, db.Find(&after, bolthold.Where("Key").Eq(key).And("Version").Eq(version)))
|
||||||
|
|
||||||
|
repos := make(map[string]bool)
|
||||||
|
for _, c := range after {
|
||||||
|
repos[c.Repo] = true
|
||||||
|
}
|
||||||
|
assert.True(t, repos["owner/repoA"], "repoA's cache must survive dedup against repoB")
|
||||||
|
assert.True(t, repos["owner/repoB"], "repoB's cache must survive dedup against repoA")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHandler_InternalAPI_Disabled verifies that without an internalSecret
|
||||||
|
// the control-plane routes are 404 — operators can't accidentally hit
|
||||||
|
// register/revoke when the feature is off.
|
||||||
|
func TestHandler_InternalAPI_Disabled(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := StartHandler(dir, "", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
|
||||||
|
for _, ep := range []string{"/_internal/register", "/_internal/revoke"} {
|
||||||
|
resp, err := http.Post(handler.ExternalURL()+ep, "application/json", strings.NewReader(`{}`))
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
assert.Equal(t, http.StatusNotFound, resp.StatusCode, ep)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHandler_InternalAPI_AuthAndUsage covers the control-plane: bad/missing
|
||||||
|
// secret → 401, malformed body → 400, happy path round-trips a token through
|
||||||
|
// register → cache-API accepts it → revoke → cache-API rejects it.
|
||||||
|
func TestHandler_InternalAPI_AuthAndUsage(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
const secret = "internal-secret"
|
||||||
|
handler, err := StartHandler(dir, "", 0, secret, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
|
||||||
|
base := handler.ExternalURL()
|
||||||
|
|
||||||
|
post := func(path, bearer, body string) int {
|
||||||
|
req, err := http.NewRequest(http.MethodPost, base+path, strings.NewReader(body))
|
||||||
|
require.NoError(t, err)
|
||||||
|
if bearer != "" {
|
||||||
|
req.Header.Set("Authorization", "Bearer "+bearer)
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
return resp.StatusCode
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("missing secret 401", func(t *testing.T) {
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, post("/_internal/register", "", `{"token":"x","repo":"r"}`))
|
||||||
|
})
|
||||||
|
t.Run("wrong secret 401", func(t *testing.T) {
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, post("/_internal/register", "wrong", `{"token":"x","repo":"r"}`))
|
||||||
|
})
|
||||||
|
t.Run("malformed body 400", func(t *testing.T) {
|
||||||
|
assert.Equal(t, http.StatusBadRequest, post("/_internal/register", secret, `not json`))
|
||||||
|
})
|
||||||
|
t.Run("missing token 400", func(t *testing.T) {
|
||||||
|
assert.Equal(t, http.StatusBadRequest, post("/_internal/register", secret, `{"repo":"r"}`))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("register then revoke round-trip", func(t *testing.T) {
|
||||||
|
probe := func(token string) int {
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, base+apiPath+"/cache?keys=k&version=v", nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+token)
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
return resp.StatusCode
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, probe("via-internal-api"))
|
||||||
|
assert.Equal(t, http.StatusOK, post("/_internal/register", secret, `{"token":"via-internal-api","repo":"owner/repo"}`))
|
||||||
|
assert.NotEqual(t, http.StatusUnauthorized, probe("via-internal-api"))
|
||||||
|
assert.Equal(t, http.StatusOK, post("/_internal/revoke", secret, `{"token":"via-internal-api"}`))
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, probe("via-internal-api"))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -29,6 +29,7 @@ func (c *Request) ToCache() *Cache {
|
|||||||
|
|
||||||
type Cache struct {
|
type Cache struct {
|
||||||
ID uint64 `json:"id" boltholdKey:"ID"`
|
ID uint64 `json:"id" boltholdKey:"ID"`
|
||||||
|
Repo string `json:"repo" boltholdIndex:"Repo"`
|
||||||
Key string `json:"key" boltholdIndex:"Key"`
|
Key string `json:"key" boltholdIndex:"Key"`
|
||||||
Version string `json:"version" boltholdIndex:"Version"`
|
Version string `json:"version" boltholdIndex:"Version"`
|
||||||
Size int64 `json:"cacheSize"`
|
Size int64 `json:"cacheSize"`
|
||||||
|
|||||||
@@ -202,7 +202,7 @@ func TestListArtifactContainer(t *testing.T) {
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.Equal(1, len(response.Value)) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Len(response.Value, 1)
|
||||||
assert.Equal("some/file", response.Value[0].Path)
|
assert.Equal("some/file", response.Value[0].Path)
|
||||||
assert.Equal("file", response.Value[0].ItemType)
|
assert.Equal("file", response.Value[0].ItemType)
|
||||||
assert.Equal("http://localhost/artifact/1/some/file/.", response.Value[0].ContentLocation)
|
assert.Equal("http://localhost/artifact/1/some/file/.", response.Value[0].ContentLocation)
|
||||||
@@ -283,7 +283,7 @@ func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
workdir, err := filepath.Abs(tjfi.workdir)
|
workdir, err := filepath.Abs(tjfi.workdir)
|
||||||
assert.Nil(t, err, workdir) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, workdir) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
fullWorkflowPath := filepath.Join(workdir, tjfi.workflowPath)
|
fullWorkflowPath := filepath.Join(workdir, tjfi.workflowPath)
|
||||||
runnerConfig := &runner.Config{
|
runnerConfig := &runner.Config{
|
||||||
Workdir: workdir,
|
Workdir: workdir,
|
||||||
@@ -299,16 +299,16 @@ func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
runner, err := runner.New(runnerConfig)
|
runner, err := runner.New(runnerConfig)
|
||||||
assert.Nil(t, err, tjfi.workflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, tjfi.workflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
planner, err := model.NewWorkflowPlanner(fullWorkflowPath, true)
|
planner, err := model.NewWorkflowPlanner(fullWorkflowPath, true)
|
||||||
assert.Nil(t, err, fullWorkflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, fullWorkflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
plan, err := planner.PlanEvent(tjfi.eventName)
|
plan, err := planner.PlanEvent(tjfi.eventName)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = runner.NewPlanExecutor(plan)(ctx)
|
err = runner.NewPlanExecutor(plan)(ctx)
|
||||||
if tjfi.errorMessage == "" {
|
if tjfi.errorMessage == "" {
|
||||||
assert.Nil(t, err, fullWorkflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, fullWorkflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
} else {
|
} else {
|
||||||
assert.Error(t, err, tjfi.errorMessage) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Error(t, err, tjfi.errorMessage) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -35,9 +35,9 @@ func TestCartesianProduct(t *testing.T) {
|
|||||||
"baz": {false, true},
|
"baz": {false, true},
|
||||||
}
|
}
|
||||||
output = CartesianProduct(input)
|
output = CartesianProduct(input)
|
||||||
assert.Len(output, 0) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Empty(output)
|
||||||
|
|
||||||
input = map[string][]any{}
|
input = map[string][]any{}
|
||||||
output = CartesianProduct(input)
|
output = CartesianProduct(input)
|
||||||
assert.Len(output, 0) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Empty(output)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -21,11 +21,11 @@ func TestNewWorkflow(t *testing.T) {
|
|||||||
|
|
||||||
// empty
|
// empty
|
||||||
emptyWorkflow := NewPipelineExecutor()
|
emptyWorkflow := NewPipelineExecutor()
|
||||||
assert.Nil(emptyWorkflow(ctx)) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(emptyWorkflow(ctx)) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// error case
|
// error case
|
||||||
errorWorkflow := NewErrorExecutor(errors.New("test error"))
|
errorWorkflow := NewErrorExecutor(errors.New("test error"))
|
||||||
assert.NotNil(errorWorkflow(ctx)) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Error(errorWorkflow(ctx)) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// multiple success case
|
// multiple success case
|
||||||
runcount := 0
|
runcount := 0
|
||||||
@@ -38,7 +38,7 @@ func TestNewWorkflow(t *testing.T) {
|
|||||||
runcount++
|
runcount++
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
assert.Nil(successWorkflow(ctx)) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(successWorkflow(ctx)) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(2, runcount)
|
assert.Equal(2, runcount)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -60,7 +60,7 @@ func TestNewConditionalExecutor(t *testing.T) {
|
|||||||
return nil
|
return nil
|
||||||
})(ctx)
|
})(ctx)
|
||||||
|
|
||||||
assert.Nil(err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(0, trueCount)
|
assert.Equal(0, trueCount)
|
||||||
assert.Equal(1, falseCount)
|
assert.Equal(1, falseCount)
|
||||||
|
|
||||||
@@ -74,7 +74,7 @@ func TestNewConditionalExecutor(t *testing.T) {
|
|||||||
return nil
|
return nil
|
||||||
})(ctx)
|
})(ctx)
|
||||||
|
|
||||||
assert.Nil(err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(1, trueCount)
|
assert.Equal(1, trueCount)
|
||||||
assert.Equal(1, falseCount)
|
assert.Equal(1, falseCount)
|
||||||
}
|
}
|
||||||
@@ -105,7 +105,7 @@ func TestNewParallelExecutor(t *testing.T) {
|
|||||||
|
|
||||||
assert.Equal(int32(3), count.Load(), "should run all 3 executors")
|
assert.Equal(int32(3), count.Load(), "should run all 3 executors")
|
||||||
assert.Equal(int32(2), maxCount.Load(), "should run at most 2 executors in parallel")
|
assert.Equal(int32(2), maxCount.Load(), "should run at most 2 executors in parallel")
|
||||||
assert.Nil(err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// Reset to test running the executor with 0 parallelism
|
// Reset to test running the executor with 0 parallelism
|
||||||
count.Store(0)
|
count.Store(0)
|
||||||
@@ -116,7 +116,7 @@ func TestNewParallelExecutor(t *testing.T) {
|
|||||||
|
|
||||||
assert.Equal(int32(3), count.Load(), "should run all 3 executors")
|
assert.Equal(int32(3), count.Load(), "should run all 3 executors")
|
||||||
assert.Equal(int32(1), maxCount.Load(), "should run at most 1 executors in parallel")
|
assert.Equal(int32(1), maxCount.Load(), "should run at most 1 executors in parallel")
|
||||||
assert.Nil(errSingle) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(errSingle)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewParallelExecutorFailed(t *testing.T) {
|
func TestNewParallelExecutorFailed(t *testing.T) {
|
||||||
|
|||||||
@@ -32,12 +32,21 @@ var (
|
|||||||
githubHTTPRegex = regexp.MustCompile(`^https?://.*github.com.*/(.+)/(.+?)(?:.git)?$`)
|
githubHTTPRegex = regexp.MustCompile(`^https?://.*github.com.*/(.+)/(.+?)(?:.git)?$`)
|
||||||
githubSSHRegex = regexp.MustCompile(`github.com[:/](.+)/(.+?)(?:.git)?$`)
|
githubSSHRegex = regexp.MustCompile(`github.com[:/](.+)/(.+?)(?:.git)?$`)
|
||||||
|
|
||||||
cloneLock sync.Mutex
|
cloneLocks sync.Map // key: clone target directory; value: *sync.Mutex
|
||||||
|
|
||||||
ErrShortRef = errors.New("short SHA references are not supported")
|
ErrShortRef = errors.New("short SHA references are not supported")
|
||||||
ErrNoRepo = errors.New("unable to find git repo")
|
ErrNoRepo = errors.New("unable to find git repo")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// acquireCloneLock returns an unlock function after locking the per-directory mutex for dir.
|
||||||
|
// Only concurrent operations targeting the same directory are erialized; clones into different directories run in parallel.
|
||||||
|
func acquireCloneLock(dir string) func() {
|
||||||
|
v, _ := cloneLocks.LoadOrStore(dir, &sync.Mutex{})
|
||||||
|
mu := v.(*sync.Mutex)
|
||||||
|
mu.Lock()
|
||||||
|
return mu.Unlock
|
||||||
|
}
|
||||||
|
|
||||||
type Error struct {
|
type Error struct {
|
||||||
err error
|
err error
|
||||||
commit string
|
commit string
|
||||||
@@ -293,16 +302,13 @@ func gitOptions(token string) (fetchOptions git.FetchOptions, pullOptions git.Pu
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGitCloneExecutor creates an executor to clone git repos
|
// NewGitCloneExecutor creates an executor to clone git repos
|
||||||
//
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
logger.Infof(" \u2601 git clone '%s' # ref=%s", input.URL, input.Ref)
|
logger.Infof(" \u2601 git clone '%s' # ref=%s", input.URL, input.Ref)
|
||||||
logger.Debugf(" cloning %s to %s", input.URL, input.Dir)
|
logger.Debugf(" cloning %s to %s", input.URL, input.Dir)
|
||||||
|
|
||||||
cloneLock.Lock()
|
defer acquireCloneLock(input.Dir)()
|
||||||
defer cloneLock.Unlock()
|
|
||||||
|
|
||||||
refName := plumbing.ReferenceName("refs/heads/" + input.Ref)
|
refName := plumbing.ReferenceName("refs/heads/" + input.Ref)
|
||||||
r, err := CloneIfRequired(ctx, refName, input, logger)
|
r, err := CloneIfRequired(ctx, refName, input, logger)
|
||||||
|
|||||||
@@ -11,8 +11,10 @@ import (
|
|||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -303,3 +305,61 @@ func gitCmd(args ...string) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAcquireCloneLock(t *testing.T) {
|
||||||
|
t.Run("same directory serializes", func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
|
||||||
|
unlock1 := acquireCloneLock(dir)
|
||||||
|
|
||||||
|
secondAcquired := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
unlock := acquireCloneLock(dir)
|
||||||
|
close(secondAcquired)
|
||||||
|
unlock()
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-secondAcquired:
|
||||||
|
t.Fatal("second acquire should block while first holds the lock")
|
||||||
|
case <-time.After(50 * time.Millisecond):
|
||||||
|
}
|
||||||
|
|
||||||
|
unlock1()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-secondAcquired:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("second acquire should proceed after first releases the lock")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("different directories do not block", func(t *testing.T) {
|
||||||
|
dirA := t.TempDir()
|
||||||
|
dirB := t.TempDir()
|
||||||
|
|
||||||
|
unlockA := acquireCloneLock(dirA)
|
||||||
|
defer unlockA()
|
||||||
|
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
unlock := acquireCloneLock(dirB)
|
||||||
|
unlock()
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("acquire on a different directory must not block")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("same directory reuses the same mutex", func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
|
||||||
|
v1, _ := cloneLocks.LoadOrStore(dir, &sync.Mutex{})
|
||||||
|
v2, _ := cloneLocks.LoadOrStore(dir, &sync.Mutex{})
|
||||||
|
require.Same(t, v1, v2)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -324,8 +324,6 @@ type containerConfig struct {
|
|||||||
// parse parses the args for the specified command and generates a Config,
|
// parse parses the args for the specified command and generates a Config,
|
||||||
// a HostConfig and returns them with the specified command.
|
// a HostConfig and returns them with the specified command.
|
||||||
// If the specified args are not valid, it will return an error.
|
// If the specified args are not valid, it will return an error.
|
||||||
//
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*containerConfig, error) {
|
func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*containerConfig, error) {
|
||||||
var (
|
var (
|
||||||
attachStdin = copts.attach.Get("stdin")
|
attachStdin = copts.attach.Get("stdin")
|
||||||
|
|||||||
@@ -194,7 +194,6 @@ func TestParseRunWithInvalidArgs(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func TestParseWithVolumes(t *testing.T) {
|
func TestParseWithVolumes(t *testing.T) {
|
||||||
// A single volume
|
// A single volume
|
||||||
arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`})
|
arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`})
|
||||||
@@ -632,7 +631,7 @@ func TestParseModes(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// uts ko
|
// uts ko
|
||||||
_, _, _, err = parseRun([]string{"--uts=container:", "img", "cmd"}) //nolint:dogsled // ignoring multiple returns in test helpers
|
_, _, _, err = parseRun([]string{"--uts=container:", "img", "cmd"})
|
||||||
assert.ErrorContains(t, err, "--uts: invalid UTS mode")
|
assert.ErrorContains(t, err, "--uts: invalid UTS mode")
|
||||||
|
|
||||||
// uts ok
|
// uts ok
|
||||||
@@ -693,7 +692,7 @@ func TestParseRestartPolicy(t *testing.T) {
|
|||||||
|
|
||||||
func TestParseRestartPolicyAutoRemove(t *testing.T) {
|
func TestParseRestartPolicyAutoRemove(t *testing.T) {
|
||||||
expected := "Conflicting options: --restart and --rm"
|
expected := "Conflicting options: --restart and --rm"
|
||||||
_, _, _, err := parseRun([]string{"--rm", "--restart=always", "img", "cmd"}) //nolint:dogsled // ignoring multiple returns in test helpers
|
_, _, _, err := parseRun([]string{"--rm", "--restart=always", "img", "cmd"})
|
||||||
if err == nil || err.Error() != expected {
|
if err == nil || err.Error() != expected {
|
||||||
t.Fatalf("Expected error %v, but got none", expected)
|
t.Fatalf("Expected error %v, but got none", expected)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -29,17 +29,17 @@ func TestImageExistsLocally(t *testing.T) {
|
|||||||
|
|
||||||
// Test if image exists with specific tag
|
// Test if image exists with specific tag
|
||||||
invalidImageTag, err := ImageExistsLocally(ctx, "library/alpine:this-random-tag-will-never-exist", "linux/amd64")
|
invalidImageTag, err := ImageExistsLocally(ctx, "library/alpine:this-random-tag-will-never-exist", "linux/amd64")
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, false, invalidImageTag) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.False(t, invalidImageTag)
|
||||||
|
|
||||||
// Test if image exists with specific architecture (image platform)
|
// Test if image exists with specific architecture (image platform)
|
||||||
invalidImagePlatform, err := ImageExistsLocally(ctx, "alpine:latest", "windows/amd64")
|
invalidImagePlatform, err := ImageExistsLocally(ctx, "alpine:latest", "windows/amd64")
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, false, invalidImagePlatform) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.False(t, invalidImagePlatform)
|
||||||
|
|
||||||
// pull an image
|
// pull an image
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
cli, err := client.NewClientWithOpts(client.FromEnv)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
cli.NegotiateAPIVersion(context.Background())
|
cli.NegotiateAPIVersion(context.Background())
|
||||||
|
|
||||||
// Chose alpine latest because it's so small
|
// Chose alpine latest because it's so small
|
||||||
@@ -47,25 +47,25 @@ func TestImageExistsLocally(t *testing.T) {
|
|||||||
readerDefault, err := cli.ImagePull(ctx, "node:16-buster-slim", types.ImagePullOptions{
|
readerDefault, err := cli.ImagePull(ctx, "node:16-buster-slim", types.ImagePullOptions{
|
||||||
Platform: "linux/amd64",
|
Platform: "linux/amd64",
|
||||||
})
|
})
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
defer readerDefault.Close()
|
defer readerDefault.Close()
|
||||||
_, err = io.ReadAll(readerDefault)
|
_, err = io.ReadAll(readerDefault)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
imageDefaultArchExists, err := ImageExistsLocally(ctx, "node:16-buster-slim", "linux/amd64")
|
imageDefaultArchExists, err := ImageExistsLocally(ctx, "node:16-buster-slim", "linux/amd64")
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, true, imageDefaultArchExists) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.True(t, imageDefaultArchExists)
|
||||||
|
|
||||||
// Validate if another architecture platform can be pulled
|
// Validate if another architecture platform can be pulled
|
||||||
readerArm64, err := cli.ImagePull(ctx, "node:16-buster-slim", types.ImagePullOptions{
|
readerArm64, err := cli.ImagePull(ctx, "node:16-buster-slim", types.ImagePullOptions{
|
||||||
Platform: "linux/arm64",
|
Platform: "linux/arm64",
|
||||||
})
|
})
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
defer readerArm64.Close()
|
defer readerArm64.Close()
|
||||||
_, err = io.ReadAll(readerArm64)
|
_, err = io.ReadAll(readerArm64)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
imageArm64Exists, err := ImageExistsLocally(ctx, "node:16-buster-slim", "linux/arm64")
|
imageArm64Exists, err := ImageExistsLocally(ctx, "node:16-buster-slim", "linux/arm64")
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, true, imageArm64Exists) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.True(t, imageArm64Exists)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -43,7 +43,7 @@ func TestGetImagePullOptions(t *testing.T) {
|
|||||||
config.SetDir("/non-existent/docker")
|
config.SetDir("/non-existent/docker")
|
||||||
|
|
||||||
options, err := getImagePullOptions(ctx, NewDockerPullExecutorInput{})
|
options, err := getImagePullOptions(ctx, NewDockerPullExecutorInput{})
|
||||||
assert.Nil(t, err, "Failed to create ImagePullOptions") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, "Failed to create ImagePullOptions") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, "", options.RegistryAuth, "RegistryAuth should be empty if no username or password is set") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Equal(t, "", options.RegistryAuth, "RegistryAuth should be empty if no username or password is set") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
options, err = getImagePullOptions(ctx, NewDockerPullExecutorInput{
|
options, err = getImagePullOptions(ctx, NewDockerPullExecutorInput{
|
||||||
@@ -51,7 +51,7 @@ func TestGetImagePullOptions(t *testing.T) {
|
|||||||
Username: "username",
|
Username: "username",
|
||||||
Password: "password",
|
Password: "password",
|
||||||
})
|
})
|
||||||
assert.Nil(t, err, "Failed to create ImagePullOptions") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, "Failed to create ImagePullOptions") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, "eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwicGFzc3dvcmQiOiJwYXNzd29yZCJ9", options.RegistryAuth, "Username and Password should be provided")
|
assert.Equal(t, "eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwicGFzc3dvcmQiOiJwYXNzd29yZCJ9", options.RegistryAuth, "Username and Password should be provided")
|
||||||
|
|
||||||
config.SetDir("testdata/docker-pull-options")
|
config.SetDir("testdata/docker-pull-options")
|
||||||
@@ -59,6 +59,6 @@ func TestGetImagePullOptions(t *testing.T) {
|
|||||||
options, err = getImagePullOptions(ctx, NewDockerPullExecutorInput{
|
options, err = getImagePullOptions(ctx, NewDockerPullExecutorInput{
|
||||||
Image: "nektos/act",
|
Image: "nektos/act",
|
||||||
})
|
})
|
||||||
assert.Nil(t, err, "Failed to create ImagePullOptions") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, "Failed to create ImagePullOptions") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, "eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwicGFzc3dvcmQiOiJwYXNzd29yZFxuIiwic2VydmVyYWRkcmVzcyI6Imh0dHBzOi8vaW5kZXguZG9ja2VyLmlvL3YxLyJ9", options.RegistryAuth, "RegistryAuth should be taken from local docker config")
|
assert.Equal(t, "eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwicGFzc3dvcmQiOiJwYXNzd29yZFxuIiwic2VydmVyYWRkcmVzcyI6Imh0dHBzOi8vaW5kZXguZG9ja2VyLmlvL3YxLyJ9", options.RegistryAuth, "RegistryAuth should be taken from local docker config")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ func TestGetSocketAndHostWithSocket(t *testing.T) {
|
|||||||
ret, err := GetSocketAndHost(socketURI)
|
ret, err := GetSocketAndHost(socketURI)
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{socketURI, dockerHost}, ret)
|
assert.Equal(t, SocketAndHost{socketURI, dockerHost}, ret)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -42,7 +42,7 @@ func TestGetSocketAndHostNoSocket(t *testing.T) {
|
|||||||
ret, err := GetSocketAndHost("")
|
ret, err := GetSocketAndHost("")
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{dockerHost, dockerHost}, ret)
|
assert.Equal(t, SocketAndHost{dockerHost, dockerHost}, ret)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -57,8 +57,8 @@ func TestGetSocketAndHostOnlySocket(t *testing.T) {
|
|||||||
ret, err := GetSocketAndHost(socketURI)
|
ret, err := GetSocketAndHost(socketURI)
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
assert.NoError(t, err, "Expected no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, "Expected no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, true, defaultSocketFound, "Expected to find default socket") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.True(t, defaultSocketFound, "Expected to find default socket")
|
||||||
assert.Equal(t, socketURI, ret.Socket, "Expected socket to match common location")
|
assert.Equal(t, socketURI, ret.Socket, "Expected socket to match common location")
|
||||||
assert.Equal(t, defaultSocket, ret.Host, "Expected ret.Host to match default socket location")
|
assert.Equal(t, defaultSocket, ret.Host, "Expected ret.Host to match default socket location")
|
||||||
}
|
}
|
||||||
@@ -73,7 +73,7 @@ func TestGetSocketAndHostDontMount(t *testing.T) {
|
|||||||
ret, err := GetSocketAndHost("-")
|
ret, err := GetSocketAndHost("-")
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{"-", dockerHost}, ret)
|
assert.Equal(t, SocketAndHost{"-", dockerHost}, ret)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -87,8 +87,8 @@ func TestGetSocketAndHostNoHostNoSocket(t *testing.T) {
|
|||||||
ret, err := GetSocketAndHost("")
|
ret, err := GetSocketAndHost("")
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
assert.Equal(t, true, found, "Expected a default socket to be found") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.True(t, found, "Expected a default socket to be found")
|
||||||
assert.Nil(t, err, "Expected no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, "Expected no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{defaultSocket, defaultSocket}, ret, "Expected to match default socket location")
|
assert.Equal(t, SocketAndHost{defaultSocket, defaultSocket}, ret, "Expected to match default socket location")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -112,8 +112,8 @@ func TestGetSocketAndHostNoHostNoSocketDefaultLocation(t *testing.T) {
|
|||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
assert.Equal(t, unixSocket, defaultSocket, "Expected default socket to match common socket location")
|
assert.Equal(t, unixSocket, defaultSocket, "Expected default socket to match common socket location")
|
||||||
assert.Equal(t, true, found, "Expected default socket to be found") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.True(t, found, "Expected default socket to be found")
|
||||||
assert.Nil(t, err, "Expected no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, "Expected no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{unixSocket, unixSocket}, ret, "Expected to match default socket location")
|
assert.Equal(t, SocketAndHost{unixSocket, unixSocket}, ret, "Expected to match default socket location")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -128,7 +128,7 @@ func TestGetSocketAndHostNoHostInvalidSocket(t *testing.T) {
|
|||||||
ret, err := GetSocketAndHost(mySocket)
|
ret, err := GetSocketAndHost(mySocket)
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
assert.Equal(t, false, found, "Expected no default socket to be found") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.False(t, found, "Expected no default socket to be found")
|
||||||
assert.Equal(t, "", defaultSocket, "Expected no default socket to be found") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Equal(t, "", defaultSocket, "Expected no default socket to be found") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, SocketAndHost{}, ret, "Expected to match default socket location")
|
assert.Equal(t, SocketAndHost{}, ret, "Expected to match default socket location")
|
||||||
assert.Error(t, err, "Expected an error in invalid state")
|
assert.Error(t, err, "Expected an error in invalid state")
|
||||||
@@ -147,8 +147,8 @@ func TestGetSocketAndHostOnlySocketValidButUnusualLocation(t *testing.T) {
|
|||||||
// Assert
|
// Assert
|
||||||
// Default socket locations
|
// Default socket locations
|
||||||
assert.Equal(t, "", defaultSocket, "Expect default socket location to be empty") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Equal(t, "", defaultSocket, "Expect default socket location to be empty") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, false, found, "Expected no default socket to be found") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.False(t, found, "Expected no default socket to be found")
|
||||||
// Sane default
|
// Sane default
|
||||||
assert.Nil(t, err, "Expect no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, "Expect no error from GetSocketAndHost") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, socketURI, ret.Host, "Expect host to default to unusual socket")
|
assert.Equal(t, socketURI, ret.Host, "Expect host to default to unusual socket")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -43,7 +43,7 @@ func TestFunctionContains(t *testing.T) {
|
|||||||
for _, tt := range table {
|
for _, tt := range table {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
})
|
})
|
||||||
@@ -72,7 +72,7 @@ func TestFunctionStartsWith(t *testing.T) {
|
|||||||
for _, tt := range table {
|
for _, tt := range table {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
})
|
})
|
||||||
@@ -101,7 +101,7 @@ func TestFunctionEndsWith(t *testing.T) {
|
|||||||
for _, tt := range table {
|
for _, tt := range table {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
})
|
})
|
||||||
@@ -128,7 +128,7 @@ func TestFunctionJoin(t *testing.T) {
|
|||||||
for _, tt := range table {
|
for _, tt := range table {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
})
|
})
|
||||||
@@ -154,7 +154,7 @@ func TestFunctionToJSON(t *testing.T) {
|
|||||||
for _, tt := range table {
|
for _, tt := range table {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
})
|
})
|
||||||
@@ -177,7 +177,7 @@ func TestFunctionFromJSON(t *testing.T) {
|
|||||||
for _, tt := range table {
|
for _, tt := range table {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
})
|
})
|
||||||
@@ -205,9 +205,9 @@ func TestFunctionHashFiles(t *testing.T) {
|
|||||||
for _, tt := range table {
|
for _, tt := range table {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
workdir, err := filepath.Abs("testdata")
|
workdir, err := filepath.Abs("testdata")
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
output, err := NewInterpeter(env, Config{WorkingDir: workdir}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{WorkingDir: workdir}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
})
|
})
|
||||||
@@ -248,7 +248,7 @@ func TestFunctionFormat(t *testing.T) {
|
|||||||
if tt.error != nil {
|
if tt.error != nil {
|
||||||
assert.Equal(t, tt.error, err.Error())
|
assert.Equal(t, tt.error, err.Error())
|
||||||
} else {
|
} else {
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -156,7 +156,6 @@ func (impl *interperterImpl) evaluateNode(exprNode actionlint.ExprNode) (any, er
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func (impl *interperterImpl) evaluateVariable(variableNode *actionlint.VariableNode) (any, error) {
|
func (impl *interperterImpl) evaluateVariable(variableNode *actionlint.VariableNode) (any, error) {
|
||||||
switch strings.ToLower(variableNode.Name) {
|
switch strings.ToLower(variableNode.Name) {
|
||||||
case "github":
|
case "github":
|
||||||
@@ -584,7 +583,6 @@ func (impl *interperterImpl) evaluateLogicalCompare(compareNode *actionlint.Logi
|
|||||||
return nil, fmt.Errorf("Unable to compare incompatibles types '%s' and '%s'", leftValue.Kind(), rightValue.Kind())
|
return nil, fmt.Errorf("Unable to compare incompatibles types '%s' and '%s'", leftValue.Kind(), rightValue.Kind())
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func (impl *interperterImpl) evaluateFuncCall(funcCallNode *actionlint.FuncCallNode) (any, error) {
|
func (impl *interperterImpl) evaluateFuncCall(funcCallNode *actionlint.FuncCallNode) (any, error) {
|
||||||
args := make([]reflect.Value, 0)
|
args := make([]reflect.Value, 0)
|
||||||
|
|
||||||
|
|||||||
@@ -35,7 +35,7 @@ func TestLiterals(t *testing.T) {
|
|||||||
for _, tt := range table {
|
for _, tt := range table {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
})
|
})
|
||||||
@@ -105,10 +105,10 @@ func TestOperators(t *testing.T) {
|
|||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
if tt.error != "" {
|
if tt.error != "" {
|
||||||
assert.NotNil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Error(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, tt.error, err.Error())
|
assert.Equal(t, tt.error, err.Error())
|
||||||
} else {
|
} else {
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
@@ -157,7 +157,7 @@ func TestOperatorsCompare(t *testing.T) {
|
|||||||
for _, tt := range table {
|
for _, tt := range table {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
})
|
})
|
||||||
@@ -520,7 +520,7 @@ func TestOperatorsBooleanEvaluation(t *testing.T) {
|
|||||||
for _, tt := range table {
|
for _, tt := range table {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
if expected, ok := tt.expected.(float64); ok && math.IsNaN(expected) {
|
if expected, ok := tt.expected.(float64); ok && math.IsNaN(expected) {
|
||||||
assert.True(t, math.IsNaN(output.(float64)))
|
assert.True(t, math.IsNaN(output.(float64)))
|
||||||
@@ -624,7 +624,7 @@ func TestContexts(t *testing.T) {
|
|||||||
for _, tt := range table {
|
for _, tt := range table {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
output, err := NewInterpeter(env, Config{}).Evaluate(tt.input, DefaultStatusCheckNone)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
assert.Equal(t, tt.expected, output)
|
assert.Equal(t, tt.expected, output)
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -128,7 +128,6 @@ func (*DefaultFs) Readlink(path string) (string, error) {
|
|||||||
return os.Readlink(path)
|
return os.Readlink(path)
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func (fc *FileCollector) CollectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
|
func (fc *FileCollector) CollectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
|
||||||
i, _ := fc.Fs.OpenGitIndex(path.Join(fc.SrcPath, path.Join(submodulePath...)))
|
i, _ := fc.Fs.OpenGitIndex(path.Join(fc.SrcPath, path.Join(submodulePath...)))
|
||||||
return func(file string, fi os.FileInfo, err error) error {
|
return func(file string, fi os.FileInfo, err error) error {
|
||||||
|
|||||||
@@ -61,8 +61,6 @@ type WorkflowFiles struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewWorkflowPlanner will load a specific workflow, all workflows from a directory or all workflows from a directory and its subdirectories
|
// NewWorkflowPlanner will load a specific workflow, all workflows from a directory or all workflows from a directory and its subdirectories
|
||||||
//
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, error) {
|
func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, error) {
|
||||||
path, err := filepath.Abs(path)
|
path, err := filepath.Abs(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -57,11 +57,11 @@ func TestWorkflow(t *testing.T) {
|
|||||||
|
|
||||||
// Check that an invalid job id returns error
|
// Check that an invalid job id returns error
|
||||||
result, err := createStages(&workflow, "invalid_job_id")
|
result, err := createStages(&workflow, "invalid_job_id")
|
||||||
assert.NotNil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Error(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Nil(t, result)
|
assert.Nil(t, result)
|
||||||
|
|
||||||
// Check that an valid job id returns non-error
|
// Check that an valid job id returns non-error
|
||||||
result, err = createStages(&workflow, "valid_job")
|
result, err = createStages(&workflow, "valid_job")
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.NotNil(t, result)
|
assert.NotNil(t, result)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -440,8 +440,6 @@ func (j *Job) Matrix() map[string][]any {
|
|||||||
|
|
||||||
// GetMatrixes returns the matrix cross product
|
// GetMatrixes returns the matrix cross product
|
||||||
// It skips includes and hard fails excludes for non-existing keys
|
// It skips includes and hard fails excludes for non-existing keys
|
||||||
//
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func (j *Job) GetMatrixes() ([]map[string]any, error) {
|
func (j *Job) GetMatrixes() ([]map[string]any, error) {
|
||||||
matrixes := make([]map[string]any, 0)
|
matrixes := make([]map[string]any, 0)
|
||||||
if j.Strategy != nil {
|
if j.Strategy != nil {
|
||||||
|
|||||||
@@ -56,7 +56,7 @@ jobs:
|
|||||||
assert.NoError(t, err, "read workflow should succeed") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, "read workflow should succeed") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
newSchedules = workflow.OnSchedule()
|
newSchedules = workflow.OnSchedule()
|
||||||
assert.Len(t, newSchedules, 0) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Empty(t, newSchedules)
|
||||||
|
|
||||||
yaml = `
|
yaml = `
|
||||||
name: local-action-docker-url
|
name: local-action-docker-url
|
||||||
@@ -74,7 +74,7 @@ jobs:
|
|||||||
assert.NoError(t, err, "read workflow should succeed") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, "read workflow should succeed") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
newSchedules = workflow.OnSchedule()
|
newSchedules = workflow.OnSchedule()
|
||||||
assert.Len(t, newSchedules, 0) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Empty(t, newSchedules)
|
||||||
|
|
||||||
yaml = `
|
yaml = `
|
||||||
name: local-action-docker-url
|
name: local-action-docker-url
|
||||||
@@ -91,7 +91,7 @@ jobs:
|
|||||||
assert.NoError(t, err, "read workflow should succeed") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, "read workflow should succeed") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
newSchedules = workflow.OnSchedule()
|
newSchedules = workflow.OnSchedule()
|
||||||
assert.Len(t, newSchedules, 0) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Empty(t, newSchedules)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadWorkflow_StringEvent(t *testing.T) {
|
func TestReadWorkflow_StringEvent(t *testing.T) {
|
||||||
@@ -870,7 +870,7 @@ jobs:
|
|||||||
assert.Nil(t, matrix, "matrix should be nil for jobs without strategy")
|
assert.Nil(t, matrix, "matrix should be nil for jobs without strategy")
|
||||||
} else {
|
} else {
|
||||||
assert.NotNil(t, matrix, "matrix should not be nil")
|
assert.NotNil(t, matrix, "matrix should not be nil")
|
||||||
assert.Equal(t, tt.wantLen, len(matrix), "matrix should have expected number of keys") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Len(t, matrix, tt.wantLen, "matrix should have expected number of keys")
|
||||||
if tt.checkFn != nil {
|
if tt.checkFn != nil {
|
||||||
tt.checkFn(t, matrix)
|
tt.checkFn(t, matrix)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -265,8 +265,6 @@ func removeGitIgnore(ctx context.Context, directory string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// TODO: break out parts of function to reduce complexicity
|
// TODO: break out parts of function to reduce complexicity
|
||||||
//
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func execAsDocker(ctx context.Context, step actionStep, actionName, basedir string, localAction bool) error {
|
func execAsDocker(ctx context.Context, step actionStep, actionName, basedir string, localAction bool) error {
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
rc := step.getRunContext()
|
rc := step.getRunContext()
|
||||||
@@ -429,7 +427,7 @@ func newStepContainer(ctx context.Context, step step, image string, cmd, entrypo
|
|||||||
Image: image,
|
Image: image,
|
||||||
Username: rc.Config.Secrets["DOCKER_USERNAME"],
|
Username: rc.Config.Secrets["DOCKER_USERNAME"],
|
||||||
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
|
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
|
||||||
Name: createSimpleContainerName(rc.jobContainerName(), "STEP-"+stepModel.ID),
|
Name: createContainerName(rc.jobContainerName(), "STEP-"+stepModel.ID),
|
||||||
Env: envList,
|
Env: envList,
|
||||||
Mounts: mounts,
|
Mounts: mounts,
|
||||||
NetworkMode: networkMode,
|
NetworkMode: networkMode,
|
||||||
|
|||||||
@@ -137,7 +137,7 @@ runs:
|
|||||||
|
|
||||||
action, err := readActionImpl(context.Background(), tt.step, "actionDir", "actionPath", readFile, writeFile)
|
action, err := readActionImpl(context.Background(), tt.step, "actionDir", "actionPath", readFile, writeFile)
|
||||||
|
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, tt.expected, action)
|
assert.Equal(t, tt.expected, action)
|
||||||
|
|
||||||
closerMock.AssertExpectations(t)
|
closerMock.AssertExpectations(t)
|
||||||
@@ -247,7 +247,7 @@ func TestActionRunner(t *testing.T) {
|
|||||||
|
|
||||||
err := runActionImpl(tt.step, "dir", newRemoteAction("org/repo/path@ref"))(ctx)
|
err := runActionImpl(tt.step, "dir", newRemoteAction("org/repo/path@ref"))(ctx)
|
||||||
|
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
cm.AssertExpectations(t)
|
cm.AssertExpectations(t)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -405,7 +405,6 @@ func escapeFormatString(in string) string {
|
|||||||
return strings.ReplaceAll(strings.ReplaceAll(in, "{", "{{"), "}", "}}")
|
return strings.ReplaceAll(strings.ReplaceAll(in, "{", "{{"), "}", "}}")
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func rewriteSubExpression(ctx context.Context, in string, forceFormat bool) (string, error) { //nolint:unparam // pre-existing issue from nektos/act
|
func rewriteSubExpression(ctx context.Context, in string, forceFormat bool) (string, error) { //nolint:unparam // pre-existing issue from nektos/act
|
||||||
if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
|
if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
|
||||||
return in, nil
|
return in, nil
|
||||||
@@ -472,7 +471,6 @@ func rewriteSubExpression(ctx context.Context, in string, forceFormat bool) (str
|
|||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func getEvaluatorInputs(ctx context.Context, rc *RunContext, step step, ghc *model.GithubContext) map[string]any {
|
func getEvaluatorInputs(ctx context.Context, rc *RunContext, step step, ghc *model.GithubContext) map[string]any {
|
||||||
inputs := map[string]any{}
|
inputs := map[string]any{}
|
||||||
|
|
||||||
|
|||||||
@@ -24,7 +24,6 @@ type jobInfo interface {
|
|||||||
result(result string)
|
result(result string)
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:contextcheck,gocyclo // composes many step executors
|
|
||||||
func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executor {
|
func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executor {
|
||||||
steps := make([]common.Executor, 0)
|
steps := make([]common.Executor, 0)
|
||||||
preSteps := make([]common.Executor, 0)
|
preSteps := make([]common.Executor, 0)
|
||||||
@@ -157,7 +156,7 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
|
|||||||
pipeline = append(pipeline, steps...)
|
pipeline = append(pipeline, steps...)
|
||||||
|
|
||||||
return common.NewPipelineExecutor(info.startContainer(), common.NewPipelineExecutor(pipeline...).
|
return common.NewPipelineExecutor(info.startContainer(), common.NewPipelineExecutor(pipeline...).
|
||||||
Finally(func(ctx context.Context) error { //nolint:contextcheck // intentionally detaches from canceled parent
|
Finally(func(ctx context.Context) error {
|
||||||
var cancel context.CancelFunc
|
var cancel context.CancelFunc
|
||||||
if ctx.Err() == context.Canceled {
|
if ctx.Err() == context.Canceled {
|
||||||
// in case of an aborted run, we still should execute the
|
// in case of an aborted run, we still should execute the
|
||||||
|
|||||||
@@ -331,7 +331,7 @@ func TestNewJobExecutor(t *testing.T) {
|
|||||||
|
|
||||||
executor := newJobExecutor(jim, sfm, rc)
|
executor := newJobExecutor(jim, sfm, rc)
|
||||||
err := executor(ctx)
|
err := executor(ctx)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, tt.executedSteps, executorOrder)
|
assert.Equal(t, tt.executedSteps, executorOrder)
|
||||||
|
|
||||||
jim.AssertExpectations(t)
|
jim.AssertExpectations(t)
|
||||||
|
|||||||
@@ -30,6 +30,11 @@ const (
|
|||||||
gray = 37
|
gray = 37
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
rawOutputField = "raw_output"
|
||||||
|
scriptLineCyanField = "script_line_cyan"
|
||||||
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
colors []int
|
colors []int
|
||||||
nextColor int
|
nextColor int
|
||||||
@@ -161,6 +166,8 @@ func withStepLogger(ctx context.Context, stepNumber int, stepID, stepName, stage
|
|||||||
|
|
||||||
type entryProcessor func(entry *logrus.Entry) *logrus.Entry
|
type entryProcessor func(entry *logrus.Entry) *logrus.Entry
|
||||||
|
|
||||||
|
// valueMasker applies secrets and ::add-mask:: patterns to every log entry, including
|
||||||
|
// raw_output (command/stream) lines; there is no bypass by field.
|
||||||
func valueMasker(insecureSecrets bool, secrets map[string]string) entryProcessor {
|
func valueMasker(insecureSecrets bool, secrets map[string]string) entryProcessor {
|
||||||
return func(entry *logrus.Entry) *logrus.Entry {
|
return func(entry *logrus.Entry) *logrus.Entry {
|
||||||
if insecureSecrets {
|
if insecureSecrets {
|
||||||
@@ -227,8 +234,12 @@ func (f *jobLogFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry) {
|
|||||||
debugFlag = "[DEBUG] "
|
debugFlag = "[DEBUG] "
|
||||||
}
|
}
|
||||||
|
|
||||||
if entry.Data["raw_output"] == true {
|
if entry.Data[rawOutputField] == true {
|
||||||
fmt.Fprintf(b, "\x1b[%dm|\x1b[0m %s", f.color, entry.Message)
|
if entry.Data[scriptLineCyanField] == true {
|
||||||
|
fmt.Fprintf(b, "\x1b[%dm|\x1b[0m \x1b[36;1m%s\x1b[0m", f.color, entry.Message)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(b, "\x1b[%dm|\x1b[0m %s", f.color, entry.Message)
|
||||||
|
}
|
||||||
} else if entry.Data["dryrun"] == true {
|
} else if entry.Data["dryrun"] == true {
|
||||||
fmt.Fprintf(b, "\x1b[1m\x1b[%dm\x1b[7m*DRYRUN*\x1b[0m \x1b[%dm[%s] \x1b[0m%s%s", gray, f.color, job, debugFlag, entry.Message)
|
fmt.Fprintf(b, "\x1b[1m\x1b[%dm\x1b[7m*DRYRUN*\x1b[0m \x1b[%dm[%s] \x1b[0m%s%s", gray, f.color, job, debugFlag, entry.Message)
|
||||||
} else {
|
} else {
|
||||||
@@ -251,7 +262,7 @@ func (f *jobLogFormatter) print(b *bytes.Buffer, entry *logrus.Entry) {
|
|||||||
debugFlag = "[DEBUG] "
|
debugFlag = "[DEBUG] "
|
||||||
}
|
}
|
||||||
|
|
||||||
if entry.Data["raw_output"] == true {
|
if entry.Data[rawOutputField] == true {
|
||||||
fmt.Fprintf(b, "[%s] | %s", job, entry.Message)
|
fmt.Fprintf(b, "[%s] | %s", job, entry.Message)
|
||||||
} else if entry.Data["dryrun"] == true {
|
} else if entry.Data["dryrun"] == true {
|
||||||
fmt.Fprintf(b, "*DRYRUN* [%s] %s%s", job, debugFlag, entry.Message)
|
fmt.Fprintf(b, "*DRYRUN* [%s] %s%s", job, debugFlag, entry.Message)
|
||||||
|
|||||||
@@ -60,7 +60,7 @@ func TestMaxParallelStrategy(t *testing.T) {
|
|||||||
matrixes, err := job.GetMatrixes()
|
matrixes, err := job.GetMatrixes()
|
||||||
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.NotNil(t, matrixes)
|
assert.NotNil(t, matrixes)
|
||||||
assert.Equal(t, 5, len(matrixes)) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Len(t, matrixes, 5)
|
||||||
assert.Equal(t, tt.expectedMaxParallel, job.Strategy.MaxParallel)
|
assert.Equal(t, tt.expectedMaxParallel, job.Strategy.MaxParallel)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -101,8 +101,7 @@ func (rc *RunContext) jobContainerName() string {
|
|||||||
if rc.caller != nil {
|
if rc.caller != nil {
|
||||||
nameParts = append(nameParts, "CALLED-BY-"+rc.caller.runContext.JobName)
|
nameParts = append(nameParts, "CALLED-BY-"+rc.caller.runContext.JobName)
|
||||||
}
|
}
|
||||||
// return createSimpleContainerName(rc.Config.ContainerNamePrefix, "WORKFLOW-"+rc.Run.Workflow.Name, "JOB-"+rc.Name)
|
return createContainerName(nameParts...) // For Gitea
|
||||||
return createSimpleContainerName(nameParts...) // For Gitea
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// networkNameForGitea return the name of the network
|
// networkNameForGitea return the name of the network
|
||||||
@@ -260,7 +259,6 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func (rc *RunContext) startJobContainer() common.Executor {
|
func (rc *RunContext) startJobContainer() common.Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
@@ -769,7 +767,6 @@ func mergeMaps(maps ...map[string]string) map[string]string {
|
|||||||
return rtnMap
|
return rtnMap
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: use createSimpleContainerName
|
|
||||||
func createContainerName(parts ...string) string {
|
func createContainerName(parts ...string) string {
|
||||||
name := strings.Join(parts, "-")
|
name := strings.Join(parts, "-")
|
||||||
pattern := regexp.MustCompile("[^a-zA-Z0-9]")
|
pattern := regexp.MustCompile("[^a-zA-Z0-9]")
|
||||||
@@ -783,22 +780,6 @@ func createContainerName(parts ...string) string {
|
|||||||
return fmt.Sprintf("%s-%x", trimmedName, hash)
|
return fmt.Sprintf("%s-%x", trimmedName, hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
func createSimpleContainerName(parts ...string) string {
|
|
||||||
pattern := regexp.MustCompile("[^a-zA-Z0-9-]")
|
|
||||||
name := make([]string, 0, len(parts))
|
|
||||||
for _, v := range parts {
|
|
||||||
v = pattern.ReplaceAllString(v, "-")
|
|
||||||
v = strings.Trim(v, "-")
|
|
||||||
for strings.Contains(v, "--") {
|
|
||||||
v = strings.ReplaceAll(v, "--", "-")
|
|
||||||
}
|
|
||||||
if v != "" {
|
|
||||||
name = append(name, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.Join(name, "_")
|
|
||||||
}
|
|
||||||
|
|
||||||
func trimToLen(s string, l int) string {
|
func trimToLen(s string, l int) string {
|
||||||
if l < 0 {
|
if l < 0 {
|
||||||
l = 0
|
l = 0
|
||||||
@@ -826,7 +807,6 @@ func (rc *RunContext) getStepsContext() map[string]*model.StepResult {
|
|||||||
return rc.StepResults
|
return rc.StepResults
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext {
|
func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext {
|
||||||
logger := common.Logger(ctx)
|
logger := common.Logger(ctx)
|
||||||
ghc := &model.GithubContext{
|
ghc := &model.GithubContext{
|
||||||
|
|||||||
@@ -282,7 +282,7 @@ func TestGetGitHubContext(t *testing.T) {
|
|||||||
log.SetLevel(log.DebugLevel)
|
log.SetLevel(log.DebugLevel)
|
||||||
|
|
||||||
cwd, err := os.Getwd()
|
cwd, err := os.Getwd()
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
rc := &RunContext{
|
rc := &RunContext{
|
||||||
Config: &Config{
|
Config: &Config{
|
||||||
@@ -622,23 +622,16 @@ func TestRunContextGetEnv(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_createSimpleContainerName(t *testing.T) {
|
func TestCreateContainerNameBoundedForLongMatrixInput(t *testing.T) {
|
||||||
tests := []struct {
|
longMatrixValue := strings.Repeat("os=ubuntu-latest-go=1.24-node=22-", 20)
|
||||||
parts []string
|
name := createContainerName(
|
||||||
want string
|
"gitea",
|
||||||
}{
|
"WORKFLOW-super-long-workflow-name",
|
||||||
{
|
"JOB-build-matrix-"+longMatrixValue,
|
||||||
parts: []string{"a--a", "BB正", "c-C"},
|
)
|
||||||
want: "a-a_BB_c-C",
|
|
||||||
},
|
assert.LessOrEqual(t, len(name), 128)
|
||||||
{
|
assert.LessOrEqual(t, len(name+"-env"), 255)
|
||||||
parts: []string{"a-a", "", "-"},
|
assert.LessOrEqual(t, len(name+"-network"), 255)
|
||||||
want: "a-a",
|
assert.LessOrEqual(t, len(name+"-job1234567890"), 255)
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(strings.Join(tt.parts, " "), func(t *testing.T) {
|
|
||||||
assert.Equalf(t, tt.want, createSimpleContainerName(tt.parts...), "createSimpleContainerName(%v)", tt.parts)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -137,8 +137,6 @@ func (runner *runnerImpl) configure() (Runner, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewPlanExecutor ...
|
// NewPlanExecutor ...
|
||||||
//
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor {
|
func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor {
|
||||||
maxJobNameLen := 0
|
maxJobNameLen := 0
|
||||||
|
|
||||||
|
|||||||
@@ -87,7 +87,7 @@ func TestGraphMissingEvent(t *testing.T) {
|
|||||||
plan, err := planner.PlanEvent("push")
|
plan, err := planner.PlanEvent("push")
|
||||||
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.NotNil(t, plan)
|
assert.NotNil(t, plan)
|
||||||
assert.Equal(t, 0, len(plan.Stages)) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Empty(t, plan.Stages)
|
||||||
|
|
||||||
assert.Contains(t, buf.String(), "no events found for workflow: no-event.yml")
|
assert.Contains(t, buf.String(), "no events found for workflow: no-event.yml")
|
||||||
log.SetOutput(out)
|
log.SetOutput(out)
|
||||||
@@ -100,7 +100,7 @@ func TestGraphMissingFirst(t *testing.T) {
|
|||||||
plan, err := planner.PlanEvent("push")
|
plan, err := planner.PlanEvent("push")
|
||||||
assert.EqualError(t, err, "unable to build dependency graph for no first (no-first.yml)") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.EqualError(t, err, "unable to build dependency graph for no first (no-first.yml)") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.NotNil(t, plan)
|
assert.NotNil(t, plan)
|
||||||
assert.Equal(t, 0, len(plan.Stages)) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Empty(t, plan.Stages)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGraphWithMissing(t *testing.T) {
|
func TestGraphWithMissing(t *testing.T) {
|
||||||
@@ -114,7 +114,7 @@ func TestGraphWithMissing(t *testing.T) {
|
|||||||
|
|
||||||
plan, err := planner.PlanEvent("push")
|
plan, err := planner.PlanEvent("push")
|
||||||
assert.NotNil(t, plan)
|
assert.NotNil(t, plan)
|
||||||
assert.Equal(t, 0, len(plan.Stages)) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Empty(t, plan.Stages)
|
||||||
assert.EqualError(t, err, "unable to build dependency graph for missing (missing.yml)") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.EqualError(t, err, "unable to build dependency graph for missing (missing.yml)") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Contains(t, buf.String(), "unable to build dependency graph for missing (missing.yml)")
|
assert.Contains(t, buf.String(), "unable to build dependency graph for missing (missing.yml)")
|
||||||
log.SetOutput(out)
|
log.SetOutput(out)
|
||||||
@@ -134,7 +134,7 @@ func TestGraphWithSomeMissing(t *testing.T) {
|
|||||||
plan, err := planner.PlanAll()
|
plan, err := planner.PlanAll()
|
||||||
assert.Error(t, err, "unable to build dependency graph for no first (no-first.yml)") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Error(t, err, "unable to build dependency graph for no first (no-first.yml)") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.NotNil(t, plan)
|
assert.NotNil(t, plan)
|
||||||
assert.Equal(t, 1, len(plan.Stages)) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Len(t, plan.Stages, 1)
|
||||||
assert.Contains(t, buf.String(), "unable to build dependency graph for missing (missing.yml)")
|
assert.Contains(t, buf.String(), "unable to build dependency graph for missing (missing.yml)")
|
||||||
assert.Contains(t, buf.String(), "unable to build dependency graph for no first (no-first.yml)")
|
assert.Contains(t, buf.String(), "unable to build dependency graph for no first (no-first.yml)")
|
||||||
log.SetOutput(out)
|
log.SetOutput(out)
|
||||||
@@ -159,7 +159,7 @@ func TestGraphEvent(t *testing.T) {
|
|||||||
plan, err = planner.PlanEvent("release")
|
plan, err = planner.PlanEvent("release")
|
||||||
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.NotNil(t, plan)
|
assert.NotNil(t, plan)
|
||||||
assert.Equal(t, 0, len(plan.Stages)) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Empty(t, plan.Stages)
|
||||||
}
|
}
|
||||||
|
|
||||||
type TestJobFileInfo struct {
|
type TestJobFileInfo struct {
|
||||||
@@ -177,7 +177,7 @@ func (j *TestJobFileInfo) runTest(ctx context.Context, t *testing.T, cfg *Config
|
|||||||
log.SetLevel(logLevel)
|
log.SetLevel(logLevel)
|
||||||
|
|
||||||
workdir, err := filepath.Abs(j.workdir)
|
workdir, err := filepath.Abs(j.workdir)
|
||||||
assert.Nil(t, err, workdir) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, workdir) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
fullWorkflowPath := filepath.Join(workdir, j.workflowPath)
|
fullWorkflowPath := filepath.Join(workdir, j.workflowPath)
|
||||||
runnerConfig := &Config{
|
runnerConfig := &Config{
|
||||||
@@ -197,17 +197,17 @@ func (j *TestJobFileInfo) runTest(ctx context.Context, t *testing.T, cfg *Config
|
|||||||
}
|
}
|
||||||
|
|
||||||
runner, err := New(runnerConfig)
|
runner, err := New(runnerConfig)
|
||||||
assert.Nil(t, err, j.workflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, j.workflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
planner, err := model.NewWorkflowPlanner(fullWorkflowPath, true)
|
planner, err := model.NewWorkflowPlanner(fullWorkflowPath, true)
|
||||||
assert.Nil(t, err, fullWorkflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, fullWorkflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
plan, err := planner.PlanEvent(j.eventName)
|
plan, err := planner.PlanEvent(j.eventName)
|
||||||
assert.True(t, (err == nil) != (plan == nil), "PlanEvent should return either a plan or an error") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.True(t, (err == nil) != (plan == nil), "PlanEvent should return either a plan or an error") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
if err == nil && plan != nil {
|
if err == nil && plan != nil {
|
||||||
err = runner.NewPlanExecutor(plan)(ctx)
|
err = runner.NewPlanExecutor(plan)(ctx)
|
||||||
if j.errorMessage == "" {
|
if j.errorMessage == "" {
|
||||||
assert.Nil(t, err, fullWorkflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err, fullWorkflowPath) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
} else {
|
} else {
|
||||||
assert.Error(t, err, j.errorMessage) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.Error(t, err, j.errorMessage) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -44,6 +44,10 @@ func (sal *stepActionLocal) main() common.Executor {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
printRunActionHeader(ctx, sal.Step, sal.env, sal.getRunContext())
|
||||||
|
rawLogger := common.Logger(ctx).WithField(rawOutputField, true)
|
||||||
|
defer rawLogger.Infof("::endgroup::")
|
||||||
|
|
||||||
actionDir := filepath.Join(sal.getRunContext().Config.Workdir, sal.Step.Uses)
|
actionDir := filepath.Join(sal.getRunContext().Config.Workdir, sal.Step.Uses)
|
||||||
|
|
||||||
localReader := func(ctx context.Context) actionYamlReader {
|
localReader := func(ctx context.Context) actionYamlReader {
|
||||||
|
|||||||
@@ -97,10 +97,10 @@ func TestStepActionLocalTest(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
err := sal.pre()(ctx)
|
err := sal.pre()(ctx)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
err = sal.main()(ctx)
|
err = sal.main()(ctx)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
cm.AssertExpectations(t)
|
cm.AssertExpectations(t)
|
||||||
salm.AssertExpectations(t)
|
salm.AssertExpectations(t)
|
||||||
|
|||||||
@@ -39,7 +39,6 @@ type stepActionRemote struct {
|
|||||||
|
|
||||||
var stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor
|
var stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor
|
||||||
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
|
func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
if sar.remoteAction != nil && sar.action != nil {
|
if sar.remoteAction != nil && sar.action != nil {
|
||||||
@@ -166,6 +165,10 @@ func (sar *stepActionRemote) main() common.Executor {
|
|||||||
return common.NewPipelineExecutor(
|
return common.NewPipelineExecutor(
|
||||||
sar.prepareActionExecutor(),
|
sar.prepareActionExecutor(),
|
||||||
runStepExecutor(sar, stepStageMain, func(ctx context.Context) error {
|
runStepExecutor(sar, stepStageMain, func(ctx context.Context) error {
|
||||||
|
printRunActionHeader(ctx, sar.Step, sar.env, sar.RunContext)
|
||||||
|
rawLogger := common.Logger(ctx).WithField(rawOutputField, true)
|
||||||
|
defer rawLogger.Infof("::endgroup::")
|
||||||
|
|
||||||
github := sar.getGithubContext(ctx)
|
github := sar.getGithubContext(ctx)
|
||||||
if sar.remoteAction.IsCheckout() && isLocalCheckout(github, sar.Step) && !sar.RunContext.Config.NoSkipCheckout {
|
if sar.remoteAction.IsCheckout() && isLocalCheckout(github, sar.Step) && !sar.RunContext.Config.NoSkipCheckout {
|
||||||
if sar.RunContext.Config.BindWorkdir {
|
if sar.RunContext.Config.BindWorkdir {
|
||||||
|
|||||||
@@ -272,8 +272,8 @@ func TestStepActionRemotePre(t *testing.T) {
|
|||||||
|
|
||||||
err := sar.pre()(ctx)
|
err := sar.pre()(ctx)
|
||||||
|
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, true, clonedAction) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.True(t, clonedAction)
|
||||||
|
|
||||||
sarm.AssertExpectations(t)
|
sarm.AssertExpectations(t)
|
||||||
})
|
})
|
||||||
@@ -343,8 +343,8 @@ func TestStepActionRemotePreThroughAction(t *testing.T) {
|
|||||||
|
|
||||||
err := sar.pre()(ctx)
|
err := sar.pre()(ctx)
|
||||||
|
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, true, clonedAction) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.True(t, clonedAction)
|
||||||
|
|
||||||
sarm.AssertExpectations(t)
|
sarm.AssertExpectations(t)
|
||||||
})
|
})
|
||||||
@@ -419,7 +419,7 @@ func TestStepActionRemotePreThroughActionToken(t *testing.T) {
|
|||||||
|
|
||||||
err := sar.pre()(ctx)
|
err := sar.pre()(ctx)
|
||||||
|
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
// Verify that the clone was called (URL should be redirected to github.com)
|
// Verify that the clone was called (URL should be redirected to github.com)
|
||||||
assert.True(t, actualURL != "", "Expected clone to be called") //nolint:testifylint // pre-existing issue from nektos/act
|
assert.True(t, actualURL != "", "Expected clone to be called") //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
assert.Equal(t, "https://github.com/org/repo", actualURL, "URL should be redirected to github.com")
|
assert.Equal(t, "https://github.com/org/repo", actualURL, "URL should be redirected to github.com")
|
||||||
|
|||||||
@@ -116,6 +116,10 @@ func (sd *stepDocker) newStepContainer(ctx context.Context, image string, cmd, e
|
|||||||
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))
|
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))
|
||||||
|
|
||||||
binds, mounts := rc.GetBindsAndMounts()
|
binds, mounts := rc.GetBindsAndMounts()
|
||||||
|
networkMode := "container:" + rc.jobContainerName()
|
||||||
|
if rc.IsHostEnv(ctx) {
|
||||||
|
networkMode = "default"
|
||||||
|
}
|
||||||
stepContainer := ContainerNewContainer(&container.NewContainerInput{
|
stepContainer := ContainerNewContainer(&container.NewContainerInput{
|
||||||
Cmd: cmd,
|
Cmd: cmd,
|
||||||
Entrypoint: entrypoint,
|
Entrypoint: entrypoint,
|
||||||
@@ -123,10 +127,10 @@ func (sd *stepDocker) newStepContainer(ctx context.Context, image string, cmd, e
|
|||||||
Image: image,
|
Image: image,
|
||||||
Username: rc.Config.Secrets["DOCKER_USERNAME"],
|
Username: rc.Config.Secrets["DOCKER_USERNAME"],
|
||||||
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
|
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
|
||||||
Name: createSimpleContainerName(rc.jobContainerName(), "STEP-"+step.ID),
|
Name: createContainerName(rc.jobContainerName(), "STEP-"+step.ID),
|
||||||
Env: envList,
|
Env: envList,
|
||||||
Mounts: mounts,
|
Mounts: mounts,
|
||||||
NetworkMode: "container:" + rc.jobContainerName(),
|
NetworkMode: networkMode,
|
||||||
Binds: binds,
|
Binds: binds,
|
||||||
Stdout: logWriter,
|
Stdout: logWriter,
|
||||||
Stderr: logWriter,
|
Stderr: logWriter,
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/act/container"
|
"gitea.com/gitea/act_runner/act/container"
|
||||||
@@ -101,7 +102,7 @@ func TestStepDockerMain(t *testing.T) {
|
|||||||
cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/pathcmd.txt").Return(io.NopCloser(&bytes.Buffer{}), nil)
|
cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/pathcmd.txt").Return(io.NopCloser(&bytes.Buffer{}), nil)
|
||||||
|
|
||||||
err := sd.main()(ctx)
|
err := sd.main()(ctx)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
assert.Equal(t, "node:14", input.Image)
|
assert.Equal(t, "node:14", input.Image)
|
||||||
|
|
||||||
@@ -113,8 +114,86 @@ func TestStepDockerPrePost(t *testing.T) {
|
|||||||
sd := &stepDocker{}
|
sd := &stepDocker{}
|
||||||
|
|
||||||
err := sd.pre()(ctx)
|
err := sd.pre()(ctx)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
err = sd.post()(ctx)
|
err = sd.post()(ctx)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStepDockerNewStepContainerNetworkMode(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
platform string
|
||||||
|
expectDefault bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "docker mode attaches to job container network",
|
||||||
|
platform: "node:14",
|
||||||
|
expectDefault: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "host mode uses default network",
|
||||||
|
platform: "-self-hosted",
|
||||||
|
expectDefault: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
cm := &containerMock{}
|
||||||
|
|
||||||
|
var captured *container.NewContainerInput
|
||||||
|
origContainerNewContainer := ContainerNewContainer
|
||||||
|
ContainerNewContainer = func(input *container.NewContainerInput) container.ExecutionsEnvironment {
|
||||||
|
captured = input
|
||||||
|
return cm
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
ContainerNewContainer = origContainerNewContainer
|
||||||
|
}()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
platform := tc.platform
|
||||||
|
sd := &stepDocker{
|
||||||
|
RunContext: &RunContext{
|
||||||
|
StepResults: map[string]*model.StepResult{},
|
||||||
|
Config: &Config{
|
||||||
|
PlatformPicker: func(_ []string) string {
|
||||||
|
return platform
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Run: &model.Run{
|
||||||
|
JobID: "1",
|
||||||
|
Workflow: &model.Workflow{
|
||||||
|
Jobs: map[string]*model.Job{
|
||||||
|
"1": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
JobContainer: cm,
|
||||||
|
},
|
||||||
|
Step: &model.Step{
|
||||||
|
ID: "1",
|
||||||
|
Uses: "docker://alpine:3.20",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
sd.RunContext.ExprEval = sd.RunContext.NewExpressionEvaluator(ctx)
|
||||||
|
|
||||||
|
assert.Equal(t, tc.expectDefault, sd.RunContext.IsHostEnv(ctx),
|
||||||
|
"IsHostEnv mismatch for platform %q", tc.platform)
|
||||||
|
|
||||||
|
_ = sd.newStepContainer(ctx, "alpine:3.20", []string{"echo", "hello"}, nil)
|
||||||
|
|
||||||
|
if tc.expectDefault {
|
||||||
|
assert.Equal(t, "default", captured.NetworkMode,
|
||||||
|
"host-mode step container must use 'default' network, got %q",
|
||||||
|
captured.NetworkMode)
|
||||||
|
} else {
|
||||||
|
assert.True(t, strings.HasPrefix(captured.NetworkMode, "container:"),
|
||||||
|
"docker-mode step container must attach to job container network, got %q",
|
||||||
|
captured.NetworkMode)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -67,7 +67,7 @@ func TestStepFactoryNewStep(t *testing.T) {
|
|||||||
step, err := sf.newStep(tt.model, &RunContext{})
|
step, err := sf.newStep(tt.model, &RunContext{})
|
||||||
|
|
||||||
assert.True(t, tt.check((step)))
|
assert.True(t, tt.check((step)))
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"maps"
|
"maps"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"gitea.com/gitea/act_runner/act/common"
|
"gitea.com/gitea/act_runner/act/common"
|
||||||
@@ -17,15 +18,18 @@ import (
|
|||||||
"gitea.com/gitea/act_runner/act/model"
|
"gitea.com/gitea/act_runner/act/model"
|
||||||
|
|
||||||
"github.com/kballard/go-shellquote"
|
"github.com/kballard/go-shellquote"
|
||||||
|
yaml "go.yaml.in/yaml/v4"
|
||||||
)
|
)
|
||||||
|
|
||||||
type stepRun struct {
|
type stepRun struct {
|
||||||
Step *model.Step
|
Step *model.Step
|
||||||
RunContext *RunContext
|
RunContext *RunContext
|
||||||
cmd []string
|
cmd []string
|
||||||
cmdline string
|
cmdline string
|
||||||
env map[string]string
|
env map[string]string
|
||||||
WorkingDirectory string
|
WorkingDirectory string
|
||||||
|
interpolatedScript string
|
||||||
|
shellCommand string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sr *stepRun) pre() common.Executor {
|
func (sr *stepRun) pre() common.Executor {
|
||||||
@@ -39,15 +43,154 @@ func (sr *stepRun) main() common.Executor {
|
|||||||
return runStepExecutor(sr, stepStageMain, common.NewPipelineExecutor(
|
return runStepExecutor(sr, stepStageMain, common.NewPipelineExecutor(
|
||||||
sr.setupShellCommandExecutor(),
|
sr.setupShellCommandExecutor(),
|
||||||
func(ctx context.Context) error {
|
func(ctx context.Context) error {
|
||||||
sr.getRunContext().ApplyExtraPath(ctx, &sr.env)
|
rc := sr.getRunContext()
|
||||||
if he, ok := sr.getRunContext().JobContainer.(*container.HostEnvironment); ok && he != nil {
|
// Apply ::add-path:: effects before printing so PATH is accurate in the env: block.
|
||||||
|
rc.ApplyExtraPath(ctx, &sr.env)
|
||||||
|
sr.printRunScriptActionDetails(ctx)
|
||||||
|
if he, ok := rc.JobContainer.(*container.HostEnvironment); ok && he != nil {
|
||||||
return he.ExecWithCmdLine(sr.cmd, sr.cmdline, sr.env, "", sr.WorkingDirectory)(ctx)
|
return he.ExecWithCmdLine(sr.cmd, sr.cmdline, sr.env, "", sr.WorkingDirectory)(ctx)
|
||||||
}
|
}
|
||||||
return sr.getRunContext().JobContainer.Exec(sr.cmd, sr.env, "", sr.WorkingDirectory)(ctx)
|
return rc.JobContainer.Exec(sr.cmd, sr.env, "", sr.WorkingDirectory)(ctx)
|
||||||
},
|
},
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// printRunScriptActionDetails mirrors actions/runner ScriptHandler.PrintActionDetails
|
||||||
|
// for script steps.
|
||||||
|
func (sr *stepRun) printRunScriptActionDetails(ctx context.Context) {
|
||||||
|
rawLogger := common.Logger(ctx).WithField(rawOutputField, true)
|
||||||
|
scriptLineLogger := rawLogger.WithField(scriptLineCyanField, true)
|
||||||
|
|
||||||
|
normalized := strings.TrimRight(strings.ReplaceAll(sr.interpolatedScript, "\r\n", "\n"), "\n")
|
||||||
|
|
||||||
|
rawLogger.Infof("::group::Run %s", sr.runScriptGroupTitle(normalized))
|
||||||
|
|
||||||
|
if normalized != "" {
|
||||||
|
for line := range strings.SplitSeq(normalized, "\n") {
|
||||||
|
scriptLineLogger.Info(line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rawLogger.Infof("shell: %s", sr.shellCommand)
|
||||||
|
|
||||||
|
printStepEnvBlock(ctx, sr.Step, sr.env, sr.getRunContext())
|
||||||
|
rawLogger.Infof("::endgroup::")
|
||||||
|
}
|
||||||
|
|
||||||
|
// printRunActionHeader mirrors actions/runner's "Run <action>" header for `uses:` steps,
|
||||||
|
// including the with: inputs and the step-level env: block. The caller is responsible
|
||||||
|
// for emitting ::endgroup:: after the action finishes.
|
||||||
|
func printRunActionHeader(ctx context.Context, step *model.Step, env map[string]string, rc *RunContext) {
|
||||||
|
if step == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rawLogger := common.Logger(ctx).WithField(rawOutputField, true)
|
||||||
|
|
||||||
|
title := step.Uses
|
||||||
|
if step.Name != "" {
|
||||||
|
title = step.Name
|
||||||
|
}
|
||||||
|
rawLogger.Infof("::group::Run %s", title)
|
||||||
|
|
||||||
|
if len(step.With) > 0 {
|
||||||
|
rawLogger.Infof("with:")
|
||||||
|
for _, k := range slices.Sorted(maps.Keys(step.With)) {
|
||||||
|
rawLogger.Infof(" %s: %s", k, step.With[k])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
printStepEnvBlock(ctx, step, env, rc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// printStepEnvBlock emits the declared-env block (YAML order, internal vars filtered)
|
||||||
|
// shared by the run: and uses: "Run" headers.
|
||||||
|
func printStepEnvBlock(ctx context.Context, step *model.Step, env map[string]string, rc *RunContext) {
|
||||||
|
rawLogger := common.Logger(ctx).WithField(rawOutputField, true)
|
||||||
|
caseInsensitive := rc != nil && rc.JobContainer != nil && rc.JobContainer.IsEnvironmentCaseInsensitive()
|
||||||
|
var visible []string
|
||||||
|
for _, k := range stepDeclaredEnvKeysInOrder(step) {
|
||||||
|
if !isInternalEnvKey(k, caseInsensitive) {
|
||||||
|
visible = append(visible, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(visible) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rawLogger.Infof("env:")
|
||||||
|
envLookup := env
|
||||||
|
if caseInsensitive {
|
||||||
|
envLookup = make(map[string]string, len(env))
|
||||||
|
for k, v := range env {
|
||||||
|
envLookup[strings.ToUpper(k)] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, k := range visible {
|
||||||
|
lookupKey := k
|
||||||
|
if caseInsensitive {
|
||||||
|
lookupKey = strings.ToUpper(k)
|
||||||
|
}
|
||||||
|
rawLogger.Infof(" %s: %s", k, envLookup[lookupKey])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isInternalEnvKey matches actions/runner's filtered set of vars that are hidden
|
||||||
|
// from the "Run" header's env: block because they are injected by the runner itself.
|
||||||
|
func isInternalEnvKey(k string, caseInsensitive bool) bool {
|
||||||
|
upper := k
|
||||||
|
if caseInsensitive {
|
||||||
|
upper = strings.ToUpper(k)
|
||||||
|
}
|
||||||
|
switch upper {
|
||||||
|
case "PATH", "HOME", "CI":
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return strings.HasPrefix(upper, "GITHUB_") ||
|
||||||
|
strings.HasPrefix(upper, "GITEA_") ||
|
||||||
|
strings.HasPrefix(upper, "RUNNER_") ||
|
||||||
|
strings.HasPrefix(upper, "INPUT_")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sr *stepRun) runScriptGroupTitle(normalizedScript string) string {
|
||||||
|
trimmed := strings.TrimLeft(normalizedScript, " \t\r\n")
|
||||||
|
if idx := strings.IndexAny(trimmed, "\r\n"); idx >= 0 {
|
||||||
|
trimmed = trimmed[:idx]
|
||||||
|
}
|
||||||
|
if trimmed != "" {
|
||||||
|
return trimmed
|
||||||
|
}
|
||||||
|
if sr.Step != nil {
|
||||||
|
if sr.Step.Name != "" {
|
||||||
|
return sr.Step.Name
|
||||||
|
}
|
||||||
|
return sr.Step.ID
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// stepDeclaredEnvKeysInOrder walks the raw YAML Env mapping so keys are emitted in
|
||||||
|
// the order the workflow author wrote them; step.Environment() decodes into a Go map
|
||||||
|
// and loses ordering.
|
||||||
|
func stepDeclaredEnvKeysInOrder(step *model.Step) []string {
|
||||||
|
if step == nil || step.Env.Kind != yaml.MappingNode {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
content := step.Env.Content
|
||||||
|
keys := make([]string, 0, len(content)/2)
|
||||||
|
seen := make(map[string]struct{}, len(content)/2)
|
||||||
|
for i := 0; i+1 < len(content); i += 2 {
|
||||||
|
k := content[i]
|
||||||
|
if k.Kind != yaml.ScalarNode || k.Tag == "!!merge" || k.Value == "<<" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, dup := seen[k.Value]; dup {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seen[k.Value] = struct{}{}
|
||||||
|
keys = append(keys, k.Value)
|
||||||
|
}
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
func (sr *stepRun) post() common.Executor {
|
func (sr *stepRun) post() common.Executor {
|
||||||
return func(ctx context.Context) error {
|
return func(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
@@ -111,8 +254,10 @@ func (sr *stepRun) setupShellCommand(ctx context.Context) (name, script string,
|
|||||||
step := sr.Step
|
step := sr.Step
|
||||||
|
|
||||||
script = sr.RunContext.NewStepExpressionEvaluator(ctx, sr).Interpolate(ctx, step.Run)
|
script = sr.RunContext.NewStepExpressionEvaluator(ctx, sr).Interpolate(ctx, step.Run)
|
||||||
|
sr.interpolatedScript = script
|
||||||
|
|
||||||
scCmd := step.ShellCommand()
|
scCmd := step.ShellCommand()
|
||||||
|
sr.shellCommand = scCmd
|
||||||
|
|
||||||
name = getScriptName(sr.RunContext, step)
|
name = getScriptName(sr.RunContext, step)
|
||||||
|
|
||||||
|
|||||||
182
act/runner/step_run_print_test.go
Normal file
182
act/runner/step_run_print_test.go
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||||
|
// Copyright 2026 The nektos/act Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
package runner
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"gitea.com/gitea/act_runner/act/common"
|
||||||
|
"gitea.com/gitea/act_runner/act/model"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
yaml "go.yaml.in/yaml/v4"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRunScriptGroupTitle(t *testing.T) {
|
||||||
|
sr := &stepRun{Step: &model.Step{Name: "Build"}}
|
||||||
|
assert.Equal(t, "make build", sr.runScriptGroupTitle("make build"))
|
||||||
|
assert.Equal(t, "echo one", sr.runScriptGroupTitle(" \techo one\necho two"))
|
||||||
|
assert.Equal(t, "Build", sr.runScriptGroupTitle(""))
|
||||||
|
|
||||||
|
sr = &stepRun{Step: &model.Step{ID: "s1"}}
|
||||||
|
assert.Equal(t, "s1", sr.runScriptGroupTitle("\n \n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStepDeclaredEnvOrderPreservesYAML(t *testing.T) {
|
||||||
|
raw := `id: s1
|
||||||
|
run: "echo 1"
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: tok
|
||||||
|
PATH: /custom/bin
|
||||||
|
MY_VAR: hello
|
||||||
|
`
|
||||||
|
var step model.Step
|
||||||
|
require.NoError(t, yaml.Unmarshal([]byte(raw), &step))
|
||||||
|
assert.Equal(t, []string{"GITHUB_TOKEN", "PATH", "MY_VAR"}, stepDeclaredEnvKeysInOrder(&step))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStepDeclaredEnvKeysInOrderEmpty(t *testing.T) {
|
||||||
|
assert.Nil(t, stepDeclaredEnvKeysInOrder(nil))
|
||||||
|
assert.Empty(t, stepDeclaredEnvKeysInOrder(&model.Step{}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStepDeclaredEnvKeysIgnoreYAMLMergeKey(t *testing.T) {
|
||||||
|
doc := `
|
||||||
|
common: &common
|
||||||
|
COMMON_A: a
|
||||||
|
COMMON_B: b
|
||||||
|
step:
|
||||||
|
env:
|
||||||
|
LOCAL_BEFORE: before
|
||||||
|
<<: *common
|
||||||
|
COMMON_B: overridden
|
||||||
|
LOCAL_AFTER: after
|
||||||
|
`
|
||||||
|
var root struct {
|
||||||
|
Step model.Step `yaml:"step"`
|
||||||
|
}
|
||||||
|
require.NoError(t, yaml.Unmarshal([]byte(doc), &root))
|
||||||
|
|
||||||
|
keys := stepDeclaredEnvKeysInOrder(&root.Step)
|
||||||
|
assert.Equal(t, []string{"LOCAL_BEFORE", "COMMON_B", "LOCAL_AFTER"}, keys)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPrintRunScriptActionDetailsGolden(t *testing.T) {
|
||||||
|
raw := `id: s1
|
||||||
|
name: Build
|
||||||
|
run: |
|
||||||
|
echo one
|
||||||
|
echo two
|
||||||
|
shell: pwsh
|
||||||
|
env:
|
||||||
|
PATH_PREFIX: /custom/bin
|
||||||
|
GITHUB_TOKEN: tok
|
||||||
|
GREETING: hello
|
||||||
|
`
|
||||||
|
var step model.Step
|
||||||
|
require.NoError(t, yaml.Unmarshal([]byte(raw), &step))
|
||||||
|
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
logger := logrus.New()
|
||||||
|
logger.SetOutput(buf)
|
||||||
|
logger.SetLevel(logrus.InfoLevel)
|
||||||
|
logger.SetFormatter(&jobLogFormatter{color: cyan})
|
||||||
|
entry := logger.WithFields(logrus.Fields{"job": "j1"})
|
||||||
|
ctx := common.WithLogger(context.Background(), entry)
|
||||||
|
|
||||||
|
sr := &stepRun{
|
||||||
|
Step: &step,
|
||||||
|
RunContext: &RunContext{},
|
||||||
|
shellCommand: "pwsh -command . '{0}'",
|
||||||
|
interpolatedScript: "echo one\necho two\n",
|
||||||
|
env: map[string]string{
|
||||||
|
"PATH_PREFIX": "/custom/bin",
|
||||||
|
"GITHUB_TOKEN": "tok",
|
||||||
|
"GREETING": "hello",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
sr.printRunScriptActionDetails(ctx)
|
||||||
|
|
||||||
|
want := strings.Join([]string{
|
||||||
|
"[j1] | ::group::Run echo one",
|
||||||
|
"[j1] | echo one",
|
||||||
|
"[j1] | echo two",
|
||||||
|
"[j1] | shell: pwsh -command . '{0}'",
|
||||||
|
"[j1] | env:",
|
||||||
|
"[j1] | PATH_PREFIX: /custom/bin",
|
||||||
|
"[j1] | GREETING: hello",
|
||||||
|
"[j1] | ::endgroup::",
|
||||||
|
"",
|
||||||
|
}, "\n")
|
||||||
|
assert.Equal(t, want, buf.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPrintRunActionHeaderGolden(t *testing.T) {
|
||||||
|
raw := `id: s1
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: "0"
|
||||||
|
token: secret
|
||||||
|
env:
|
||||||
|
CUSTOM: value
|
||||||
|
GITHUB_TOKEN: tok
|
||||||
|
`
|
||||||
|
var step model.Step
|
||||||
|
require.NoError(t, yaml.Unmarshal([]byte(raw), &step))
|
||||||
|
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
logger := logrus.New()
|
||||||
|
logger.SetOutput(buf)
|
||||||
|
logger.SetLevel(logrus.InfoLevel)
|
||||||
|
logger.SetFormatter(&jobLogFormatter{color: cyan})
|
||||||
|
entry := logger.WithFields(logrus.Fields{"job": "j1"})
|
||||||
|
ctx := common.WithLogger(context.Background(), entry)
|
||||||
|
|
||||||
|
printRunActionHeader(ctx, &step, map[string]string{"CUSTOM": "value", "GITHUB_TOKEN": "tok"}, &RunContext{})
|
||||||
|
|
||||||
|
want := strings.Join([]string{
|
||||||
|
"[j1] | ::group::Run actions/checkout@v4",
|
||||||
|
"[j1] | with:",
|
||||||
|
"[j1] | fetch-depth: 0",
|
||||||
|
"[j1] | token: secret",
|
||||||
|
"[j1] | env:",
|
||||||
|
"[j1] | CUSTOM: value",
|
||||||
|
"",
|
||||||
|
}, "\n")
|
||||||
|
assert.Equal(t, want, buf.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsInternalEnvKey(t *testing.T) {
|
||||||
|
for _, k := range []string{"PATH", "HOME", "CI", "GITHUB_TOKEN", "GITEA_ACTIONS", "RUNNER_OS", "INPUT_FOO"} {
|
||||||
|
assert.True(t, isInternalEnvKey(k, false), k)
|
||||||
|
}
|
||||||
|
for _, k := range []string{"PATH_PREFIX", "MY_VAR", "GREETING", "HOMEPAGE"} {
|
||||||
|
assert.False(t, isInternalEnvKey(k, false), k)
|
||||||
|
}
|
||||||
|
assert.True(t, isInternalEnvKey("path", true))
|
||||||
|
assert.False(t, isInternalEnvKey("path", false))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPrintColoredScriptLineCyan(t *testing.T) {
|
||||||
|
f := &jobLogFormatter{color: cyan}
|
||||||
|
entry := &logrus.Entry{
|
||||||
|
Level: logrus.InfoLevel,
|
||||||
|
Message: "echo one",
|
||||||
|
Data: logrus.Fields{
|
||||||
|
"job": "j1",
|
||||||
|
rawOutputField: true,
|
||||||
|
scriptLineCyanField: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
f.printColored(buf, entry)
|
||||||
|
assert.Equal(t, "\x1b[36m|\x1b[0m \x1b[36;1mecho one\x1b[0m", buf.String())
|
||||||
|
}
|
||||||
@@ -81,7 +81,7 @@ func TestStepRun(t *testing.T) {
|
|||||||
cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/pathcmd.txt").Return(io.NopCloser(&bytes.Buffer{}), nil)
|
cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/pathcmd.txt").Return(io.NopCloser(&bytes.Buffer{}), nil)
|
||||||
|
|
||||||
err := sr.main()(ctx)
|
err := sr.main()(ctx)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
cm.AssertExpectations(t)
|
cm.AssertExpectations(t)
|
||||||
}
|
}
|
||||||
@@ -91,8 +91,8 @@ func TestStepRunPrePost(t *testing.T) {
|
|||||||
sr := &stepRun{}
|
sr := &stepRun{}
|
||||||
|
|
||||||
err := sr.pre()(ctx)
|
err := sr.pre()(ctx)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
err = sr.post()(ctx)
|
err = sr.post()(ctx)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -156,7 +156,7 @@ func TestSetupEnv(t *testing.T) {
|
|||||||
sm.On("getEnv").Return(&env)
|
sm.On("getEnv").Return(&env)
|
||||||
|
|
||||||
err := setupEnv(context.Background(), sm)
|
err := setupEnv(context.Background(), sm)
|
||||||
assert.Nil(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
assert.NoError(t, err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// These are commit or system specific
|
// These are commit or system specific
|
||||||
delete((env), "GITHUB_REF")
|
delete((env), "GITHUB_REF")
|
||||||
@@ -318,35 +318,35 @@ func TestIsContinueOnError(t *testing.T) {
|
|||||||
step := createTestStep(t, "name: test")
|
step := createTestStep(t, "name: test")
|
||||||
continueOnError, err := isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
continueOnError, err := isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
||||||
assertObject.False(continueOnError)
|
assertObject.False(continueOnError)
|
||||||
assertObject.Nil(err) //nolint:testifylint // pre-existing issue from nektos/act
|
assertObject.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// explcit true
|
// explcit true
|
||||||
step = createTestStep(t, "continue-on-error: true")
|
step = createTestStep(t, "continue-on-error: true")
|
||||||
continueOnError, err = isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
continueOnError, err = isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
||||||
assertObject.True(continueOnError)
|
assertObject.True(continueOnError)
|
||||||
assertObject.Nil(err) //nolint:testifylint // pre-existing issue from nektos/act
|
assertObject.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// explicit false
|
// explicit false
|
||||||
step = createTestStep(t, "continue-on-error: false")
|
step = createTestStep(t, "continue-on-error: false")
|
||||||
continueOnError, err = isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
continueOnError, err = isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
||||||
assertObject.False(continueOnError)
|
assertObject.False(continueOnError)
|
||||||
assertObject.Nil(err) //nolint:testifylint // pre-existing issue from nektos/act
|
assertObject.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// expression true
|
// expression true
|
||||||
step = createTestStep(t, "continue-on-error: ${{ 'test' == 'test' }}")
|
step = createTestStep(t, "continue-on-error: ${{ 'test' == 'test' }}")
|
||||||
continueOnError, err = isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
continueOnError, err = isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
||||||
assertObject.True(continueOnError)
|
assertObject.True(continueOnError)
|
||||||
assertObject.Nil(err) //nolint:testifylint // pre-existing issue from nektos/act
|
assertObject.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// expression false
|
// expression false
|
||||||
step = createTestStep(t, "continue-on-error: ${{ 'test' != 'test' }}")
|
step = createTestStep(t, "continue-on-error: ${{ 'test' != 'test' }}")
|
||||||
continueOnError, err = isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
continueOnError, err = isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
||||||
assertObject.False(continueOnError)
|
assertObject.False(continueOnError)
|
||||||
assertObject.Nil(err) //nolint:testifylint // pre-existing issue from nektos/act
|
assertObject.NoError(err) //nolint:testifylint // pre-existing issue from nektos/act
|
||||||
|
|
||||||
// expression parse error
|
// expression parse error
|
||||||
step = createTestStep(t, "continue-on-error: ${{ 'test' != test }}")
|
step = createTestStep(t, "continue-on-error: ${{ 'test' != test }}")
|
||||||
continueOnError, err = isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
continueOnError, err = isContinueOnError(context.Background(), step.getStepModel().RawContinueOnError, step, stepStageMain)
|
||||||
assertObject.False(continueOnError)
|
assertObject.False(continueOnError)
|
||||||
assertObject.NotNil(err) //nolint:testifylint // pre-existing issue from nektos/act
|
assertObject.Error(err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -38,7 +38,6 @@ func CompilePattern(rawpattern string) (*WorkflowPattern, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:gocyclo // function handles many cases
|
|
||||||
func PatternToRegex(pattern string) (string, error) {
|
func PatternToRegex(pattern string) (string, error) {
|
||||||
var rpattern strings.Builder
|
var rpattern strings.Builder
|
||||||
rpattern.WriteString("^")
|
rpattern.WriteString("^")
|
||||||
|
|||||||
14
go.mod
14
go.mod
@@ -4,11 +4,11 @@ go 1.26.0
|
|||||||
|
|
||||||
require (
|
require (
|
||||||
code.gitea.io/actions-proto-go v0.4.1
|
code.gitea.io/actions-proto-go v0.4.1
|
||||||
connectrpc.com/connect v1.19.1
|
connectrpc.com/connect v1.19.2
|
||||||
github.com/avast/retry-go/v4 v4.7.0
|
github.com/avast/retry-go/v4 v4.7.0
|
||||||
github.com/docker/docker v25.0.13+incompatible
|
github.com/docker/docker v25.0.15+incompatible
|
||||||
github.com/joho/godotenv v1.5.1
|
github.com/joho/godotenv v1.5.1
|
||||||
github.com/mattn/go-isatty v0.0.20
|
github.com/mattn/go-isatty v0.0.22
|
||||||
github.com/sirupsen/logrus v1.9.4
|
github.com/sirupsen/logrus v1.9.4
|
||||||
github.com/spf13/cobra v1.10.2
|
github.com/spf13/cobra v1.10.2
|
||||||
github.com/stretchr/testify v1.11.1
|
github.com/stretchr/testify v1.11.1
|
||||||
@@ -24,7 +24,7 @@ require (
|
|||||||
github.com/Masterminds/semver v1.5.0
|
github.com/Masterminds/semver v1.5.0
|
||||||
github.com/creack/pty v1.1.24
|
github.com/creack/pty v1.1.24
|
||||||
github.com/distribution/reference v0.6.0
|
github.com/distribution/reference v0.6.0
|
||||||
github.com/docker/cli v25.0.3+incompatible
|
github.com/docker/cli v25.0.7+incompatible
|
||||||
github.com/docker/go-connections v0.6.0
|
github.com/docker/go-connections v0.6.0
|
||||||
github.com/go-git/go-billy/v5 v5.8.0
|
github.com/go-git/go-billy/v5 v5.8.0
|
||||||
github.com/go-git/go-git/v5 v5.18.0
|
github.com/go-git/go-git/v5 v5.18.0
|
||||||
@@ -33,7 +33,7 @@ require (
|
|||||||
github.com/julienschmidt/httprouter v1.3.0
|
github.com/julienschmidt/httprouter v1.3.0
|
||||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
|
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
|
||||||
github.com/moby/buildkit v0.13.2
|
github.com/moby/buildkit v0.13.2
|
||||||
github.com/moby/patternmatcher v0.6.0
|
github.com/moby/patternmatcher v0.6.1
|
||||||
github.com/opencontainers/image-spec v1.1.1
|
github.com/opencontainers/image-spec v1.1.1
|
||||||
github.com/opencontainers/selinux v1.13.1
|
github.com/opencontainers/selinux v1.13.1
|
||||||
github.com/pkg/errors v0.9.1
|
github.com/pkg/errors v0.9.1
|
||||||
@@ -114,7 +114,3 @@ require (
|
|||||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
)
|
)
|
||||||
|
|
||||||
// Remove after github.com/docker/distribution is updated to support distribution/reference v0.6.0
|
|
||||||
// (pulled in via moby/buildkit, breaks on undefined: reference.SplitHostname)
|
|
||||||
replace github.com/distribution/reference v0.6.0 => github.com/distribution/reference v0.5.0
|
|
||||||
|
|||||||
20
go.sum
20
go.sum
@@ -1,7 +1,7 @@
|
|||||||
code.gitea.io/actions-proto-go v0.4.1 h1:l0EYhjsgpUe/1VABo2eK7zcoNX2W44WOnb0MSLrKfls=
|
code.gitea.io/actions-proto-go v0.4.1 h1:l0EYhjsgpUe/1VABo2eK7zcoNX2W44WOnb0MSLrKfls=
|
||||||
code.gitea.io/actions-proto-go v0.4.1/go.mod h1:mn7Wkqz6JbnTOHQpot3yDeHx+O5C9EGhMEE+htvHBas=
|
code.gitea.io/actions-proto-go v0.4.1/go.mod h1:mn7Wkqz6JbnTOHQpot3yDeHx+O5C9EGhMEE+htvHBas=
|
||||||
connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
|
connectrpc.com/connect v1.19.2 h1:McQ83FGdzL+t60peksi0gXC7MQ/iLKgLduAnThbM0mo=
|
||||||
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
|
connectrpc.com/connect v1.19.2/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
|
||||||
cyphar.com/go-pathrs v0.2.3 h1:0pH8gep37wB0BgaXrEaN1OtZhUMeS7VvaejSr6i822o=
|
cyphar.com/go-pathrs v0.2.3 h1:0pH8gep37wB0BgaXrEaN1OtZhUMeS7VvaejSr6i822o=
|
||||||
cyphar.com/go-pathrs v0.2.3/go.mod h1:y8f1EMG7r+hCuFf/rXsKqMJrJAUoADZGNh5/vZPKcGc=
|
cyphar.com/go-pathrs v0.2.3/go.mod h1:y8f1EMG7r+hCuFf/rXsKqMJrJAUoADZGNh5/vZPKcGc=
|
||||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||||
@@ -49,12 +49,16 @@ github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1
|
|||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||||
github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284=
|
github.com/docker/cli v25.0.7+incompatible h1:scW/AbGafKmANsonsFckFHTwpz2QypoPA/zpoLnDs/E=
|
||||||
github.com/docker/cli v25.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
github.com/docker/cli v25.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||||
github.com/docker/docker v25.0.13+incompatible h1:YeBrkUd3q0ZoRDNoEzuopwCLU+uD8GZahDHwBdsTnkU=
|
github.com/docker/docker v25.0.13+incompatible h1:YeBrkUd3q0ZoRDNoEzuopwCLU+uD8GZahDHwBdsTnkU=
|
||||||
github.com/docker/docker v25.0.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
github.com/docker/docker v25.0.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||||
|
github.com/docker/docker v25.0.14+incompatible h1:+HNue3fKbqiDHYFAriyiMjfS5u25zB0E2/R8f42lOMc=
|
||||||
|
github.com/docker/docker v25.0.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||||
|
github.com/docker/docker v25.0.15+incompatible h1:JhRD6vZdk0Ms3SEMztefBISJL13NbxudQnGix6l+T5M=
|
||||||
|
github.com/docker/docker v25.0.15+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||||
github.com/docker/docker-credential-helpers v0.9.5 h1:EFNN8DHvaiK8zVqFA2DT6BjXE0GzfLOZ38ggPTKePkY=
|
github.com/docker/docker-credential-helpers v0.9.5 h1:EFNN8DHvaiK8zVqFA2DT6BjXE0GzfLOZ38ggPTKePkY=
|
||||||
github.com/docker/docker-credential-helpers v0.9.5/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
|
github.com/docker/docker-credential-helpers v0.9.5/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
|
||||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||||
@@ -131,6 +135,8 @@ github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHP
|
|||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
|
github.com/mattn/go-isatty v0.0.22 h1:j8l17JJ9i6VGPUFUYoTUKPSgKe/83EYU2zBC7YNKMw4=
|
||||||
|
github.com/mattn/go-isatty v0.0.22/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4=
|
||||||
github.com/mattn/go-runewidth v0.0.20 h1:WcT52H91ZUAwy8+HUkdM3THM6gXqXuLJi9O3rjcQQaQ=
|
github.com/mattn/go-runewidth v0.0.20 h1:WcT52H91ZUAwy8+HUkdM3THM6gXqXuLJi9O3rjcQQaQ=
|
||||||
github.com/mattn/go-runewidth v0.0.20/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
github.com/mattn/go-runewidth v0.0.20/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
||||||
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
|
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
|
||||||
@@ -141,6 +147,8 @@ github.com/moby/buildkit v0.13.2 h1:nXNszM4qD9E7QtG7bFWPnDI1teUQFQglBzon/IU3SzI=
|
|||||||
github.com/moby/buildkit v0.13.2/go.mod h1:2cyVOv9NoHM7arphK9ZfHIWKn9YVZRFd1wXB8kKmEzY=
|
github.com/moby/buildkit v0.13.2/go.mod h1:2cyVOv9NoHM7arphK9ZfHIWKn9YVZRFd1wXB8kKmEzY=
|
||||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
||||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||||
|
github.com/moby/patternmatcher v0.6.1 h1:qlhtafmr6kgMIJjKJMDmMWq7WLkKIo23hsrpR3x084U=
|
||||||
|
github.com/moby/patternmatcher v0.6.1/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||||
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
||||||
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
|
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
|
||||||
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
|
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
|
||||||
|
|||||||
@@ -4,6 +4,7 @@
|
|||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
@@ -47,10 +48,15 @@ func runCacheServer(configFile *string, cacheArgs *cacheServerArgs) func(cmd *co
|
|||||||
port = cacheArgs.Port
|
port = cacheArgs.Port
|
||||||
}
|
}
|
||||||
|
|
||||||
|
secret := cfg.Cache.ExternalSecret
|
||||||
|
if secret == "" {
|
||||||
|
return errors.New("cache.external_secret must be set for cache-server; configure the same value on each runner that points at this server via cache.external_server")
|
||||||
|
}
|
||||||
cacheHandler, err := artifactcache.StartHandler(
|
cacheHandler, err := artifactcache.StartHandler(
|
||||||
dir,
|
dir,
|
||||||
host,
|
host,
|
||||||
port,
|
port,
|
||||||
|
secret,
|
||||||
log.StandardLogger().WithField("module", "cache_request"),
|
log.StandardLogger().WithField("module", "cache_request"),
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -6,6 +6,8 @@ package cmd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/hex"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"maps"
|
"maps"
|
||||||
@@ -368,7 +370,7 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command
|
|||||||
}
|
}
|
||||||
|
|
||||||
// init a cache server
|
// init a cache server
|
||||||
handler, err := artifactcache.StartHandler("", "", 0, log.StandardLogger().WithField("module", "cache_request"))
|
handler, err := artifactcache.StartHandler("", "", 0, "", log.StandardLogger().WithField("module", "cache_request"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -393,6 +395,25 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command
|
|||||||
execArgs.artifactServerPath = tempDir
|
execArgs.artifactServerPath = tempDir
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Register ACTIONS_RUNTIME_TOKEN against local cache server
|
||||||
|
env := execArgs.LoadEnvs()
|
||||||
|
const actionsRuntimeTokenEnvName = "ACTIONS_RUNTIME_TOKEN"
|
||||||
|
actionsRuntimeToken := env[actionsRuntimeTokenEnvName]
|
||||||
|
if actionsRuntimeToken == "" {
|
||||||
|
actionsRuntimeToken = os.Getenv(actionsRuntimeTokenEnvName)
|
||||||
|
}
|
||||||
|
if actionsRuntimeToken == "" {
|
||||||
|
tmpBranch := make([]byte, 12)
|
||||||
|
if _, err := rand.Read(tmpBranch); err != nil {
|
||||||
|
actionsRuntimeToken = "token"
|
||||||
|
} else {
|
||||||
|
actionsRuntimeToken = hex.EncodeToString(tmpBranch)
|
||||||
|
}
|
||||||
|
env[actionsRuntimeTokenEnvName] = actionsRuntimeToken
|
||||||
|
os.Setenv(actionsRuntimeTokenEnvName, actionsRuntimeToken)
|
||||||
|
}
|
||||||
|
handler.RegisterJob(actionsRuntimeToken, "__local/__exec")
|
||||||
|
|
||||||
// run the plan
|
// run the plan
|
||||||
config := &runner.Config{
|
config := &runner.Config{
|
||||||
Workdir: execArgs.Workdir(),
|
Workdir: execArgs.Workdir(),
|
||||||
@@ -402,7 +423,7 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command
|
|||||||
ForceRebuild: execArgs.forceRebuild,
|
ForceRebuild: execArgs.forceRebuild,
|
||||||
LogOutput: true,
|
LogOutput: true,
|
||||||
JSONLogger: execArgs.jsonLogger,
|
JSONLogger: execArgs.jsonLogger,
|
||||||
Env: execArgs.LoadEnvs(),
|
Env: env,
|
||||||
Vars: execArgs.LoadVars(),
|
Vars: execArgs.LoadVars(),
|
||||||
Secrets: execArgs.LoadSecrets(),
|
Secrets: execArgs.LoadSecrets(),
|
||||||
InsecureSecrets: execArgs.insecureSecrets,
|
InsecureSecrets: execArgs.insecureSecrets,
|
||||||
|
|||||||
@@ -4,10 +4,12 @@
|
|||||||
package run
|
package run
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"maps"
|
"maps"
|
||||||
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -38,9 +40,10 @@ type Runner struct {
|
|||||||
|
|
||||||
cfg *config.Config
|
cfg *config.Config
|
||||||
|
|
||||||
client client.Client
|
client client.Client
|
||||||
labels labels.Labels
|
labels labels.Labels
|
||||||
envs map[string]string
|
envs map[string]string
|
||||||
|
cacheHandler *artifactcache.Handler
|
||||||
|
|
||||||
runningTasks sync.Map
|
runningTasks sync.Map
|
||||||
runningCount atomic.Int64
|
runningCount atomic.Int64
|
||||||
@@ -55,21 +58,24 @@ func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client)
|
|||||||
}
|
}
|
||||||
envs := make(map[string]string, len(cfg.Runner.Envs))
|
envs := make(map[string]string, len(cfg.Runner.Envs))
|
||||||
maps.Copy(envs, cfg.Runner.Envs)
|
maps.Copy(envs, cfg.Runner.Envs)
|
||||||
|
var cacheHandler *artifactcache.Handler
|
||||||
if cfg.Cache.Enabled == nil || *cfg.Cache.Enabled {
|
if cfg.Cache.Enabled == nil || *cfg.Cache.Enabled {
|
||||||
if cfg.Cache.ExternalServer != "" {
|
if cfg.Cache.ExternalServer != "" {
|
||||||
envs["ACTIONS_CACHE_URL"] = cfg.Cache.ExternalServer
|
envs["ACTIONS_CACHE_URL"] = cfg.Cache.ExternalServer
|
||||||
} else {
|
} else {
|
||||||
cacheHandler, err := artifactcache.StartHandler(
|
handler, err := artifactcache.StartHandler(
|
||||||
cfg.Cache.Dir,
|
cfg.Cache.Dir,
|
||||||
cfg.Cache.Host,
|
cfg.Cache.Host,
|
||||||
cfg.Cache.Port,
|
cfg.Cache.Port,
|
||||||
|
"",
|
||||||
log.StandardLogger().WithField("module", "cache_request"),
|
log.StandardLogger().WithField("module", "cache_request"),
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("cannot init cache server, it will be disabled: %v", err)
|
log.Errorf("cannot init cache server, it will be disabled: %v", err)
|
||||||
// go on
|
// go on
|
||||||
} else {
|
} else {
|
||||||
envs["ACTIONS_CACHE_URL"] = cacheHandler.ExternalURL() + "/"
|
cacheHandler = handler
|
||||||
|
envs["ACTIONS_CACHE_URL"] = handler.ExternalURL() + "/"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -84,11 +90,12 @@ func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client)
|
|||||||
envs["GITEA_ACTIONS_RUNNER_VERSION"] = ver.Version()
|
envs["GITEA_ACTIONS_RUNNER_VERSION"] = ver.Version()
|
||||||
|
|
||||||
return &Runner{
|
return &Runner{
|
||||||
name: reg.Name,
|
name: reg.Name,
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
client: cli,
|
client: cli,
|
||||||
labels: ls,
|
labels: ls,
|
||||||
envs: envs,
|
envs: envs,
|
||||||
|
cacheHandler: cacheHandler,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -199,6 +206,21 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
|
|||||||
giteaRuntimeToken = preset.Token
|
giteaRuntimeToken = preset.Token
|
||||||
}
|
}
|
||||||
r.envs["ACTIONS_RUNTIME_TOKEN"] = giteaRuntimeToken
|
r.envs["ACTIONS_RUNTIME_TOKEN"] = giteaRuntimeToken
|
||||||
|
// Mask the runtime token so it cannot be echoed in user step output; it is
|
||||||
|
// now also the cache server's bearer credential and leaking it would let
|
||||||
|
// any reader of the log impersonate this job against the cache.
|
||||||
|
if giteaRuntimeToken != "" {
|
||||||
|
task.Secrets["ACTIONS_RUNTIME_TOKEN"] = giteaRuntimeToken
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register this job's runtime token with the local cache server so that
|
||||||
|
// cache requests from the job container can authenticate. The credential
|
||||||
|
// is removed when the task finishes, so a leaked token stops working as
|
||||||
|
// soon as the job ends rather than remaining valid for the runner's
|
||||||
|
// lifetime. Only applies to the embedded cache server; when the operator
|
||||||
|
// points the runner at an external cache via cfg.Cache.ExternalServer, it
|
||||||
|
// is that server's responsibility to authenticate requests.
|
||||||
|
defer r.registerCacheForTask(giteaRuntimeToken, preset.Repository, reporter)()
|
||||||
|
|
||||||
eventJSON, err := json.Marshal(preset.Event)
|
eventJSON, err := json.Marshal(preset.Event)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -278,6 +300,82 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
|
|||||||
return execErr
|
return execErr
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// registerCacheForTask tells the cache server to accept requests authenticated
|
||||||
|
// with the given runtime token for the duration of this task. Returns a
|
||||||
|
// function the caller must invoke (typically via defer) to revoke the
|
||||||
|
// credential when the task finishes.
|
||||||
|
//
|
||||||
|
// Three modes:
|
||||||
|
// - Embedded handler: register in-process via RegisterJob.
|
||||||
|
// - external_server + external_secret: POST to the remote server's
|
||||||
|
// /_internal/register, defer a POST to /_internal/revoke. This is what
|
||||||
|
// enables full per-job auth and repo scoping over the network.
|
||||||
|
// - external_server alone (no secret): no-op revoker. The remote server is
|
||||||
|
// in legacy openMode and ignores the runtime token; trust is at the
|
||||||
|
// network layer.
|
||||||
|
//
|
||||||
|
// Safe with an empty token (older Gitea did not issue one).
|
||||||
|
func (r *Runner) registerCacheForTask(token, repo string, reporter *report.Reporter) func() {
|
||||||
|
if token == "" {
|
||||||
|
return func() {}
|
||||||
|
}
|
||||||
|
if r.cacheHandler != nil {
|
||||||
|
return r.cacheHandler.RegisterJob(token, repo)
|
||||||
|
}
|
||||||
|
if r.cfg.Cache.ExternalServer != "" && r.cfg.Cache.ExternalSecret != "" {
|
||||||
|
return r.registerExternalCacheJob(token, repo, reporter)
|
||||||
|
}
|
||||||
|
return func() {}
|
||||||
|
}
|
||||||
|
|
||||||
|
// registerExternalCacheJob POSTs to the remote cache-server's control-plane.
|
||||||
|
// Failures are logged but not fatal: if registration fails, the cache will
|
||||||
|
// 401 the job's requests — better than failing the whole task for a cache
|
||||||
|
// outage. The warning is mirrored to the job log so users can see why their
|
||||||
|
// cache calls 401, instead of having to read the runner daemon's stderr.
|
||||||
|
func (r *Runner) registerExternalCacheJob(token, repo string, reporter *report.Reporter) func() {
|
||||||
|
base := strings.TrimRight(r.cfg.Cache.ExternalServer, "/")
|
||||||
|
if err := postInternalCache(base+"/_internal/register", r.cfg.Cache.ExternalSecret,
|
||||||
|
map[string]string{"token": token, "repo": repo}); err != nil {
|
||||||
|
log.Warnf("cache external_server register failed (%s): %v", base, err)
|
||||||
|
if reporter != nil {
|
||||||
|
reporter.Logf("::warning::cache external_server register failed (%s): %v — cache requests from this job will be unauthenticated and likely return 401", base, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return func() {
|
||||||
|
if err := postInternalCache(base+"/_internal/revoke", r.cfg.Cache.ExternalSecret,
|
||||||
|
map[string]string{"token": token}); err != nil {
|
||||||
|
log.Warnf("cache external_server revoke failed (%s): %v", base, err)
|
||||||
|
if reporter != nil {
|
||||||
|
reporter.Logf("::warning::cache external_server revoke failed (%s): %v", base, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func postInternalCache(url, secret string, body map[string]string) error {
|
||||||
|
buf, err := json.Marshal(body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(buf))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", "Bearer "+secret)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
client := &http.Client{Timeout: 5 * time.Second}
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if resp.StatusCode/100 != 2 {
|
||||||
|
return fmt.Errorf("status %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (r *Runner) RunningCount() int64 {
|
func (r *Runner) RunningCount() int64 {
|
||||||
return r.runningCount.Load()
|
return r.runningCount.Load()
|
||||||
}
|
}
|
||||||
|
|||||||
239
internal/app/run/runner_cache_test.go
Normal file
239
internal/app/run/runner_cache_test.go
Normal file
@@ -0,0 +1,239 @@
|
|||||||
|
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
package run
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"gitea.com/gitea/act_runner/act/artifactcache"
|
||||||
|
"gitea.com/gitea/act_runner/internal/pkg/config"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func emptyCfg() *config.Config { return &config.Config{} }
|
||||||
|
|
||||||
|
func TestRunner_registerCacheForTask(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := artifactcache.StartHandler(dir, "127.0.0.1", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
|
||||||
|
r := &Runner{cfg: emptyCfg(), cacheHandler: handler}
|
||||||
|
token := "run-token-123"
|
||||||
|
unregister := r.registerCacheForTask(token, "owner/repo", nil)
|
||||||
|
|
||||||
|
base := handler.ExternalURL() + "/_apis/artifactcache"
|
||||||
|
probe := func() int {
|
||||||
|
req, err := http.NewRequest(http.MethodGet, base+"/cache?keys=x&version=v", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+token)
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
return resp.StatusCode
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.NotEqual(t, http.StatusUnauthorized, probe(),
|
||||||
|
"token should be accepted while task is registered")
|
||||||
|
|
||||||
|
unregister()
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, probe(),
|
||||||
|
"token must be rejected after the revoker runs")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunner_registerCacheForTask_NoOps(t *testing.T) {
|
||||||
|
t.Run("nil cacheHandler", func(t *testing.T) {
|
||||||
|
r := &Runner{cfg: emptyCfg()}
|
||||||
|
unregister := r.registerCacheForTask("tok", "owner/repo", nil)
|
||||||
|
require.NotNil(t, unregister)
|
||||||
|
unregister()
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("empty token", func(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := artifactcache.StartHandler(dir, "127.0.0.1", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
|
||||||
|
r := &Runner{cfg: emptyCfg(), cacheHandler: handler}
|
||||||
|
unregister := r.registerCacheForTask("", "owner/repo", nil)
|
||||||
|
require.NotNil(t, unregister)
|
||||||
|
unregister()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Locks in @actions/cache's wire protocol: bearer on reserve/upload/commit
|
||||||
|
// /find, no auth on the signed archiveLocation download.
|
||||||
|
func TestRunner_CacheFullFlow_MatchesToolkit(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||||
|
handler, err := artifactcache.StartHandler(dir, "127.0.0.1", 0, "", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer handler.Close()
|
||||||
|
|
||||||
|
r := &Runner{cfg: emptyCfg(), cacheHandler: handler}
|
||||||
|
token := "full-flow-token"
|
||||||
|
unregister := r.registerCacheForTask(token, "owner/repo", nil)
|
||||||
|
defer unregister()
|
||||||
|
|
||||||
|
base := handler.ExternalURL() + "/_apis/artifactcache"
|
||||||
|
do := func(method, url, contentType, contentRange, body string) *http.Response {
|
||||||
|
req, err := http.NewRequest(method, url, strings.NewReader(body))
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+token)
|
||||||
|
if contentType != "" {
|
||||||
|
req.Header.Set("Content-Type", contentType)
|
||||||
|
}
|
||||||
|
if contentRange != "" {
|
||||||
|
req.Header.Set("Content-Range", contentRange)
|
||||||
|
}
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
|
||||||
|
key := "toolkit-flow"
|
||||||
|
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||||
|
body := `hello-cache-body`
|
||||||
|
|
||||||
|
// reserve
|
||||||
|
resp := do(http.MethodPost, base+"/caches", "application/json", "",
|
||||||
|
fmt.Sprintf(`{"key":"%s","version":"%s","cacheSize":%d}`, key, version, len(body)))
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
var reserved struct {
|
||||||
|
CacheID uint64 `json:"cacheId"`
|
||||||
|
}
|
||||||
|
require.NoError(t, decodeJSON(resp, &reserved))
|
||||||
|
require.NotZero(t, reserved.CacheID)
|
||||||
|
|
||||||
|
// upload
|
||||||
|
resp = do(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID),
|
||||||
|
"application/octet-stream", fmt.Sprintf("bytes 0-%d/*", len(body)-1), body)
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
// commit
|
||||||
|
resp = do(http.MethodPost, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), "", "", "")
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
// find — @actions/cache always sends comma-separated keys here
|
||||||
|
resp = do(http.MethodGet,
|
||||||
|
fmt.Sprintf("%s/cache?keys=%s,fallback&version=%s", base, key, version), "", "", "")
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
var hit struct {
|
||||||
|
ArchiveLocation string `json:"archiveLocation"`
|
||||||
|
CacheKey string `json:"cacheKey"`
|
||||||
|
}
|
||||||
|
require.NoError(t, decodeJSON(resp, &hit))
|
||||||
|
require.Equal(t, key, hit.CacheKey)
|
||||||
|
require.NotEmpty(t, hit.ArchiveLocation)
|
||||||
|
|
||||||
|
// download — toolkit does NOT attach Authorization here; the signature
|
||||||
|
// in the URL must be enough.
|
||||||
|
dl, err := http.Get(hit.ArchiveLocation)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer dl.Body.Close()
|
||||||
|
require.Equal(t, http.StatusOK, dl.StatusCode)
|
||||||
|
got := make([]byte, 64)
|
||||||
|
n, _ := dl.Body.Read(got)
|
||||||
|
assert.Equal(t, body, string(got[:n]))
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeJSON(resp *http.Response, v any) error {
|
||||||
|
defer resp.Body.Close()
|
||||||
|
return json.NewDecoder(resp.Body).Decode(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// End-to-end against a remote cache-server: token unknown → 401, register →
|
||||||
|
// reserve/upload/commit/find/download all OK, revoke → 401 again.
|
||||||
|
func TestRunner_ExternalCacheServer_RegisterRevoke(t *testing.T) {
|
||||||
|
dir := filepath.Join(t.TempDir(), "remote-cache")
|
||||||
|
const secret = "shared-secret-for-tests"
|
||||||
|
remote, err := artifactcache.StartHandler(dir, "127.0.0.1", 0, secret, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer remote.Close()
|
||||||
|
|
||||||
|
r := &Runner{cfg: &config.Config{Cache: config.Cache{
|
||||||
|
ExternalServer: remote.ExternalURL(),
|
||||||
|
ExternalSecret: secret,
|
||||||
|
}}}
|
||||||
|
|
||||||
|
token := "external-task-token"
|
||||||
|
repo := "owner/repoX"
|
||||||
|
base := remote.ExternalURL() + "/_apis/artifactcache"
|
||||||
|
probe := func() int {
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, base+"/cache?keys=k&version=v", nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+token)
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
return resp.StatusCode
|
||||||
|
}
|
||||||
|
|
||||||
|
require.Equal(t, http.StatusUnauthorized, probe(),
|
||||||
|
"token must be unknown to the remote server before registration")
|
||||||
|
|
||||||
|
unregister := r.registerCacheForTask(token, repo, nil)
|
||||||
|
require.NotEqual(t, http.StatusUnauthorized, probe(),
|
||||||
|
"token must be accepted after registerCacheForTask")
|
||||||
|
|
||||||
|
// Full reserve→upload→commit→find→download cycle, identical to what
|
||||||
|
// @actions/cache does, against the remote (external) server.
|
||||||
|
body := []byte("payload-from-task")
|
||||||
|
reserveBody, _ := json.Marshal(&artifactcache.Request{Key: "ext-key", Version: "v", Size: int64(len(body))})
|
||||||
|
req, _ := http.NewRequest(http.MethodPost, base+"/caches", bytes.NewReader(reserveBody))
|
||||||
|
req.Header.Set("Authorization", "Bearer "+token)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
var reserved struct {
|
||||||
|
CacheID uint64 `json:"cacheId"`
|
||||||
|
}
|
||||||
|
require.NoError(t, decodeJSON(resp, &reserved))
|
||||||
|
require.NotZero(t, reserved.CacheID)
|
||||||
|
|
||||||
|
req, _ = http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), bytes.NewReader(body))
|
||||||
|
req.Header.Set("Authorization", "Bearer "+token)
|
||||||
|
req.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/*", len(body)-1))
|
||||||
|
resp, err = http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
|
||||||
|
req, _ = http.NewRequest(http.MethodPost, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+token)
|
||||||
|
resp, err = http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resp.Body.Close()
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
|
||||||
|
req, _ = http.NewRequest(http.MethodGet, base+"/cache?keys=ext-key&version=v", nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+token)
|
||||||
|
resp, err = http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
var hit struct {
|
||||||
|
ArchiveLocation string `json:"archiveLocation"`
|
||||||
|
}
|
||||||
|
require.NoError(t, decodeJSON(resp, &hit))
|
||||||
|
require.NotEmpty(t, hit.ArchiveLocation)
|
||||||
|
|
||||||
|
dl, err := http.Get(hit.ArchiveLocation)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer dl.Body.Close()
|
||||||
|
require.Equal(t, http.StatusOK, dl.StatusCode)
|
||||||
|
|
||||||
|
unregister()
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, probe(),
|
||||||
|
"token must be rejected after the revoker runs")
|
||||||
|
}
|
||||||
@@ -35,7 +35,7 @@ runner:
|
|||||||
# The maximum interval for fetching the job from the Gitea instance.
|
# The maximum interval for fetching the job from the Gitea instance.
|
||||||
# The runner uses exponential backoff when idle, increasing the interval up to this maximum.
|
# The runner uses exponential backoff when idle, increasing the interval up to this maximum.
|
||||||
# Set to 0 or same as fetch_interval to disable backoff.
|
# Set to 0 or same as fetch_interval to disable backoff.
|
||||||
fetch_interval_max: 60s
|
fetch_interval_max: 5s
|
||||||
# The base interval for periodic log flush to the Gitea instance.
|
# The base interval for periodic log flush to the Gitea instance.
|
||||||
# Logs may be sent earlier if the buffer reaches log_report_batch_size
|
# Logs may be sent earlier if the buffer reaches log_report_batch_size
|
||||||
# or if log_report_max_latency expires after the first buffered row.
|
# or if log_report_max_latency expires after the first buffered row.
|
||||||
@@ -81,7 +81,12 @@ cache:
|
|||||||
# The external cache server URL. Valid only when enable is true.
|
# The external cache server URL. Valid only when enable is true.
|
||||||
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
|
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
|
||||||
# The URL should generally end with "/".
|
# The URL should generally end with "/".
|
||||||
|
# Requires external_secret below to be set to the same value on both this runner and the cache-server.
|
||||||
external_server: ""
|
external_server: ""
|
||||||
|
# Shared secret between this runner and the external `act_runner cache-server`. Required when external_server
|
||||||
|
# (or `act_runner cache-server`) is in use: the runner pre-registers each job's ACTIONS_RUNTIME_TOKEN with the
|
||||||
|
# cache-server, and the cache-server enforces bearer auth + per-repo cache isolation.
|
||||||
|
external_secret: ""
|
||||||
|
|
||||||
container:
|
container:
|
||||||
# Specifies the network to which the container will connect.
|
# Specifies the network to which the container will connect.
|
||||||
|
|||||||
@@ -4,6 +4,7 @@
|
|||||||
package config
|
package config
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"maps"
|
"maps"
|
||||||
"os"
|
"os"
|
||||||
@@ -47,6 +48,7 @@ type Cache struct {
|
|||||||
Host string `yaml:"host"` // Host specifies the caching host.
|
Host string `yaml:"host"` // Host specifies the caching host.
|
||||||
Port uint16 `yaml:"port"` // Port specifies the caching port.
|
Port uint16 `yaml:"port"` // Port specifies the caching port.
|
||||||
ExternalServer string `yaml:"external_server"` // ExternalServer specifies the URL of external cache server
|
ExternalServer string `yaml:"external_server"` // ExternalServer specifies the URL of external cache server
|
||||||
|
ExternalSecret string `yaml:"external_secret"` // ExternalSecret is a shared secret between this runner and an external act_runner cache-server, enabling per-job ACTIONS_RUNTIME_TOKEN authentication and repo scoping over the network. Leave empty to keep the legacy unauthenticated behavior.
|
||||||
}
|
}
|
||||||
|
|
||||||
// Container represents the configuration for the container.
|
// Container represents the configuration for the container.
|
||||||
@@ -135,6 +137,9 @@ func LoadDefault(file string) (*Config, error) {
|
|||||||
home, _ := os.UserHomeDir()
|
home, _ := os.UserHomeDir()
|
||||||
cfg.Cache.Dir = filepath.Join(home, ".cache", "actcache")
|
cfg.Cache.Dir = filepath.Join(home, ".cache", "actcache")
|
||||||
}
|
}
|
||||||
|
if cfg.Cache.ExternalServer != "" && cfg.Cache.ExternalSecret == "" {
|
||||||
|
return nil, errors.New("cache.external_server is set but cache.external_secret is empty; configure the same external_secret on this runner and the act_runner cache-server")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if cfg.Container.WorkdirParent == "" {
|
if cfg.Container.WorkdirParent == "" {
|
||||||
cfg.Container.WorkdirParent = "workspace"
|
cfg.Container.WorkdirParent = "workspace"
|
||||||
@@ -150,7 +155,7 @@ func LoadDefault(file string) (*Config, error) {
|
|||||||
cfg.Runner.FetchInterval = 2 * time.Second
|
cfg.Runner.FetchInterval = 2 * time.Second
|
||||||
}
|
}
|
||||||
if cfg.Runner.FetchIntervalMax <= 0 {
|
if cfg.Runner.FetchIntervalMax <= 0 {
|
||||||
cfg.Runner.FetchIntervalMax = 60 * time.Second
|
cfg.Runner.FetchIntervalMax = 5 * time.Second
|
||||||
}
|
}
|
||||||
if cfg.Runner.LogReportInterval <= 0 {
|
if cfg.Runner.LogReportInterval <= 0 {
|
||||||
cfg.Runner.LogReportInterval = 5 * time.Second
|
cfg.Runner.LogReportInterval = 5 * time.Second
|
||||||
|
|||||||
41
internal/pkg/config/config_test.go
Normal file
41
internal/pkg/config/config_test.go
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
// Copyright 2026 The Gitea Authors. All rights reserved.
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestLoadDefault_RejectsExternalServerWithoutSecret(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
path := filepath.Join(dir, "config.yaml")
|
||||||
|
require.NoError(t, os.WriteFile(path, []byte(`
|
||||||
|
cache:
|
||||||
|
enabled: true
|
||||||
|
external_server: "http://cache.invalid/"
|
||||||
|
`), 0o600))
|
||||||
|
|
||||||
|
_, err := LoadDefault(path)
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "external_secret")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadDefault_AcceptsExternalServerWithSecret(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
path := filepath.Join(dir, "config.yaml")
|
||||||
|
require.NoError(t, os.WriteFile(path, []byte(`
|
||||||
|
cache:
|
||||||
|
enabled: true
|
||||||
|
external_server: "http://cache.invalid/"
|
||||||
|
external_secret: "shh"
|
||||||
|
`), 0o600))
|
||||||
|
|
||||||
|
_, err := LoadDefault(path)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user