// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
// GitHub Actions Artifacts V4 API Simple Description
//
// 1. Upload artifact
// 1.1. CreateArtifact
// Post: /twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact
// Request:
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name": "test",
// "version": 4
// }
// Response:
// {
// "ok": true,
// "signedUploadUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75"
// }
// 1.2. Upload Zip Content to Blobstorage (unauthenticated request)
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=block
// 1.3. Continue Upload Zip Content to Blobstorage (unauthenticated request), repeat until everything is uploaded
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=appendBlock
// 1.4. BlockList xml payload to Blobstorage (unauthenticated request)
// Files of about 800MB are uploaded in parallel and/or out of order; this blocklist is needed to ensure the correct order
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=blockList
// Request
// <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
// <BlockList>
// <Latest>blockId1</Latest>
// <Latest>blockId2</Latest>
// </BlockList>
// 1.5. FinalizeArtifact
// Post: /twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact
// Request
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name": "test",
// "size": "2097",
// "hash": "sha256:b6325614d5649338b87215d9536b3c0477729b8638994c74cdefacb020a2cad4"
// }
// Response
// {
// "ok": true,
// "artifactId": "4"
// }
// 2. Download artifact
// 2.1. ListArtifacts and optionally filter by artifact exact name or id
// Post: /twirp/github.actions.results.api.v1.ArtifactService/ListArtifacts
// Request
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name_filter": "test"
// }
// Response
// {
// "artifacts": [
// {
// "workflowRunBackendId": "21",
// "workflowJobRunBackendId": "49",
// "databaseId": "4",
// "name": "test",
// "size": "2093",
// "createdAt": "2024-01-23T00:13:28Z"
// }
// ]
// }
// 2.2. GetSignedArtifactURL get the URL to download the artifact zip file of a specific artifact
// Post: /twirp/github.actions.results.api.v1.ArtifactService/GetSignedArtifactURL
// Request
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name": "test"
// }
// Response
// {
// "signedUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76"
// }
// 2.3. Download Zip from Blobstorage (unauthenticated request)
// GET: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76
import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/models/db"
quota_model "code.gitea.io/gitea/models/quota"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/web"
"code.gitea.io/gitea/routers/common"
"code.gitea.io/gitea/services/context"
"google.golang.org/protobuf/encoding/protojson"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/timestamppb"
)
const (
	// ArtifactV4RouteBase is the URL prefix of the Actions Artifacts V4 twirp service.
	ArtifactV4RouteBase = "/twirp/github.actions.results.api.v1.ArtifactService"
	// ArtifactV4ContentEncoding is the content encoding recorded for V4 artifacts (zip archives).
	ArtifactV4ContentEncoding = "application/zip"
)
// artifactV4Routes bundles the handlers of the Actions Artifacts V4 API.
// prefix is the mount point of the route tree and fs is the object
// storage backend holding artifact blobs and temporary upload chunks.
type artifactV4Routes struct {
	prefix string
	fs     storage.ObjectStorage
}
// ArtifactV4Contexter builds the middleware used by the unauthenticated
// blob-transfer endpoints (UploadArtifact/DownloadArtifact): it wraps the
// request in an ArtifactContext without requiring runner authentication,
// since those endpoints are guarded by URL signatures instead.
func ArtifactV4Contexter() func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, request *http.Request) {
			baseCtx, cleanup := context.NewBaseContext(w, request)
			defer cleanup()

			artifactCtx := &ArtifactContext{Base: baseCtx}
			artifactCtx.AppendContextValue(artifactContextKey, artifactCtx)

			next.ServeHTTP(artifactCtx.Resp, artifactCtx.Req)
		})
	}
}
// ArtifactsV4Routes registers every Artifacts V4 endpoint under prefix
// and returns the route tree. The twirp RPCs require runner
// authentication (ArtifactContexter), while the raw blob-transfer
// endpoints rely on pre-signed URLs (ArtifactV4Contexter).
func ArtifactsV4Routes(prefix string) *web.Route {
	routes := web.NewRoute()

	handlers := artifactV4Routes{
		prefix: prefix,
		fs:     storage.ActionsArtifacts,
	}

	routes.Group("", func() {
		routes.Post("CreateArtifact", handlers.createArtifact)
		routes.Post("FinalizeArtifact", handlers.finalizeArtifact)
		routes.Post("ListArtifacts", handlers.listArtifacts)
		routes.Post("GetSignedArtifactURL", handlers.getSignedArtifactURL)
		routes.Post("DeleteArtifact", handlers.deleteArtifact)
	}, ArtifactContexter())
	routes.Group("", func() {
		routes.Put("UploadArtifact", handlers.uploadArtifact)
		routes.Get("DownloadArtifact", handlers.downloadArtifact)
	}, ArtifactV4Contexter())

	return routes
}
2024-09-22 14:01:09 +03:00
func ( r artifactV4Routes ) buildSignature ( endp , expires , artifactName string , taskID , artifactID int64 ) [ ] byte {
2024-03-02 12:12:17 +03:00
mac := hmac . New ( sha256 . New , setting . GetGeneralTokenSigningSecret ( ) )
mac . Write ( [ ] byte ( endp ) )
mac . Write ( [ ] byte ( expires ) )
mac . Write ( [ ] byte ( artifactName ) )
mac . Write ( [ ] byte ( fmt . Sprint ( taskID ) ) )
2024-09-22 14:01:09 +03:00
mac . Write ( [ ] byte ( fmt . Sprint ( artifactID ) ) )
2024-03-02 12:12:17 +03:00
return mac . Sum ( nil )
}
2024-09-22 14:01:09 +03:00
func ( r artifactV4Routes ) buildArtifactURL ( endp , artifactName string , taskID , artifactID int64 ) string {
2024-03-02 12:12:17 +03:00
expires := time . Now ( ) . Add ( 60 * time . Minute ) . Format ( "2006-01-02 15:04:05.999999999 -0700 MST" )
uploadURL := strings . TrimSuffix ( setting . AppURL , "/" ) + strings . TrimSuffix ( r . prefix , "/" ) +
2024-09-22 14:01:09 +03:00
"/" + endp + "?sig=" + base64 . URLEncoding . EncodeToString ( r . buildSignature ( endp , expires , artifactName , taskID , artifactID ) ) + "&expires=" + url . QueryEscape ( expires ) + "&artifactName=" + url . QueryEscape ( artifactName ) + "&taskID=" + fmt . Sprint ( taskID ) + "&artifactID=" + fmt . Sprint ( artifactID )
2024-03-02 12:12:17 +03:00
return uploadURL
}
func ( r artifactV4Routes ) verifySignature ( ctx * ArtifactContext , endp string ) ( * actions . ActionTask , string , bool ) {
rawTaskID := ctx . Req . URL . Query ( ) . Get ( "taskID" )
2024-09-22 14:01:09 +03:00
rawArtifactID := ctx . Req . URL . Query ( ) . Get ( "artifactID" )
2024-03-02 12:12:17 +03:00
sig := ctx . Req . URL . Query ( ) . Get ( "sig" )
expires := ctx . Req . URL . Query ( ) . Get ( "expires" )
artifactName := ctx . Req . URL . Query ( ) . Get ( "artifactName" )
dsig , _ := base64 . URLEncoding . DecodeString ( sig )
taskID , _ := strconv . ParseInt ( rawTaskID , 10 , 64 )
2024-09-22 14:01:09 +03:00
artifactID , _ := strconv . ParseInt ( rawArtifactID , 10 , 64 )
2024-03-02 12:12:17 +03:00
2024-09-22 14:01:09 +03:00
expecedsig := r . buildSignature ( endp , expires , artifactName , taskID , artifactID )
2024-03-02 12:12:17 +03:00
if ! hmac . Equal ( dsig , expecedsig ) {
log . Error ( "Error unauthorized" )
ctx . Error ( http . StatusUnauthorized , "Error unauthorized" )
return nil , "" , false
}
t , err := time . Parse ( "2006-01-02 15:04:05.999999999 -0700 MST" , expires )
if err != nil || t . Before ( time . Now ( ) ) {
log . Error ( "Error link expired" )
ctx . Error ( http . StatusUnauthorized , "Error link expired" )
return nil , "" , false
}
task , err := actions . GetTaskByID ( ctx , taskID )
if err != nil {
log . Error ( "Error runner api getting task by ID: %v" , err )
ctx . Error ( http . StatusInternalServerError , "Error runner api getting task by ID" )
return nil , "" , false
}
if task . Status != actions . StatusRunning {
log . Error ( "Error runner api getting task: task is not running" )
ctx . Error ( http . StatusInternalServerError , "Error runner api getting task: task is not running" )
return nil , "" , false
}
if err := task . LoadJob ( ctx ) ; err != nil {
log . Error ( "Error runner api getting job: %v" , err )
ctx . Error ( http . StatusInternalServerError , "Error runner api getting job" )
return nil , "" , false
}
return task , artifactName , true
}
// getArtifactByName fetches the V4 artifact of a run that matches name
// exactly. V4 artifacts are recognized by artifact_path == name + ".zip"
// and the zip content encoding; util.ErrNotExist is returned when no row
// matches.
func (r *artifactV4Routes) getArtifactByName(ctx *ArtifactContext, runID int64, name string) (*actions.ActionArtifact, error) {
	var art actions.ActionArtifact
	has, err := db.GetEngine(ctx).
		Where("run_id = ? AND artifact_name = ? AND artifact_path = ? AND content_encoding = ?",
			runID, name, name+".zip", ArtifactV4ContentEncoding).
		Get(&art)
	switch {
	case err != nil:
		return nil, err
	case !has:
		return nil, util.ErrNotExist
	}
	return &art, nil
}
// parseProtbufBody reads the entire request body and decodes it as
// protojson into req. On either read or decode failure it answers with a
// 500 and returns false.
func (r *artifactV4Routes) parseProtbufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) bool {
	body, err := io.ReadAll(ctx.Req.Body)
	if err == nil {
		err = protojson.Unmarshal(body, req)
	}
	if err != nil {
		log.Error("Error decode request body: %v", err)
		ctx.Error(http.StatusInternalServerError, "Error decode request body")
		return false
	}
	return true
}
// sendProtbufBody serializes req with protojson and writes it as the
// 200 OK response. Encoding failures are reported as a 500; write errors
// are intentionally ignored (the response is already committed).
func (r *artifactV4Routes) sendProtbufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) {
	payload, err := protojson.Marshal(req)
	if err != nil {
		log.Error("Error encode response body: %v", err)
		ctx.Error(http.StatusInternalServerError, "Error encode response body")
		return
	}
	ctx.Resp.Header().Set("Content-Type", "application/json;charset=utf-8")
	ctx.Resp.WriteHeader(http.StatusOK)
	_, _ = ctx.Resp.Write(payload)
}
func ( r * artifactV4Routes ) createArtifact ( ctx * ArtifactContext ) {
var req CreateArtifactRequest
if ok := r . parseProtbufBody ( ctx , & req ) ; ! ok {
return
}
_ , _ , ok := validateRunIDV4 ( ctx , req . WorkflowRunBackendId )
if ! ok {
return
}
artifactName := req . Name
rententionDays := setting . Actions . ArtifactRetentionDays
if req . ExpiresAt != nil {
rententionDays = int64 ( time . Until ( req . ExpiresAt . AsTime ( ) ) . Hours ( ) / 24 )
}
// create or get artifact with name and path
artifact , err := actions . CreateArtifact ( ctx , ctx . ActionTask , artifactName , artifactName + ".zip" , rententionDays )
if err != nil {
log . Error ( "Error create or get artifact: %v" , err )
ctx . Error ( http . StatusInternalServerError , "Error create or get artifact" )
return
}
artifact . ContentEncoding = ArtifactV4ContentEncoding
2024-09-22 14:01:09 +03:00
artifact . FileSize = 0
artifact . FileCompressedSize = 0
2024-03-02 12:12:17 +03:00
if err := actions . UpdateArtifactByID ( ctx , artifact . ID , artifact ) ; err != nil {
log . Error ( "Error UpdateArtifactByID: %v" , err )
ctx . Error ( http . StatusInternalServerError , "Error UpdateArtifactByID" )
return
}
respData := CreateArtifactResponse {
Ok : true ,
2024-09-22 14:01:09 +03:00
SignedUploadUrl : r . buildArtifactURL ( "UploadArtifact" , artifactName , ctx . ActionTask . ID , artifact . ID ) ,
2024-03-02 12:12:17 +03:00
}
r . sendProtbufBody ( ctx , & respData )
}
func ( r * artifactV4Routes ) uploadArtifact ( ctx * ArtifactContext ) {
task , artifactName , ok := r . verifySignature ( ctx , "UploadArtifact" )
if ! ok {
return
}
feat(quota): Quota enforcement
The previous commit laid out the foundation of the quota engine, this
one builds on top of it, and implements the actual enforcement.
Enforcement happens at the route decoration level, whenever possible. In
case of the API, when over quota, a 413 error is returned, with an
appropriate JSON payload. In case of web routes, a 413 HTML page is
rendered with similar information.
This implementation is for a **soft quota**: quota usage is checked
before an operation is to be performed, and the operation is *only*
denied if the user is already over quota. This makes it possible to go
over quota, but has the significant advantage of being practically
implementable within the current Forgejo architecture.
The goal of enforcement is to deny actions that can make the user go
over quota, and allow the rest. As such, deleting things should - in
almost all cases - be possible. A prime exemption is deleting files via
the web ui: that creates a new commit, which in turn increases repo
size, thus, is denied if the user is over quota.
Limitations
-----------
Because we generally work at a route decorator level, and rarely
look *into* the operation itself, `size:repos:public` and
`size:repos:private` are not enforced at this level, the engine enforces
against `size:repos:all`. This will be improved in the future.
AGit does not play very well with this system, because AGit PRs count
toward the repo they're opened against, while in the GitHub-style fork +
pull model, it counts against the fork. This too, can be improved in the
future.
There's very little done on the UI side to guard against going over
quota. What this patch implements, is enforcement, not prevention. The
UI will still let you *try* operations that *will* result in a denial.
Signed-off-by: Gergely Nagy <forgejo@gergo.csillger.hu>
2024-07-06 11:30:16 +03:00
// check the owner's quota
ok , err := quota_model . EvaluateForUser ( ctx , task . OwnerID , quota_model . LimitSubjectSizeAssetsArtifacts )
if err != nil {
log . Error ( "quota_model.EvaluateForUser: %v" , err )
ctx . Error ( http . StatusInternalServerError , "Error checking quota" )
return
}
if ! ok {
ctx . Error ( http . StatusRequestEntityTooLarge , "Quota exceeded" )
return
}
2024-03-02 12:12:17 +03:00
comp := ctx . Req . URL . Query ( ) . Get ( "comp" )
switch comp {
case "block" , "appendBlock" :
2024-09-22 14:01:09 +03:00
blockid := ctx . Req . URL . Query ( ) . Get ( "blockid" )
if blockid == "" {
// get artifact by name
artifact , err := r . getArtifactByName ( ctx , task . Job . RunID , artifactName )
if err != nil {
log . Error ( "Error artifact not found: %v" , err )
ctx . Error ( http . StatusNotFound , "Error artifact not found" )
return
}
_ , err = appendUploadChunk ( r . fs , ctx , artifact , artifact . FileSize , ctx . Req . ContentLength , artifact . RunID )
if err != nil {
log . Error ( "Error runner api getting task: task is not running" )
ctx . Error ( http . StatusInternalServerError , "Error runner api getting task: task is not running" )
return
}
artifact . FileCompressedSize += ctx . Req . ContentLength
artifact . FileSize += ctx . Req . ContentLength
if err := actions . UpdateArtifactByID ( ctx , artifact . ID , artifact ) ; err != nil {
log . Error ( "Error UpdateArtifactByID: %v" , err )
ctx . Error ( http . StatusInternalServerError , "Error UpdateArtifactByID" )
return
}
} else {
_ , err := r . fs . Save ( fmt . Sprintf ( "tmpv4%d/block-%d-%d-%s" , task . Job . RunID , task . Job . RunID , ctx . Req . ContentLength , base64 . URLEncoding . EncodeToString ( [ ] byte ( blockid ) ) ) , ctx . Req . Body , - 1 )
if err != nil {
log . Error ( "Error runner api getting task: task is not running" )
ctx . Error ( http . StatusInternalServerError , "Error runner api getting task: task is not running" )
return
}
2024-03-02 12:12:17 +03:00
}
2024-09-22 14:01:09 +03:00
ctx . JSON ( http . StatusCreated , "appended" )
case "blocklist" :
rawArtifactID := ctx . Req . URL . Query ( ) . Get ( "artifactID" )
artifactID , _ := strconv . ParseInt ( rawArtifactID , 10 , 64 )
_ , err := r . fs . Save ( fmt . Sprintf ( "tmpv4%d/%d-%d-blocklist" , task . Job . RunID , task . Job . RunID , artifactID ) , ctx . Req . Body , - 1 )
2024-03-02 12:12:17 +03:00
if err != nil {
log . Error ( "Error runner api getting task: task is not running" )
ctx . Error ( http . StatusInternalServerError , "Error runner api getting task: task is not running" )
return
}
ctx . JSON ( http . StatusCreated , "created" )
}
}
2024-09-22 14:01:09 +03:00
type BlockList struct {
Latest [ ] string ` xml:"Latest" `
}
type Latest struct {
Value string ` xml:",chardata" `
}
// readBlockList loads and decodes the temporary blocklist stored by
// uploadArtifact for the given run/artifact pair. The temporary object
// is deleted afterwards regardless of decoding success; a failed delete
// is only logged.
//
// Fixed: the opened storage object was never closed, leaking the file
// handle; it is now closed before the temp file is deleted.
func (r *artifactV4Routes) readBlockList(runID, artifactID int64) (*BlockList, error) {
	blockListName := fmt.Sprintf("tmpv4%d/%d-%d-blocklist", runID, runID, artifactID)
	s, err := r.fs.Open(blockListName)
	if err != nil {
		return nil, err
	}

	blockList := &BlockList{}
	decodeErr := xml.NewDecoder(s).Decode(blockList)

	// Release the handle before deleting; some backends cannot remove an
	// object that is still open.
	_ = s.Close()

	if delerr := r.fs.Delete(blockListName); delerr != nil {
		log.Warn("Failed to delete blockList %s: %v", blockListName, delerr)
	}
	return blockList, decodeErr
}
2024-03-02 12:12:17 +03:00
func ( r * artifactV4Routes ) finalizeArtifact ( ctx * ArtifactContext ) {
var req FinalizeArtifactRequest
if ok := r . parseProtbufBody ( ctx , & req ) ; ! ok {
return
}
_ , runID , ok := validateRunIDV4 ( ctx , req . WorkflowRunBackendId )
if ! ok {
return
}
// get artifact by name
artifact , err := r . getArtifactByName ( ctx , runID , req . Name )
if err != nil {
log . Error ( "Error artifact not found: %v" , err )
ctx . Error ( http . StatusNotFound , "Error artifact not found" )
return
}
2024-09-22 14:01:09 +03:00
var chunks [ ] * chunkFileItem
blockList , err := r . readBlockList ( runID , artifact . ID )
2024-03-02 12:12:17 +03:00
if err != nil {
2024-09-22 14:01:09 +03:00
log . Warn ( "Failed to read BlockList, fallback to old behavior: %v" , err )
chunkMap , err := listChunksByRunID ( r . fs , runID )
if err != nil {
log . Error ( "Error merge chunks: %v" , err )
ctx . Error ( http . StatusInternalServerError , "Error merge chunks" )
return
}
chunks , ok = chunkMap [ artifact . ID ]
if ! ok {
log . Error ( "Error merge chunks" )
ctx . Error ( http . StatusInternalServerError , "Error merge chunks" )
return
}
} else {
chunks , err = listChunksByRunIDV4 ( r . fs , runID , artifact . ID , blockList )
if err != nil {
log . Error ( "Error merge chunks: %v" , err )
ctx . Error ( http . StatusInternalServerError , "Error merge chunks" )
return
}
artifact . FileSize = chunks [ len ( chunks ) - 1 ] . End + 1
artifact . FileCompressedSize = chunks [ len ( chunks ) - 1 ] . End + 1
2024-03-02 12:12:17 +03:00
}
2024-09-22 14:01:09 +03:00
2024-03-02 12:12:17 +03:00
checksum := ""
if req . Hash != nil {
checksum = req . Hash . Value
}
if err := mergeChunksForArtifact ( ctx , chunks , r . fs , artifact , checksum ) ; err != nil {
2024-03-19 18:00:48 +03:00
log . Warn ( "Error merge chunks: %v" , err )
2024-03-02 12:12:17 +03:00
ctx . Error ( http . StatusInternalServerError , "Error merge chunks" )
return
}
respData := FinalizeArtifactResponse {
Ok : true ,
ArtifactId : artifact . ID ,
}
r . sendProtbufBody ( ctx , & respData )
}
// listArtifacts implements the ListArtifacts RPC. It returns at most one
// entry per artifact name, optionally narrowed by the exact id or name
// filters. Only well-formed V4 artifacts (path == name + ".zip", zip
// content encoding) are listed; duplicate names invalidate the entry.
func (r *artifactV4Routes) listArtifacts(ctx *ArtifactContext) {
	var req ListArtifactsRequest
	if ok := r.parseProtbufBody(ctx, &req); !ok {
		return
	}
	_, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
	if !ok {
		return
	}

	artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{RunID: runID})
	if err != nil {
		log.Error("Error getting artifacts: %v", err)
		ctx.Error(http.StatusInternalServerError, err.Error())
		return
	}
	if len(artifacts) == 0 {
		log.Debug("[artifact] handleListArtifacts, no artifacts")
		ctx.Error(http.StatusNotFound)
		return
	}

	table := map[string]*ListArtifactsResponse_MonolithArtifact{}
	for _, artifact := range artifacts {
		_, seen := table[artifact.ArtifactName]
		rejected := seen ||
			(req.IdFilter != nil && artifact.ID != req.IdFilter.Value) ||
			(req.NameFilter != nil && artifact.ArtifactName != req.NameFilter.Value) ||
			artifact.ArtifactName+".zip" != artifact.ArtifactPath ||
			artifact.ContentEncoding != ArtifactV4ContentEncoding
		if rejected {
			// Record the name with a nil entry so it stays excluded even if
			// further rows carry the same name.
			table[artifact.ArtifactName] = nil
			continue
		}
		table[artifact.ArtifactName] = &ListArtifactsResponse_MonolithArtifact{
			Name:                    artifact.ArtifactName,
			CreatedAt:               timestamppb.New(artifact.CreatedUnix.AsTime()),
			DatabaseId:              artifact.ID,
			WorkflowRunBackendId:    req.WorkflowRunBackendId,
			WorkflowJobRunBackendId: req.WorkflowJobRunBackendId,
			Size:                    artifact.FileSize,
		}
	}

	list := []*ListArtifactsResponse_MonolithArtifact{}
	for _, entry := range table {
		if entry != nil {
			list = append(list, entry)
		}
	}

	respData := ListArtifactsResponse{
		Artifacts: list,
	}
	r.sendProtbufBody(ctx, &respData)
}
func ( r * artifactV4Routes ) getSignedArtifactURL ( ctx * ArtifactContext ) {
var req GetSignedArtifactURLRequest
if ok := r . parseProtbufBody ( ctx , & req ) ; ! ok {
return
}
_ , runID , ok := validateRunIDV4 ( ctx , req . WorkflowRunBackendId )
if ! ok {
return
}
artifactName := req . Name
// get artifact by name
artifact , err := r . getArtifactByName ( ctx , runID , artifactName )
if err != nil {
log . Error ( "Error artifact not found: %v" , err )
ctx . Error ( http . StatusNotFound , "Error artifact not found" )
return
}
respData := GetSignedArtifactURLResponse { }
if setting . Actions . ArtifactStorage . MinioConfig . ServeDirect {
Fix `missing signature key` error when pulling Docker images with `SERVE_DIRECT` enabled (#32365)
Fix #28121
I did some tests and found that the `missing signature key` error is
caused by an incorrect `Content-Type` header. Gitea correctly sets the
`Content-Type` header when serving files.
https://github.com/go-gitea/gitea/blob/348d1d0f322ca57c459acd902f54821d687ca804/routers/api/packages/container/container.go#L712-L717
However, when `SERVE_DIRECT` is enabled, the `Content-Type` header may
be set to an incorrect value by the storage service. To fix this issue,
we can use query parameters to override response header values.
https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
<img width="600px"
src="https://github.com/user-attachments/assets/f2ff90f0-f1df-46f9-9680-b8120222c555"
/>
In this PR, I introduced a new parameter to the `URL` method to support
additional parameters.
```
URL(path, name string, reqParams url.Values) (*url.URL, error)
```
---
Most S3-like services support specifying the content type when storing
objects. However, Gitea always use `application/octet-stream`.
Therefore, I believe we also need to improve the `Save` method to
support storing objects with the correct content type.
https://github.com/go-gitea/gitea/blob/b7fb20e73e63b8edc9b90c52073e248bef428fcc/modules/storage/minio.go#L214-L221
(cherry picked from commit 0690cb076bf63f71988a709f62a9c04660b51a4f)
Conflicts:
- modules/storage/azureblob.go
Dropped the change, as we do not support Azure blob storage.
- modules/storage/helper.go
Resolved by adjusting their `discardStorage` to our
`DiscardStorage`
- routers/api/actions/artifacts.go
routers/api/actions/artifactsv4.go
routers/web/repo/actions/view.go
routers/web/repo/download.go
Resolved the conflicts by manually adding the new `nil`
parameter to the `storage.Attachments.URL()` calls.
Originally conflicted due to differences in the if expression
above these calls.
2024-10-31 18:28:25 +03:00
u , err := storage . ActionsArtifacts . URL ( artifact . StoragePath , artifact . ArtifactPath , nil )
2024-03-02 12:12:17 +03:00
if u != nil && err == nil {
respData . SignedUrl = u . String ( )
}
}
if respData . SignedUrl == "" {
2024-09-22 14:01:09 +03:00
respData . SignedUrl = r . buildArtifactURL ( "DownloadArtifact" , artifactName , ctx . ActionTask . ID , artifact . ID )
2024-03-02 12:12:17 +03:00
}
r . sendProtbufBody ( ctx , & respData )
}
func ( r * artifactV4Routes ) downloadArtifact ( ctx * ArtifactContext ) {
task , artifactName , ok := r . verifySignature ( ctx , "DownloadArtifact" )
if ! ok {
return
}
// get artifact by name
artifact , err := r . getArtifactByName ( ctx , task . Job . RunID , artifactName )
if err != nil {
log . Error ( "Error artifact not found: %v" , err )
ctx . Error ( http . StatusNotFound , "Error artifact not found" )
return
}
2024-07-10 08:28:01 +03:00
file , err := r . fs . Open ( artifact . StoragePath )
if err != nil {
log . Error ( "Error artifact could not be opened: %v" , err )
ctx . Error ( http . StatusInternalServerError , err . Error ( ) )
return
}
2024-03-02 12:12:17 +03:00
2024-07-10 08:28:01 +03:00
common . ServeContentByReadSeeker ( ctx . Base , artifactName , util . ToPointer ( artifact . UpdatedUnix . AsTime ( ) ) , file )
2024-03-02 12:12:17 +03:00
}
// deleteArtifact implements the DeleteArtifact RPC: it marks the run's
// artifacts with the given name for deletion and reports the database id
// of the matched artifact.
func (r *artifactV4Routes) deleteArtifact(ctx *ArtifactContext) {
	var req DeleteArtifactRequest
	if ok := r.parseProtbufBody(ctx, &req); !ok {
		return
	}
	_, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
	if !ok {
		return
	}

	// get artifact by name
	artifact, err := r.getArtifactByName(ctx, runID, req.Name)
	if err != nil {
		log.Error("Error artifact not found: %v", err)
		ctx.Error(http.StatusNotFound, "Error artifact not found")
		return
	}

	if err := actions.SetArtifactNeedDelete(ctx, runID, req.Name); err != nil {
		log.Error("Error deleting artifacts: %v", err)
		ctx.Error(http.StatusInternalServerError, err.Error())
		return
	}

	respData := DeleteArtifactResponse{
		Ok:         true,
		ArtifactId: artifact.ID,
	}
	r.sendProtbufBody(ctx, &respData)
}