feat: support grouping by any path for arch package (#4903)
The previous Arch package grouping was not well suited to complex or multi-architecture environments. This change:

- supports grouping by any path;
- adds support for packages compressed with `xz`;
- fixes the clean-up rules.

## Draft release notes

- Features
  - [PR](https://codeberg.org/forgejo/forgejo/pulls/4903): support grouping by any path for arch package

Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/4903
Reviewed-by: Earl Warren <earl-warren@noreply.codeberg.org>
Co-authored-by: Exploding Dragon <explodingfkl@gmail.com>
Co-committed-by: Exploding Dragon <explodingfkl@gmail.com>
Parent: a4da672134
Commit: 87d50eca87
7 changed files with 309 additions and 218 deletions
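The grouped layout can be exercised directly over the package API. The sketch below is a minimal client-side illustration, not part of the commit: the host, owner, credentials and file name are placeholders, and the `/api/packages/{owner}/arch/{group}` upload path is inferred from the routes and integration tests in this diff. Downloads then use `{group}/{arch}/{file}` and deletes `{group}/{package}/{version}`, as the new wildcard route further down shows.

```go
// Hypothetical upload of an Arch package to an arbitrary group path.
// Everything below (host, owner, credentials, file name) is a placeholder.
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	// The group is simply any extra path below .../arch/, e.g. "archlinux/extra".
	const url = "https://forgejo.example.com/api/packages/owner/arch/archlinux/extra"

	f, err := os.Open("test-1.0.0-1-x86_64.pkg.tar.zst")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	req, err := http.NewRequest(http.MethodPut, url, f)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("owner", "password-or-token")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 201 Created on success, 409 Conflict if the file already exists
}
```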
@@ -41,11 +41,15 @@ var (
 	reVer    = regexp.MustCompile(`^[a-zA-Z0-9:_.+]+-+[0-9]+$`)
 	reOptDep = regexp.MustCompile(`^[a-zA-Z0-9@._+-]+$|^[a-zA-Z0-9@._+-]+(:.*)`)
 	rePkgVer = regexp.MustCompile(`^[a-zA-Z0-9@._+-]+$|^[a-zA-Z0-9@._+-]+(>.*)|^[a-zA-Z0-9@._+-]+(<.*)|^[a-zA-Z0-9@._+-]+(=.*)`)
+
+	magicZSTD = []byte{0x28, 0xB5, 0x2F, 0xFD}
+	magicXZ   = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A}
 )
 
 type Package struct {
 	Name            string `json:"name"`
 	Version         string `json:"version"` // Includes version, release and epoch
+	CompressType    string `json:"compress_type"`
 	VersionMetadata VersionMetadata
 	FileMetadata    FileMetadata
 }

@@ -89,18 +93,38 @@ func ParsePackage(r *packages.HashedBuffer) (*Package, error) {
 	if err != nil {
 		return nil, err
 	}
-	zstd := archiver.NewTarZstd()
-	err = zstd.Open(r, 0)
+	header := make([]byte, 5)
+	_, err = r.Read(header)
 	if err != nil {
 		return nil, err
 	}
-	defer zstd.Close()
+	_, err = r.Seek(0, io.SeekStart)
+	if err != nil {
+		return nil, err
+	}
+
+	var tarball archiver.Reader
+	var tarballType string
+	if bytes.Equal(header[:len(magicZSTD)], magicZSTD) {
+		tarballType = "zst"
+		tarball = archiver.NewTarZstd()
+	} else if bytes.Equal(header[:len(magicXZ)], magicXZ) {
+		tarballType = "xz"
+		tarball = archiver.NewTarXz()
+	} else {
+		return nil, errors.New("not supported compression")
+	}
+	err = tarball.Open(r, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer tarball.Close()
 
 	var pkg *Package
 	var mtree bool
 
 	for {
-		f, err := zstd.Read()
+		f, err := tarball.Read()
 		if err == io.EOF {
 			break
 		}

@@ -111,7 +135,7 @@ func ParsePackage(r *packages.HashedBuffer) (*Package, error) {
 
 		switch f.Name() {
 		case ".PKGINFO":
-			pkg, err = ParsePackageInfo(f)
+			pkg, err = ParsePackageInfo(tarballType, f)
 			if err != nil {
 				return nil, err
 			}

@@ -137,8 +161,10 @@ func ParsePackage(r *packages.HashedBuffer) (*Package, error) {
 
 // ParsePackageInfo Function that accepts reader for .PKGINFO file from package archive,
 // validates all field according to PKGBUILD spec and returns package.
-func ParsePackageInfo(r io.Reader) (*Package, error) {
-	p := &Package{}
+func ParsePackageInfo(compressType string, r io.Reader) (*Package, error) {
+	p := &Package{
+		CompressType: compressType,
+	}
 
 	scanner := bufio.NewScanner(r)
 	for scanner.Scan() {

@@ -281,7 +307,7 @@ func ValidatePackageSpec(p *Package) error {
 // Desc Create pacman package description file.
 func (p *Package) Desc() string {
 	entries := []string{
-		"FILENAME", fmt.Sprintf("%s-%s-%s.pkg.tar.zst", p.Name, p.Version, p.FileMetadata.Arch),
+		"FILENAME", fmt.Sprintf("%s-%s-%s.pkg.tar.%s", p.Name, p.Version, p.FileMetadata.Arch, p.CompressType),
 		"NAME", p.Name,
 		"BASE", p.VersionMetadata.Base,
 		"VERSION", p.Version,
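For reference, the detection added above boils down to comparing the first bytes of the upload against the zstd and xz magic numbers; the matching type ("zst" or "xz") is then recorded as the package's compress type and reused in the stored file names. A condensed standalone sketch, with an invented `detectCompression` helper:

```go
// Condensed sketch of the magic-byte check added above (not part of the diff):
// zstd archives start with 0x28 0xB5 0x2F 0xFD, xz archives with 0xFD '7' 'z' 'X' 'Z'.
package main

import (
	"bytes"
	"errors"
	"fmt"
)

var (
	magicZSTD = []byte{0x28, 0xB5, 0x2F, 0xFD}
	magicXZ   = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A}
)

// detectCompression returns the compress type ("zst" or "xz") derived from the
// first bytes of the uploaded archive.
func detectCompression(header []byte) (string, error) {
	switch {
	case bytes.HasPrefix(header, magicZSTD):
		return "zst", nil
	case bytes.HasPrefix(header, magicXZ):
		return "xz", nil
	default:
		return "", errors.New("not supported compression")
	}
}

func main() {
	fmt.Println(detectCompression([]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00})) // xz <nil>
}
```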
@@ -158,11 +158,12 @@ checkdepend = ola
 makedepend = cmake
 backup = usr/bin/paket1
 `
-	p, err := ParsePackageInfo(strings.NewReader(PKGINFO))
+	p, err := ParsePackageInfo("zst", strings.NewReader(PKGINFO))
 	require.NoError(t, err)
 	require.Equal(t, Package{
-		Name:    "a",
-		Version: "1-2",
+		CompressType: "zst",
+		Name:         "a",
+		Version:      "1-2",
 		VersionMetadata: VersionMetadata{
 			Base:        "b",
 			Description: "comment",

@@ -417,8 +418,9 @@ dummy6
 `
 
 	md := &Package{
-		Name:    "zstd",
-		Version: "1.5.5-1",
+		CompressType: "zst",
+		Name:         "zstd",
+		Version:      "1.5.5-1",
 		VersionMetadata: VersionMetadata{
 			Base:        "zstd",
 			Description: "Zstandard - Fast real-time compression algorithm",
@@ -143,10 +143,59 @@ func CommonRoutes() *web.Route {
 				r.Head("", arch.GetRepositoryKey)
 				r.Get("", arch.GetRepositoryKey)
 			})
-			r.Group("/{distro}", func() {
-				r.Put("", reqPackageAccess(perm.AccessModeWrite), arch.PushPackage)
-				r.Get("/{arch}/{file}", arch.GetPackageOrDB)
-				r.Delete("/{package}/{version}", reqPackageAccess(perm.AccessModeWrite), arch.RemovePackage)
+
+			r.Methods("HEAD,GET,PUT,DELETE", "*", func(ctx *context.Context) {
+				pathGroups := strings.Split(strings.Trim(ctx.Params("*"), "/"), "/")
+				groupLen := len(pathGroups)
+				isGetHead := ctx.Req.Method == "HEAD" || ctx.Req.Method == "GET"
+				isPut := ctx.Req.Method == "PUT"
+				isDelete := ctx.Req.Method == "DELETE"
+				if isGetHead {
+					if groupLen < 2 {
+						ctx.Status(http.StatusNotFound)
+						return
+					}
+					if groupLen == 2 {
+						ctx.SetParams("group", "")
+						ctx.SetParams("arch", pathGroups[0])
+						ctx.SetParams("file", pathGroups[1])
+					} else {
+						ctx.SetParams("group", strings.Join(pathGroups[:groupLen-2], "/"))
+						ctx.SetParams("arch", pathGroups[groupLen-2])
+						ctx.SetParams("file", pathGroups[groupLen-1])
+					}
+					arch.GetPackageOrDB(ctx)
+					return
+				} else if isPut {
+					ctx.SetParams("group", strings.Join(pathGroups, "/"))
+					reqPackageAccess(perm.AccessModeWrite)(ctx)
+					if ctx.Written() {
+						return
+					}
+					arch.PushPackage(ctx)
+					return
+				} else if isDelete {
+					if groupLen < 2 {
+						ctx.Status(http.StatusBadRequest)
+						return
+					}
+					if groupLen == 2 {
+						ctx.SetParams("group", "")
+						ctx.SetParams("package", pathGroups[0])
+						ctx.SetParams("version", pathGroups[1])
+					} else {
+						ctx.SetParams("group", strings.Join(pathGroups[:groupLen-2], "/"))
+						ctx.SetParams("package", pathGroups[groupLen-2])
+						ctx.SetParams("version", pathGroups[groupLen-1])
+					}
+					reqPackageAccess(perm.AccessModeWrite)(ctx)
+					if ctx.Written() {
+						return
+					}
+					arch.RemovePackage(ctx)
+					return
+				}
+				ctx.Status(http.StatusNotFound)
 			})
 		}, reqPackageAccess(perm.AccessModeRead))
 		r.Group("/cargo", func() {
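The single wildcard route replaces the old `/{distro}` parameters: the path is split from the right, the last two segments become `{arch}/{file}` (or `{package}/{version}` for DELETE), and everything before them, possibly empty and possibly containing slashes, becomes the group. A minimal sketch of that splitting rule, with an assumed `splitArchPath` helper:

```go
// Minimal sketch (not part of the diff) of how the wildcard path is split:
// the last two segments are {arch}/{file} for GET/HEAD, everything before them is the group.
package main

import (
	"fmt"
	"strings"
)

func splitArchPath(wildcard string) (group, arch, file string) {
	parts := strings.Split(strings.Trim(wildcard, "/"), "/")
	n := len(parts)
	if n < 2 {
		return "", "", "" // the real handler answers 404 in this case
	}
	group = strings.Join(parts[:n-2], "/") // empty when there are exactly two segments
	return group, parts[n-2], parts[n-1]
}

func main() {
	fmt.Println(splitArchPath("x86_64/test-1.0.0-1-x86_64.pkg.tar.zst")) // "" x86_64 test-1.0.0-1-x86_64.pkg.tar.zst
	fmt.Println(splitArchPath("archlinux/extra/x86_64/extra.db"))        // archlinux/extra x86_64 extra.db
}
```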
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"regexp"
 	"strings"
 
 	packages_model "code.gitea.io/gitea/models/packages"

@@ -21,6 +22,11 @@ import (
 	arch_service "code.gitea.io/gitea/services/packages/arch"
 )
 
+var (
+	archPkgOrSig = regexp.MustCompile(`^.*\.pkg\.tar\.\w+(\.sig)*$`)
+	archDBOrSig  = regexp.MustCompile(`^.*.db(\.tar\.gz)*(\.sig)*$`)
+)
+
 func apiError(ctx *context.Context, status int, obj any) {
 	helper.LogAndProcessError(ctx, status, obj, func(message string) {
 		ctx.PlainText(status, message)

@@ -41,7 +47,7 @@ func GetRepositoryKey(ctx *context.Context) {
 }
 
 func PushPackage(ctx *context.Context) {
-	distro := ctx.Params("distro")
+	group := ctx.Params("group")
 
 	upload, needToClose, err := ctx.UploadStream()
 	if err != nil {

@@ -61,7 +67,7 @@ func PushPackage(ctx *context.Context) {
 
 	p, err := arch_module.ParsePackage(buf)
 	if err != nil {
-		apiError(ctx, http.StatusInternalServerError, err)
+		apiError(ctx, http.StatusBadRequest, err)
 		return
 	}
 

@@ -97,7 +103,7 @@ func PushPackage(ctx *context.Context) {
 	properties := map[string]string{
 		arch_module.PropertyDescription:  p.Desc(),
 		arch_module.PropertyArch:         p.FileMetadata.Arch,
-		arch_module.PropertyDistribution: distro,
+		arch_module.PropertyDistribution: group,
 	}
 
 	version, _, err := packages_service.CreatePackageOrAddFileToExisting(

@@ -114,8 +120,8 @@ func PushPackage(ctx *context.Context) {
 		},
 		&packages_service.PackageFileCreationInfo{
 			PackageFileInfo: packages_service.PackageFileInfo{
-				Filename:     fmt.Sprintf("%s-%s-%s.pkg.tar.zst", p.Name, p.Version, p.FileMetadata.Arch),
-				CompositeKey: distro,
+				Filename:     fmt.Sprintf("%s-%s-%s.pkg.tar.%s", p.Name, p.Version, p.FileMetadata.Arch, p.CompressType),
+				CompositeKey: group,
 			},
 			OverwriteExisting: false,
 			IsLead:            true,

@@ -138,8 +144,8 @@ func PushPackage(ctx *context.Context) {
 	// add sign file
 	_, err = packages_service.AddFileToPackageVersionInternal(ctx, version, &packages_service.PackageFileCreationInfo{
 		PackageFileInfo: packages_service.PackageFileInfo{
-			CompositeKey: distro,
-			Filename:     fmt.Sprintf("%s-%s-%s.pkg.tar.zst.sig", p.Name, p.Version, p.FileMetadata.Arch),
+			CompositeKey: group,
+			Filename:     fmt.Sprintf("%s-%s-%s.pkg.tar.%s.sig", p.Name, p.Version, p.FileMetadata.Arch, p.CompressType),
 		},
 		OverwriteExisting: true,
 		IsLead:            false,

@@ -149,7 +155,7 @@ func PushPackage(ctx *context.Context) {
 	if err != nil {
 		apiError(ctx, http.StatusInternalServerError, err)
 	}
-	if err = arch_service.BuildPacmanDB(ctx, ctx.Package.Owner.ID, distro, p.FileMetadata.Arch); err != nil {
+	if err = arch_service.BuildPacmanDB(ctx, ctx.Package.Owner.ID, group, p.FileMetadata.Arch); err != nil {
 		apiError(ctx, http.StatusInternalServerError, err)
 		return
 	}

@@ -158,13 +164,12 @@ func PushPackage(ctx *context.Context) {
 
 func GetPackageOrDB(ctx *context.Context) {
 	var (
 		file   = ctx.Params("file")
-		distro = ctx.Params("distro")
+		group  = ctx.Params("group")
 		arch   = ctx.Params("arch")
 	)
-
-	if strings.HasSuffix(file, ".pkg.tar.zst") || strings.HasSuffix(file, ".pkg.tar.zst.sig") {
-		pkg, err := arch_service.GetPackageFile(ctx, distro, file, ctx.Package.Owner.ID)
+	if archPkgOrSig.MatchString(file) {
+		pkg, err := arch_service.GetPackageFile(ctx, group, file, ctx.Package.Owner.ID)
 		if err != nil {
 			if errors.Is(err, util.ErrNotExist) {
 				apiError(ctx, http.StatusNotFound, err)

@@ -180,11 +185,8 @@ func GetPackageOrDB(ctx *context.Context) {
 		return
 	}
 
-	if strings.HasSuffix(file, ".db.tar.gz") ||
-		strings.HasSuffix(file, ".db") ||
-		strings.HasSuffix(file, ".db.tar.gz.sig") ||
-		strings.HasSuffix(file, ".db.sig") {
-		pkg, err := arch_service.GetPackageDBFile(ctx, distro, arch, ctx.Package.Owner.ID,
+	if archDBOrSig.MatchString(file) {
+		pkg, err := arch_service.GetPackageDBFile(ctx, group, arch, ctx.Package.Owner.ID,
 			strings.HasSuffix(file, ".sig"))
 		if err != nil {
 			if errors.Is(err, util.ErrNotExist) {

@@ -205,9 +207,9 @@ func GetPackageOrDB(ctx *context.Context) {
 
 func RemovePackage(ctx *context.Context) {
 	var (
-		distro = ctx.Params("distro")
+		group = ctx.Params("group")
 		pkg    = ctx.Params("package")
 		ver    = ctx.Params("version")
 	)
 	pv, err := packages_model.GetVersionByNameAndVersion(
 		ctx, ctx.Package.Owner.ID, packages_model.TypeArch, pkg, ver,

@@ -227,7 +229,7 @@ func RemovePackage(ctx *context.Context) {
 	}
 	deleted := false
 	for _, file := range files {
-		if file.CompositeKey == distro {
+		if file.CompositeKey == group {
 			deleted = true
 			err := packages_service.RemovePackageFileAndVersionIfUnreferenced(ctx, ctx.ContextUser, file)
 			if err != nil {

@@ -237,7 +239,7 @@ func RemovePackage(ctx *context.Context) {
 		}
 	}
 	if deleted {
-		err = arch_service.BuildCustomRepositoryFiles(ctx, ctx.Package.Owner.ID, distro)
+		err = arch_service.BuildCustomRepositoryFiles(ctx, ctx.Package.Owner.ID, group)
 		if err != nil {
 			apiError(ctx, http.StatusInternalServerError, err)
 		}
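The hard-coded `.pkg.tar.zst` suffix checks in `GetPackageOrDB` are replaced by the two regular expressions declared above, so any compression extension and any database name are accepted. A small standalone illustration (not from the diff) of what they match:

```go
// Quick illustration of the file-name patterns accepted by the two new regular expressions.
package main

import (
	"fmt"
	"regexp"
)

var (
	archPkgOrSig = regexp.MustCompile(`^.*\.pkg\.tar\.\w+(\.sig)*$`)
	archDBOrSig  = regexp.MustCompile(`^.*.db(\.tar\.gz)*(\.sig)*$`)
)

func main() {
	for _, name := range []string{
		"test-1.0.0-1-x86_64.pkg.tar.zst",      // package (zst)
		"test-1.0.0-1-any.pkg.tar.xz.sig",      // package signature (xz)
		"x86_64.db",                            // database
		"x86_64.db.tar.gz.sig",                 // database signature
		"test-1.0.0-1-x86_64.pkg.tar.zst.part", // matches neither
	} {
		fmt.Printf("%-38s pkg=%-5v db=%v\n", name, archPkgOrSig.MatchString(name), archDBOrSig.MatchString(name))
	}
}
```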
@@ -15,7 +15,7 @@ import (
 type PackageCleanupRuleForm struct {
 	ID          int64
 	Enabled     bool
-	Type        string `binding:"Required;In(alpine,cargo,chef,composer,conan,conda,container,cran,debian,generic,go,helm,maven,npm,nuget,pub,pypi,rpm,rubygems,swift,vagrant)"`
+	Type        string `binding:"Required;In(alpine,arch,cargo,chef,composer,conan,conda,container,cran,debian,generic,go,helm,maven,npm,nuget,pub,pypi,rpm,rubygems,swift,vagrant)"`
 	KeepCount   int    `binding:"In(0,1,5,10,25,50,100)"`
 	KeepPattern string `binding:"RegexPattern"`
 	RemoveDays  int    `binding:"In(0,7,14,30,60,90,180)"`
@@ -11,6 +11,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"path/filepath"
 	"sort"
 	"strings"
 

@@ -43,7 +44,7 @@ func BuildAllRepositoryFiles(ctx context.Context, ownerID int64) error {
 	}
 	for _, pf := range pfs {
 		if strings.HasSuffix(pf.Name, ".db") {
-			arch := strings.TrimSuffix(strings.TrimPrefix(pf.Name, fmt.Sprintf("%s-", pf.CompositeKey)), ".db")
+			arch := strings.TrimSuffix(pf.Name, ".db")
 			if err := BuildPacmanDB(ctx, ownerID, pf.CompositeKey, arch); err != nil {
 				return err
 			}

@@ -99,7 +100,7 @@ func NewFileSign(ctx context.Context, ownerID int64, input io.Reader) (*packages
 }
 
 // BuildPacmanDB Create db signature cache
-func BuildPacmanDB(ctx context.Context, ownerID int64, distro, arch string) error {
+func BuildPacmanDB(ctx context.Context, ownerID int64, group, arch string) error {
 	pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
 	if err != nil {
 		return err

@@ -110,15 +111,15 @@ func BuildPacmanDB(ctx context.Context, ownerID int64, distro, arch string) erro
 		return err
 	}
 	for _, pf := range pfs {
-		if pf.CompositeKey == distro && strings.HasPrefix(pf.Name, fmt.Sprintf("%s-%s", distro, arch)) {
-			// remove distro and arch
+		if pf.CompositeKey == group && pf.Name == fmt.Sprintf("%s.db", arch) {
+			// remove group and arch
 			if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
 				return err
 			}
 		}
 	}
 
-	db, err := flushDB(ctx, ownerID, distro, arch)
+	db, err := createDB(ctx, ownerID, group, arch)
 	if errors.Is(err, io.EOF) {
 		return nil
 	} else if err != nil {

@@ -140,13 +141,13 @@ func BuildPacmanDB(ctx context.Context, ownerID int64, distro, arch string) erro
 		return err
 	}
 	for name, data := range map[string]*packages_module.HashedBuffer{
-		fmt.Sprintf("%s-%s.db", distro, arch):     db,
-		fmt.Sprintf("%s-%s.db.sig", distro, arch): sig,
+		fmt.Sprintf("%s.db", arch):     db,
+		fmt.Sprintf("%s.db.sig", arch): sig,
 	} {
 		_, err = packages_service.AddFileToPackageVersionInternal(ctx, pv, &packages_service.PackageFileCreationInfo{
 			PackageFileInfo: packages_service.PackageFileInfo{
 				Filename:     name,
-				CompositeKey: distro,
+				CompositeKey: group,
 			},
 			Creator: user_model.NewGhostUser(),
 			Data:    data,

@@ -160,7 +161,7 @@ func BuildPacmanDB(ctx context.Context, ownerID int64, distro, arch string) erro
 	return nil
 }
 
-func flushDB(ctx context.Context, ownerID int64, distro, arch string) (*packages_module.HashedBuffer, error) {
+func createDB(ctx context.Context, ownerID int64, group, arch string) (*packages_module.HashedBuffer, error) {
 	pkgs, err := packages_model.GetPackagesByType(ctx, ownerID, packages_model.TypeArch)
 	if err != nil {
 		return nil, err

@@ -185,17 +186,29 @@ func flushDB(ctx context.Context, ownerID int64, distro, arch string) (*packages
 		sort.Slice(versions, func(i, j int) bool {
 			return versions[i].CreatedUnix > versions[j].CreatedUnix
 		})
+
 		for _, ver := range versions {
-			file := fmt.Sprintf("%s-%s-%s.pkg.tar.zst", pkg.Name, ver.Version, arch)
-			pf, err := packages_model.GetFileForVersionByName(ctx, ver.ID, file, distro)
+			files, err := packages_model.GetFilesByVersionID(ctx, ver.ID)
 			if err != nil {
-				// add any arch package
-				file = fmt.Sprintf("%s-%s-any.pkg.tar.zst", pkg.Name, ver.Version)
-				pf, err = packages_model.GetFileForVersionByName(ctx, ver.ID, file, distro)
-				if err != nil {
-					continue
+				return nil, errors.Join(tw.Close(), gw.Close(), db.Close(), err)
+			}
+			var pf *packages_model.PackageFile
+			for _, file := range files {
+				ext := filepath.Ext(file.Name)
+				if file.CompositeKey == group && ext != "" && ext != ".db" && ext != ".sig" {
+					if pf == nil && strings.HasSuffix(file.Name, fmt.Sprintf("any.pkg.tar%s", ext)) {
+						pf = file
+					}
+					if strings.HasSuffix(file.Name, fmt.Sprintf("%s.pkg.tar%s", arch, ext)) {
+						pf = file
+						break
+					}
 				}
 			}
+			if pf == nil {
+				// file not exists
+				continue
+			}
 			pps, err := packages_model.GetPropertiesByName(
 				ctx, packages_model.PropertyTypeFile, pf.ID, arch_module.PropertyDescription,
 			)

@@ -230,8 +243,8 @@ func flushDB(ctx context.Context, ownerID int64, distro, arch string) (*packages
 
 // GetPackageFile Get data related to provided filename and distribution, for package files
 // update download counter.
-func GetPackageFile(ctx context.Context, distro, file string, ownerID int64) (io.ReadSeekCloser, error) {
-	pf, err := getPackageFile(ctx, distro, file, ownerID)
+func GetPackageFile(ctx context.Context, group, file string, ownerID int64) (io.ReadSeekCloser, error) {
+	pf, err := getPackageFile(ctx, group, file, ownerID)
 	if err != nil {
 		return nil, err
 	}

@@ -241,7 +254,7 @@ func GetPackageFile(ctx context.Context, distro, file string, ownerID int64) (io
 }
 
 // Ejects parameters required to get package file property from file name.
-func getPackageFile(ctx context.Context, distro, file string, ownerID int64) (*packages_model.PackageFile, error) {
+func getPackageFile(ctx context.Context, group, file string, ownerID int64) (*packages_model.PackageFile, error) {
 	var (
 		splt    = strings.Split(file, "-")
 		pkgname = strings.Join(splt[0:len(splt)-3], "-")

@@ -253,23 +266,23 @@ func getPackageFile(ctx context.Context, distro, file string, ownerID int64) (*p
 		return nil, err
 	}
 
-	pkgfile, err := packages_model.GetFileForVersionByName(ctx, version.ID, file, distro)
+	pkgfile, err := packages_model.GetFileForVersionByName(ctx, version.ID, file, group)
 	if err != nil {
 		return nil, err
 	}
 	return pkgfile, nil
 }
 
-func GetPackageDBFile(ctx context.Context, distro, arch string, ownerID int64, signFile bool) (io.ReadSeekCloser, error) {
+func GetPackageDBFile(ctx context.Context, group, arch string, ownerID int64, signFile bool) (io.ReadSeekCloser, error) {
 	pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
 	if err != nil {
 		return nil, err
 	}
-	fileName := fmt.Sprintf("%s-%s.db", distro, arch)
+	fileName := fmt.Sprintf("%s.db", arch)
 	if signFile {
-		fileName = fmt.Sprintf("%s-%s.db.sig", distro, arch)
+		fileName = fmt.Sprintf("%s.db.sig", arch)
 	}
-	file, err := packages_model.GetFileForVersionByName(ctx, pv.ID, fileName, distro)
+	file, err := packages_model.GetFileForVersionByName(ctx, pv.ID, fileName, group)
 	if err != nil {
 		return nil, err
 	}
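With grouping, `createDB` no longer derives a single expected file name; it walks every file of a version and picks the best match for the requested architecture, preferring an exact `{arch}.pkg.tar.*` package and falling back to an `any` build. A standalone sketch of that selection rule (assumed names, not the service code itself):

```go
// Standalone sketch of the file-selection rule used when rebuilding the pacman db:
// prefer an exact-architecture package, fall back to an "any" package,
// skip db and signature files.
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func pickPackageFile(names []string, arch string) string {
	var picked string
	for _, name := range names {
		ext := filepath.Ext(name)
		if ext == "" || ext == ".db" || ext == ".sig" {
			continue
		}
		if picked == "" && strings.HasSuffix(name, fmt.Sprintf("any.pkg.tar%s", ext)) {
			picked = name // remember the "any" build, keep looking for an exact match
		}
		if strings.HasSuffix(name, fmt.Sprintf("%s.pkg.tar%s", arch, ext)) {
			return name // exact architecture wins
		}
	}
	return picked // may be "" when nothing matches
}

func main() {
	files := []string{
		"test-1.0.0-1-any.pkg.tar.zst",
		"test-1.0.0-1-x86_64.pkg.tar.xz",
		"x86_64.db",
	}
	fmt.Println(pickPackageFile(files, "x86_64"))  // test-1.0.0-1-x86_64.pkg.tar.xz
	fmt.Println(pickPackageFile(files, "aarch64")) // test-1.0.0-1-any.pkg.tar.zst
}
```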
@@ -81,18 +81,18 @@ jMBmtEhxyCnCZdUAwYKxAxeRFVk4TCL0aYgWjt3kHTg9SjVStppI2YCSWshUEFGdmJmyCVGpnqIU
 KNlA0hEjIOACGSLqYpXAD5SSNVT2MJRJwREAF4FRHPBlCJMSNwFguGAWDJBg+KIArkIJGNtCydUL
 TuN1oBh/+zKkEblAsgjGqVgUwKLP+UOMOGCpAhICtg6ncFJH`),
 		"other": unPack(`
-KLUv/QBYbRMABuOHS9BSNQdQ56F+xNFoV3CijY54JYt3VqV1iUU3xmj00y2pyBOCuokbhDYpvNsj
-ZJeCxqH+nQFpMf4Wa92okaZoF4eH6HsXXCBo+qy3Fn4AigBgAEaYrLCQEuAom6YbHyuKZAFYksqi
-sSOFiRs0WDmlACk0CnpnaAeKiCS3BlwVkViJEbDS43lFNbLkZEmGhc305Nn4AMLGiUkBDiMTG5Vz
-q4ZISjCofEfR1NpXijvP2X95Hu1e+zLalc0+mjeT3Z/FPGvt62WymbX2dXMDIYKDLjjP8n03RrPf
-A1vOApwGOh2MgE2LpgZrgXLDF2CUJ15idG2J8GCSgcc2ZVRgA8+RHD0k2VJjg6mRUgGGhBWEyEcz
-5EePLhUeWlYhoFCKONxUiBiIUiQeDIqiQwkjLiyqnF5eGs6a2gGRapbU9JRyuXAlPemYajlJojJd
-GBBJjo5GxFRkITOAvLhSCr2TDz4uzdU8Yh3i/SHP4qh3vTG2s9198NP8M+pdR73BvIP6qPeDjzsW
-gTi+jXrXWOe5P/jZxOeod/287v6JljzNP99RNM0a+/x4ljz3LNV2t5v9qHfW2Pyg24u54zSfObWX
-Y9bYrCTHtwdfPPPOYiU5fvB5FssfNN2V5EIPfg9LnM+JhtVEO8+FZw5LXA068YNPhimu9sHPQiWv
-qc6fE9BTnxIe/LTKatab+WYu7T74uWNRxJW5W5Ux0bDLuG1ioCwjg4DvGgBcgB8cUDHJ1RQ89neE
-wvjbNUMiIZdo5hbHgEpANwMkDnL0Jr7kVFg+0pZKjBkmklNgBH1YI8dQOAAKbr6EF5wYM80KWnAd
-nYAR`),
+/Td6WFoAAATm1rRGBMCyBIAYIQEWAAAAAAAAABaHRszgC/8CKl0AFxNGhTWwfXmuDQEJlHgNLrkq
+VxpJY6d9iRTt6gB4uCj0481rnYfXaUADHzOFuF3490RPrM6juPXrknqtVyuWJ5efW19BgwctN6xk
+UiXiZaXVAWVWJWy2XHJiyYCMWBfIjUfo1ccOgwolwgFHJ64ZJjbayA3k6lYPcImuAqYL5NEVHpwl
+Z8CWIjiXXSMQGsB3gxMdq9nySZbHQLK/KCKQ+oseF6kXyIgSEyuG4HhjVBBYIwTvWzI06kjNUXEy
+2sw0n50uocLSAwJ/3mdX3n3XF5nmmuQMPtFbdQgQtC2VhyVd3TdIF+pT6zAEzXFJJ3uLkNbKSS88
+ZdBny6X/ftT5lQpNi/Wg0xLEQA4m4fu4fRAR0kOKzHM2svNLbTxa/wOPidqPzR6b/jfKmHkXxBNa
+jFafty0a5K2S3F6JpwXZ2fqti/zG9NtMc+bbuXycC327EofXRXNtuOupELDD+ltTOIBF7CcTswyi
+MZDP1PBie2GqDV2GuPz+0XXmul/ds+XysG19HIkKbJ+cQKp5o7Y0tI7EHM8GhwMl7MjgpQGj5nuv
+0u2hqt4NXPNYqaMm9bFnnIUxEN82HgNWBcXf2baWKOdGzPzCuWg2fAM4zxHnBWcimxLXiJgaI8mU
+J/QqTPWE0nJf1PW/J9yFQVR1Xo0TJyiX8/ObwmbqUPpxRGjKlYRBvn0jbTdUAENBSn+QVcASRGFE
+SB9OM2B8Bg4jR/oojs8Beoq7zbIblgAAAACfRtXvhmznOgABzgSAGAAAKklb4rHEZ/sCAAAAAARZ
+Wg==`), // this is tar.xz file
 	}
 
 	t.Run("RepositoryKey", func(t *testing.T) {

@@ -105,155 +105,154 @@ nYAR`),
 		require.Contains(t, resp.Body.String(), "-----BEGIN PGP PUBLIC KEY BLOCK-----")
 	})
 
-	t.Run("Upload", func(t *testing.T) {
-		defer tests.PrintCurrentTest(t)()
-
-		req := NewRequestWithBody(t, "PUT", rootURL+"/default", bytes.NewReader(pkgs["any"]))
-		MakeRequest(t, req, http.StatusUnauthorized)
-
-		req = NewRequestWithBody(t, "PUT", rootURL+"/default", bytes.NewReader(pkgs["any"])).
-			AddBasicAuth(user.Name)
-		MakeRequest(t, req, http.StatusCreated)
-
-		pvs, err := packages.GetVersionsByPackageType(db.DefaultContext, user.ID, packages.TypeArch)
-		require.NoError(t, err)
-		require.Len(t, pvs, 1)
-
-		pd, err := packages.GetPackageDescriptor(db.DefaultContext, pvs[0])
-		require.NoError(t, err)
-		require.Nil(t, pd.SemVer)
-		require.IsType(t, &arch_model.VersionMetadata{}, pd.Metadata)
-		require.Equal(t, "test", pd.Package.Name)
-		require.Equal(t, "1.0.0-1", pd.Version.Version)
-
-		pfs, err := packages.GetFilesByVersionID(db.DefaultContext, pvs[0].ID)
-		require.NoError(t, err)
-		require.Len(t, pfs, 2) // zst and zst.sig
-		require.True(t, pfs[0].IsLead)
-
-		pb, err := packages.GetBlobByID(db.DefaultContext, pfs[0].BlobID)
-		require.NoError(t, err)
-		require.Equal(t, int64(len(pkgs["any"])), pb.Size)
-
-		req = NewRequestWithBody(t, "PUT", rootURL+"/default", bytes.NewReader(pkgs["any"])).
-			AddBasicAuth(user.Name)
-		MakeRequest(t, req, http.StatusConflict)
-		req = NewRequestWithBody(t, "PUT", rootURL+"/default", bytes.NewReader(pkgs["x86_64"])).
-			AddBasicAuth(user.Name)
-		MakeRequest(t, req, http.StatusCreated)
-		req = NewRequestWithBody(t, "PUT", rootURL+"/other", bytes.NewReader(pkgs["any"])).
-			AddBasicAuth(user.Name)
-		MakeRequest(t, req, http.StatusCreated)
-		req = NewRequestWithBody(t, "PUT", rootURL+"/other", bytes.NewReader(pkgs["aarch64"])).
-			AddBasicAuth(user.Name)
-		MakeRequest(t, req, http.StatusCreated)
-
-		req = NewRequestWithBody(t, "PUT", rootURL+"/base", bytes.NewReader(pkgs["other"])).
-			AddBasicAuth(user.Name)
-		MakeRequest(t, req, http.StatusCreated)
-		req = NewRequestWithBody(t, "PUT", rootURL+"/base", bytes.NewReader(pkgs["x86_64"])).
-			AddBasicAuth(user.Name)
-		MakeRequest(t, req, http.StatusCreated)
-		req = NewRequestWithBody(t, "PUT", rootURL+"/base", bytes.NewReader(pkgs["aarch64"])).
-			AddBasicAuth(user.Name)
-		MakeRequest(t, req, http.StatusCreated)
-	})
-
-	t.Run("Download", func(t *testing.T) {
-		defer tests.PrintCurrentTest(t)()
-		req := NewRequest(t, "GET", rootURL+"/default/x86_64/test-1.0.0-1-x86_64.pkg.tar.zst")
-		resp := MakeRequest(t, req, http.StatusOK)
-		require.Equal(t, pkgs["x86_64"], resp.Body.Bytes())
-
-		req = NewRequest(t, "GET", rootURL+"/default/x86_64/test-1.0.0-1-any.pkg.tar.zst")
-		resp = MakeRequest(t, req, http.StatusOK)
-		require.Equal(t, pkgs["any"], resp.Body.Bytes())
-
-		req = NewRequest(t, "GET", rootURL+"/default/x86_64/test-1.0.0-1-aarch64.pkg.tar.zst")
-		MakeRequest(t, req, http.StatusNotFound)
-
-		req = NewRequest(t, "GET", rootURL+"/other/x86_64/test-1.0.0-1-x86_64.pkg.tar.zst")
-		MakeRequest(t, req, http.StatusNotFound)
-
-		req = NewRequest(t, "GET", rootURL+"/other/x86_64/test-1.0.0-1-any.pkg.tar.zst")
-		resp = MakeRequest(t, req, http.StatusOK)
-		require.Equal(t, pkgs["any"], resp.Body.Bytes())
-	})
-
-	t.Run("SignVerify", func(t *testing.T) {
-		defer tests.PrintCurrentTest(t)()
-		req := NewRequest(t, "GET", rootURL+"/repository.key")
-		respPub := MakeRequest(t, req, http.StatusOK)
-
-		req = NewRequest(t, "GET", rootURL+"/other/x86_64/test-1.0.0-1-any.pkg.tar.zst")
-		respPkg := MakeRequest(t, req, http.StatusOK)
-
-		req = NewRequest(t, "GET", rootURL+"/other/x86_64/test-1.0.0-1-any.pkg.tar.zst.sig")
-		respSig := MakeRequest(t, req, http.StatusOK)
-
-		if err := gpgVerify(respPub.Body.Bytes(), respSig.Body.Bytes(), respPkg.Body.Bytes()); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	t.Run("Repository", func(t *testing.T) {
-		defer tests.PrintCurrentTest(t)()
-		req := NewRequest(t, "GET", rootURL+"/repository.key")
-		respPub := MakeRequest(t, req, http.StatusOK)
-
-		req = NewRequest(t, "GET", rootURL+"/base/x86_64/base.db")
-		respPkg := MakeRequest(t, req, http.StatusOK)
-
-		req = NewRequest(t, "GET", rootURL+"/base/x86_64/base.db.sig")
-		respSig := MakeRequest(t, req, http.StatusOK)
-
-		if err := gpgVerify(respPub.Body.Bytes(), respSig.Body.Bytes(), respPkg.Body.Bytes()); err != nil {
-			t.Fatal(err)
-		}
-		files, err := listGzipFiles(respPkg.Body.Bytes())
-		require.NoError(t, err)
-		require.Len(t, files, 2)
-		for s, d := range files {
-			name := getProperty(string(d.Data), "NAME")
-			ver := getProperty(string(d.Data), "VERSION")
-			require.Equal(t, name+"-"+ver+"/desc", s)
-			fn := getProperty(string(d.Data), "FILENAME")
-			pgp := getProperty(string(d.Data), "PGPSIG")
-			req = NewRequest(t, "GET", rootURL+"/base/x86_64/"+fn+".sig")
-			respSig := MakeRequest(t, req, http.StatusOK)
-			decodeString, err := base64.StdEncoding.DecodeString(pgp)
-			require.NoError(t, err)
-			require.Equal(t, respSig.Body.Bytes(), decodeString)
-		}
-	})
-	t.Run("Delete", func(t *testing.T) {
-		defer tests.PrintCurrentTest(t)()
-		req := NewRequestWithBody(t, "DELETE", rootURL+"/base/notfound/1.0.0-1", nil).
-			AddBasicAuth(user.Name)
-		MakeRequest(t, req, http.StatusNotFound)
-
-		req = NewRequestWithBody(t, "DELETE", rootURL+"/base/test/1.0.0-1", nil).
-			AddBasicAuth(user.Name)
-		MakeRequest(t, req, http.StatusNoContent)
-
-		req = NewRequest(t, "GET", rootURL+"/base/x86_64/base.db")
-		respPkg := MakeRequest(t, req, http.StatusOK)
-		files, err := listGzipFiles(respPkg.Body.Bytes())
-		require.NoError(t, err)
-		require.Len(t, files, 1)
-
-		req = NewRequestWithBody(t, "DELETE", rootURL+"/base/test2/1.0.0-1", nil).
-			AddBasicAuth(user.Name)
-		MakeRequest(t, req, http.StatusNoContent)
-		req = NewRequest(t, "GET", rootURL+"/base/x86_64/base.db")
-		MakeRequest(t, req, http.StatusNotFound)
-
-		req = NewRequest(t, "GET", rootURL+"/default/x86_64/base.db")
-		respPkg = MakeRequest(t, req, http.StatusOK)
-		files, err = listGzipFiles(respPkg.Body.Bytes())
-		require.NoError(t, err)
-		require.Len(t, files, 1)
-	})
+	for _, group := range []string{"", "arch", "arch/os", "x86_64"} {
+		groupURL := rootURL
+		if group != "" {
+			groupURL = groupURL + "/" + group
+		}
+		t.Run(fmt.Sprintf("Upload[%s]", group), func(t *testing.T) {
+			defer tests.PrintCurrentTest(t)()
+
+			req := NewRequestWithBody(t, "PUT", groupURL, bytes.NewReader(pkgs["any"]))
+			MakeRequest(t, req, http.StatusUnauthorized)
+
+			req = NewRequestWithBody(t, "PUT", groupURL, bytes.NewReader(pkgs["any"])).
+				AddBasicAuth(user.Name)
+			MakeRequest(t, req, http.StatusCreated)
+
+			req = NewRequestWithBody(t, "PUT", groupURL, bytes.NewBuffer([]byte("any string"))).
+				AddBasicAuth(user.Name)
+			MakeRequest(t, req, http.StatusBadRequest)
+
+			pvs, err := packages.GetVersionsByPackageType(db.DefaultContext, user.ID, packages.TypeArch)
+			require.NoError(t, err)
+			require.Len(t, pvs, 1)
+
+			pd, err := packages.GetPackageDescriptor(db.DefaultContext, pvs[0])
+			require.NoError(t, err)
+			require.Nil(t, pd.SemVer)
+			require.IsType(t, &arch_model.VersionMetadata{}, pd.Metadata)
+			require.Equal(t, "test", pd.Package.Name)
+			require.Equal(t, "1.0.0-1", pd.Version.Version)
+
+			pfs, err := packages.GetFilesByVersionID(db.DefaultContext, pvs[0].ID)
+			require.NoError(t, err)
+			size := 0
+			for _, pf := range pfs {
+				if pf.CompositeKey == group {
+					size++
+				}
+			}
+			require.Equal(t, 2, size) // zst and zst.sig
+
+			pb, err := packages.GetBlobByID(db.DefaultContext, pfs[0].BlobID)
+			require.NoError(t, err)
+			require.Equal(t, int64(len(pkgs["any"])), pb.Size)
+
+			req = NewRequestWithBody(t, "PUT", groupURL, bytes.NewReader(pkgs["any"])).
+				AddBasicAuth(user.Name) // exists
+			MakeRequest(t, req, http.StatusConflict)
+			req = NewRequestWithBody(t, "PUT", groupURL, bytes.NewReader(pkgs["x86_64"])).
+				AddBasicAuth(user.Name)
+			MakeRequest(t, req, http.StatusCreated)
+			req = NewRequestWithBody(t, "PUT", groupURL, bytes.NewReader(pkgs["aarch64"])).
+				AddBasicAuth(user.Name)
+			MakeRequest(t, req, http.StatusCreated)
+			req = NewRequestWithBody(t, "PUT", groupURL, bytes.NewReader(pkgs["aarch64"])).
+				AddBasicAuth(user.Name) // exists again
+			MakeRequest(t, req, http.StatusConflict)
+		})
+
+		t.Run(fmt.Sprintf("Download[%s]", group), func(t *testing.T) {
+			defer tests.PrintCurrentTest(t)()
+			req := NewRequest(t, "GET", groupURL+"/x86_64/test-1.0.0-1-x86_64.pkg.tar.zst")
+			resp := MakeRequest(t, req, http.StatusOK)
+			require.Equal(t, pkgs["x86_64"], resp.Body.Bytes())
+
+			req = NewRequest(t, "GET", groupURL+"/x86_64/test-1.0.0-1-any.pkg.tar.zst")
+			resp = MakeRequest(t, req, http.StatusOK)
+			require.Equal(t, pkgs["any"], resp.Body.Bytes())
+
+			// get other group
+			req = NewRequest(t, "GET", rootURL+"/unknown/x86_64/test-1.0.0-1-aarch64.pkg.tar.zst")
+			MakeRequest(t, req, http.StatusNotFound)
+		})
+
+		t.Run(fmt.Sprintf("SignVerify[%s]", group), func(t *testing.T) {
+			defer tests.PrintCurrentTest(t)()
+			req := NewRequest(t, "GET", rootURL+"/repository.key")
+			respPub := MakeRequest(t, req, http.StatusOK)
+
+			req = NewRequest(t, "GET", groupURL+"/x86_64/test-1.0.0-1-any.pkg.tar.zst")
+			respPkg := MakeRequest(t, req, http.StatusOK)
+
+			req = NewRequest(t, "GET", groupURL+"/x86_64/test-1.0.0-1-any.pkg.tar.zst.sig")
+			respSig := MakeRequest(t, req, http.StatusOK)
+
+			if err := gpgVerify(respPub.Body.Bytes(), respSig.Body.Bytes(), respPkg.Body.Bytes()); err != nil {
+				t.Fatal(err)
+			}
+		})
+
+		t.Run(fmt.Sprintf("RepositoryDB[%s]", group), func(t *testing.T) {
+			defer tests.PrintCurrentTest(t)()
+			req := NewRequest(t, "GET", rootURL+"/repository.key")
+			respPub := MakeRequest(t, req, http.StatusOK)
+
+			req = NewRequest(t, "GET", groupURL+"/x86_64/base.db")
+			respPkg := MakeRequest(t, req, http.StatusOK)
+
+			req = NewRequest(t, "GET", groupURL+"/x86_64/base.db.sig")
+			respSig := MakeRequest(t, req, http.StatusOK)
+
+			if err := gpgVerify(respPub.Body.Bytes(), respSig.Body.Bytes(), respPkg.Body.Bytes()); err != nil {
+				t.Fatal(err)
+			}
+			files, err := listTarGzFiles(respPkg.Body.Bytes())
+			require.NoError(t, err)
+			require.Len(t, files, 1)
+			for s, d := range files {
+				name := getProperty(string(d.Data), "NAME")
+				ver := getProperty(string(d.Data), "VERSION")
+				require.Equal(t, name+"-"+ver+"/desc", s)
+				fn := getProperty(string(d.Data), "FILENAME")
+				pgp := getProperty(string(d.Data), "PGPSIG")
+				req = NewRequest(t, "GET", groupURL+"/x86_64/"+fn+".sig")
+				respSig := MakeRequest(t, req, http.StatusOK)
+				decodeString, err := base64.StdEncoding.DecodeString(pgp)
+				require.NoError(t, err)
+				require.Equal(t, respSig.Body.Bytes(), decodeString)
+			}
+		})
+
+		t.Run(fmt.Sprintf("Delete[%s]", group), func(t *testing.T) {
+			defer tests.PrintCurrentTest(t)()
+			// test data
+			req := NewRequestWithBody(t, "PUT", groupURL, bytes.NewReader(pkgs["other"])).
+				AddBasicAuth(user.Name)
+			MakeRequest(t, req, http.StatusCreated)
+
+			req = NewRequestWithBody(t, "DELETE", rootURL+"/base/notfound/1.0.0-1", nil).
+				AddBasicAuth(user.Name)
+			MakeRequest(t, req, http.StatusNotFound)
+
+			req = NewRequestWithBody(t, "DELETE", groupURL+"/test/1.0.0-1", nil).
+				AddBasicAuth(user.Name)
+			MakeRequest(t, req, http.StatusNoContent)
+
+			req = NewRequest(t, "GET", groupURL+"/x86_64/base.db")
+			respPkg := MakeRequest(t, req, http.StatusOK)
+			files, err := listTarGzFiles(respPkg.Body.Bytes())
+			require.NoError(t, err)
+			require.Len(t, files, 1) // other pkg in L225
+
+			req = NewRequestWithBody(t, "DELETE", groupURL+"/test2/1.0.0-1", nil).
+				AddBasicAuth(user.Name)
+			MakeRequest(t, req, http.StatusNoContent)
+			req = NewRequest(t, "GET", groupURL+"/x86_64/base.db")
+			MakeRequest(t, req, http.StatusNotFound)
+		})
+	}
 }
 
 func getProperty(data, key string) string {

@@ -270,7 +269,7 @@ func getProperty(data, key string) string {
 	}
 }
 
-func listGzipFiles(data []byte) (fstest.MapFS, error) {
+func listTarGzFiles(data []byte) (fstest.MapFS, error) {
 	reader, err := gzip.NewReader(bytes.NewBuffer(data))
 	defer reader.Close()
 	if err != nil {