2019-12-14 20:30:01 +03:00
|
|
|
// Copyright 2019 The Gitea Authors. All rights reserved.
|
2022-11-27 21:20:29 +03:00
|
|
|
// SPDX-License-Identifier: MIT
|
2019-12-14 20:30:01 +03:00
|
|
|
|
|
|
|
package repository
|
|
|
|
|
|
|
|
import (
|
2020-12-02 21:36:06 +03:00
|
|
|
"context"
|
2022-11-12 21:58:26 +03:00
|
|
|
"errors"
|
2019-12-14 20:30:01 +03:00
|
|
|
"fmt"
|
2021-06-14 20:20:43 +03:00
|
|
|
"io"
|
2021-11-20 12:34:05 +03:00
|
|
|
"net/http"
|
2019-12-14 20:30:01 +03:00
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
2021-09-19 14:49:59 +03:00
|
|
|
"code.gitea.io/gitea/models/db"
|
2022-06-12 18:51:54 +03:00
|
|
|
git_model "code.gitea.io/gitea/models/git"
|
2022-03-29 09:29:02 +03:00
|
|
|
"code.gitea.io/gitea/models/organization"
|
2021-12-10 04:27:50 +03:00
|
|
|
repo_model "code.gitea.io/gitea/models/repo"
|
2021-11-24 12:49:20 +03:00
|
|
|
user_model "code.gitea.io/gitea/models/user"
|
2022-10-12 08:18:26 +03:00
|
|
|
"code.gitea.io/gitea/modules/container"
|
2019-12-14 20:30:01 +03:00
|
|
|
"code.gitea.io/gitea/modules/git"
|
Simplify how git repositories are opened (#28937)
## Purpose
This is a refactor toward building an abstraction over managing git
repositories.
Afterwards, it does not matter anymore if they are stored on the local
disk or somewhere remote.
## What this PR changes
We used `git.OpenRepository` everywhere previously.
Now, we should split them into two distinct functions:
Firstly, there are temporary repositories which do not change:
```go
git.OpenRepository(ctx, diskPath)
```
Gitea managed repositories having a record in the database in the
`repository` table are moved into the new package `gitrepo`:
```go
gitrepo.OpenRepository(ctx, repo_model.Repo)
```
Why is `repo_model.Repository` the second parameter instead of file
path?
Because then we can easily adapt our repository storage strategy.
The repositories can be stored locally, however, they could just as well
be stored on a remote server.
## Further changes in other PRs
- A Git Command wrapper on package `gitrepo` could be created. i.e.
`NewCommand(ctx, repo_model.Repository, commands...)`. `git.RunOpts{Dir:
repo.RepoPath()}`, the directory should be empty before invoking this
method and it can be filled in the function only. #28940
- Remove the `RepoPath()`/`WikiPath()` functions to reduce the
possibility of mistakes.
---------
Co-authored-by: delvh <dev.lh@web.de>
2024-01-27 23:09:51 +03:00
|
|
|
"code.gitea.io/gitea/modules/gitrepo"
|
2021-04-09 01:25:57 +03:00
|
|
|
"code.gitea.io/gitea/modules/lfs"
|
2019-12-14 20:30:01 +03:00
|
|
|
"code.gitea.io/gitea/modules/log"
|
2021-11-16 18:25:33 +03:00
|
|
|
"code.gitea.io/gitea/modules/migration"
|
2019-12-14 20:30:01 +03:00
|
|
|
"code.gitea.io/gitea/modules/setting"
|
|
|
|
"code.gitea.io/gitea/modules/timeutil"
|
2020-08-11 23:05:34 +03:00
|
|
|
"code.gitea.io/gitea/modules/util"
|
2019-12-14 20:30:01 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
// commonWikiURLSuffixes lists the wiki repository URL suffixes used by
// well-known hosting services; WikiRemoteURL probes them in order:
//
//	GitHub, GitLab, Gogs: *.wiki.git
//	BitBucket:            *.git/wiki
var commonWikiURLSuffixes = []string{".wiki.git", ".git/wiki"}
|
|
|
|
|
2020-07-06 05:08:32 +03:00
|
|
|
// WikiRemoteURL returns accessible repository URL for wiki if exists.
|
2019-12-14 20:30:01 +03:00
|
|
|
// Otherwise, it returns an empty string.
|
2022-01-20 02:26:57 +03:00
|
|
|
func WikiRemoteURL(ctx context.Context, remote string) string {
|
2019-12-14 20:30:01 +03:00
|
|
|
remote = strings.TrimSuffix(remote, ".git")
|
|
|
|
for _, suffix := range commonWikiURLSuffixes {
|
|
|
|
wikiURL := remote + suffix
|
2022-01-20 02:26:57 +03:00
|
|
|
if git.IsRepoURLAccessible(ctx, wikiURL) {
|
2019-12-14 20:30:01 +03:00
|
|
|
return wikiURL
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
|
|
|
// MigrateRepositoryGitData starts migrating git related data after created migrating repository
|
2021-11-24 12:49:20 +03:00
|
|
|
func MigrateRepositoryGitData(ctx context.Context, u *user_model.User,
|
2021-12-10 04:27:50 +03:00
|
|
|
repo *repo_model.Repository, opts migration.MigrateOptions,
|
2021-11-20 12:34:05 +03:00
|
|
|
httpTransport *http.Transport,
|
2021-12-10 04:27:50 +03:00
|
|
|
) (*repo_model.Repository, error) {
|
|
|
|
repoPath := repo_model.RepoPath(u.Name, opts.RepoName)
|
2019-12-14 20:30:01 +03:00
|
|
|
|
|
|
|
if u.IsOrganization() {
|
2023-02-08 09:44:42 +03:00
|
|
|
t, err := organization.OrgFromUser(u).GetOwnerTeam(ctx)
|
2019-12-14 20:30:01 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
repo.NumWatches = t.NumMembers
|
|
|
|
} else {
|
|
|
|
repo.NumWatches = 1
|
|
|
|
}
|
|
|
|
|
|
|
|
migrateTimeout := time.Duration(setting.Git.Timeout.Migrate) * time.Second
|
|
|
|
|
|
|
|
var err error
|
2020-08-11 23:05:34 +03:00
|
|
|
if err = util.RemoveAll(repoPath); err != nil {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("Failed to remove %s: %w", repoPath, err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
|
2022-01-20 02:26:57 +03:00
|
|
|
if err = git.Clone(ctx, opts.CloneAddr, repoPath, git.CloneRepoOptions{
|
2022-03-19 17:16:38 +03:00
|
|
|
Mirror: true,
|
|
|
|
Quiet: true,
|
|
|
|
Timeout: migrateTimeout,
|
|
|
|
SkipTLSVerify: setting.Migrations.SkipTLSVerify,
|
2019-12-14 20:30:01 +03:00
|
|
|
}); err != nil {
|
2022-11-12 21:58:26 +03:00
|
|
|
if errors.Is(err, context.DeadlineExceeded) {
|
|
|
|
return repo, fmt.Errorf("Clone timed out. Consider increasing [git.timeout] MIGRATE in app.ini. Underlying Error: %w", err)
|
|
|
|
}
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("Clone: %w", err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
|
2022-03-29 20:12:33 +03:00
|
|
|
if err := git.WriteCommitGraph(ctx, repoPath); err != nil {
|
|
|
|
return repo, err
|
|
|
|
}
|
|
|
|
|
2019-12-14 20:30:01 +03:00
|
|
|
if opts.Wiki {
|
2021-12-10 04:27:50 +03:00
|
|
|
wikiPath := repo_model.WikiPath(u.Name, opts.RepoName)
|
2022-01-20 02:26:57 +03:00
|
|
|
wikiRemotePath := WikiRemoteURL(ctx, opts.CloneAddr)
|
2019-12-14 20:30:01 +03:00
|
|
|
if len(wikiRemotePath) > 0 {
|
2020-08-11 23:05:34 +03:00
|
|
|
if err := util.RemoveAll(wikiPath); err != nil {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("Failed to remove %s: %w", wikiPath, err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
|
2022-04-30 15:50:56 +03:00
|
|
|
if err := git.Clone(ctx, wikiRemotePath, wikiPath, git.CloneRepoOptions{
|
2022-03-19 17:16:38 +03:00
|
|
|
Mirror: true,
|
|
|
|
Quiet: true,
|
|
|
|
Timeout: migrateTimeout,
|
|
|
|
Branch: "master",
|
|
|
|
SkipTLSVerify: setting.Migrations.SkipTLSVerify,
|
2019-12-14 20:30:01 +03:00
|
|
|
}); err != nil {
|
|
|
|
log.Warn("Clone wiki: %v", err)
|
2020-08-11 23:05:34 +03:00
|
|
|
if err := util.RemoveAll(wikiPath); err != nil {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("Failed to remove %s: %w", wikiPath, err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
2022-04-30 15:50:56 +03:00
|
|
|
} else {
|
|
|
|
if err := git.WriteCommitGraph(ctx, wikiPath); err != nil {
|
|
|
|
return repo, err
|
|
|
|
}
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-13 22:47:02 +03:00
|
|
|
if repo.OwnerID == u.ID {
|
|
|
|
repo.Owner = u
|
|
|
|
}
|
|
|
|
|
2022-06-06 11:01:49 +03:00
|
|
|
if err = CheckDaemonExportOK(ctx, repo); err != nil {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("checkDaemonExportOK: %w", err)
|
2021-10-13 22:47:02 +03:00
|
|
|
}
|
|
|
|
|
2022-04-01 05:55:30 +03:00
|
|
|
if stdout, _, err := git.NewCommand(ctx, "update-server-info").
|
2021-10-13 22:47:02 +03:00
|
|
|
SetDescription(fmt.Sprintf("MigrateRepositoryGitData(git update-server-info): %s", repoPath)).
|
2022-04-01 05:55:30 +03:00
|
|
|
RunStdString(&git.RunOpts{Dir: repoPath}); err != nil {
|
2021-10-13 22:47:02 +03:00
|
|
|
log.Error("MigrateRepositoryGitData(git update-server-info) in %v: Stdout: %s\nError: %v", repo, stdout, err)
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("error in MigrateRepositoryGitData(git update-server-info): %w", err)
|
2021-10-13 22:47:02 +03:00
|
|
|
}
|
|
|
|
|
2022-03-29 22:13:41 +03:00
|
|
|
gitRepo, err := git.OpenRepository(ctx, repoPath)
|
2019-12-14 20:30:01 +03:00
|
|
|
if err != nil {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("OpenRepository: %w", err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
defer gitRepo.Close()
|
|
|
|
|
|
|
|
repo.IsEmpty, err = gitRepo.IsEmpty()
|
|
|
|
if err != nil {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("git.IsEmpty: %w", err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
|
2020-09-15 17:37:44 +03:00
|
|
|
if !repo.IsEmpty {
|
|
|
|
if len(repo.DefaultBranch) == 0 {
|
|
|
|
// Try to get HEAD branch and set it as default branch.
|
|
|
|
headBranch, err := gitRepo.GetHEADBranch()
|
|
|
|
if err != nil {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("GetHEADBranch: %w", err)
|
2020-09-15 17:37:44 +03:00
|
|
|
}
|
|
|
|
if headBranch != nil {
|
|
|
|
repo.DefaultBranch = headBranch.Name
|
|
|
|
}
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
|
2023-06-29 13:03:20 +03:00
|
|
|
if _, err := SyncRepoBranchesWithRepo(ctx, repo, gitRepo, u.ID); err != nil {
|
|
|
|
return repo, fmt.Errorf("SyncRepoBranchesWithRepo: %v", err)
|
|
|
|
}
|
|
|
|
|
2020-09-15 17:37:44 +03:00
|
|
|
if !opts.Releases {
|
2022-03-31 15:30:40 +03:00
|
|
|
// note: this will greatly improve release (tag) sync
|
|
|
|
// for pull-mirrors with many tags
|
|
|
|
repo.IsMirror = opts.Mirror
|
2023-09-25 16:17:37 +03:00
|
|
|
if err = SyncReleasesWithTags(ctx, repo, gitRepo); err != nil {
|
2020-09-15 17:37:44 +03:00
|
|
|
log.Error("Failed to synchronize tags to releases for repository: %v", err)
|
|
|
|
}
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
2021-04-09 01:25:57 +03:00
|
|
|
|
|
|
|
if opts.LFS {
|
2021-11-20 12:34:05 +03:00
|
|
|
endpoint := lfs.DetermineEndpoint(opts.CloneAddr, opts.LFSEndpoint)
|
|
|
|
lfsClient := lfs.NewClient(endpoint, httpTransport)
|
|
|
|
if err = StoreMissingLfsObjectsInRepository(ctx, repo, gitRepo, lfsClient); err != nil {
|
2021-04-09 01:25:57 +03:00
|
|
|
log.Error("Failed to store missing LFS objects for repository: %v", err)
|
|
|
|
}
|
|
|
|
}
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
|
2023-06-29 13:03:20 +03:00
|
|
|
ctx, committer, err := db.TxContext(ctx)
|
2022-06-06 11:01:49 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
2022-06-06 11:01:49 +03:00
|
|
|
defer committer.Close()
|
2019-12-14 20:30:01 +03:00
|
|
|
|
|
|
|
if opts.Mirror {
|
2023-09-16 19:03:02 +03:00
|
|
|
remoteAddress, err := util.SanitizeURL(opts.CloneAddr)
|
|
|
|
if err != nil {
|
|
|
|
return repo, err
|
|
|
|
}
|
2021-12-10 04:27:50 +03:00
|
|
|
mirrorModel := repo_model.Mirror{
|
2019-12-14 20:30:01 +03:00
|
|
|
RepoID: repo.ID,
|
|
|
|
Interval: setting.Mirror.DefaultInterval,
|
|
|
|
EnablePrune: true,
|
|
|
|
NextUpdateUnix: timeutil.TimeStampNow().AddDuration(setting.Mirror.DefaultInterval),
|
2021-04-09 01:25:57 +03:00
|
|
|
LFS: opts.LFS,
|
2023-09-16 19:03:02 +03:00
|
|
|
RemoteAddress: remoteAddress,
|
2021-04-09 01:25:57 +03:00
|
|
|
}
|
|
|
|
if opts.LFS {
|
|
|
|
mirrorModel.LFSEndpoint = opts.LFSEndpoint
|
2021-01-03 02:47:47 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if opts.MirrorInterval != "" {
|
|
|
|
parsedInterval, err := time.ParseDuration(opts.MirrorInterval)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Failed to set Interval: %v", err)
|
|
|
|
return repo, err
|
|
|
|
}
|
|
|
|
if parsedInterval == 0 {
|
|
|
|
mirrorModel.Interval = 0
|
|
|
|
mirrorModel.NextUpdateUnix = 0
|
|
|
|
} else if parsedInterval < setting.Mirror.MinInterval {
|
2023-05-26 04:04:48 +03:00
|
|
|
err := fmt.Errorf("interval %s is set below Minimum Interval of %s", parsedInterval, setting.Mirror.MinInterval)
|
2021-01-03 02:47:47 +03:00
|
|
|
log.Error("Interval: %s is too frequent", opts.MirrorInterval)
|
|
|
|
return repo, err
|
|
|
|
} else {
|
|
|
|
mirrorModel.Interval = parsedInterval
|
|
|
|
mirrorModel.NextUpdateUnix = timeutil.TimeStampNow().AddDuration(parsedInterval)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-06-06 11:01:49 +03:00
|
|
|
if err = repo_model.InsertMirror(ctx, &mirrorModel); err != nil {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("InsertOne: %w", err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
repo.IsMirror = true
|
2022-06-06 11:01:49 +03:00
|
|
|
if err = UpdateRepository(ctx, repo, false); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-05-26 04:04:48 +03:00
|
|
|
|
|
|
|
// this is necessary for sync local tags from remote
|
|
|
|
configName := fmt.Sprintf("remote.%s.fetch", mirrorModel.GetRemoteName())
|
|
|
|
if stdout, _, err := git.NewCommand(ctx, "config").
|
|
|
|
AddOptionValues("--add", configName, `+refs/tags/*:refs/tags/*`).
|
|
|
|
RunStdString(&git.RunOpts{Dir: repoPath}); err != nil {
|
|
|
|
log.Error("MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*) in %v: Stdout: %s\nError: %v", repo, stdout, err)
|
|
|
|
return repo, fmt.Errorf("error in MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*): %w", err)
|
|
|
|
}
|
2019-12-14 20:30:01 +03:00
|
|
|
} else {
|
2022-06-06 11:01:49 +03:00
|
|
|
if err = UpdateRepoSize(ctx, repo); err != nil {
|
|
|
|
log.Error("Failed to update size for repository: %v", err)
|
|
|
|
}
|
|
|
|
if repo, err = CleanUpMigrateInfo(ctx, repo); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
|
2022-06-06 11:01:49 +03:00
|
|
|
return repo, committer.Commit()
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// cleanUpMigrateGitConfig removes mirror info which prevents "push --all".
|
|
|
|
// This also removes possible user credentials.
|
2023-06-05 13:05:31 +03:00
|
|
|
func cleanUpMigrateGitConfig(ctx context.Context, repoPath string) error {
|
|
|
|
cmd := git.NewCommand(ctx, "remote", "rm", "origin")
|
|
|
|
// if the origin does not exist
|
|
|
|
_, stderr, err := cmd.RunStdString(&git.RunOpts{
|
|
|
|
Dir: repoPath,
|
|
|
|
})
|
|
|
|
if err != nil && !strings.HasPrefix(stderr, "fatal: No such remote") {
|
|
|
|
return err
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// CleanUpMigrateInfo finishes migrating repository and/or wiki with things that don't need to be done for mirrors.
|
2022-01-20 02:26:57 +03:00
|
|
|
func CleanUpMigrateInfo(ctx context.Context, repo *repo_model.Repository) (*repo_model.Repository, error) {
|
2019-12-14 20:30:01 +03:00
|
|
|
repoPath := repo.RepoPath()
|
2023-09-06 15:08:51 +03:00
|
|
|
if err := CreateDelegateHooks(repoPath); err != nil {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("createDelegateHooks: %w", err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
if repo.HasWiki() {
|
2023-09-06 15:08:51 +03:00
|
|
|
if err := CreateDelegateHooks(repo.WikiPath()); err != nil {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("createDelegateHooks.(wiki): %w", err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-01 05:55:30 +03:00
|
|
|
_, _, err := git.NewCommand(ctx, "remote", "rm", "origin").RunStdString(&git.RunOpts{Dir: repoPath})
|
2019-12-14 20:30:01 +03:00
|
|
|
if err != nil && !strings.HasPrefix(err.Error(), "exit status 128 - fatal: No such remote ") {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("CleanUpMigrateInfo: %w", err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if repo.HasWiki() {
|
2023-06-05 13:05:31 +03:00
|
|
|
if err := cleanUpMigrateGitConfig(ctx, repo.WikiPath()); err != nil {
|
2022-10-24 22:29:17 +03:00
|
|
|
return repo, fmt.Errorf("cleanUpMigrateGitConfig (wiki): %w", err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-06-06 11:01:49 +03:00
|
|
|
return repo, UpdateRepository(ctx, repo, false)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
|
2024-01-24 06:02:04 +03:00
|
|
|
// SyncRepoTags synchronizes releases table with repository tags
|
|
|
|
func SyncRepoTags(ctx context.Context, repoID int64) error {
|
|
|
|
repo, err := repo_model.GetRepositoryByID(ctx, repoID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
Simplify how git repositories are opened (#28937)
## Purpose
This is a refactor toward building an abstraction over managing git
repositories.
Afterwards, it does not matter anymore if they are stored on the local
disk or somewhere remote.
## What this PR changes
We used `git.OpenRepository` everywhere previously.
Now, we should split them into two distinct functions:
Firstly, there are temporary repositories which do not change:
```go
git.OpenRepository(ctx, diskPath)
```
Gitea managed repositories having a record in the database in the
`repository` table are moved into the new package `gitrepo`:
```go
gitrepo.OpenRepository(ctx, repo_model.Repo)
```
Why is `repo_model.Repository` the second parameter instead of file
path?
Because then we can easily adapt our repository storage strategy.
The repositories can be stored locally, however, they could just as well
be stored on a remote server.
## Further changes in other PRs
- A Git Command wrapper on package `gitrepo` could be created. i.e.
`NewCommand(ctx, repo_model.Repository, commands...)`. `git.RunOpts{Dir:
repo.RepoPath()}`, the directory should be empty before invoking this
method and it can be filled in the function only. #28940
- Remove the `RepoPath()`/`WikiPath()` functions to reduce the
possibility of mistakes.
---------
Co-authored-by: delvh <dev.lh@web.de>
2024-01-27 23:09:51 +03:00
|
|
|
gitRepo, err := gitrepo.OpenRepository(ctx, repo)
|
2024-01-24 06:02:04 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer gitRepo.Close()
|
|
|
|
|
|
|
|
return SyncReleasesWithTags(ctx, repo, gitRepo)
|
|
|
|
}
|
|
|
|
|
2019-12-14 20:30:01 +03:00
|
|
|
// SyncReleasesWithTags synchronizes release table with repository tags
|
2023-09-25 16:17:37 +03:00
|
|
|
func SyncReleasesWithTags(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository) error {
|
2022-03-31 15:30:40 +03:00
|
|
|
log.Debug("SyncReleasesWithTags: in Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
|
|
|
|
|
|
|
|
// optimized procedure for pull-mirrors which saves a lot of time (in
|
|
|
|
// particular for repos with many tags).
|
|
|
|
if repo.IsMirror {
|
2023-09-25 16:17:37 +03:00
|
|
|
return pullMirrorReleaseSync(ctx, repo, gitRepo)
|
2022-03-31 15:30:40 +03:00
|
|
|
}
|
|
|
|
|
2022-10-12 08:18:26 +03:00
|
|
|
existingRelTags := make(container.Set[string])
|
2022-08-25 05:31:57 +03:00
|
|
|
opts := repo_model.FindReleasesOptions{
|
2021-09-24 14:32:56 +03:00
|
|
|
IncludeDrafts: true,
|
|
|
|
IncludeTags: true,
|
|
|
|
ListOptions: db.ListOptions{PageSize: 50},
|
2024-01-15 05:19:25 +03:00
|
|
|
RepoID: repo.ID,
|
2021-09-24 14:32:56 +03:00
|
|
|
}
|
2019-12-14 20:30:01 +03:00
|
|
|
for page := 1; ; page++ {
|
2020-01-24 22:00:29 +03:00
|
|
|
opts.Page = page
|
2024-01-15 05:19:25 +03:00
|
|
|
rels, err := db.Find[repo_model.Release](gitRepo.Ctx, opts)
|
2019-12-14 20:30:01 +03:00
|
|
|
if err != nil {
|
2022-03-10 13:09:48 +03:00
|
|
|
return fmt.Errorf("unable to GetReleasesByRepoID in Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
if len(rels) == 0 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
for _, rel := range rels {
|
|
|
|
if rel.IsDraft {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
commitID, err := gitRepo.GetTagCommitID(rel.TagName)
|
|
|
|
if err != nil && !git.IsErrNotExist(err) {
|
2022-03-10 13:09:48 +03:00
|
|
|
return fmt.Errorf("unable to GetTagCommitID for %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
if git.IsErrNotExist(err) || commitID != rel.Sha1 {
|
2023-09-25 16:17:37 +03:00
|
|
|
if err := repo_model.PushUpdateDeleteTag(ctx, repo, rel.TagName); err != nil {
|
2022-03-10 13:09:48 +03:00
|
|
|
return fmt.Errorf("unable to PushUpdateDeleteTag: %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
} else {
|
2022-10-12 08:18:26 +03:00
|
|
|
existingRelTags.Add(strings.ToLower(rel.TagName))
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2022-03-29 20:12:33 +03:00
|
|
|
|
|
|
|
_, err := gitRepo.WalkReferences(git.ObjectTag, 0, 0, func(sha1, refname string) error {
|
|
|
|
tagName := strings.TrimPrefix(refname, git.TagPrefix)
|
2022-10-12 08:18:26 +03:00
|
|
|
if existingRelTags.Contains(strings.ToLower(tagName)) {
|
2022-03-29 20:12:33 +03:00
|
|
|
return nil
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
2022-03-29 20:12:33 +03:00
|
|
|
|
2023-09-25 16:17:37 +03:00
|
|
|
if err := PushUpdateAddTag(ctx, repo, gitRepo, tagName, sha1, refname); err != nil {
|
2022-03-29 20:12:33 +03:00
|
|
|
return fmt.Errorf("unable to PushUpdateAddTag: %q to Repo[%d:%s/%s]: %w", tagName, repo.ID, repo.OwnerName, repo.Name, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
return err
|
2019-12-14 20:30:01 +03:00
|
|
|
}
|
2020-01-10 12:34:21 +03:00
|
|
|
|
|
|
|
// PushUpdateAddTag must be called for any push actions to add tag
//
// It resolves the tag object for (sha1, tagName), derives the tag's author
// and timestamp, and saves/updates a tag-style Release row for the repo.
func PushUpdateAddTag(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, tagName, sha1, refname string) error {
	tag, err := gitRepo.GetTagWithID(sha1, tagName)
	if err != nil {
		return fmt.Errorf("unable to GetTag: %w", err)
	}
	commit, err := tag.Commit(gitRepo)
	if err != nil {
		return fmt.Errorf("unable to get tag Commit: %w", err)
	}

	// Signature fallback chain: tagger -> commit author -> commit committer.
	sig := tag.Tagger
	if sig == nil {
		sig = commit.Author
	}
	if sig == nil {
		sig = commit.Committer
	}

	var author *user_model.User
	// Sentinel timestamp (1970-01-01 00:00:01 UTC) used when no signature is available.
	createdAt := time.Unix(1, 0)

	if sig != nil {
		// Map the signature email to a local user if one exists; an unknown
		// email is not an error (author simply stays nil).
		author, err = user_model.GetUserByEmail(ctx, sig.Email)
		if err != nil && !user_model.IsErrUserNotExist(err) {
			return fmt.Errorf("unable to GetUserByEmail for %q: %w", sig.Email, err)
		}
		createdAt = sig.When
	}

	commitsCount, err := commit.CommitsCount()
	if err != nil {
		return fmt.Errorf("unable to get CommitsCount: %w", err)
	}

	// Build the tag-style release (IsTag: true — not a full release).
	rel := repo_model.Release{
		RepoID:       repo.ID,
		TagName:      tagName,
		LowerTagName: strings.ToLower(tagName),
		Sha1:         commit.ID.String(),
		NumCommits:   commitsCount,
		CreatedUnix:  timeutil.TimeStamp(createdAt.Unix()),
		IsTag:        true,
	}
	if author != nil {
		rel.PublisherID = author.ID
	}

	return repo_model.SaveOrUpdateTag(ctx, repo, &rel)
}
|
2021-04-09 01:25:57 +03:00
|
|
|
|
|
|
|
// StoreMissingLfsObjectsInRepository downloads missing LFS objects
//
// LFS pointer blobs are enumerated by a producer goroutine (SearchPointerBlobs)
// and streamed through pointerChan; pointers without a meta object are
// collected into batches and downloaded via lfsClient. The producer reports a
// terminal failure through errChan after closing pointerChan.
func StoreMissingLfsObjectsInRepository(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, lfsClient lfs.Client) error {
	contentStore := lfs.NewContentStore()

	pointerChan := make(chan lfs.PointerBlob)
	errChan := make(chan error, 1)
	go lfs.SearchPointerBlobs(ctx, gitRepo, pointerChan, errChan)

	// downloadObjects fetches one batch and records each object: meta row first,
	// then content; the meta row is rolled back if storing content fails.
	downloadObjects := func(pointers []lfs.Pointer) error {
		err := lfsClient.Download(ctx, pointers, func(p lfs.Pointer, content io.ReadCloser, objectError error) error {
			if objectError != nil {
				return objectError
			}

			defer content.Close()

			_, err := git_model.NewLFSMetaObject(ctx, repo.ID, p)
			if err != nil {
				log.Error("Repo[%-v]: Error creating LFS meta object %-v: %v", repo, p, err)
				return err
			}

			if err := contentStore.Put(p, content); err != nil {
				log.Error("Repo[%-v]: Error storing content for LFS meta object %-v: %v", repo, p, err)
				// Undo the meta row so the database doesn't claim content we failed to store.
				if _, err2 := git_model.RemoveLFSMetaObjectByOid(ctx, repo.ID, p.Oid); err2 != nil {
					log.Error("Repo[%-v]: Error removing LFS meta object %-v: %v", repo, p, err2)
				}
				return err
			}
			return nil
		})
		if err != nil {
			// A cancelled context is treated as a clean stop, not a failure.
			select {
			case <-ctx.Done():
				return nil
			default:
			}
		}
		return err
	}

	var batch []lfs.Pointer
	for pointerBlob := range pointerChan {
		meta, err := git_model.GetLFSMetaObjectByOid(ctx, repo.ID, pointerBlob.Oid)
		if err != nil && err != git_model.ErrLFSObjectNotExist {
			log.Error("Repo[%-v]: Error querying LFS meta object %-v: %v", repo, pointerBlob.Pointer, err)
			return err
		}
		if meta != nil {
			// NOTE(review): this branch skips objects that are already KNOWN
			// (meta row exists); the "unknown" wording in the trace message
			// looks inverted — confirm before relying on it.
			log.Trace("Repo[%-v]: Skipping unknown LFS meta object %-v", repo, pointerBlob.Pointer)
			continue
		}

		log.Trace("Repo[%-v]: LFS object %-v not present in repository", repo, pointerBlob.Pointer)

		exist, err := contentStore.Exists(pointerBlob.Pointer)
		if err != nil {
			log.Error("Repo[%-v]: Error checking if LFS object %-v exists: %v", repo, pointerBlob.Pointer, err)
			return err
		}

		if exist {
			// Content already stored (e.g. from another repo): only the meta row is missing.
			log.Trace("Repo[%-v]: LFS object %-v already present; creating meta object", repo, pointerBlob.Pointer)
			_, err := git_model.NewLFSMetaObject(ctx, repo.ID, pointerBlob.Pointer)
			if err != nil {
				log.Error("Repo[%-v]: Error creating LFS meta object %-v: %v", repo, pointerBlob.Pointer, err)
				return err
			}
		} else {
			// Respect the configured per-file size limit (0 = unlimited).
			if setting.LFS.MaxFileSize > 0 && pointerBlob.Size > setting.LFS.MaxFileSize {
				log.Info("Repo[%-v]: LFS object %-v download denied because of LFS_MAX_FILE_SIZE=%d < size %d", repo, pointerBlob.Pointer, setting.LFS.MaxFileSize, pointerBlob.Size)
				continue
			}

			batch = append(batch, pointerBlob.Pointer)
			if len(batch) >= lfsClient.BatchSize() {
				if err := downloadObjects(batch); err != nil {
					return err
				}
				batch = nil
			}
		}
	}
	// Flush the final partial batch.
	if len(batch) > 0 {
		if err := downloadObjects(batch); err != nil {
			return err
		}
	}

	// pointerChan is closed; check whether the producer ended with an error.
	err, has := <-errChan
	if has {
		log.Error("Repo[%-v]: Error enumerating LFS objects for repository: %v", repo, err)
		return err
	}

	return nil
}
|
2022-03-31 15:30:40 +03:00
|
|
|
|
2024-01-26 09:18:19 +03:00
|
|
|
// shortRelease is a trimmed-down projection of repo_model.Release backed by
// the same "release" table. Loading only these four columns instead of full
// Release rows keeps memory usage low when a mirror carries a huge tag set.
type shortRelease struct {
	ID      int64
	TagName string
	Sha1    string
	IsTag   bool
}

// TableName maps this struct onto the "release" table for xorm, so queries
// through shortRelease read the very same rows as repo_model.Release.
func (sr shortRelease) TableName() string {
	return "release"
}
|
|
|
|
|
2022-03-31 15:30:40 +03:00
|
|
|
// pullMirrorReleaseSync is a pull-mirror specific tag<->release table
// synchronization which overwrites all Releases from the repository tags. This
// can be relied on since a pull-mirror is always identical to its
// upstream. Hence, after each sync we want the pull-mirror release set to be
// identical to the upstream tag set. This is much more efficient for
// repositories like https://github.com/vim/vim (with over 13000 tags).
//
// Returns a non-nil error if reading the tags or rebuilding the release
// rows fails; the whole rebuild runs inside one DB transaction.
func pullMirrorReleaseSync(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository) error {
	log.Trace("pullMirrorReleaseSync: rebuilding releases for pull-mirror Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
	// (0, 0) presumably disables paging so every upstream tag is fetched in
	// one call — TODO confirm against gitRepo.GetTagInfos.
	tags, numTags, err := gitRepo.GetTagInfos(0, 0)
	if err != nil {
		return fmt.Errorf("unable to GetTagInfos in pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
	}
	// Apply all inserts/deletes/updates atomically so readers never observe a
	// partially-synced release set.
	err = db.WithTx(ctx, func(ctx context.Context) error {
		// Load the current rows as lightweight shortRelease projections
		// (drafts and tag-releases included) to diff against upstream.
		dbReleases, err := db.Find[shortRelease](ctx, repo_model.FindReleasesOptions{
			RepoID:        repo.ID,
			IncludeDrafts: true,
			IncludeTags:   true,
		})
		if err != nil {
			return fmt.Errorf("unable to FindReleases in pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
		}

		// Diff upstream tags vs stored rows: tags missing from the DB,
		// release IDs gone upstream, and tags whose commit moved.
		inserts, deletes, updates := calcSync(tags, dbReleases)
		//
		// make release set identical to upstream tags
		//
		for _, tag := range inserts {
			release := repo_model.Release{
				RepoID:       repo.ID,
				TagName:      tag.Name,
				LowerTagName: strings.ToLower(tag.Name),
				Sha1:         tag.Object.String(),
				// NOTE: ignored, since NumCommits are unused
				// for pull-mirrors (only relevant when
				// displaying releases, IsTag: false)
				NumCommits:  -1,
				CreatedUnix: timeutil.TimeStamp(tag.Tagger.When.Unix()),
				IsTag:       true,
			}
			if err := db.Insert(ctx, release); err != nil {
				return fmt.Errorf("unable insert tag %s for pull-mirror Repo[%d:%s/%s]: %w", tag.Name, repo.ID, repo.OwnerName, repo.Name, err)
			}
		}

		// Only tag-type rows are deleted here: calcSync collects IDs solely
		// from entries with IsTag set, so real releases are never removed.
		if len(deletes) > 0 {
			if _, err := db.GetEngine(ctx).Where("repo_id=?", repo.ID).
				In("id", deletes).
				Delete(&repo_model.Release{}); err != nil {
				return fmt.Errorf("unable to delete tags for pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
			}
		}

		// A tag that moved upstream only needs its sha1 column refreshed;
		// Cols("sha1") restricts the UPDATE to that single column.
		for _, tag := range updates {
			if _, err := db.GetEngine(ctx).Where("repo_id = ? AND lower_tag_name = ?", repo.ID, strings.ToLower(tag.Name)).
				Cols("sha1").
				Update(&repo_model.Release{
					Sha1: tag.Object.String(),
				}); err != nil {
				return fmt.Errorf("unable to update tag %s for pull-mirror Repo[%d:%s/%s]: %w", tag.Name, repo.ID, repo.OwnerName, repo.Name, err)
			}
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to rebuild release table for pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
	}

	log.Trace("pullMirrorReleaseSync: done rebuilding %d releases", numTags)
	return nil
}
|
2024-01-26 09:18:19 +03:00
|
|
|
|
|
|
|
func calcSync(destTags []*git.Tag, dbTags []*shortRelease) ([]*git.Tag, []int64, []*git.Tag) {
|
|
|
|
destTagMap := make(map[string]*git.Tag)
|
|
|
|
for _, tag := range destTags {
|
|
|
|
destTagMap[tag.Name] = tag
|
|
|
|
}
|
|
|
|
dbTagMap := make(map[string]*shortRelease)
|
|
|
|
for _, rel := range dbTags {
|
|
|
|
dbTagMap[rel.TagName] = rel
|
|
|
|
}
|
|
|
|
|
|
|
|
inserted := make([]*git.Tag, 0, 10)
|
|
|
|
updated := make([]*git.Tag, 0, 10)
|
|
|
|
for _, tag := range destTags {
|
|
|
|
rel := dbTagMap[tag.Name]
|
|
|
|
if rel == nil {
|
|
|
|
inserted = append(inserted, tag)
|
|
|
|
} else if rel.Sha1 != tag.Object.String() {
|
|
|
|
updated = append(updated, tag)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
deleted := make([]int64, 0, 10)
|
|
|
|
for _, tag := range dbTags {
|
|
|
|
if destTagMap[tag.TagName] == nil && tag.IsTag {
|
|
|
|
deleted = append(deleted, tag.ID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return inserted, deleted, updated
|
|
|
|
}
|