Mirror of https://codeberg.org/forgejo/forgejo.git, synced 2024-12-27 22:23:50 +03:00
Code Formats, Nits & Unused Func/Var deletions (#15286)
* use `_` for unused func options
* remove useless brackets
* remove trivial unused models functions
* remove dead code
* remove dead global vars
* fix routers/api/v1/repo/issue.go
* don't overload import module
This commit is contained in:
parent 0991f9aa42
commit 9c4601bdf8

33 changed files with 41 additions and 98 deletions
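Most of the hunks below apply one of the small Go cleanups named in the commit message: the blank identifier for parameters a function never reads, and dropping parentheses that add nothing to an expression. A minimal, self-contained sketch of both patterns (hypothetical names, not code from this commit):

    package main

    import "fmt"

    // run never reads its argument, so the blank identifier documents that
    // intentionally instead of leaving an unused parameter name around.
    func run(_ string) error {
        return nil
    }

    func main() {
        count := 3
        // ok := (count > 0) says nothing more than the unparenthesised form.
        ok := count > 0
        fmt.Println(ok, run("ignored") == nil)
    }
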
@@ -512,7 +512,7 @@ func runDeleteUser(c *cli.Context) error {
 	return models.DeleteUser(user)
 }
 
-func runRepoSyncReleases(c *cli.Context) error {
+func runRepoSyncReleases(_ *cli.Context) error {
 	if err := initDB(); err != nil {
 		return err
 	}

@@ -578,14 +578,14 @@ func getReleaseCount(id int64) (int64, error) {
 	)
 }
 
-func runRegenerateHooks(c *cli.Context) error {
+func runRegenerateHooks(_ *cli.Context) error {
 	if err := initDB(); err != nil {
 		return err
 	}
 	return repo_module.SyncRepositoryHooks(graceful.GetManager().ShutdownContext())
 }
 
-func runRegenerateKeys(c *cli.Context) error {
+func runRegenerateKeys(_ *cli.Context) error {
 	if err := initDB(); err != nil {
 		return err
 	}

@@ -166,7 +166,7 @@ Gitea or set your environment appropriately.`, "")
 	}
 
 	// the environment setted on serv command
-	isWiki := (os.Getenv(models.EnvRepoIsWiki) == "true")
+	isWiki := os.Getenv(models.EnvRepoIsWiki) == "true"
 	username := os.Getenv(models.EnvRepoUsername)
 	reponame := os.Getenv(models.EnvRepoName)
 	userID, _ := strconv.ParseInt(os.Getenv(models.EnvPusherID), 10, 64)

@@ -322,7 +322,7 @@ Gitea or set your environment appropriately.`, "")
 
 	// the environment setted on serv command
 	repoUser := os.Getenv(models.EnvRepoUsername)
-	isWiki := (os.Getenv(models.EnvRepoIsWiki) == "true")
+	isWiki := os.Getenv(models.EnvRepoIsWiki) == "true"
 	repoName := os.Getenv(models.EnvRepoName)
 	pusherID, _ := strconv.ParseInt(os.Getenv(models.EnvPusherID), 10, 64)
 	pusherName := os.Getenv(models.EnvPusherName)

@@ -260,12 +260,6 @@ func (protectBranch *ProtectedBranch) IsProtectedFile(patterns []glob.Glob, path
 	return r
 }
 
-// GetProtectedBranchByRepoID getting protected branch by repo ID
-func GetProtectedBranchByRepoID(repoID int64) ([]*ProtectedBranch, error) {
-	protectedBranches := make([]*ProtectedBranch, 0)
-	return protectedBranches, x.Where("repo_id = ?", repoID).Desc("updated_unix").Find(&protectedBranches)
-}
-
 // GetProtectedBranchBy getting protected branch by ID/Name
 func GetProtectedBranchBy(repoID int64, branchName string) (*ProtectedBranch, error) {
 	return getProtectedBranchBy(x, repoID, branchName)

@@ -283,19 +277,6 @@ func getProtectedBranchBy(e Engine, repoID int64, branchName string) (*Protected
 	return rel, nil
 }
 
-// GetProtectedBranchByID getting protected branch by ID
-func GetProtectedBranchByID(id int64) (*ProtectedBranch, error) {
-	rel := &ProtectedBranch{}
-	has, err := x.ID(id).Get(rel)
-	if err != nil {
-		return nil, err
-	}
-	if !has {
-		return nil, nil
-	}
-	return rel, nil
-}
-
 // WhitelistOptions represent all sorts of whitelists used for protected branches
 type WhitelistOptions struct {
 	UserIDs []int64

@@ -510,19 +510,6 @@ func GetLabelIDsInOrgByNames(orgID int64, labelNames []string) ([]int64, error)
 		Find(&labelIDs)
 }
 
-// GetLabelIDsInOrgsByNames returns a list of labelIDs by names in one of the given
-// organization.
-// it silently ignores label names that do not belong to the organization.
-func GetLabelIDsInOrgsByNames(orgIDs []int64, labelNames []string) ([]int64, error) {
-	labelIDs := make([]int64, 0, len(labelNames))
-	return labelIDs, x.Table("label").
-		In("org_id", orgIDs).
-		In("name", labelNames).
-		Asc("name").
-		Cols("id").
-		Find(&labelIDs)
-}
-
 // GetLabelInOrgByID returns a label by ID in given organization.
 func GetLabelInOrgByID(orgID, labelID int64) (*Label, error) {
 	return getLabelInOrgByID(x, orgID, labelID)

@@ -131,11 +131,11 @@ func (repo *Repository) CountLFSMetaObjects() (int64, error) {
 func LFSObjectAccessible(user *User, oid string) (bool, error) {
 	if user.IsAdmin {
 		count, err := x.Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
-		return (count > 0), err
+		return count > 0, err
 	}
 	cond := accessibleRepositoryCondition(user)
 	count, err := x.Where(cond).Join("INNER", "repository", "`lfs_meta_object`.repository_id = `repository`.id").Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
-	return (count > 0), err
+	return count > 0, err
 }
 
 // LFSAutoAssociate auto associates accessible LFSMetaObjects

@@ -75,7 +75,7 @@ func TestGetRepositoryCount(t *testing.T) {
 	assert.NoError(t, err2)
 	assert.NoError(t, err3)
 	assert.Equal(t, int64(3), count)
-	assert.Equal(t, (privateCount + publicCount), count)
+	assert.Equal(t, privateCount+publicCount, count)
 }
 
 func TestGetPublicRepositoryCount(t *testing.T) {

@@ -76,9 +76,6 @@ const (
 )
 
 var (
-	// ErrUserNotKeyOwner user does not own this key error
-	ErrUserNotKeyOwner = errors.New("User does not own this public key")
-
 	// ErrEmailNotExist e-mail does not exist error
 	ErrEmailNotExist = errors.New("E-mail does not exist")
 

@@ -65,7 +65,7 @@ func getUserHeatmapData(user *User, team *Team, doer *User) ([]*UserHeatmapData,
 		Select(groupBy+" AS timestamp, count(user_id) as contributions").
 		Table("action").
 		Where(cond).
-		And("created_unix > ?", (timeutil.TimeStampNow() - 31536000)).
+		And("created_unix > ?", timeutil.TimeStampNow()-31536000).
 		GroupBy(groupByName).
 		OrderBy("timestamp").
 		Find(&hdata)

@@ -35,6 +35,7 @@ func GetUserOpenIDs(uid int64) ([]*UserOpenID, error) {
 	return openids, nil
 }
 
+// isOpenIDUsed returns true if the openid has been used.
 func isOpenIDUsed(e Engine, uri string) (bool, error) {
 	if len(uri) == 0 {
 		return true, nil

@@ -43,11 +44,6 @@ func isOpenIDUsed(e Engine, uri string) (bool, error) {
 	return e.Get(&UserOpenID{URI: uri})
 }
 
-// IsOpenIDUsed returns true if the openid has been used.
-func IsOpenIDUsed(openid string) (bool, error) {
-	return isOpenIDUsed(x, openid)
-}
-
 // NOTE: make sure openid.URI is normalized already
 func addUserOpenID(e Engine, openid *UserOpenID) error {
 	used, err := isOpenIDUsed(e, openid.URI)

@@ -122,9 +122,7 @@ func (s *linkifyParser) Parse(parent ast.Node, block text.Reader, pc parser.Cont
 			}
 		}
 	}
-	if m == nil {
-		return nil
-	}
+
 	if consumes != 0 {
 		s := segment.WithStop(segment.Start + 1)
 		ast.MergeOrAppendTextSegment(parent, s)

@@ -8,7 +8,6 @@ package process
 import (
 	"bytes"
 	"context"
-	"errors"
 	"fmt"
 	"io"
 	"os/exec"

@@ -22,10 +21,8 @@ import (
 // then we delete the singleton.
 
 var (
-	// ErrExecTimeout represent a timeout error
-	ErrExecTimeout = errors.New("Process execution timeout")
-	manager        *Manager
-	managerInit    sync.Once
+	manager     *Manager
+	managerInit sync.Once
 
 	// DefaultContext is the default context to run processing commands in
 	DefaultContext = context.Background()

@@ -23,7 +23,7 @@ type UniqueByteFIFO interface {
 	Has(data []byte) (bool, error)
 }
 
-var _ (ByteFIFO) = &DummyByteFIFO{}
+var _ ByteFIFO = &DummyByteFIFO{}
 
 // DummyByteFIFO represents a dummy fifo
 type DummyByteFIFO struct{}

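The `var _ (ByteFIFO) = &DummyByteFIFO{}` line above, like the similar lines in the queue hunks that follow, is a compile-time interface assertion; the parentheses around the interface type are redundant, which is the whole change. A small sketch of the idiom with hypothetical types, not code from this commit:

    package main

    // Speaker is a hypothetical interface used to illustrate the idiom.
    type Speaker interface {
        Speak() string
    }

    // Dog is a hypothetical implementation.
    type Dog struct{}

    // Speak implements Speaker.
    func (Dog) Speak() string { return "woof" }

    // Compile-time assertion: the build breaks if Dog ever stops satisfying
    // Speaker; the blank identifier discards the value at no runtime cost.
    var _ Speaker = Dog{}

    func main() {
        println(Dog{}.Speak())
    }
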
@@ -48,7 +48,7 @@ func (*DummyByteFIFO) Len() int64 {
 	return 0
 }
 
-var _ (UniqueByteFIFO) = &DummyUniqueByteFIFO{}
+var _ UniqueByteFIFO = &DummyUniqueByteFIFO{}
 
 // DummyUniqueByteFIFO represents a dummy unique fifo
 type DummyUniqueByteFIFO struct {

@@ -50,7 +50,7 @@ func toConfig(exemplar, cfg interface{}) (interface{}, error) {
 		var err error
 
 		configBytes, err = json.Marshal(cfg)
-		ok = (err == nil)
+		ok = err == nil
 	}
 	if !ok {
 		// no ... we've tried hard enough at this point - throw an error!

@@ -21,7 +21,7 @@ type ByteFIFOQueueConfiguration struct {
 	Name string
 }
 
-var _ (Queue) = &ByteFIFOQueue{}
+var _ Queue = &ByteFIFOQueue{}
 
 // ByteFIFOQueue is a Queue formed from a ByteFIFO and WorkerPool
 type ByteFIFOQueue struct {

@@ -196,7 +196,7 @@ func (q *ByteFIFOQueue) IsTerminated() <-chan struct{} {
 	return q.terminated
 }
 
-var _ (UniqueQueue) = &ByteFIFOUniqueQueue{}
+var _ UniqueQueue = &ByteFIFOUniqueQueue{}
 
 // ByteFIFOUniqueQueue represents a UniqueQueue formed from a UniqueByteFifo
 type ByteFIFOUniqueQueue struct {

@@ -55,7 +55,7 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
 	return queue, nil
 }
 
-var _ (ByteFIFO) = &LevelQueueByteFIFO{}
+var _ ByteFIFO = &LevelQueueByteFIFO{}
 
 // LevelQueueByteFIFO represents a ByteFIFO formed from a LevelQueue
 type LevelQueueByteFIFO struct {

@@ -69,7 +69,7 @@ type redisClient interface {
 	Close() error
 }
 
-var _ (ByteFIFO) = &RedisByteFIFO{}
+var _ ByteFIFO = &RedisByteFIFO{}
 
 // RedisByteFIFO represents a ByteFIFO formed from a redisClient
 type RedisByteFIFO struct {

@@ -59,7 +59,7 @@ func NewLevelUniqueQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue,
 	return queue, nil
 }
 
-var _ (UniqueByteFIFO) = &LevelUniqueQueueByteFIFO{}
+var _ UniqueByteFIFO = &LevelUniqueQueueByteFIFO{}
 
 // LevelUniqueQueueByteFIFO represents a ByteFIFO formed from a LevelUniqueQueue
 type LevelUniqueQueueByteFIFO struct {

@@ -62,7 +62,7 @@ func NewRedisUniqueQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue,
 	return queue, nil
 }
 
-var _ (UniqueByteFIFO) = &RedisUniqueByteFIFO{}
+var _ UniqueByteFIFO = &RedisUniqueByteFIFO{}
 
 // RedisUniqueByteFIFO represents a UniqueByteFIFO formed from a redisClient
 type RedisUniqueByteFIFO struct {

@@ -296,7 +296,7 @@ func convertFullHTMLReferencesToShortRefs(re *regexp.Regexp, contentBytes *[]byt
 
 		// our new section has length endPos - match[3]
 		// our old section has length match[9] - match[3]
-		(*contentBytes) = (*contentBytes)[:len((*contentBytes))-match[9]+endPos]
+		*contentBytes = (*contentBytes)[:len(*contentBytes)-match[9]+endPos]
 		pos = endPos
 	}
 }

@@ -201,7 +201,7 @@ func UpdateIssuesCommit(doer *models.User, repo *models.Repository, commits []*r
 				continue
 			}
 		}
-		close := (ref.Action == references.XRefActionCloses)
+		close := ref.Action == references.XRefActionCloses
 		if close && len(ref.TimeLog) > 0 {
 			if err := issueAddTime(refIssue, doer, c.Timestamp, ref.TimeLog); err != nil {
 				return err

@@ -318,7 +318,6 @@ var (
 	LogRootPath       string
 	DisableRouterLog  bool
 	RouterLogLevel    log.Level
-	RouterLogMode     string
 	EnableAccessLog   bool
 	AccessLogTemplate string
 	EnableXORMLog     bool

@@ -408,10 +407,6 @@ var (
 	IsWindows     bool
 	HasRobotsTxt  bool
 	InternalToken string // internal access token
-
-	// UILocation is the location on the UI, so that we can display the time on UI.
-	// Currently only show the default time.Local, it could be added to app.ini after UI is ready
-	UILocation = time.Local
 )
 
 // IsProd if it's a production mode

@@ -50,7 +50,7 @@ func toConfig(exemplar, cfg interface{}) (interface{}, error) {
 		var err error
 
 		configBytes, err = json.Marshal(cfg)
-		ok = (err == nil)
+		ok = err == nil
 	}
 	if !ok {
 		// no ... we've tried hard enough at this point - throw an error!

@@ -19,8 +19,6 @@ import (
 var (
 	// ErrURLNotSupported represents url is not supported
 	ErrURLNotSupported = errors.New("url method not supported")
-	// ErrIterateObjectsNotSupported represents IterateObjects not supported
-	ErrIterateObjectsNotSupported = errors.New("iterateObjects method not supported")
 )
 
 // ErrInvalidConfiguration is called when there is invalid configuration for a storage

@@ -660,7 +660,7 @@ func SearchTeam(ctx *context.APIContext) {
 		UserID:      ctx.User.ID,
 		Keyword:     strings.TrimSpace(ctx.Query("q")),
 		OrgID:       ctx.Org.Organization.ID,
-		IncludeDesc: (ctx.Query("include_desc") == "" || ctx.QueryBool("include_desc")),
+		IncludeDesc: ctx.Query("include_desc") == "" || ctx.QueryBool("include_desc"),
 		ListOptions: listOptions,
 	}
 

@@ -141,7 +141,6 @@ func SearchIssues(ctx *context.APIContext) {
 		keyword = ""
 	}
 	var issueIDs []int64
-	var labelIDs []int64
 	if len(keyword) > 0 && len(repoIDs) > 0 {
 		if issueIDs, err = issue_indexer.SearchIssuesByKeyword(repoIDs, keyword); err != nil {
 			ctx.Error(http.StatusInternalServerError, "SearchIssuesByKeyword", err)

@@ -176,7 +175,7 @@ func SearchIssues(ctx *context.APIContext) {
 
 	// Only fetch the issues if we either don't have a keyword or the search returned issues
 	// This would otherwise return all issues if no issues were found by the search.
-	if len(keyword) == 0 || len(issueIDs) > 0 || len(labelIDs) > 0 {
+	if len(keyword) == 0 || len(issueIDs) > 0 || len(includedLabelNames) > 0 {
 		issuesOpt := &models.IssuesOptions{
 			ListOptions: models.ListOptions{
 				Page: ctx.QueryInt("page"),

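These two hunks are the `fix routers/api/v1/repo/issue.go` item from the commit message: `labelIDs` was declared but never filled, so the `len(labelIDs) > 0` part of the guard could never be true; the condition now checks the label names that are actually collected. A minimal sketch of the corrected guard as a standalone helper (hypothetical name, not code from this commit):

    package issues

    // shouldListIssues mirrors the corrected condition: run the broad listing
    // only when no keyword was given, or when the keyword search narrowed
    // things down to concrete issue IDs or label names. Otherwise an empty
    // search result would silently fall back to listing every issue.
    func shouldListIssues(keyword string, issueIDs []int64, includedLabelNames []string) bool {
        return len(keyword) == 0 || len(issueIDs) > 0 || len(includedLabelNames) > 0
    }
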
@@ -675,7 +674,7 @@ func EditIssue(ctx *context.APIContext) {
 		}
 	}
 	if form.State != nil {
-		issue.IsClosed = (api.StateClosed == api.StateType(*form.State))
+		issue.IsClosed = api.StateClosed == api.StateType(*form.State)
 	}
 	statusChangeComment, titleChanged, err := models.UpdateIssueByAPI(issue, ctx.User)
 	if err != nil {

@@ -580,7 +580,7 @@ func EditPullRequest(ctx *context.APIContext) {
 	}
 
 	if form.State != nil {
-		issue.IsClosed = (api.StateClosed == api.StateType(*form.State))
+		issue.IsClosed = api.StateClosed == api.StateType(*form.State)
 	}
 	statusChangeComment, titleChanged, err := models.UpdateIssueByAPI(issue, ctx.User)
 	if err != nil {

@@ -32,10 +32,10 @@ func Events(ctx *context.Context) {
 
 	if !ctx.IsSigned {
 		// Return unauthorized status event
-		event := (&eventsource.Event{
+		event := &eventsource.Event{
 			Name: "close",
 			Data: "unauthorized",
-		})
+		}
 		_, _ = event.WriteTo(ctx)
 		ctx.Resp.Flush()
 		return

@@ -137,10 +137,10 @@ loop:
 				break loop
 			}
 			// Replace the event - we don't want to expose the session ID to the user
-			event = (&eventsource.Event{
+			event = &eventsource.Event{
 				Name: "logout",
 				Data: "elsewhere",
-			})
+			}
 		}
 
 		_, err := event.WriteTo(ctx.Resp)

@@ -193,7 +193,7 @@ func NewTeamPost(ctx *context.Context) {
 	ctx.Data["PageIsOrgTeams"] = true
 	ctx.Data["PageIsOrgTeamsNew"] = true
 	ctx.Data["Units"] = models.Units
-	var includesAllRepositories = (form.RepoAccess == "all")
+	var includesAllRepositories = form.RepoAccess == "all"
 
 	t := &models.Team{
 		OrgID: ctx.Org.Organization.ID,

@@ -286,7 +286,7 @@ func EditTeamPost(ctx *context.Context) {
 
 	isAuthChanged := false
 	isIncludeAllChanged := false
-	var includesAllRepositories = (form.RepoAccess == "all")
+	var includesAllRepositories = form.RepoAccess == "all"
 	if !t.IsOwnerTeam() {
 		// Validate permission level.
 		auth := models.ParseAccessMode(form.Permission)

@@ -91,7 +91,7 @@ func httpBase(ctx *context.Context) (h *serviceHandler) {
 		strings.HasSuffix(ctx.Req.URL.Path, "git-upload-archive") {
 		isPull = true
 	} else {
-		isPull = (ctx.Req.Method == "GET")
+		isPull = ctx.Req.Method == "GET"
 	}
 
 	var accessMode models.AccessMode

@@ -52,8 +52,6 @@ const (
 )
 
 var (
-	// ErrTooManyFiles upload too many files
-	ErrTooManyFiles = errors.New("Maximum number of files to upload exceeded")
 	// IssueTemplateCandidates issue templates
 	IssueTemplateCandidates = []string{
 		"ISSUE_TEMPLATE.md",

@@ -130,16 +130,16 @@ func SettingsProtectedBranch(c *context.Context) {
 	c.Data["merge_whitelist_users"] = strings.Join(base.Int64sToStrings(protectBranch.MergeWhitelistUserIDs), ",")
 	c.Data["approvals_whitelist_users"] = strings.Join(base.Int64sToStrings(protectBranch.ApprovalsWhitelistUserIDs), ",")
 	contexts, _ := models.FindRepoRecentCommitStatusContexts(c.Repo.Repository.ID, 7*24*time.Hour) // Find last week status check contexts
-	for _, context := range protectBranch.StatusCheckContexts {
+	for _, ctx := range protectBranch.StatusCheckContexts {
 		var found bool
-		for _, ctx := range contexts {
-			if ctx == context {
+		for i := range contexts {
+			if contexts[i] == ctx {
 				found = true
 				break
 			}
 		}
 		if !found {
-			contexts = append(contexts, context)
+			contexts = append(contexts, ctx)
 		}
 	}
 

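The hunk above renames the outer loop variable from `context` to `ctx` (so it no longer reads like the `context` package or blurs into the `contexts` slice) and indexes the inner slice directly; the behaviour, appending each configured status-check context that is not already present, is unchanged. A standalone sketch of that pattern (hypothetical function, not repository code):

    package branches

    // appendMissing returns list extended with any wanted entries that are not
    // already present, preserving order; a hypothetical helper mirroring the
    // loop in the hunk above.
    func appendMissing(list, wanted []string) []string {
        for _, w := range wanted {
            var found bool
            for i := range list {
                if list[i] == w {
                    found = true
                    break
                }
            }
            if !found {
                list = append(list, w)
            }
        }
        return list
    }
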
@@ -770,9 +770,6 @@ func getActiveTeamOrOrgRepoIds(ctxUser *models.User, team *models.Team, unitType
 
 	if team != nil {
 		env = ctxUser.AccessibleTeamReposEnv(team)
-		if err != nil {
-			return nil, fmt.Errorf("AccessibleTeamReposEnv: %v", err)
-		}
 	} else {
 		env, err = ctxUser.AccessibleReposEnv(ctxUser.ID)
 		if err != nil {

@@ -98,7 +98,7 @@ func Merge(pr *models.PullRequest, doer *models.User, baseGitRepo *git.Repositor
 		if err = ref.Issue.LoadRepo(); err != nil {
 			return err
 		}
-		close := (ref.RefAction == references.XRefActionCloses)
+		close := ref.RefAction == references.XRefActionCloses
 		if close != ref.Issue.IsClosed {
 			if err = issue_service.ChangeStatus(ref.Issue, doer, close); err != nil {
 				return err