diff --git a/integrations/testlogger.go b/integrations/testlogger.go
index 246f6fe7d0..70efa95cce 100644
--- a/integrations/testlogger.go
+++ b/integrations/testlogger.go
@@ -181,6 +181,11 @@ func (log *TestLogger) Init(config string) error {
 	return nil
 }
 
+// Content is not supported by the TestLogger
+func (log *TestLogger) Content() (string, error) {
+	return "", fmt.Errorf("not supported")
+}
+
 // Flush when log should be flushed
 func (log *TestLogger) Flush() {
 }
diff --git a/models/migrations/testlogger_test.go b/models/migrations/testlogger_test.go
index 01d6b7a0f7..c087e311c2 100644
--- a/models/migrations/testlogger_test.go
+++ b/models/migrations/testlogger_test.go
@@ -166,6 +166,11 @@ func (log *TestLogger) Init(config string) error {
 	return nil
 }
 
+// Content is not supported by the TestLogger
+func (log *TestLogger) Content() (string, error) {
+	return "", fmt.Errorf("not supported")
+}
+
 // Flush when log should be flushed
 func (log *TestLogger) Flush() {
 }
diff --git a/modules/log/buffer.go b/modules/log/buffer.go
new file mode 100644
index 0000000000..9535522bb6
--- /dev/null
+++ b/modules/log/buffer.go
@@ -0,0 +1,74 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+	"bytes"
+	"sync"
+)
+
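+// bufferWriteCloser accumulates everything written to it in an in-memory buffer.
+// The mutex guards the buffer: the logger may write concurrently with callers reading the content via String.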
+type bufferWriteCloser struct {
+	mu     sync.Mutex
+	buffer bytes.Buffer
+}
+
+func (b *bufferWriteCloser) Write(p []byte) (int, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return b.buffer.Write(p)
+}
+
+func (b *bufferWriteCloser) Close() error {
+	return nil
+}
+
+func (b *bufferWriteCloser) String() string {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return b.buffer.String()
+}
+
+// BufferLogger implements LoggerProvider and writes messages to an in-memory buffer.
+type BufferLogger struct {
+	WriterLogger
+}
+
+// NewBufferLogger creates a BufferLogger, returning it as a LoggerProvider.
+func NewBufferLogger() LoggerProvider {
+	log := &BufferLogger{}
+	log.NewWriterLogger(&bufferWriteCloser{})
+	return log
+}
+
+// Init inits the buffer logger
+func (log *BufferLogger) Init(string) error {
+	log.NewWriterLogger(log.out)
+	return nil
+}
+
+// Content returns the content accumulated in the buffer
+func (log *BufferLogger) Content() (string, error) {
+	return log.out.(*bufferWriteCloser).String(), nil
+}
+
+// Flush when log should be flushed
+func (log *BufferLogger) Flush() {
+}
+
+// ReleaseReopen does nothing
+func (log *BufferLogger) ReleaseReopen() error {
+	return nil
+}
+
+// GetName returns the default name for this implementation
+func (log *BufferLogger) GetName() string {
+	return "buffer"
+}
+
+func init() {
+	Register("buffer", NewBufferLogger)
+}
diff --git a/modules/log/buffer_test.go b/modules/log/buffer_test.go
new file mode 100644
index 0000000000..8ec74bc78d
--- /dev/null
+++ b/modules/log/buffer_test.go
@@ -0,0 +1,67 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestBufferLogger(t *testing.T) {
+	logger := NewBufferLogger()
+	bufferLogger := logger.(*BufferLogger)
+	assert.NotNil(t, bufferLogger)
+
+	err := logger.Init("")
+	assert.NoError(t, err)
+
+	location, _ := time.LoadLocation("EST")
+	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
+
+	msg := "TEST MSG"
+	event := Event{
+		level:    INFO,
+		msg:      msg,
+		caller:   "CALLER",
+		filename: "FULL/FILENAME",
+		line:     1,
+		time:     date,
+	}
+	logger.LogEvent(&event)
+	content, err := bufferLogger.Content()
+	assert.NoError(t, err)
+	assert.Contains(t, content, msg)
+	logger.Close()
+}
+
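+// TestBufferLoggerContent adds a buffer sub-logger alongside the console one and
+// checks that a logged message can be read back via GetLoggerProviderContent.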
+func TestBufferLoggerContent(t *testing.T) {
+	level := INFO
+	logger := NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
+
+	logger.SetLogger("buffer", "buffer", "{}")
+	defer logger.DelLogger("buffer")
+
+	msg := "A UNIQUE MESSAGE"
+	Error(msg)
+
+	found := false
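+	// the event is delivered to the buffer logger asynchronously, so poll until the message has been processed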
+	for i := 0; i < 30000; i++ {
+		content, err := logger.GetLoggerProviderContent("buffer")
+		assert.NoError(t, err)
+		if strings.Contains(content, msg) {
+			found = true
+			break
+		}
+		time.Sleep(1 * time.Millisecond)
+	}
+	assert.True(t, found)
+}
diff --git a/modules/log/conn.go b/modules/log/conn.go
index 3e32c81d7f..bd1d76d071 100644
--- a/modules/log/conn.go
+++ b/modules/log/conn.go
@@ -119,6 +119,11 @@ func (log *ConnLogger) Init(jsonconfig string) error {
 	return nil
 }
 
+// Content is not supported by the ConnLogger
+func (log *ConnLogger) Content() (string, error) {
+	return "", fmt.Errorf("not supported")
+}
+
 // Flush does nothing for this implementation
 func (log *ConnLogger) Flush() {
 }
diff --git a/modules/log/console.go b/modules/log/console.go
index 7ecdd5c3b8..8c78add474 100644
--- a/modules/log/console.go
+++ b/modules/log/console.go
@@ -66,6 +66,11 @@ func (log *ConsoleLogger) Init(config string) error {
 	return nil
 }
 
+// Content is not supported by the ConsoleLogger
+func (log *ConsoleLogger) Content() (string, error) {
+	return "", fmt.Errorf("not supported")
+}
+
 // Flush when log should be flushed
 func (log *ConsoleLogger) Flush() {
 }
diff --git a/modules/log/event.go b/modules/log/event.go
index 00a66c306a..b20dac17c7 100644
--- a/modules/log/event.go
+++ b/modules/log/event.go
@@ -216,6 +216,13 @@ func (m *MultiChannelledLog) GetEventLogger(name string) EventLogger {
 	return m.loggers[name]
 }
 
+// GetLoggerProviderContent returns the content of the named sub logger provider of this MultiChannelledLog
+func (m *MultiChannelledLog) GetLoggerProviderContent(name string) (string, error) {
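+	// assumes the named logger exists and is a *ChannelledLog; the assertion panics otherwise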
+	channelledLogger := m.GetEventLogger(name).(*ChannelledLog)
+	return channelledLogger.loggerProvider.Content()
+}
+
 // GetEventLoggerNames returns a list of names
 func (m *MultiChannelledLog) GetEventLoggerNames() []string {
 	m.rwmutex.RLock()
diff --git a/modules/log/file.go b/modules/log/file.go
index 7dc77c0942..d9a529e67f 100644
--- a/modules/log/file.go
+++ b/modules/log/file.go
@@ -243,6 +243,15 @@ func (log *FileLogger) deleteOldLog() {
 	})
 }
 
+// Content returns the content of the current log file (rotated files are not read)
+func (log *FileLogger) Content() (string, error) {
+	b, err := os.ReadFile(log.Filename)
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
+
 // Flush flush file logger.
 // there are no buffering messages in file logger in memory.
 // flush file means sync file from disk.
diff --git a/modules/log/provider.go b/modules/log/provider.go
index b31bf392e6..e03e6f74a7 100644
--- a/modules/log/provider.go
+++ b/modules/log/provider.go
@@ -7,6 +7,7 @@ package log
 // LoggerProvider represents behaviors of a logger provider.
 type LoggerProvider interface {
 	Init(config string) error
+	Content() (string, error)
 	EventLogger
 }
 
diff --git a/modules/log/smtp.go b/modules/log/smtp.go
index 60dced3706..c5163292e6 100644
--- a/modules/log/smtp.go
+++ b/modules/log/smtp.go
@@ -95,6 +95,11 @@ func (log *SMTPLogger) sendMail(p []byte) (int, error) {
 	)
 }
 
+// Content is not supported by the SMTPLogger
+func (log *SMTPLogger) Content() (string, error) {
+	return "", fmt.Errorf("not supported")
+}
+
 // Flush when log should be flushed
 func (log *SMTPLogger) Flush() {
 }
diff --git a/services/migrations/gitea_uploader.go b/services/migrations/gitea_uploader.go
index 34f507e83c..21c2dc8f8e 100644
--- a/services/migrations/gitea_uploader.go
+++ b/services/migrations/gitea_uploader.go
@@ -484,19 +484,9 @@ func (g *GiteaLocalUploader) CreatePullRequests(prs ...*base.PullRequest) error
 	return nil
 }
 
-func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*models.PullRequest, error) {
-	var labels []*models.Label
-	for _, label := range pr.Labels {
-		lb, ok := g.labels[label.Name]
-		if ok {
-			labels = append(labels, lb)
-		}
-	}
-
-	milestoneID := g.milestones[pr.Milestone]
-
+func (g *GiteaLocalUploader) updateGitForPullRequest(pr *base.PullRequest) (head string, err error) {
 	// download patch file
-	err := func() error {
+	err = func() error {
 		if pr.PatchURL == "" {
 			return nil
 		}
@@ -519,25 +509,25 @@ func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*models.PullR
 		return err
 	}()
 	if err != nil {
-		return nil, err
+		return "", err
 	}
 
 	// set head information
 	pullHead := filepath.Join(g.repo.RepoPath(), "refs", "pull", fmt.Sprintf("%d", pr.Number))
 	if err := os.MkdirAll(pullHead, os.ModePerm); err != nil {
-		return nil, err
+		return "", err
 	}
 	p, err := os.Create(filepath.Join(pullHead, "head"))
 	if err != nil {
-		return nil, err
+		return "", err
 	}
 	_, err = p.WriteString(pr.Head.SHA)
 	p.Close()
 	if err != nil {
-		return nil, err
+		return "", err
 	}
 
-	head := "unknown repository"
+	head = "unknown repository"
 	if pr.IsForkPullRequest() && pr.State != "closed" {
 		if pr.Head.OwnerName != "" {
 			remote := pr.Head.OwnerName
@@ -560,16 +550,16 @@ func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*models.PullR
 				} else {
 					headBranch := filepath.Join(g.repo.RepoPath(), "refs", "heads", pr.Head.OwnerName, pr.Head.Ref)
 					if err := os.MkdirAll(filepath.Dir(headBranch), os.ModePerm); err != nil {
-						return nil, err
+						return "", err
 					}
 					b, err := os.Create(headBranch)
 					if err != nil {
-						return nil, err
+						return "", err
 					}
 					_, err = b.WriteString(pr.Head.SHA)
 					b.Close()
 					if err != nil {
-						return nil, err
+						return "", err
 					}
 					head = pr.Head.OwnerName + "/" + pr.Head.Ref
 				}
@@ -595,6 +585,25 @@ func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*models.PullR
 		}
 	}
 
+	return head, nil
+}
+
+func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*models.PullRequest, error) {
+	var labels []*models.Label
+	for _, label := range pr.Labels {
+		lb, ok := g.labels[label.Name]
+		if ok {
+			labels = append(labels, lb)
+		}
+	}
+
+	milestoneID := g.milestones[pr.Milestone]
+
+	head, err := g.updateGitForPullRequest(pr)
+	if err != nil {
+		return nil, fmt.Errorf("updateGitForPullRequest: %w", err)
+	}
+
 	if pr.Created.IsZero() {
 		if pr.Closed != nil {
 			pr.Created = *pr.Closed
diff --git a/services/migrations/gitea_uploader_test.go b/services/migrations/gitea_uploader_test.go
index 0a35b5a632..34107b7f6a 100644
--- a/services/migrations/gitea_uploader_test.go
+++ b/services/migrations/gitea_uploader_test.go
@@ -7,7 +7,11 @@ package migrations
 
 import (
 	"context"
+	"fmt"
+	"os"
+	"path/filepath"
 	"strconv"
+	"strings"
 	"testing"
 	"time"
 
@@ -16,7 +20,9 @@ import (
 	repo_model "code.gitea.io/gitea/models/repo"
 	"code.gitea.io/gitea/models/unittest"
 	user_model "code.gitea.io/gitea/models/user"
+	"code.gitea.io/gitea/modules/git"
 	"code.gitea.io/gitea/modules/graceful"
+	"code.gitea.io/gitea/modules/log"
 	base "code.gitea.io/gitea/modules/migration"
 	"code.gitea.io/gitea/modules/structs"
 	"code.gitea.io/gitea/modules/util"
@@ -217,3 +223,307 @@ func TestGiteaUploadRemapExternalUser(t *testing.T) {
 	assert.NoError(t, err)
 	assert.EqualValues(t, linkedUser.ID, target.GetUserID())
 }
+
+func TestGiteaUploadUpdateGitForPullRequest(t *testing.T) {
+	unittest.PrepareTestEnv(t)
+
+	//
+	// fromRepo master
+	//
+	fromRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository)
+	baseRef := "master"
+	assert.NoError(t, git.InitRepository(git.DefaultContext, fromRepo.RepoPath(), false))
+	_, err := git.NewCommand(git.DefaultContext, "symbolic-ref", "HEAD", git.BranchPrefix+baseRef).RunInDir(fromRepo.RepoPath())
+	assert.NoError(t, err)
+	assert.NoError(t, os.WriteFile(filepath.Join(fromRepo.RepoPath(), "README.md"), []byte(fmt.Sprintf("# Testing Repository\n\nOriginally created in: %s", fromRepo.RepoPath())), 0o644))
+	assert.NoError(t, git.AddChanges(fromRepo.RepoPath(), true))
+	signature := git.Signature{
+		Email: "test@example.com",
+		Name:  "test",
+		When:  time.Now(),
+	}
+	assert.NoError(t, git.CommitChanges(fromRepo.RepoPath(), git.CommitChangesOptions{
+		Committer: &signature,
+		Author:    &signature,
+		Message:   "Initial Commit",
+	}))
+	fromGitRepo, err := git.OpenRepositoryCtx(git.DefaultContext, fromRepo.RepoPath())
+	assert.NoError(t, err)
+	defer fromGitRepo.Close()
+	baseSHA, err := fromGitRepo.GetBranchCommitID(baseRef)
+	assert.NoError(t, err)
+
+	//
+	// fromRepo branch1
+	//
+	headRef := "branch1"
+	_, err = git.NewCommand(git.DefaultContext, "checkout", "-b", headRef).RunInDir(fromRepo.RepoPath())
+	assert.NoError(t, err)
+	assert.NoError(t, os.WriteFile(filepath.Join(fromRepo.RepoPath(), "README.md"), []byte("SOMETHING"), 0o644))
+	assert.NoError(t, git.AddChanges(fromRepo.RepoPath(), true))
+	signature.When = time.Now()
+	assert.NoError(t, git.CommitChanges(fromRepo.RepoPath(), git.CommitChangesOptions{
+		Committer: &signature,
+		Author:    &signature,
+		Message:   "Pull request",
+	}))
+	assert.NoError(t, err)
+	headSHA, err := fromGitRepo.GetBranchCommitID(headRef)
+	assert.NoError(t, err)
+
+	fromRepoOwner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: fromRepo.OwnerID}).(*user_model.User)
+
+	//
+	// forkRepo branch2
+	//
+	forkHeadRef := "branch2"
+	forkRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 8}).(*repo_model.Repository)
+	assert.NoError(t, git.CloneWithArgs(git.DefaultContext, fromRepo.RepoPath(), forkRepo.RepoPath(), []string{}, git.CloneRepoOptions{
+		Branch: headRef,
+	}))
+	_, err = git.NewCommand(git.DefaultContext, "checkout", "-b", forkHeadRef).RunInDir(forkRepo.RepoPath())
+	assert.NoError(t, err)
+	assert.NoError(t, os.WriteFile(filepath.Join(forkRepo.RepoPath(), "README.md"), []byte(fmt.Sprintf("# branch2 %s", forkRepo.RepoPath())), 0o644))
+	assert.NoError(t, git.AddChanges(forkRepo.RepoPath(), true))
+	assert.NoError(t, git.CommitChanges(forkRepo.RepoPath(), git.CommitChangesOptions{
+		Committer: &signature,
+		Author:    &signature,
+		Message:   "branch2 commit",
+	}))
+	forkGitRepo, err := git.OpenRepositoryCtx(git.DefaultContext, forkRepo.RepoPath())
+	assert.NoError(t, err)
+	defer forkGitRepo.Close()
+	forkHeadSHA, err := forkGitRepo.GetBranchCommitID(forkHeadRef)
+	assert.NoError(t, err)
+
+	toRepoName := "migrated"
+	uploader := NewGiteaLocalUploader(context.Background(), fromRepoOwner, fromRepoOwner.Name, toRepoName)
+	uploader.gitServiceType = structs.GiteaService
+	assert.NoError(t, uploader.CreateRepo(&base.Repository{
+		Description: "description",
+		OriginalURL: fromRepo.RepoPath(),
+		CloneURL:    fromRepo.RepoPath(),
+		IsPrivate:   false,
+		IsMirror:    true,
+	}, base.MigrateOptions{
+		GitServiceType: structs.GiteaService,
+		Private:        false,
+		Mirror:         true,
+	}))
+
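+	// each case feeds a pull request to updateGitForPullRequest and checks the returned head;
+	// assertContent, when set, inspects the log output captured by the buffer sub-logger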
+	for _, testCase := range []struct {
+		name          string
+		head          string
+		assertContent func(t *testing.T, content string)
+		pr            base.PullRequest
+	}{
+		{
+			name: "fork, good Head.SHA",
+			head: fmt.Sprintf("%s/%s", forkRepo.OwnerName, forkHeadRef),
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  forkRepo.RepoPath(),
+					Ref:       forkHeadRef,
+					SHA:       forkHeadSHA,
+					RepoName:  forkRepo.Name,
+					OwnerName: forkRepo.OwnerName,
+				},
+			},
+		},
+		{
+			name: "fork, invalid Head.Ref",
+			head: "unknown repository",
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  forkRepo.RepoPath(),
+					Ref:       "INVALID",
+					SHA:       forkHeadSHA,
+					RepoName:  forkRepo.Name,
+					OwnerName: forkRepo.OwnerName,
+				},
+			},
+			assertContent: func(t *testing.T, content string) {
+				assert.Contains(t, content, "Fetch branch from")
+			},
+		},
+		{
+			name: "invalid fork CloneURL",
+			head: "unknown repository",
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  "UNLIKELY",
+					Ref:       forkHeadRef,
+					SHA:       forkHeadSHA,
+					RepoName:  forkRepo.Name,
+					OwnerName: "WRONG",
+				},
+			},
+			assertContent: func(t *testing.T, content string) {
+				assert.Contains(t, content, "AddRemote failed")
+			},
+		},
+		{
+			name: "no fork, good Head.SHA",
+			head: headRef,
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       headRef,
+					SHA:       headSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+			},
+		},
+		{
+			name: "no fork, empty Head.SHA",
+			head: headRef,
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       headRef,
+					SHA:       "",
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+			},
+			assertContent: func(t *testing.T, content string) {
+				assert.Contains(t, content, "Empty reference, removing")
+				assert.NotContains(t, content, "Cannot remove local head")
+			},
+		},
+		{
+			name: "no fork, invalid Head.SHA",
+			head: headRef,
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       headRef,
+					SHA:       "brokenSHA",
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+			},
+			assertContent: func(t *testing.T, content string) {
+				assert.Contains(t, content, "Deprecated local head")
+				assert.Contains(t, content, "Cannot remove local head")
+			},
+		},
+		{
+			name: "no fork, not found Head.SHA",
+			head: headRef,
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       headRef,
+					SHA:       "2697b352310fcd01cbd1f3dbd43b894080027f68",
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+			},
+			assertContent: func(t *testing.T, content string) {
+				assert.Contains(t, content, "Deprecated local head")
+				assert.NotContains(t, content, "Cannot remove local head")
+			},
+		},
+	} {
+		t.Run(testCase.name, func(t *testing.T) {
+			logger, ok := log.NamedLoggers.Load(log.DEFAULT)
+			assert.True(t, ok)
+			logger.SetLogger("buffer", "buffer", "{}")
+			defer logger.DelLogger("buffer")
+
+			head, err := uploader.updateGitForPullRequest(&testCase.pr)
+			assert.NoError(t, err)
+			assert.EqualValues(t, testCase.head, head)
+			if testCase.assertContent != nil {
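+				// log events reach the buffer logger asynchronously: emit a unique fence
+				// message and wait until it appears, so all earlier messages are in the buffer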
+				fence := fmt.Sprintf(">>>>>>>>>>>>>FENCE %s<<<<<<<<<<<<<<<", testCase.name)
+				log.Error(fence)
+				var content string
+				for i := 0; i < 5000; i++ {
+					content, err = logger.GetLoggerProviderContent("buffer")
+					assert.NoError(t, err)
+					if strings.Contains(content, fence) {
+						break
+					}
+					time.Sleep(1 * time.Millisecond)
+				}
+				testCase.assertContent(t, content)
+			}
+		})
+	}
+}