Mirror of https://github.com/mjl-/mox.git, synced 2024-12-26 16:33:47 +03:00
implement storing non-system/well-known flags (keywords) for messages and mailboxes, with imap
The mailbox SELECT/EXAMINE responses now return all flags used in a mailbox in the FLAGS response, and indicate in the PERMANENTFLAGS response that clients can set new keywords. These keywords are stored in the new Message.Keywords field; system/well-known flags remain in Message.Flags, so the two are recognized and handled separately. The IMAP STORE command handles the new flags, as do the APPEND and SEARCH commands. A keyword is stored in a mailbox as soon as a message in that mailbox gets the keyword; keywords are never removed from a mailbox automatically, and there is currently no way to remove a keyword from a mailbox at all. The import commands now handle non-system/well-known keywords too when importing from mbox/maildir. JMAP requires keyword support, so best to get it out of the way now.
parent afefadf2c0
commit 40163bd145

30 changed files with 1927 additions and 145 deletions
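Before the per-file diffs, a rough standalone sketch of the behavior the commit message describes. The keywordOK helper below loosely mirrors the ValidLowercaseKeyword check added to the store package further down; the words in main and the "ignored" branch (matching how the import code simply skips invalid words) are illustrative assumptions, not code from the commit:

package main

import (
    "fmt"
    "strings"
)

// keywordOK loosely mirrors store.ValidLowercaseKeyword: keywords are stored
// lower-case and must be plain atoms (no spaces, no atom-specials).
func keywordOK(s string) bool {
    const atomspecials = `(){%*"\]`
    for _, c := range s {
        if c <= ' ' || c > 0x7e || strings.ContainsRune(atomspecials, c) {
            return false
        }
    }
    return len(s) > 0
}

func main() {
    // A client stores these flags on a message: system flags go into Message.Flags,
    // the rest become Message.Keywords and are also merged into Mailbox.Keywords.
    for _, w := range []string{`\Seen`, "$label2", "ProjectX", "bad keyword"} {
        lw := strings.ToLower(w)
        switch {
        case strings.HasPrefix(w, `\`):
            fmt.Println(w, "-> system flag (Message.Flags)")
        case keywordOK(lw):
            fmt.Println(w, "-> keyword", lw, "(Message.Keywords, added to Mailbox.Keywords)")
        default:
            fmt.Println(w, "-> ignored")
        }
    }
}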
go.mod (1 line changed)

@@ -11,6 +11,7 @@ require (
 	github.com/prometheus/client_golang v1.14.0
 	go.etcd.io/bbolt v1.3.7
 	golang.org/x/crypto v0.8.0
+	golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
 	golang.org/x/net v0.9.0
 	golang.org/x/text v0.9.0
 )
go.sum (2 lines changed)

@@ -236,6 +236,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
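The new dependency golang.org/x/exp is pulled in for maps.Keys and slices.Index, which the diffs below use to turn keyword sets into slices and to de-duplicate keyword lists. One detail worth noting: maps.Keys returns keys in an unspecified order, which is why the account test below sorts mailbox keywords before comparing. A minimal sketch (values are made up):

package main

import (
    "fmt"
    "sort"

    "golang.org/x/exp/maps"
    "golang.org/x/exp/slices"
)

func main() {
    keywords := map[string]bool{"test": true, "other": true}
    l := maps.Keys(keywords) // order is unspecified
    sort.Strings(l)
    fmt.Println(l)                          // [other test]
    fmt.Println(slices.Index(l, "other"))   // 0
    fmt.Println(slices.Index(l, "missing")) // -1
}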
@@ -14,14 +14,19 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"sort"
 	"strings"
 	"testing"
 
+	"github.com/mjl-/bstore"
+
 	"github.com/mjl-/mox/mlog"
 	"github.com/mjl-/mox/mox-"
 	"github.com/mjl-/mox/store"
 )
 
+var ctxbg = context.Background()
+
 func tcheck(t *testing.T, err error, msg string) {
 	t.Helper()
 	if err != nil {

@@ -50,7 +55,7 @@ func TestAccount(t *testing.T) {
 		if authHdr != "" {
 			r.Header.Add("Authorization", authHdr)
 		}
-		ok := checkAccountAuth(context.Background(), log, w, r)
+		ok := checkAccountAuth(ctxbg, log, w, r)
 		if ok != expect {
 			t.Fatalf("got %v, expected %v", ok, expect)
 		}

@@ -59,7 +64,7 @@ func TestAccount(t *testing.T) {
 	const authOK = "Basic bWpsQG1veC5leGFtcGxlOnRlc3QxMjM0"     // mjl@mox.example:test1234
 	const authBad = "Basic bWpsQG1veC5leGFtcGxlOmJhZHBhc3N3b3Jk" // mjl@mox.example:badpassword
 
-	authCtx := context.WithValue(context.Background(), authCtxKey, "mjl")
+	authCtx := context.WithValue(ctxbg, authCtxKey, "mjl")
 
 	test(authOK, "") // No password set yet.
 	Account{}.SetPassword(authCtx, "test1234")

@@ -132,6 +137,39 @@ func TestAccount(t *testing.T) {
 	testImport("../testdata/importtest.mbox.zip", 2)
 	testImport("../testdata/importtest.maildir.tgz", 2)
 
+	// Check there are messages, with the right flags.
+	acc.DB.Read(ctxbg, func(tx *bstore.Tx) error {
+		_, err = bstore.QueryTx[store.Message](tx).FilterIn("Keywords", "other").FilterIn("Keywords", "test").Get()
+		tcheck(t, err, `fetching message with keywords "other" and "test"`)
+
+		mb, err := acc.MailboxFind(tx, "importtest")
+		tcheck(t, err, "looking up mailbox importtest")
+		if mb == nil {
+			t.Fatalf("missing mailbox importtest")
+		}
+		sort.Strings(mb.Keywords)
+		if strings.Join(mb.Keywords, " ") != "other test" {
+			t.Fatalf(`expected mailbox keywords "other" and "test", got %v`, mb.Keywords)
+		}
+
+		n, err := bstore.QueryTx[store.Message](tx).FilterIn("Keywords", "custom").Count()
+		tcheck(t, err, `fetching message with keyword "custom"`)
+		if n != 2 {
+			t.Fatalf(`got %d messages with keyword "custom", expected 2`, n)
+		}
+
+		mb, err = acc.MailboxFind(tx, "maildir")
+		tcheck(t, err, "looking up mailbox maildir")
+		if mb == nil {
+			t.Fatalf("missing mailbox maildir")
+		}
+		if strings.Join(mb.Keywords, " ") != "custom" {
+			t.Fatalf(`expected mailbox keywords "custom", got %v`, mb.Keywords)
+		}
+
+		return nil
+	})
+
 	testExport := func(httppath string, iszip bool, expectFiles int) {
 		t.Helper()
@@ -1,7 +1,6 @@
 package http
 
 import (
-	"context"
 	"crypto/ed25519"
 	"net"
 	"net/http/httptest"

@@ -29,7 +28,7 @@ func TestAdminAuth(t *testing.T) {
 		if authHdr != "" {
 			r.Header.Add("Authorization", authHdr)
 		}
-		ok := checkAdminAuth(context.Background(), passwordfile, w, r)
+		ok := checkAdminAuth(ctxbg, passwordfile, w, r)
 		if ok != expect {
 			t.Fatalf("got %v, expected %v", ok, expect)
 		}

@@ -125,9 +124,9 @@ func TestCheckDomain(t *testing.T) {
 	close(done)
 	dialer := &net.Dialer{Deadline: time.Now().Add(-time.Second), Cancel: done}
 
-	checkDomain(context.Background(), resolver, dialer, "mox.example")
+	checkDomain(ctxbg, resolver, dialer, "mox.example")
 	// todo: check returned data
 
-	Admin{}.Domains(context.Background())        // todo: check results
-	dnsblsStatus(context.Background(), resolver) // todo: check results
+	Admin{}.Domains(ctxbg)        // todo: check results
+	dnsblsStatus(ctxbg, resolver) // todo: check results
 }
@@ -19,6 +19,7 @@ import (
 	"strings"
 	"time"
 
+	"golang.org/x/exp/maps"
 	"golang.org/x/text/unicode/norm"
 
 	"github.com/mjl-/bstore"

@@ -361,10 +362,16 @@ func importMessages(ctx context.Context, log *mlog.Log, token string, acc *store
 	mailboxes := map[string]store.Mailbox{}
 	messages := map[string]int{}
 
-	// For maildirs, we are likely to get a possible dovecot-keywords file after having imported the messages. Once we see the keywords, we use them. But before that time we remember which messages miss a keywords. Once the keywords become available, we'll fix up the flags for the unknown messages
+	// For maildirs, we are likely to get a possible dovecot-keywords file after having
+	// imported the messages. Once we see the keywords, we use them. But before that
+	// time we remember which messages miss a keywords. Once the keywords become
+	// available, we'll fix up the flags for the unknown messages
 	mailboxKeywords := map[string]map[rune]string{}                // Mailbox to 'a'-'z' to flag name.
 	mailboxMissingKeywordMessages := map[string]map[int64]string{} // Mailbox to message id to string consisting of the unrecognized flags.
 
+	// We keep the mailboxes we deliver to up to date with their keywords (non-system flags).
+	destMailboxKeywords := map[int64]map[string]bool{}
+
 	// Previous mailbox an event was sent for. We send an event for new mailboxes, when
 	// another 100 messages were added, when adding a message to another mailbox, and
 	// finally at the end as a closing statement.

@@ -471,6 +478,15 @@ func importMessages(ctx context.Context, log *mlog.Log, token string, acc *store
 		m.MailboxID = mb.ID
 		m.MailboxOrigID = mb.ID
 
+		if len(m.Keywords) > 0 {
+			if destMailboxKeywords[mb.ID] == nil {
+				destMailboxKeywords[mb.ID] = map[string]bool{}
+			}
+			for _, k := range m.Keywords {
+				destMailboxKeywords[mb.ID][k] = true
+			}
+		}
+
 		// Parse message and store parsed information for later fast retrieval.
 		p, err := message.EnsurePart(f, m.Size)
 		if err != nil {

@@ -503,7 +519,7 @@ func importMessages(ctx context.Context, log *mlog.Log, token string, acc *store
 			return
 		}
 		deliveredIDs = append(deliveredIDs, m.ID)
-		changes = append(changes, store.ChangeAddUID{MailboxID: m.MailboxID, UID: m.UID, Flags: m.Flags})
+		changes = append(changes, store.ChangeAddUID{MailboxID: m.MailboxID, UID: m.UID, Flags: m.Flags, Keywords: m.Keywords})
 		messages[mb.Name]++
 		if messages[mb.Name]%100 == 0 || prevMailbox != mb.Name {
 			prevMailbox = mb.Name

@@ -583,7 +599,8 @@ func importMessages(ctx context.Context, log *mlog.Log, token string, acc *store
 
 			// Parse flags. See https://cr.yp.to/proto/maildir.html.
 			var keepFlags string
-			flags := store.Flags{}
+			var flags store.Flags
+			keywords := map[string]bool{}
 			t = strings.SplitN(path.Base(filename), ":2,", 2)
 			if len(t) == 2 {
 				for _, c := range t[1] {

@@ -602,12 +619,12 @@ func importMessages(ctx context.Context, log *mlog.Log, token string, acc *store
 						flags.Flagged = true
 					default:
 						if c >= 'a' && c <= 'z' {
-							keywords, ok := mailboxKeywords[mailbox]
+							dovecotKeywords, ok := mailboxKeywords[mailbox]
 							if !ok {
 								// No keywords file seen yet, we'll try later if it comes in.
 								keepFlags += string(c)
-							} else if kw, ok := keywords[c]; ok {
-								flagSet(&flags, strings.ToLower(kw))
+							} else if kw, ok := dovecotKeywords[c]; ok {
+								flagSet(&flags, keywords, strings.ToLower(kw))
 							}
 						}
 					}

@@ -617,6 +634,7 @@ func importMessages(ctx context.Context, log *mlog.Log, token string, acc *store
 			m := store.Message{
 				Received: received,
 				Flags:    flags,
+				Keywords: maps.Keys(keywords),
 				Size:     size,
 			}
 			xdeliver(mb, &m, f, filename)

@@ -663,38 +681,52 @@ func importMessages(ctx context.Context, log *mlog.Log, token string, acc *store
 		default:
 			if path.Base(name) == "dovecot-keywords" {
 				mailbox := path.Dir(name)
-				keywords := map[rune]string{}
+				dovecotKeywords := map[rune]string{}
 				words, err := store.ParseDovecotKeywords(r, log)
 				log.Check(err, "parsing dovecot keywords for mailbox", mlog.Field("mailbox", mailbox))
 				for i, kw := range words {
-					keywords['a'+rune(i)] = kw
+					dovecotKeywords['a'+rune(i)] = kw
 				}
-				mailboxKeywords[mailbox] = keywords
+				mailboxKeywords[mailbox] = dovecotKeywords
 
 				for id, chars := range mailboxMissingKeywordMessages[mailbox] {
 					var flags, zeroflags store.Flags
+					keywords := map[string]bool{}
 					for _, c := range chars {
-						kw, ok := keywords[c]
+						kw, ok := dovecotKeywords[c]
 						if !ok {
-							problemf("unspecified message flag %c for message id %d (continuing)", c, id)
+							problemf("unspecified dovecot message flag %c for message id %d (continuing)", c, id)
 							continue
 						}
-						flagSet(&flags, strings.ToLower(kw))
+						flagSet(&flags, keywords, strings.ToLower(kw))
 					}
-					if flags == zeroflags {
+					if flags == zeroflags && len(keywords) == 0 {
 						continue
 					}
 
 					m := store.Message{ID: id}
 					err := tx.Get(&m)
 					ximportcheckf(err, "get imported message for flag update")
 
 					m.Flags = m.Flags.Set(flags, flags)
+					m.Keywords = maps.Keys(keywords)
+
+					if len(m.Keywords) > 0 {
+						if destMailboxKeywords[m.MailboxID] == nil {
+							destMailboxKeywords[m.MailboxID] = map[string]bool{}
+						}
+						for _, k := range m.Keywords {
+							destMailboxKeywords[m.MailboxID][k] = true
+						}
+					}
+
 					// We train before updating, training may set m.TrainedJunk.
 					if jf != nil && m.NeedsTraining() {
 						openTrainMessage(&m)
 					}
 					err = tx.Update(&m)
 					ximportcheckf(err, "updating message after flag update")
-					changes = append(changes, store.ChangeFlags{MailboxID: m.MailboxID, UID: m.UID, Mask: flags, Flags: flags})
+					changes = append(changes, store.ChangeFlags{MailboxID: m.MailboxID, UID: m.UID, Mask: flags, Flags: flags, Keywords: m.Keywords})
 				}
 				delete(mailboxMissingKeywordMessages, mailbox)
 			} else {

@@ -744,6 +776,19 @@ func importMessages(ctx context.Context, log *mlog.Log, token string, acc *store
 		sendEvent("count", importCount{prevMailbox, messages[prevMailbox]})
 	}
 
+	// Update mailboxes with keywords.
+	for mbID, keywords := range destMailboxKeywords {
+		mb := store.Mailbox{ID: mbID}
+		err := tx.Get(&mb)
+		ximportcheckf(err, "loading mailbox for updating keywords")
+		var changed bool
+		mb.Keywords, changed = store.MergeKeywords(mb.Keywords, maps.Keys(keywords))
+		if changed {
+			err = tx.Update(&mb)
+			ximportcheckf(err, "updating mailbox with keywords")
+		}
+	}
+
 	err = tx.Commit()
 	tx = nil
 	ximportcheckf(err, "commit")

@@ -768,9 +813,7 @@ func importMessages(ctx context.Context, log *mlog.Log, token string, acc *store
 	sendEvent("done", importDone{})
 }
 
-func flagSet(flags *store.Flags, word string) {
-	// todo: custom labels, e.g. $label1, JunkRecorded?
-
+func flagSet(flags *store.Flags, keywords map[string]bool, word string) {
 	switch word {
 	case "forwarded", "$forwarded":
 		flags.Forwarded = true

@@ -782,5 +825,9 @@ func flagSet(flags *store.Flags, word string) {
 		flags.Phishing = true
 	case "mdnsent", "$mdnsent":
 		flags.MDNSent = true
+	default:
+		if store.ValidLowercaseKeyword(word) {
+			keywords[word] = true
+		}
 	}
 }
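The dovecot-keywords handling above maps the per-maildir flag characters 'a'-'z' to keyword names. A standalone sketch of that mapping follows; the word list and the flag characters are made-up examples, not data from the commit:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // A dovecot-keywords file lists keyword names; index 0 is flag character 'a',
    // index 1 is 'b', and so on.
    words := []string{"$Forwarded", "custom"}
    dovecotKeywords := map[rune]string{}
    for i, kw := range words {
        dovecotKeywords['a'+rune(i)] = kw
    }

    // A maildir filename ending in ":2,Sab" carries flag characters S, a and b;
    // the letters resolve through the table above, lower-cased for storage.
    chars := "ab"
    var keywords []string
    for _, c := range chars {
        if kw, ok := dovecotKeywords[c]; ok {
            keywords = append(keywords, strings.ToLower(kw))
        }
    }
    fmt.Println(keywords) // [$forwarded custom]
}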
@@ -178,9 +178,9 @@ func (c *Conn) xrespCode() (string, CodeArg) {
 		l := []string{} // Must be non-nil.
 		if c.take(' ') {
 			c.xtake("(")
-			l = []string{c.xflag()}
+			l = []string{c.xflagPerm()}
 			for c.take(' ') {
-				l = append(l, c.xflag())
+				l = append(l, c.xflagPerm())
 			}
 			c.xtake(")")
 		}

@@ -694,10 +694,13 @@ func (c *Conn) xliteral() []byte {
 
 // ../rfc/9051:6565
 // todo: stricter
-func (c *Conn) xflag() string {
+func (c *Conn) xflag0(allowPerm bool) string {
 	s := ""
 	if c.take('\\') {
-		s = "\\"
+		s = `\`
+		if allowPerm && c.take('*') {
+			return `\*`
+		}
 	} else if c.take('$') {
 		s = "$"
 	}

@@ -705,6 +708,14 @@ func (c *Conn) xflag() string {
 	return s
 }
 
+func (c *Conn) xflag() string {
+	return c.xflag0(false)
+}
+
+func (c *Conn) xflagPerm() string {
+	return c.xflag0(true)
+}
+
 func (c *Conn) xsection() string {
 	c.xtake("[")
 	s := c.xtakeuntil(']')
@@ -44,14 +44,14 @@ func TestAppend(t *testing.T) {
 	tc2.transactf("no", "append nobox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" {1}")
 	tc2.xcode("TRYCREATE")
 
-	tc2.transactf("ok", "append inbox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
+	tc2.transactf("ok", "append inbox (\\Seen Label1 $label2) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
 	tc2.xuntagged(imapclient.UntaggedExists(1))
 	tc2.xcodeArg(imapclient.CodeAppendUID{UIDValidity: 1, UID: 1})
 
 	tc.transactf("ok", "noop")
 	uid1 := imapclient.FetchUID(1)
-	flagsSeen := imapclient.FetchFlags{`\Seen`}
-	tc.xuntagged(imapclient.UntaggedExists(1), imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, flagsSeen}})
+	flags := imapclient.FetchFlags{`\Seen`, "label1", "$label2"}
+	tc.xuntagged(imapclient.UntaggedExists(1), imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, flags}})
 	tc3.transactf("ok", "noop")
 	tc3.xuntagged() // Inbox is not selected, nothing to report.
@@ -189,12 +189,12 @@ func (cmd *fetchCmd) process(atts []fetchAtt) {
 		err := cmd.tx.Update(m)
 		xcheckf(err, "marking message as seen")
 
-		cmd.changes = append(cmd.changes, store.ChangeFlags{MailboxID: cmd.mailboxID, UID: cmd.uid, Mask: store.Flags{Seen: true}, Flags: m.Flags})
+		cmd.changes = append(cmd.changes, store.ChangeFlags{MailboxID: cmd.mailboxID, UID: cmd.uid, Mask: store.Flags{Seen: true}, Flags: m.Flags, Keywords: m.Keywords})
 	}
 
 	if cmd.needFlags {
 		m := cmd.xensureMessage()
-		data = append(data, bare("FLAGS"), flaglist(m.Flags))
+		data = append(data, bare("FLAGS"), flaglist(m.Flags, m.Keywords))
 	}
 
 	// Write errors are turned into panics because we write through c.
@@ -376,8 +376,18 @@ func (p *parser) remainder() string {
 	return p.orig[p.o:]
 }
 
+// ../rfc/9051:6565
 func (p *parser) xflag() string {
-	return p.xtakelist(`\`, "$") + p.xatom()
+	w, _ := p.takelist(`\`, "$")
+	s := w + p.xatom()
+	if s[0] == '\\' {
+		switch strings.ToLower(s) {
+		case `\answered`, `\flagged`, `\deleted`, `\seen`, `\draft`:
+		default:
+			p.xerrorf("unknown system flag %s", s)
+		}
+	}
+	return s
 }
 
 func (p *parser) xflagList() (l []string) {
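The server-side parser change above rejects backslash-prefixed flags that are not one of the five RFC 9051 system flags, while plain atoms fall through as keywords. A standalone sketch of just that check (a simplified illustration, not the parser itself):

package main

import (
    "fmt"
    "strings"
)

// validFlag mirrors the check in the new xflag: a flag starting with a backslash
// must be one of the system flags; anything else is treated as a keyword atom.
func validFlag(s string) error {
    if !strings.HasPrefix(s, `\`) {
        return nil // keyword, validated elsewhere
    }
    switch strings.ToLower(s) {
    case `\answered`, `\flagged`, `\deleted`, `\seen`, `\draft`:
        return nil
    }
    return fmt.Errorf("unknown system flag %s", s)
}

func main() {
    fmt.Println(validFlag(`\Seen`))    // <nil>
    fmt.Println(validFlag(`\badflag`)) // unknown system flag \badflag
    fmt.Println(validFlag("custom1"))  // <nil>
}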
@@ -309,19 +309,24 @@ func (s *search) match(sk searchKey) bool {
 	case "FLAGGED":
 		return s.m.Flagged
 	case "KEYWORD":
-		switch sk.atom {
-		case "$Forwarded":
+		kw := strings.ToLower(sk.atom)
+		switch kw {
+		case "$forwarded":
 			return s.m.Forwarded
-		case "$Junk":
+		case "$junk":
 			return s.m.Junk
-		case "$NotJunk":
+		case "$notjunk":
 			return s.m.Notjunk
-		case "$Phishing":
+		case "$phishing":
 			return s.m.Phishing
-		case "$MDNSent":
+		case "$mdnsent":
 			return s.m.MDNSent
 		default:
-			c.log.Info("search with unknown keyword", mlog.Field("keyword", sk.atom))
+			for _, k := range s.m.Keywords {
+				if k == kw {
+					return true
+				}
+			}
 			return false
 		}
 	case "SEEN":

@@ -333,21 +338,26 @@ func (s *search) match(sk searchKey) bool {
 	case "UNFLAGGED":
 		return !s.m.Flagged
 	case "UNKEYWORD":
-		switch sk.atom {
-		case "$Forwarded":
+		kw := strings.ToLower(sk.atom)
+		switch kw {
+		case "$forwarded":
 			return !s.m.Forwarded
-		case "$Junk":
+		case "$junk":
 			return !s.m.Junk
-		case "$NotJunk":
+		case "$notjunk":
 			return !s.m.Notjunk
-		case "$Phishing":
+		case "$phishing":
 			return !s.m.Phishing
-		case "$MDNSent":
+		case "$mdnsent":
 			return !s.m.MDNSent
 		default:
-			c.log.Info("search with unknown keyword", mlog.Field("keyword", sk.atom))
-			return false
+			for _, k := range s.m.Keywords {
+				if k == kw {
+					return false
+				}
+			}
+			return true
 		}
 	case "UNSEEN":
 		return !s.m.Seen
 	case "DRAFT":
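The SEARCH change above compares the (lower-cased) KEYWORD/UNKEYWORD atom against the message's stored keywords, which are already lower case. A standalone sketch of that comparison, with made-up message keywords:

package main

import (
    "fmt"
    "strings"
)

// hasKeyword mirrors the new KEYWORD matching: the search atom is lower-cased
// and compared against the message's stored keywords.
func hasKeyword(msgKeywords []string, atom string) bool {
    kw := strings.ToLower(atom)
    for _, k := range msgKeywords {
        if k == kw {
            return true
        }
    }
    return false
}

func main() {
    msg := []string{"custom1", "$forwarded"}
    fmt.Println(hasKeyword(msg, "Custom1"))  // true: KEYWORD Custom1 matches
    fmt.Println(!hasKeyword(msg, "custom2")) // true: UNKEYWORD custom2 matches
}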
@@ -79,6 +79,8 @@ func TestSearch(t *testing.T) {
 		`$Notjunk`,
 		`$Phishing`,
 		`$MDNSent`,
+		`custom1`,
+		`Custom2`,
 	}
 	tc.client.Append("inbox", mostFlags, &received, []byte(searchMsg))

@@ -123,6 +125,12 @@ func TestSearch(t *testing.T) {
 	tc.transactf("ok", `search keyword $Forwarded`)
 	tc.xsearch(3)
 
+	tc.transactf("ok", `search keyword Custom1`)
+	tc.xsearch(3)
+
+	tc.transactf("ok", `search keyword custom2`)
+	tc.xsearch(3)
+
 	tc.transactf("ok", `search new`)
 	tc.xsearch() // New requires a message to be recent. We pretend all messages are not recent.

@@ -162,6 +170,9 @@ func TestSearch(t *testing.T) {
 	tc.transactf("ok", `search unkeyword $Junk`)
 	tc.xsearch(1, 2)
 
+	tc.transactf("ok", `search unkeyword custom1`)
+	tc.xsearch(1, 2)
+
 	tc.transactf("ok", `search unseen`)
 	tc.xsearch(1, 2)
@@ -32,8 +32,9 @@ func testSelectExamine(t *testing.T, examine bool) {
 
 	uclosed := imapclient.UntaggedResult{Status: imapclient.OK, RespText: imapclient.RespText{Code: "CLOSED", More: "x"}}
 	flags := strings.Split(`\Seen \Answered \Flagged \Deleted \Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent`, " ")
+	permflags := strings.Split(`\Seen \Answered \Flagged \Deleted \Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent \*`, " ")
 	uflags := imapclient.UntaggedFlags(flags)
-	upermflags := imapclient.UntaggedResult{Status: imapclient.OK, RespText: imapclient.RespText{Code: "PERMANENTFLAGS", CodeArg: imapclient.CodeList{Code: "PERMANENTFLAGS", Args: flags}, More: "x"}}
+	upermflags := imapclient.UntaggedResult{Status: imapclient.OK, RespText: imapclient.RespText{Code: "PERMANENTFLAGS", CodeArg: imapclient.CodeList{Code: "PERMANENTFLAGS", Args: permflags}, More: "x"}}
 	urecent := imapclient.UntaggedRecent(0)
 	uexists0 := imapclient.UntaggedExists(0)
 	uexists1 := imapclient.UntaggedExists(1)
@@ -1233,7 +1233,7 @@ func (c *conn) applyChanges(changes []store.Change, initial bool) {
 			c.bwritelinef("* %d EXISTS", len(c.uids))
 			for _, add := range adds {
 				seq := c.xsequence(add.UID)
-				c.bwritelinef("* %d FETCH (UID %d FLAGS %s)", seq, add.UID, flaglist(add.Flags).pack(c))
+				c.bwritelinef("* %d FETCH (UID %d FLAGS %s)", seq, add.UID, flaglist(add.Flags, add.Keywords).pack(c))
 			}
 			continue
 		}

@@ -1265,7 +1265,7 @@ func (c *conn) applyChanges(changes []store.Change, initial bool) {
 				continue
 			}
 			if !initial {
-				c.bwritelinef("* %d FETCH (UID %d FLAGS %s)", seq, ch.UID, flaglist(ch.Flags).pack(c))
+				c.bwritelinef("* %d FETCH (UID %d FLAGS %s)", seq, ch.UID, flaglist(ch.Flags, ch.Keywords).pack(c))
 			}
 		case store.ChangeRemoveMailbox:
 			// Only announce \NonExistent to modern clients, otherwise they may ignore the

@@ -1862,8 +1862,12 @@ func (c *conn) cmdSelectExamine(isselect bool, tag, cmd string, p *parser) {
 	})
 	c.applyChanges(c.comm.Get(), true)
 
-	c.bwritelinef(`* FLAGS (\Seen \Answered \Flagged \Deleted \Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent)`)
-	c.bwritelinef(`* OK [PERMANENTFLAGS (\Seen \Answered \Flagged \Deleted \Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent)] x`)
+	var flags string
+	if len(mb.Keywords) > 0 {
+		flags = " " + strings.Join(mb.Keywords, " ")
+	}
+	c.bwritelinef(`* FLAGS (\Seen \Answered \Flagged \Deleted \Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent%s)`, flags)
+	c.bwritelinef(`* OK [PERMANENTFLAGS (\Seen \Answered \Flagged \Deleted \Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent \*)] x`)
 	if !c.enabled[capIMAP4rev2] {
 		c.bwritelinef(`* 0 RECENT`)
 	}

@@ -2436,7 +2440,7 @@ func (c *conn) xstatusLine(tx *bstore.Tx, mb store.Mailbox, attrs []string) stri
 	return fmt.Sprintf("* STATUS %s (%s)", astring(mb.Name).pack(c), strings.Join(status, " "))
 }
 
-func xparseStoreFlags(l []string, syntax bool) (flags store.Flags) {
+func xparseStoreFlags(l []string, syntax bool) (flags store.Flags, keywords []string) {
 	fields := map[string]*bool{
 		`\answered`: &flags.Answered,
 		`\flagged`:  &flags.Flagged,

@@ -2449,20 +2453,24 @@ func xparseStoreFlags(l []string, syntax bool) (flags store.Flags) {
 		`$phishing`: &flags.Phishing,
 		`$mdnsent`:  &flags.MDNSent,
 	}
+	seen := map[string]bool{}
 	for _, f := range l {
-		if field, ok := fields[strings.ToLower(f)]; !ok {
-			if syntax {
-				xsyntaxErrorf("unknown flag %q", f)
-			}
-			xuserErrorf("unknown flag %q", f)
-		} else {
+		f = strings.ToLower(f)
+		if field, ok := fields[f]; ok {
 			*field = true
+		} else if seen[f] {
+			if moxvar.Pedantic {
+				xuserErrorf("duplicate keyword %s", f)
+			}
+		} else {
+			keywords = append(keywords, f)
+			seen[f] = true
 		}
 	}
 	return
 }
 
-func flaglist(fl store.Flags) listspace {
+func flaglist(fl store.Flags, keywords []string) listspace {
 	l := listspace{}
 	flag := func(v bool, s string) {
 		if v {

@@ -2479,6 +2487,9 @@ func flaglist(fl store.Flags) listspace {
 	flag(fl.Notjunk, `$NotJunk`)
 	flag(fl.Phishing, `$Phishing`)
 	flag(fl.MDNSent, `$MDNSent`)
+	for _, k := range keywords {
+		l = append(l, bare(k))
+	}
 	return l
 }

@@ -2494,9 +2505,10 @@ func (c *conn) cmdAppend(tag, cmd string, p *parser) {
 	name := p.xmailbox()
 	p.xspace()
 	var storeFlags store.Flags
+	var keywords []string
 	if p.hasPrefix("(") {
 		// Error must be a syntax error, to properly abort the connection due to literal.
-		storeFlags = xparseStoreFlags(p.xflagList(), true)
+		storeFlags, keywords = xparseStoreFlags(p.xflagList(), true)
 		p.xspace()
 	}
 	var tm time.Time

@@ -2570,11 +2582,21 @@ func (c *conn) cmdAppend(tag, cmd string, p *parser) {
 	c.account.WithWLock(func() {
 		c.xdbwrite(func(tx *bstore.Tx) {
 			mb = c.xmailbox(tx, name, "TRYCREATE")
+
+			// Ensure keywords are stored in mailbox.
+			var changed bool
+			mb.Keywords, changed = store.MergeKeywords(mb.Keywords, keywords)
+			if changed {
+				err := tx.Update(&mb)
+				xcheckf(err, "updating keywords in mailbox")
+			}
+
 			msg = store.Message{
 				MailboxID:     mb.ID,
 				MailboxOrigID: mb.ID,
 				Received:      tm,
 				Flags:         storeFlags,
+				Keywords:      keywords,
 				Size:          size,
 				MsgPrefix:     msgPrefix,
 			}

@@ -2589,7 +2611,7 @@ func (c *conn) cmdAppend(tag, cmd string, p *parser) {
 		}
 
 		// Broadcast the change to other connections.
-		c.broadcast([]store.Change{store.ChangeAddUID{MailboxID: mb.ID, UID: msg.UID, Flags: msg.Flags}})
+		c.broadcast([]store.Change{store.ChangeAddUID{MailboxID: mb.ID, UID: msg.UID, Flags: msg.Flags, Keywords: msg.Keywords}})
 	})
 
 	err = msgFile.Close()

@@ -2952,6 +2974,7 @@ func (c *conn) cmdxCopy(isUID bool, tag, cmd string, p *parser) {
 	var mbDst store.Mailbox
 	var origUIDs, newUIDs []store.UID
 	var flags []store.Flags
+	var keywords [][]string
 
 	c.account.WithWLock(func() {
 		c.xdbwrite(func(tx *bstore.Tx) {

@@ -3017,6 +3040,7 @@ func (c *conn) cmdxCopy(isUID bool, tag, cmd string, p *parser) {
 				newUIDs = append(newUIDs, m.UID)
 				newMsgIDs = append(newMsgIDs, m.ID)
 				flags = append(flags, m.Flags)
+				keywords = append(keywords, m.Keywords)
 
 				qmr := bstore.QueryTx[store.Recipient](tx)
 				qmr.FilterNonzero(store.Recipient{MessageID: origID})

@@ -3048,7 +3072,7 @@ func (c *conn) cmdxCopy(isUID bool, tag, cmd string, p *parser) {
 	if len(newUIDs) > 0 {
 		changes := make([]store.Change, len(newUIDs))
 		for i, uid := range newUIDs {
-			changes[i] = store.ChangeAddUID{MailboxID: mbDst.ID, UID: uid, Flags: flags[i]}
+			changes[i] = store.ChangeAddUID{MailboxID: mbDst.ID, UID: uid, Flags: flags[i], Keywords: keywords[i]}
 		}
 		c.broadcast(changes)
 	}

@@ -3187,7 +3211,7 @@ func (c *conn) cmdxMove(isUID bool, tag, cmd string, p *parser) {
 			changes = append(changes, store.ChangeRemoveUIDs{MailboxID: c.mailboxID, UIDs: uids})
 			for _, m := range msgs {
 				newUIDs = append(newUIDs, m.UID)
-				changes = append(changes, store.ChangeAddUID{MailboxID: mbDst.ID, UID: m.UID, Flags: m.Flags})
+				changes = append(changes, store.ChangeAddUID{MailboxID: mbDst.ID, UID: m.UID, Flags: m.Flags, Keywords: m.Keywords})
 			}
 		})

@@ -3240,25 +3264,21 @@ func (c *conn) cmdxStore(isUID bool, tag, cmd string, p *parser) {
 		xuserErrorf("mailbox open in read-only mode")
 	}
 
-	var mask, flags store.Flags
+	flags, keywords := xparseStoreFlags(flagstrs, false)
+	var mask store.Flags
 	if plus {
-		mask = xparseStoreFlags(flagstrs, false)
-		flags = store.FlagsAll
+		mask, flags = flags, store.FlagsAll
 	} else if minus {
-		mask = xparseStoreFlags(flagstrs, false)
-		flags = store.Flags{}
+		mask, flags = flags, store.Flags{}
 	} else {
 		mask = store.FlagsAll
-		flags = xparseStoreFlags(flagstrs, false)
 	}
 
-	updates := store.FlagsQuerySet(mask, flags)
-
 	var updated []store.Message
 
 	c.account.WithWLock(func() {
 		c.xdbwrite(func(tx *bstore.Tx) {
-			c.xmailboxID(tx, c.mailboxID) // Validate.
+			mb := c.xmailboxID(tx, c.mailboxID) // Validate.
 
 			uidargs := c.xnumSetCondition(isUID, nums)
 

@@ -3266,27 +3286,41 @@ func (c *conn) cmdxStore(isUID bool, tag, cmd string, p *parser) {
 				return
 			}
 
+			// Ensure keywords are in mailbox.
+			if !minus {
+				var changed bool
+				mb.Keywords, changed = store.MergeKeywords(mb.Keywords, keywords)
+				if changed {
+					err := tx.Update(&mb)
+					xcheckf(err, "updating mailbox with keywords")
+				}
+			}
+
 			q := bstore.QueryTx[store.Message](tx)
 			q.FilterNonzero(store.Message{MailboxID: c.mailboxID})
 			q.FilterEqual("UID", uidargs...)
-			if len(updates) == 0 {
-				var err error
-				updated, err = q.List()
-				xcheckf(err, "listing for flags")
-			} else {
-				q.Gather(&updated)
-				_, err := q.UpdateFields(updates)
-				xcheckf(err, "updating flags")
-			}
+			err := q.ForEach(func(m store.Message) error {
+				m.Flags = m.Flags.Set(mask, flags)
+				if minus {
+					m.Keywords = store.RemoveKeywords(m.Keywords, keywords)
+				} else if plus {
+					m.Keywords, _ = store.MergeKeywords(m.Keywords, keywords)
+				} else {
+					m.Keywords = keywords
+				}
+				updated = append(updated, m)
+				return tx.Update(&m)
+			})
+			xcheckf(err, "storing flags in messages")
 
-			err := c.account.RetrainMessages(context.TODO(), c.log, tx, updated, false)
+			err = c.account.RetrainMessages(context.TODO(), c.log, tx, updated, false)
 			xcheckf(err, "training messages")
 		})
 
 		// Broadcast changes to other connections.
 		changes := make([]store.Change, len(updated))
 		for i, m := range updated {
-			changes[i] = store.ChangeFlags{MailboxID: m.MailboxID, UID: m.UID, Mask: mask, Flags: m.Flags}
+			changes[i] = store.ChangeFlags{MailboxID: m.MailboxID, UID: m.UID, Mask: mask, Flags: m.Flags, Keywords: m.Keywords}
 		}
 		c.broadcast(changes)
 	})

@@ -3294,7 +3328,7 @@ func (c *conn) cmdxStore(isUID bool, tag, cmd string, p *parser) {
 	for _, m := range updated {
 		if !silent {
 			// ../rfc/9051:6749 ../rfc/3501:4869
-			c.bwritelinef("* %d FETCH (UID %d FLAGS %s)", c.xsequence(m.UID), m.UID, flaglist(m.Flags).pack(c))
+			c.bwritelinef("* %d FETCH (UID %d FLAGS %s)", c.xsequence(m.UID), m.UID, flaglist(m.Flags, m.Keywords).pack(c))
 		}
 	}
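The cmdSelectExamine and flaglist changes above determine what a client sees when opening a mailbox: the well-known flags are always advertised, mailbox keywords are appended, and PERMANENTFLAGS ends in \* so clients know they may create new keywords. A standalone sketch of those response lines (the keyword names are made up; the format strings follow the diff above):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Well-known flags are always advertised; mailbox keywords are appended.
    mbKeywords := []string{"custom1", "custom2"}
    var flags string
    if len(mbKeywords) > 0 {
        flags = " " + strings.Join(mbKeywords, " ")
    }
    fmt.Printf("* FLAGS (\\Seen \\Answered \\Flagged \\Deleted \\Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent%s)\n", flags)
    // The PERMANENTFLAGS response additionally ends in \*, telling clients they
    // may create new keywords with STORE or APPEND.
    fmt.Println(`* OK [PERMANENTFLAGS (\Seen \Answered \Flagged \Deleted \Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent \*)] x`)
}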
@@ -191,6 +191,10 @@ func (tc *testconn) xcodeArg(v any) {
 }
 
 func (tc *testconn) xuntagged(exps ...any) {
+	tc.xuntaggedCheck(true, exps...)
+}
+
+func (tc *testconn) xuntaggedCheck(all bool, exps ...any) {
 	tc.t.Helper()
 	last := append([]imapclient.Untagged{}, tc.lastUntagged...)
 next:

@@ -212,7 +216,7 @@ next:
 		}
 		tc.t.Fatalf("did not find untagged response %#v %T (%d) in %v%s", exp, exp, ei, tc.lastUntagged, next)
 	}
-	if len(last) > 0 {
+	if len(last) > 0 && all {
 		tc.t.Fatalf("leftover untagged responses %v", last)
 	}
 }

@@ -525,7 +529,7 @@ func TestScenario(t *testing.T) {
 	tc.transactf("ok", `store 1 flags.silent (\seen \answered)`)
 	tc.transactf("ok", `store 1 -flags.silent (\answered)`)
 	tc.transactf("ok", `store 1 +flags.silent (\answered)`)
-	tc.transactf("no", `store 1 flags (\badflag)`)
+	tc.transactf("bad", `store 1 flags (\badflag)`)
 	tc.transactf("ok", "noop")
 
 	tc.transactf("ok", "copy 1 Trash")
@@ -1,6 +1,7 @@
 package imapserver
 
 import (
+	"strings"
 	"testing"
 
 	"github.com/mjl-/mox/imapclient"

@@ -54,15 +55,30 @@ func TestStore(t *testing.T) {
 	tc.transactf("ok", "uid store 1 flags ()")
 	tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, noflags}})
 
+	tc.transactf("ok", "store 1 flags (new)") // New flag.
+	tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, imapclient.FetchFlags{"new"}}})
+	tc.transactf("ok", "store 1 flags (new new a b c)") // Duplicates are ignored.
+	tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, imapclient.FetchFlags{"new", "a", "b", "c"}}})
+	tc.transactf("ok", "store 1 +flags (new new c d e)")
+	tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, imapclient.FetchFlags{"new", "a", "b", "c", "d", "e"}}})
+	tc.transactf("ok", "store 1 -flags (new new e a c)")
+	tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, imapclient.FetchFlags{"b", "d"}}})
+	tc.transactf("ok", "store 1 flags ($Forwarded Different)")
+	tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, imapclient.FetchFlags{"$Forwarded", "different"}}})
+
 	tc.transactf("bad", "store")          // Need numset, flags and args.
 	tc.transactf("bad", "store 1")        // Need flags.
 	tc.transactf("bad", "store 1 +")      // Need flags.
 	tc.transactf("bad", "store 1 -")      // Need flags.
 	tc.transactf("bad", "store 1 flags ") // Need flags.
 	tc.transactf("bad", "store 1 flags ") // Need flags.
-	tc.transactf("bad", "store 1 flags (bogus)") // Unknown flag.
 
 	tc.client.Unselect()
-	tc.client.Examine("inbox") // Open read-only.
+	tc.transactf("ok", "examine inbox") // Open read-only.
+
+	// Flags are added to mailbox, not removed.
+	flags := strings.Split(`\Seen \Answered \Flagged \Deleted \Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent new a b c d e different`, " ")
+	tc.xuntaggedCheck(false, imapclient.UntaggedFlags(flags))
+
 	tc.transactf("no", `store 1 flags ()`) // No permission to set flags.
 }
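The STORE tests above exercise replace ("flags"), add ("+flags") and remove ("-flags") of keywords, with duplicate arguments ignored. A simplified, order-preserving standalone sketch of those set operations (system flags left out for brevity):

package main

import "fmt"

func contains(l []string, k string) bool {
    for _, have := range l {
        if have == k {
            return true
        }
    }
    return false
}

// merge adds keywords not yet present, keeping order and ignoring duplicates.
func merge(l, add []string) []string {
    for _, k := range add {
        if !contains(l, k) {
            l = append(l, k)
        }
    }
    return l
}

// remove drops the given keywords from l.
func remove(l, del []string) []string {
    var out []string
    for _, k := range l {
        if !contains(del, k) {
            out = append(out, k)
        }
    }
    return out
}

func main() {
    // store 1 flags (new new a b c): replace, duplicates ignored.
    kw := merge(nil, []string{"new", "new", "a", "b", "c"})
    // store 1 +flags (new new c d e): merge.
    kw = merge(kw, []string{"new", "new", "c", "d", "e"})
    // store 1 -flags (new new e a c): remove.
    kw = remove(kw, []string{"new", "new", "e", "a", "c"})
    fmt.Println(kw) // [b d]
}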
import.go (19 lines changed)

@@ -13,6 +13,8 @@ import (
 	"strings"
 	"time"
 
+	"golang.org/x/exp/maps"
+
 	"github.com/mjl-/mox/message"
 	"github.com/mjl-/mox/metrics"
 	"github.com/mjl-/mox/mlog"

@@ -229,7 +231,7 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
 		ctl.xcheck(err, "delivering message")
 		deliveredIDs = append(deliveredIDs, m.ID)
 		ctl.log.Debug("delivered message", mlog.Field("id", m.ID))
-		changes = append(changes, store.ChangeAddUID{MailboxID: m.MailboxID, UID: m.UID, Flags: m.Flags})
+		changes = append(changes, store.ChangeAddUID{MailboxID: m.MailboxID, UID: m.UID, Flags: m.Flags, Keywords: m.Keywords})
 	}
 
 	// todo: one goroutine for reading messages, one for parsing the message, one adding to database, one for junk filter training.

@@ -240,6 +242,9 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
 	mb, changes, err = a.MailboxEnsure(tx, mailbox, true)
 	ctl.xcheck(err, "ensuring mailbox exists")
 
+	// We ensure keywords in messages make it to the mailbox as well.
+	mailboxKeywords := map[string]bool{}
+
 	jf, _, err := a.OpenJunkFilter(ctx, ctl.log)
 	if err != nil && !errors.Is(err, store.ErrNoJunkFilter) {
 		ctl.xcheck(err, "open junk filter")

@@ -264,6 +269,10 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
 			ctl.log.Check(err, "closing temporary message after failing to import")
 		}()
 
+		for _, kw := range m.Keywords {
+			mailboxKeywords[kw] = true
+		}
+
 		// Parse message and store parsed information for later fast retrieval.
 		p, err := message.EnsurePart(msgf, m.Size)
 		if err != nil {

@@ -317,6 +326,14 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
 		process(m, msgf, origPath)
 	}
 
+	// If there are any new keywords, update the mailbox.
+	var changed bool
+	mb.Keywords, changed = store.MergeKeywords(mb.Keywords, maps.Keys(mailboxKeywords))
+	if changed {
+		err := tx.Update(&mb)
+		ctl.xcheck(err, "updating keywords in mailbox")
+	}
+
 	err = tx.Commit()
 	ctl.xcheck(err, "commit")
 	tx = nil
@@ -39,6 +39,7 @@ import (
 	"time"
 
 	"golang.org/x/crypto/bcrypt"
+	"golang.org/x/exp/slices"
 	"golang.org/x/text/unicode/norm"
 
 	"github.com/mjl-/bstore"

@@ -172,6 +173,11 @@ type Mailbox struct {
 	Junk  bool
 	Sent  bool
 	Trash bool
+
+	// Keywords as used in messages. Storing a non-system keyword for a message
+	// automatically adds it to this list. Used in the IMAP FLAGS response. Only
+	// "atoms", stored in lower case.
+	Keywords []string
 }
 
 // Subscriptions are separate from existence of mailboxes.

@@ -286,6 +292,7 @@ type Message struct {
 
 	MessageHash []byte // Hash of message. For rejects delivery, so optional like MessageID.
 	Flags
+	Keywords    []string `bstore:"index"` // Non-system or well-known $-flags. Only in "atom" syntax, stored in lower case.
 	Size        int64
 	TrainedJunk *bool  // If nil, no training done yet. Otherwise, true is trained as junk, false trained as nonjunk.
 	MsgPrefix   []byte // Typically holds received headers and/or header separator.

@@ -1054,7 +1061,7 @@ func (a *Account) DeliverMailbox(log *mlog.Log, mailbox string, m *Message, msgF
 		return err
 	}
 
-	changes = append(changes, ChangeAddUID{m.MailboxID, m.UID, m.Flags})
+	changes = append(changes, ChangeAddUID{m.MailboxID, m.UID, m.Flags, m.Keywords})
 	comm := RegisterComm(a)
 	defer comm.Unregister()
 	comm.Broadcast(changes)

@@ -1348,24 +1355,44 @@ func (f Flags) Set(mask, flags Flags) Flags {
 	return r
 }
 
-// FlagsQuerySet returns a map with the flags that are true in mask, with
-// values from flags.
-func FlagsQuerySet(mask, flags Flags) map[string]any {
-	r := map[string]any{}
-	set := func(f string, m, v bool) {
-		if m {
-			r[f] = v
-		}
-	}
-	set("Seen", mask.Seen, flags.Seen)
-	set("Answered", mask.Answered, flags.Answered)
-	set("Flagged", mask.Flagged, flags.Flagged)
-	set("Forwarded", mask.Forwarded, flags.Forwarded)
-	set("Junk", mask.Junk, flags.Junk)
-	set("Notjunk", mask.Notjunk, flags.Notjunk)
-	set("Deleted", mask.Deleted, flags.Deleted)
-	set("Draft", mask.Draft, flags.Draft)
-	set("Phishing", mask.Phishing, flags.Phishing)
-	set("MDNSent", mask.MDNSent, flags.MDNSent)
-	return r
+// RemoveKeywords removes keywords from l, modifying and returning it. Should only
+// be used with lower-case keywords, not with system flags like \Seen.
+func RemoveKeywords(l, remove []string) []string {
+	for _, k := range remove {
+		if i := slices.Index(l, k); i >= 0 {
+			copy(l[i:], l[i+1:])
+			l = l[:len(l)-1]
+		}
+	}
+	return l
+}
+
+// MergeKeywords adds keywords from add into l, updating and returning it along
+// with whether it added any keyword. Keywords are only added if they aren't
+// already present. Should only be used with lower-case keywords, not with system
+// flags like \Seen.
+func MergeKeywords(l, add []string) ([]string, bool) {
+	var changed bool
+	for _, k := range add {
+		if slices.Index(l, k) < 0 {
+			l = append(l, k)
+			changed = true
+		}
+	}
+	return l, changed
+}
+
+// ValidLowercaseKeyword returns whether s is a valid, lower-case, keyword.
+func ValidLowercaseKeyword(s string) bool {
+	for _, c := range s {
+		if c >= 'a' && c <= 'z' {
+			continue
+		}
+		// ../rfc/9051:6334
+		const atomspecials = `(){%*"\]`
+		if c <= ' ' || c > 0x7e || strings.ContainsRune(atomspecials, c) {
+			return false
+		}
+	}
+	return len(s) > 0
 }
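The two keyword helpers added above are small enough to exercise on their own. The snippet below copies their bodies as shown in the diff into a standalone program and demonstrates that callers assign the returned slice back (RemoveKeywords shifts elements in place and shrinks the slice); the example keyword values are made up:

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

// Copied from the diff above: RemoveKeywords removes keywords from l, modifying
// and returning it.
func RemoveKeywords(l, remove []string) []string {
    for _, k := range remove {
        if i := slices.Index(l, k); i >= 0 {
            copy(l[i:], l[i+1:])
            l = l[:len(l)-1]
        }
    }
    return l
}

// Copied from the diff above: MergeKeywords adds keywords from add that aren't
// already present, reporting whether anything changed.
func MergeKeywords(l, add []string) ([]string, bool) {
    var changed bool
    for _, k := range add {
        if slices.Index(l, k) < 0 {
            l = append(l, k)
            changed = true
        }
    }
    return l, changed
}

func main() {
    mb := []string{"other"}
    mb, changed := MergeKeywords(mb, []string{"test", "other"})
    fmt.Println(mb, changed) // [other test] true
    mb = RemoveKeywords(mb, []string{"other"})
    fmt.Println(mb) // [test]
}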
@ -12,6 +12,8 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/exp/maps"
|
||||||
|
|
||||||
"github.com/mjl-/mox/mlog"
|
"github.com/mjl-/mox/mlog"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -92,6 +94,7 @@ func (mr *MboxReader) Next() (*Message, *os.File, string, error) {
|
||||||
fromLine := mr.fromLine
|
fromLine := mr.fromLine
|
||||||
bf := bufio.NewWriter(f)
|
bf := bufio.NewWriter(f)
|
||||||
var flags Flags
|
var flags Flags
|
||||||
|
keywords := map[string]bool{}
|
||||||
var size int64
|
var size int64
|
||||||
for {
|
for {
|
||||||
line, err := mr.r.ReadBytes('\n')
|
line, err := mr.r.ReadBytes('\n')
|
||||||
|
@@ -132,7 +135,23 @@ func (mr *MboxReader) Next() (*Message, *os.File, string, error) {
 		} else if bytes.HasPrefix(line, []byte("X-Keywords:")) {
 			s := strings.TrimSpace(strings.SplitN(string(line), ":", 2)[1])
 			for _, t := range strings.Split(s, ",") {
-				flagSet(&flags, strings.ToLower(strings.TrimSpace(t)))
+				word := strings.ToLower(strings.TrimSpace(t))
+				switch word {
+				case "forwarded", "$forwarded":
+					flags.Forwarded = true
+				case "junk", "$junk":
+					flags.Junk = true
+				case "notjunk", "$notjunk", "nonjunk", "$nonjunk":
+					flags.Notjunk = true
+				case "phishing", "$phishing":
+					flags.Phishing = true
+				case "mdnsent", "$mdnsent":
+					flags.MDNSent = true
+				default:
+					if ValidLowercaseKeyword(word) {
+						keywords[word] = true
+					}
+				}
 			}
 		}
 	}
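
With this change, an mbox message carrying a header line such as X-Keywords: $Forwarded, ProjectX ends up with Flags.Forwarded set and the custom keyword "projectx" collected for Message.Keywords, instead of the custom word being dropped. A standalone sketch of the same classification logic, with an invented header value and without the ValidLowercaseKeyword check:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Invented X-Keywords value as it may appear in an mbox file.
	value := "$Forwarded, ProjectX, NonJunk"

	var forwarded, notjunk bool
	keywords := map[string]bool{}
	for _, t := range strings.Split(value, ",") {
		word := strings.ToLower(strings.TrimSpace(t))
		switch word {
		case "forwarded", "$forwarded":
			forwarded = true
		case "notjunk", "$notjunk", "nonjunk", "$nonjunk":
			notjunk = true
		default:
			// The real code only keeps words that pass ValidLowercaseKeyword.
			keywords[word] = true
		}
	}
	fmt.Println(forwarded, notjunk, keywords) // true true map[projectx:true]
}
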
@@ -165,7 +184,7 @@ func (mr *MboxReader) Next() (*Message, *os.File, string, error) {
 		return nil, nil, mr.Position(), fmt.Errorf("flush: %v", err)
 	}
 
-	m := &Message{Flags: flags, Size: size}
+	m := &Message{Flags: flags, Keywords: maps.Keys(keywords), Size: size}
 
 	if t := strings.SplitN(fromLine, " ", 3); len(t) == 3 {
 		layouts := []string{time.ANSIC, time.UnixDate, time.RubyDate}
@@ -297,6 +316,7 @@ func (mr *MaildirReader) Next() (*Message, *os.File, string, error) {
 
 	// Parse flags. See https://cr.yp.to/proto/maildir.html.
 	flags := Flags{}
+	keywords := map[string]bool{}
 	t = strings.SplitN(filepath.Base(sf.Name()), ":2,", 2)
 	if len(t) == 2 {
 		for _, c := range t[1] {
@@ -319,26 +339,29 @@ func (mr *MaildirReader) Next() (*Message, *os.File, string, error) {
 				if index >= len(mr.dovecotKeywords) {
 					continue
 				}
-				kw := mr.dovecotKeywords[index]
+				kw := strings.ToLower(mr.dovecotKeywords[index])
 				switch kw {
-				case "$Forwarded", "Forwarded":
+				case "$forwarded", "forwarded":
 					flags.Forwarded = true
-				case "$Junk", "Junk":
+				case "$junk", "junk":
 					flags.Junk = true
-				case "$NotJunk", "NotJunk", "NonJunk":
+				case "$notjunk", "notjunk", "nonjunk":
 					flags.Notjunk = true
-				case "$MDNSent":
+				case "$mdnsent", "mdnsent":
 					flags.MDNSent = true
-				case "$Phishing", "Phishing":
+				case "$phishing", "phishing":
 					flags.Phishing = true
+				default:
+					if ValidLowercaseKeyword(kw) {
+						keywords[kw] = true
+					}
 				}
-				// todo: custom labels, e.g. $label1, JunkRecorded?
 			}
 		}
 	}
 	}
 
-	m := &Message{Received: received, Flags: flags, Size: size}
+	m := &Message{Received: received, Flags: flags, Keywords: maps.Keys(keywords), Size: size}
 
 	// Prevent cleanup by defer.
 	mf := f
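
In maildirs written by Dovecot, keywords are typically encoded as the letters a-z in the flag part of the filename (after ":2,"), each letter being an index into the per-folder dovecot-keywords file that ParseDovecotKeywords reads below; the change above lower-cases the resolved names and keeps unknown ones as message keywords. A minimal sketch of that resolution, with an invented keyword list and filename:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Invented dovecot-keywords contents: entry 0 is marked as "a", entry 1 as "b".
	dovecotKeywords := []string{"$Forwarded", "ProjectX"}

	// Flag part of a maildir filename like "1234.abc.host:2,Sab".
	flagPart := "Sab"
	keywords := map[string]bool{}
	for _, c := range flagPart {
		if c < 'a' || c > 'z' {
			continue // upper-case letters are the standard maildir flags
		}
		index := int(c - 'a')
		if index >= len(dovecotKeywords) {
			continue
		}
		kw := strings.ToLower(dovecotKeywords[index])
		if kw == "$forwarded" {
			fmt.Println("forwarded flag set")
			continue
		}
		keywords[kw] = true
	}
	fmt.Println(keywords) // map[projectx:true]
}
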
@@ -397,18 +420,3 @@ func ParseDovecotKeywords(r io.Reader, log *mlog.Log) ([]string, error) {
 	}
 	return keywords[:end], err
 }
-
-func flagSet(flags *Flags, word string) {
-	switch word {
-	case "forwarded", "$forwarded":
-		flags.Forwarded = true
-	case "junk", "$junk":
-		flags.Junk = true
-	case "notjunk", "$notjunk", "nonjunk", "$nonjunk":
-		flags.Notjunk = true
-	case "phishing", "$phishing":
-		flags.Phishing = true
-	case "mdnsent", "$mdnsent":
-		flags.MDNSent = true
-	}
-}
@@ -26,7 +26,8 @@ type Change any
 type ChangeAddUID struct {
 	MailboxID int64
 	UID       UID
-	Flags     Flags
+	Flags     Flags    // System flags.
+	Keywords  []string // Other flags.
 }
 
 // ChangeRemoveUIDs is sent for removal of one or more messages from a mailbox.
@@ -41,6 +42,7 @@ type ChangeFlags struct {
 	UID      UID
 	Mask     Flags // Which flags are actually modified.
 	Flags    Flags // New flag values. All are set, not just mask.
+	Keywords []string // Other flags.
 }
 
 // ChangeRemoveMailbox is sent for a removed mailbox.
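
The new Keywords fields mean change notifications carry the full non-system keyword list alongside the fixed Flags struct. A rough sketch of what such change values look like, with invented IDs, assuming the types live in the store package; fields of ChangeFlags not shown in the hunk above are left at their zero values:

package main

import (
	"fmt"

	"github.com/mjl-/mox/store"
)

func main() {
	add := store.ChangeAddUID{MailboxID: 1, UID: 100, Flags: store.Flags{Seen: true}, Keywords: []string{"projectx"}}
	fl := store.ChangeFlags{UID: 100, Mask: store.Flags{Seen: true}, Flags: store.Flags{Seen: true}, Keywords: []string{"projectx"}}
	fmt.Printf("%+v\n%+v\n", add, fl)
}
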
BIN testdata/importtest.maildir.tgz (vendored): Binary file not shown.
BIN testdata/importtest.mbox.zip (vendored): Binary file not shown.
vendor/golang.org/x/exp/LICENSE (generated, vendored, new file, 27 lines): the Go project's BSD-style license text.
vendor/golang.org/x/exp/PATENTS (generated, vendored, new file, 22 lines): the Go project's additional IP rights grant (patents).
vendor/golang.org/x/exp/constraints/constraints.go (generated, vendored, new file, 50 lines): package constraints with the Signed, Unsigned, Integer, Float, Complex and Ordered type-parameter constraints.
vendor/golang.org/x/exp/maps/maps.go (generated, vendored, new file, 94 lines): package maps with generic helpers Keys, Values, Equal, EqualFunc, Clear, Clone, Copy and DeleteFunc; Keys is used above to turn the collected keyword set into a []string.
vendor/golang.org/x/exp/slices/slices.go (generated, vendored, new file, 258 lines): package slices with generic helpers such as Equal, Compare, Index (used by RemoveKeywords and MergeKeywords above), Contains, Insert, Delete, Replace, Clone, Compact, Grow and Clip.
vendor/golang.org/x/exp/slices/sort.go (generated, vendored, new file, 128 lines): generic Sort, SortFunc, SortStableFunc, IsSorted, IsSortedFunc, BinarySearch and BinarySearchFunc.
vendor/golang.org/x/exp/slices/zsortfunc.go (generated, vendored, new file, 479 lines): code generated by gen_sort_variants.go; pdqsort, heapsort and symmerge variants driven by a less function.
vendor/golang.org/x/exp/slices/zsortordered.go (generated, vendored, new file, 481 lines): code generated by gen_sort_variants.go; the same sort variants for ordered element types.
|
||||||
|
return medianOrdered(data, a-1, a, a+1, swaps)
|
||||||
|
}
|
||||||
|
|
||||||
|
func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
|
||||||
|
i := a
|
||||||
|
j := b - 1
|
||||||
|
for i < j {
|
||||||
|
data[i], data[j] = data[j], data[i]
|
||||||
|
i++
|
||||||
|
j--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
data[a+i], data[b+i] = data[b+i], data[a+i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func stableOrdered[E constraints.Ordered](data []E, n int) {
|
||||||
|
blockSize := 20 // must be > 0
|
||||||
|
a, b := 0, blockSize
|
||||||
|
for b <= n {
|
||||||
|
insertionSortOrdered(data, a, b)
|
||||||
|
a = b
|
||||||
|
b += blockSize
|
||||||
|
}
|
||||||
|
insertionSortOrdered(data, a, n)
|
||||||
|
|
||||||
|
for blockSize < n {
|
||||||
|
a, b = 0, 2*blockSize
|
||||||
|
for b <= n {
|
||||||
|
symMergeOrdered(data, a, a+blockSize, b)
|
||||||
|
a = b
|
||||||
|
b += 2 * blockSize
|
||||||
|
}
|
||||||
|
if m := a + blockSize; m < n {
|
||||||
|
symMergeOrdered(data, a, m, n)
|
||||||
|
}
|
||||||
|
blockSize *= 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
|
||||||
|
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
|
||||||
|
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
|
||||||
|
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
|
||||||
|
// Computer Science, pages 714-723. Springer, 2004.
|
||||||
|
//
|
||||||
|
// Let M = m-a and N = b-n. Wolog M < N.
|
||||||
|
// The recursion depth is bound by ceil(log(N+M)).
|
||||||
|
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
|
||||||
|
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
|
||||||
|
//
|
||||||
|
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
|
||||||
|
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
|
||||||
|
// in the paper carries through for Swap operations, especially as the block
|
||||||
|
// swapping rotate uses only O(M+N) Swaps.
|
||||||
|
//
|
||||||
|
// symMerge assumes non-degenerate arguments: a < m && m < b.
|
||||||
|
// Having the caller check this condition eliminates many leaf recursion calls,
|
||||||
|
// which improves performance.
|
||||||
|
func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
|
||||||
|
// Avoid unnecessary recursions of symMerge
|
||||||
|
// by direct insertion of data[a] into data[m:b]
|
||||||
|
// if data[a:m] only contains one element.
|
||||||
|
if m-a == 1 {
|
||||||
|
// Use binary search to find the lowest index i
|
||||||
|
// such that data[i] >= data[a] for m <= i < b.
|
||||||
|
// Exit the search loop with i == b in case no such index exists.
|
||||||
|
i := m
|
||||||
|
j := b
|
||||||
|
for i < j {
|
||||||
|
h := int(uint(i+j) >> 1)
|
||||||
|
if data[h] < data[a] {
|
||||||
|
i = h + 1
|
||||||
|
} else {
|
||||||
|
j = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Swap values until data[a] reaches the position before i.
|
||||||
|
for k := a; k < i-1; k++ {
|
||||||
|
data[k], data[k+1] = data[k+1], data[k]
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Avoid unnecessary recursions of symMerge
|
||||||
|
// by direct insertion of data[m] into data[a:m]
|
||||||
|
// if data[m:b] only contains one element.
|
||||||
|
if b-m == 1 {
|
||||||
|
// Use binary search to find the lowest index i
|
||||||
|
// such that data[i] > data[m] for a <= i < m.
|
||||||
|
// Exit the search loop with i == m in case no such index exists.
|
||||||
|
i := a
|
||||||
|
j := m
|
||||||
|
for i < j {
|
||||||
|
h := int(uint(i+j) >> 1)
|
||||||
|
if !(data[m] < data[h]) {
|
||||||
|
i = h + 1
|
||||||
|
} else {
|
||||||
|
j = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Swap values until data[m] reaches the position i.
|
||||||
|
for k := m; k > i; k-- {
|
||||||
|
data[k], data[k-1] = data[k-1], data[k]
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
mid := int(uint(a+b) >> 1)
|
||||||
|
n := mid + m
|
||||||
|
var start, r int
|
||||||
|
if m > mid {
|
||||||
|
start = n - b
|
||||||
|
r = mid
|
||||||
|
} else {
|
||||||
|
start = a
|
||||||
|
r = m
|
||||||
|
}
|
||||||
|
p := n - 1
|
||||||
|
|
||||||
|
for start < r {
|
||||||
|
c := int(uint(start+r) >> 1)
|
||||||
|
if !(data[p-c] < data[c]) {
|
||||||
|
start = c + 1
|
||||||
|
} else {
|
||||||
|
r = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
end := n - start
|
||||||
|
if start < m && m < end {
|
||||||
|
rotateOrdered(data, start, m, end)
|
||||||
|
}
|
||||||
|
if a < start && start < mid {
|
||||||
|
symMergeOrdered(data, a, start, mid)
|
||||||
|
}
|
||||||
|
if mid < end && end < b {
|
||||||
|
symMergeOrdered(data, mid, end, b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
|
||||||
|
// Data of the form 'x u v y' is changed to 'x v u y'.
|
||||||
|
// rotate performs at most b-a many calls to data.Swap,
|
||||||
|
// and it assumes non-degenerate arguments: a < m && m < b.
|
||||||
|
func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
|
||||||
|
i := m - a
|
||||||
|
j := b - m
|
||||||
|
|
||||||
|
for i != j {
|
||||||
|
if i > j {
|
||||||
|
swapRangeOrdered(data, m-i, m, j)
|
||||||
|
i -= j
|
||||||
|
} else {
|
||||||
|
swapRangeOrdered(data, m-i, m+j-i, i)
|
||||||
|
j -= i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// i == j
|
||||||
|
swapRangeOrdered(data, m-i, m, i)
|
||||||
|
}
|
vendor/modules.txt (vendored, 5 additions)

@@ -60,6 +60,11 @@ golang.org/x/crypto/bcrypt
 golang.org/x/crypto/blake2b
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/pbkdf2
+# golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
+## explicit; go 1.20
+golang.org/x/exp/constraints
+golang.org/x/exp/maps
+golang.org/x/exp/slices
 # golang.org/x/mod v0.8.0
 ## explicit; go 1.17
 golang.org/x/mod/internal/lazyregexp