add basic rate limiters
limiting is done based on remote ip's, with 3 ip mask variants to limit networks of machines. often with two windows, enabling short bursts of activity, but not sustained high activity.

currently only for imap and smtp, not yet http.

limits are currently based on:
- number of open connections
- connection rate
- limits after authentication failures. too many failures, and new connections will be dropped.
- rate of delivery in total number of messages
- rate of delivery in total size of messages

the limits on connections and authentication failures are in-memory. the limits on delivery of messages are based on stored messages.

the limits themselves are not yet configurable, let's use this first.

in the future, we may also want to have stricter limits for senders without any reputation.
parent 1617b7c0d6
commit 2154392bd8

7 changed files with 584 additions and 6 deletions
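For orientation, a minimal usage sketch of the new ratelimit.Limiter as the servers below use it. The standalone main package and the remote IP are hypothetical; the window and limits mirror the connection-rate limiter added for IMAP and SMTP, and the sketch assumes the ratelimit package introduced in this commit is available as a dependency.

package main

import (
    "fmt"
    "net"
    "time"

    "github.com/mjl-/mox/ratelimit"
)

func main() {
    // One limiter per resource, with a single one-minute window and limits for the
    // three IP mask classes: 300/minute for the exact IP (or /64 for IPv6), 900 for
    // its /26 (IPv4) or /48 (IPv6) network, 2700 for its /21 or /32 network.
    limiter := &ratelimit.Limiter{
        WindowLimits: []ratelimit.WindowLimit{
            {
                Window: time.Minute,
                Limits: [...]int64{300, 900, 2700},
            },
        },
    }

    ip := net.ParseIP("198.51.100.7") // Hypothetical remote address.
    if !limiter.Add(ip, time.Now(), 1) {
        fmt.Println("connection rate too high, refusing")
        return
    }
    fmt.Println("connection accepted")
}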
@@ -48,6 +48,7 @@ import (
    "fmt"
    "hash"
    "io"
    "math"
    "net"
    "os"
    "path/filepath"

@@ -70,6 +71,7 @@ import (
    "github.com/mjl-/mox/mox-"
    "github.com/mjl-/mox/moxio"
    "github.com/mjl-/mox/moxvar"
    "github.com/mjl-/mox/ratelimit"
    "github.com/mjl-/mox/scram"
    "github.com/mjl-/mox/store"
)

@@ -101,6 +103,33 @@ var (
    )
)

var limiterConnectionrate, limiterConnections *ratelimit.Limiter

func init() {
    // Also called by tests, so they don't trigger the rate limiter.
    limitersInit()
}

func limitersInit() {
    mox.LimitersInit()
    limiterConnectionrate = &ratelimit.Limiter{
        WindowLimits: []ratelimit.WindowLimit{
            {
                Window: time.Minute,
                Limits: [...]int64{300, 900, 2700},
            },
        },
    }
    limiterConnections = &ratelimit.Limiter{
        WindowLimits: []ratelimit.WindowLimit{
            {
                Window: time.Duration(math.MaxInt64), // All of time.
                Limits: [...]int64{30, 90, 270},
            },
        },
    }
}

// Capabilities (extensions) the server supports. Connections will add a few more, e.g. STARTTLS, LOGINDISABLED, AUTH=PLAIN.
// ENABLE: ../rfc/5161
// LITERAL+: ../rfc/7888

@@ -136,6 +165,7 @@ type conn struct {
    tw *moxio.TraceWriter
    lastlog time.Time // For printing time since previous log line.
    tlsConfig *tls.Config // TLS config to use for handshake.
    remoteIP net.IP
    noRequireSTARTTLS bool
    cmd string // Currently executing, for deciding to applyChanges and logging.
    cmdMetric string // Currently executing, for metrics.

@@ -507,12 +537,21 @@ func (c *conn) xreadliteral(size int64, sync bool) string {
var cleanClose struct{} // Sentinel value for panic/recover indicating clean close of connection.

func serve(listenerName string, cid int64, tlsConfig *tls.Config, nc net.Conn, xtls, noRequireSTARTTLS bool) {
    var remoteIP net.IP
    if a, ok := nc.RemoteAddr().(*net.TCPAddr); ok {
        remoteIP = a.IP
    } else {
        // For net.Pipe, during tests.
        remoteIP = net.ParseIP("127.0.0.10")
    }

    c := &conn{
        cid: cid,
        conn: nc,
        tls: xtls,
        lastlog: time.Now(),
        tlsConfig: tlsConfig,
        remoteIP: remoteIP,
        noRequireSTARTTLS: noRequireSTARTTLS,
        enabled: map[capability]bool{},
        cmd: "(greeting)",

@@ -583,6 +622,25 @@ func serve(listenerName string, cid int64, tlsConfig *tls.Config, nc net.Conn, x
    default:
    }

    if !limiterConnectionrate.Add(c.remoteIP, time.Now(), 1) {
        c.writelinef("* BYE connection rate from your ip or network too high, slow down please")
        return
    }

    // If remote IP/network resulted in too many authentication failures, refuse to serve.
    if !mox.LimiterFailedAuth.CanAdd(c.remoteIP, time.Now(), 1) {
        c.log.Debug("refusing connection due to many auth failures", mlog.Field("remoteip", c.remoteIP))
        c.writelinef("* BYE too many auth failures")
        return
    }

    if !limiterConnections.Add(c.remoteIP, time.Now(), 1) {
        c.log.Debug("refusing connection due to many open connections", mlog.Field("remoteip", c.remoteIP))
        c.writelinef("* BYE too many open connections from your ip or network")
        return
    }
    defer limiterConnections.Add(c.remoteIP, time.Now(), -1)

    // We register and unregister the original connection, in case c.conn is
    // replaced with a TLS connection later on.
    mox.Connections.Register(nc, "imap", listenerName)

@@ -1313,6 +1371,12 @@ func (c *conn) cmdAuthenticate(tag, cmd string, p *parser) {
    authResult := "error"
    defer func() {
        metrics.AuthenticationInc("imap", authVariant, authResult)
        switch authResult {
        case "ok":
            mox.LimiterFailedAuth.Reset(c.remoteIP, time.Now())
        default:
            mox.LimiterFailedAuth.Add(c.remoteIP, time.Now(), 1)
        }
    }()

    // Request syntax: ../rfc/9051:6341 ../rfc/3501:4561
@@ -300,6 +300,8 @@ func startNoSwitchboard(t *testing.T) *testconn {
}

func startArgs(t *testing.T, first, isTLS, allowLoginWithoutTLS bool) *testconn {
    limitersInit() // Reset rate limiters.

    if first {
        os.RemoveAll("../testdata/imap/data")
    }
mox-/limitauth.go (new file, 30 lines)
@@ -0,0 +1,30 @@
package mox

import (
    "time"

    "github.com/mjl-/mox/ratelimit"
)

var LimiterFailedAuth *ratelimit.Limiter

func init() {
    LimitersInit()
}

// LimitersInit initializes the failed auth rate limiter.
func LimitersInit() {
    LimiterFailedAuth = &ratelimit.Limiter{
        WindowLimits: []ratelimit.WindowLimit{
            {
                // Max 10 failures/minute for ipmasked1, 30 for ipmasked2, 90 for ipmasked3.
                Window: time.Minute,
                Limits: [...]int64{10, 30, 90},
            },
            {
                Window: 24 * time.Hour,
                Limits: [...]int64{50, 150, 450},
            },
        },
    }
}
ratelimit/ratelimit.go (new file, 146 lines)
@@ -0,0 +1,146 @@
// Package ratelimit provides a simple window-based rate limiter.
package ratelimit

import (
    "net"
    "sync"
    "time"
)

// Limiter is a simple rate limiter with one or more fixed windows, e.g. the
// last minute/hour/day/week, working on three classes/subnets of an IP.
type Limiter struct {
    sync.Mutex
    WindowLimits []WindowLimit
    ipmasked [3][16]byte
}

// WindowLimit holds counters for one window, with limits for each IP class/subnet.
type WindowLimit struct {
    Window time.Duration
    Limits [3]int64 // For "ipmasked1" through "ipmasked3".
    Time uint32 // Time/Window.
    Counts map[struct {
        Index uint8
        IPMasked [16]byte
    }]int64
}

// Add attempts to consume "n" items from the rate limiter. If the total for this
// key and this interval would exceed the limit, "n" is not counted and false is
// returned. If now represents a different time interval, all counts are reset.
func (l *Limiter) Add(ip net.IP, tm time.Time, n int64) bool {
    return l.checkAdd(true, ip, tm, n)
}

// CanAdd reports whether n could be added to the limiter, without adding it.
func (l *Limiter) CanAdd(ip net.IP, tm time.Time, n int64) bool {
    return l.checkAdd(false, ip, tm, n)
}

func (l *Limiter) checkAdd(add bool, ip net.IP, tm time.Time, n int64) bool {
    l.Lock()
    defer l.Unlock()

    // First check.
    for i, pl := range l.WindowLimits {
        t := uint32(tm.UnixNano() / int64(pl.Window))

        if t > pl.Time || pl.Counts == nil {
            l.WindowLimits[i].Time = t
            pl.Counts = map[struct {
                Index uint8
                IPMasked [16]byte
            }]int64{} // Used below.
            l.WindowLimits[i].Counts = pl.Counts
        }

        for j := 0; j < 3; j++ {
            if i == 0 {
                l.ipmasked[j] = l.maskIP(j, ip)
            }

            v := pl.Counts[struct {
                Index uint8
                IPMasked [16]byte
            }{uint8(j), l.ipmasked[j]}]
            if v+n > pl.Limits[j] {
                return false
            }
        }
    }
    if !add {
        return true
    }
    // Finally record.
    for _, pl := range l.WindowLimits {
        for j := 0; j < 3; j++ {
            pl.Counts[struct {
                Index uint8
                IPMasked [16]byte
            }{uint8(j), l.ipmasked[j]}] += n
        }
    }
    return true
}

// Reset sets the counter to 0 for key and ip, and subtracts from the ipmasked counts.
func (l *Limiter) Reset(ip net.IP, tm time.Time) {
    l.Lock()
    defer l.Unlock()

    // Prepare masked ip's.
    for i := 0; i < 3; i++ {
        l.ipmasked[i] = l.maskIP(i, ip)
    }

    for _, pl := range l.WindowLimits {
        t := uint32(tm.UnixNano() / int64(pl.Window))
        if t != pl.Time || pl.Counts == nil {
            continue
        }
        var n int64
        for j := 0; j < 3; j++ {
            k := struct {
                Index uint8
                IPMasked [16]byte
            }{uint8(j), l.ipmasked[j]}
            if j == 0 {
                n = pl.Counts[k]
            }
            if pl.Counts != nil {
                pl.Counts[k] -= n
            }
        }
    }
}

func (l *Limiter) maskIP(i int, ip net.IP) [16]byte {
    isv4 := ip.To4() != nil

    var ipmasked net.IP
    if isv4 {
        switch i {
        case 0:
            ipmasked = ip
        case 1:
            ipmasked = ip.Mask(net.CIDRMask(26, 32))
        case 2:
            ipmasked = ip.Mask(net.CIDRMask(21, 32))
        default:
            panic("missing case for maskip ipv4")
        }
    } else {
        switch i {
        case 0:
            ipmasked = ip.Mask(net.CIDRMask(64, 128))
        case 1:
            ipmasked = ip.Mask(net.CIDRMask(48, 128))
        case 2:
            ipmasked = ip.Mask(net.CIDRMask(32, 128))
        default:
            panic("missing case for masking ipv6")
        }
    }
    return *(*[16]byte)(ipmasked.To16())
}
ratelimit/ratelimit_test.go (new file, 72 lines)
@@ -0,0 +1,72 @@
package ratelimit

import (
    "net"
    "testing"
    "time"
)

func TestLimiter(t *testing.T) {
    l := &Limiter{
        WindowLimits: []WindowLimit{
            {
                Window: time.Minute,
                Limits: [...]int64{2, 4, 6},
            },
        },
    }

    now := time.Now()
    check := func(exp bool, ip net.IP, tm time.Time, n int64) {
        t.Helper()
        ok := l.CanAdd(ip, tm, n)
        if ok != exp {
            t.Fatalf("canadd, got %v, expected %v", ok, exp)
        }
        ok = l.Add(ip, tm, n)
        if ok != exp {
            t.Fatalf("add, got %v, expected %v", ok, exp)
        }
    }
    check(false, net.ParseIP("10.0.0.1"), now, 3) // past limit
    check(true, net.ParseIP("10.0.0.1"), now, 1)
    check(false, net.ParseIP("10.0.0.1"), now, 2) // now past limit
    check(true, net.ParseIP("10.0.0.1"), now, 1)
    check(false, net.ParseIP("10.0.0.1"), now, 1) // now past limit

    next := now.Add(time.Minute)
    check(true, net.ParseIP("10.0.0.1"), next, 2) // next minute, should have reset
    check(true, net.ParseIP("10.0.0.2"), next, 2) // other ip
    check(false, net.ParseIP("10.0.0.3"), next, 2) // yet another ip, ipmasked2 was consumed
    check(true, net.ParseIP("10.0.1.4"), next, 2) // using ipmasked3
    check(false, net.ParseIP("10.0.2.4"), next, 2) // ipmasked3 consumed
    l.Reset(net.ParseIP("10.0.1.4"), next)
    if !l.CanAdd(net.ParseIP("10.0.1.4"), next, 2) {
        t.Fatalf("reset did not free up count for ip")
    }
    check(true, net.ParseIP("10.0.2.4"), next, 2) // ipmasked3 available again

    l = &Limiter{
        WindowLimits: []WindowLimit{
            {
                Window: time.Minute,
                Limits: [...]int64{1, 2, 3},
            },
            {
                Window: time.Hour,
                Limits: [...]int64{2, 3, 4},
            },
        },
    }

    min1 := time.UnixMilli((time.Now().UnixNano() / int64(time.Hour)) * int64(time.Hour) / int64(time.Millisecond))
    min2 := min1.Add(time.Minute)
    min3 := min1.Add(2 * time.Minute)
    check(true, net.ParseIP("10.0.0.1"), min1, 1)
    check(true, net.ParseIP("10.0.0.1"), min2, 1)
    check(false, net.ParseIP("10.0.0.1"), min3, 1)
    check(true, net.ParseIP("10.0.0.255"), min3, 1) // ipmasked2 still ok
    check(false, net.ParseIP("10.0.0.255"), min3, 1) // ipmasked2 also full
    check(true, net.ParseIP("10.0.1.1"), min3, 1) // ipmasked3 still ok
    check(false, net.ParseIP("10.0.1.255"), min3, 1) // ipmasked3 also full
}
@@ -15,6 +15,7 @@ import (
    "fmt"
    "hash"
    "io"
    "math"
    "net"
    "os"
    "runtime/debug"

@@ -42,6 +43,7 @@ import (
    "github.com/mjl-/mox/moxvar"
    "github.com/mjl-/mox/publicsuffix"
    "github.com/mjl-/mox/queue"
    "github.com/mjl-/mox/ratelimit"
    "github.com/mjl-/mox/scram"
    "github.com/mjl-/mox/smtp"
    "github.com/mjl-/mox/spf"

@@ -57,9 +59,39 @@ var xlog = mlog.New("smtpserver")
// We use panic and recover for error handling while executing commands.
// These errors signal the connection must be closed.
var (
    errIO = errors.New("fatal io error")
)
var errIO = errors.New("fatal io error")

var limiterConnectionRate, limiterConnections *ratelimit.Limiter

// For delivery rate limiting. Variable because changed during tests.
var limitIPMasked1MessagesPerMinute int = 500
var limitIPMasked1SizePerMinute int64 = 1000 * 1024 * 1024

func init() {
    // Also called by tests, so they don't trigger the rate limiter.
    limitersInit()
}

func limitersInit() {
    mox.LimitersInit()
    // todo future: make these configurable
    limiterConnectionRate = &ratelimit.Limiter{
        WindowLimits: []ratelimit.WindowLimit{
            {
                Window: time.Minute,
                Limits: [...]int64{300, 900, 2700},
            },
        },
    }
    limiterConnections = &ratelimit.Limiter{
        WindowLimits: []ratelimit.WindowLimit{
            {
                Window: time.Duration(math.MaxInt64), // All of time.
                Limits: [...]int64{30, 90, 270},
            },
        },
    }
}

type codes struct {
    code int

@@ -503,6 +535,25 @@ func serve(listenerName string, cid int64, hostname dns.Domain, tlsConfig *tls.C
    default:
    }

    if !limiterConnectionRate.Add(c.remoteIP, time.Now(), 1) {
        c.writecodeline(smtp.C421ServiceUnavail, smtp.SePol7Other0, "connection rate from your ip or network too high, slow down please", nil)
        return
    }

    // If remote IP/network resulted in too many authentication failures, refuse to serve.
    if submission && !mox.LimiterFailedAuth.CanAdd(c.remoteIP, time.Now(), 1) {
        c.log.Debug("refusing connection due to many auth failures", mlog.Field("remoteip", c.remoteIP))
        c.writecodeline(smtp.C421ServiceUnavail, smtp.SePol7Other0, "too many auth failures", nil)
        return
    }

    if !limiterConnections.Add(c.remoteIP, time.Now(), 1) {
        c.log.Debug("refusing connection due to many open connections", mlog.Field("remoteip", c.remoteIP))
        c.writecodeline(smtp.C421ServiceUnavail, smtp.SePol7Other0, "too many open connections from your ip or network", nil)
        return
    }
    defer limiterConnections.Add(c.remoteIP, time.Now(), -1)

    // We register and unregister the original connection, in case c.conn is replaced
    // with a TLS connection later on.
    mox.Connections.Register(nc, "smtp", listenerName)

@@ -773,6 +824,12 @@ func (c *conn) cmdAuth(p *parser) {
    authResult := "error"
    defer func() {
        metrics.AuthenticationInc("submission", authVariant, authResult)
        switch authResult {
        case "ok":
            mox.LimiterFailedAuth.Reset(c.remoteIP, time.Now())
        default:
            mox.LimiterFailedAuth.Add(c.remoteIP, time.Now(), 1)
        }
    }()

    // todo: implement "AUTH LOGIN"? it looks like PLAIN, but without the continuation. it is an obsolete sasl mechanism. an account in desktop outlook appears to go through the cloud, attempting to submit email only with unadvertised and AUTH LOGIN. it appears they don't know "plain".

@@ -1913,6 +1970,88 @@ func (c *conn) deliver(ctx context.Context, recvHdrFor func(string) string, msgW
        }
    }()

    // We don't want to let a single IP or network deliver too many messages to an
    // account. They may fill up the mailbox, either with messages that have to be
    // purged, or by filling the disk. We check both cases for IP's and networks.
    var rateError bool // Whether returned error represents a rate error.
    err = acc.DB.Read(func(tx *bstore.Tx) (retErr error) {
        now := time.Now()
        defer func() {
            log.Debugx("checking message and size delivery rates", retErr, mlog.Field("duration", time.Since(now)))
        }()

        checkCount := func(msg store.Message, window time.Duration, limit int) {
            if retErr != nil {
                return
            }
            q := bstore.QueryTx[store.Message](tx)
            q.FilterNonzero(msg)
            q.FilterGreater("Received", now.Add(-window))
            n, err := q.Count()
            if err != nil {
                retErr = err
                return
            }
            if n >= limit {
                rateError = true
                retErr = fmt.Errorf("more than %d messages in past %s from your ip/network", limit, window)
            }
        }

        checkSize := func(msg store.Message, window time.Duration, limit int64) {
            if retErr != nil {
                return
            }
            q := bstore.QueryTx[store.Message](tx)
            q.FilterNonzero(msg)
            q.FilterGreater("Received", now.Add(-window))
            size := msgWriter.Size
            err := q.ForEach(func(v store.Message) error {
                size += v.Size
                return nil
            })
            if err != nil {
                retErr = err
                return
            }
            if size > limit {
                rateError = true
                retErr = fmt.Errorf("more than %d bytes in past %s from your ip/network", limit, window)
            }
        }

        // todo future: make these configurable

        const day = 24 * time.Hour
        checkCount(store.Message{RemoteIPMasked1: ipmasked1}, time.Minute, limitIPMasked1MessagesPerMinute)
        checkCount(store.Message{RemoteIPMasked1: ipmasked1}, day, 20*500)
        checkCount(store.Message{RemoteIPMasked2: ipmasked2}, time.Minute, 1500)
        checkCount(store.Message{RemoteIPMasked2: ipmasked2}, day, 20*1500)
        checkCount(store.Message{RemoteIPMasked3: ipmasked3}, time.Minute, 4500)
        checkCount(store.Message{RemoteIPMasked3: ipmasked3}, day, 20*4500)

        const MB = 1024 * 1024
        checkSize(store.Message{RemoteIPMasked1: ipmasked1}, time.Minute, limitIPMasked1SizePerMinute)
        checkSize(store.Message{RemoteIPMasked1: ipmasked1}, day, 3*1000*MB)
        checkSize(store.Message{RemoteIPMasked2: ipmasked2}, time.Minute, 3000*MB)
        checkSize(store.Message{RemoteIPMasked2: ipmasked2}, day, 3*3000*MB)
        checkSize(store.Message{RemoteIPMasked3: ipmasked3}, time.Minute, 9000*MB)
        checkSize(store.Message{RemoteIPMasked3: ipmasked3}, day, 3*9000*MB)

        return retErr
    })
    if err != nil && !rateError {
        log.Errorx("checking delivery rates", err)
        metricDelivery.WithLabelValues("checkrates", "").Inc()
        addError(rcptAcc, smtp.C451LocalErr, smtp.SeSys3Other0, false, "error processing")
        continue
    } else if err != nil {
        log.Debugx("refusing due to high delivery rate", err)
        metricDelivery.WithLabelValues("highrate", "").Inc()
        addError(rcptAcc, smtp.C452StorageFull, smtp.SeMailbox2Full2, true, err.Error())
        continue
    }

    // ../rfc/5321:3204
    // ../rfc/5321:3300
    // Received-SPF header goes before Received. ../rfc/7208:2038
@@ -28,6 +28,7 @@ import (
    "github.com/mjl-/mox/dkim"
    "github.com/mjl-/mox/dmarcdb"
    "github.com/mjl-/mox/dns"
    "github.com/mjl-/mox/mlog"
    "github.com/mjl-/mox/mox-"
    "github.com/mjl-/mox/queue"
    "github.com/mjl-/mox/smtp"

@@ -70,10 +71,13 @@ type testserver struct {
    user, pass string
    submission bool
    dnsbls []dns.Domain
    tlsmode smtpclient.TLSMode
}

func newTestServer(t *testing.T, configPath string, resolver dns.Resolver) *testserver {
    ts := testserver{t: t, cid: 1, resolver: resolver}
    limitersInit() // Reset rate limiters.

    ts := testserver{t: t, cid: 1, resolver: resolver, tlsmode: smtpclient.TLSOpportunistic}

    mox.Context = context.Background()
    mox.ConfigStaticPath = configPath

@@ -125,7 +129,7 @@ func (ts *testserver) run(fn func(helloErr error, client *smtpclient.Client)) {
        authLine = fmt.Sprintf("AUTH PLAIN %s", base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("\u0000%s\u0000%s", ts.user, ts.pass))))
    }

    client, err := smtpclient.New(context.Background(), xlog.WithCid(ts.cid-1), clientConn, smtpclient.TLSOpportunistic, "mox.example", authLine)
    client, err := smtpclient.New(context.Background(), xlog.WithCid(ts.cid-1), clientConn, ts.tlsmode, "mox.example", authLine)
    if err != nil {
        clientConn.Close()
    } else {

@@ -745,5 +749,126 @@ func TestTLSReport(t *testing.T) {
    run(tlsrpt, 0)
    run(strings.ReplaceAll(tlsrpt, "xmox.nl", "mox.example"), 1)
}

func TestRatelimitConnectionrate(t *testing.T) {
    ts := newTestServer(t, "../testdata/smtp/mox.conf", dns.MockResolver{})
    defer ts.close()

    // We'll be creating 300 connections, so skip TLS and reduce logging noise.
    ts.tlsmode = smtpclient.TLSSkip
    mlog.SetConfig(map[string]mlog.Level{"": mlog.LevelInfo})

    // We may be passing a window boundary during this test. The limit is 300/minute.
    // So make twice that many connections and hope the test doesn't take too long.
    for i := 0; i <= 2*300; i++ {
        ts.run(func(err error, client *smtpclient.Client) {
            t.Helper()
            if err != nil && i < 300 {
                t.Fatalf("expected smtp connection, got %v", err)
            }
            if err == nil && i == 600 {
                t.Fatalf("expected no smtp connection due to connection rate limit, got connection")
            }
            if client != nil {
                client.Close()
            }
        })
    }
}

func TestRatelimitAuth(t *testing.T) {
    ts := newTestServer(t, "../testdata/smtp/mox.conf", dns.MockResolver{})
    defer ts.close()

    ts.submission = true
    ts.tlsmode = smtpclient.TLSSkip
    ts.user = "bad"
    ts.pass = "bad"

    // We may be passing a window boundary during this test. The limit is 10 auth
    // failures/minute. So make twice that many connections and hope the test doesn't
    // take too long.
    for i := 0; i <= 2*10; i++ {
        ts.run(func(err error, client *smtpclient.Client) {
            t.Helper()
            if err == nil {
                t.Fatalf("got auth success with bad credentials")
            }
            var cerr smtpclient.Error
            badauth := errors.As(err, &cerr) && cerr.Code == smtp.C535AuthBadCreds
            if !badauth && i < 10 {
                t.Fatalf("expected auth failure, got %v", err)
            }
            if badauth && i == 20 {
                t.Fatalf("expected no smtp connection due to failed auth rate limit, got other error %v", err)
            }
            if client != nil {
                client.Close()
            }
        })
    }
}

func TestRatelimitDelivery(t *testing.T) {
    resolver := dns.MockResolver{
        A: map[string][]string{
            "example.org.": {"127.0.0.10"}, // For mx check.
        },
        PTR: map[string][]string{
            "127.0.0.10": {"example.org."},
        },
    }
    ts := newTestServer(t, "../testdata/smtp/mox.conf", resolver)
    defer ts.close()

    orig := limitIPMasked1MessagesPerMinute
    limitIPMasked1MessagesPerMinute = 1
    defer func() {
        limitIPMasked1MessagesPerMinute = orig
    }()

    ts.run(func(err error, client *smtpclient.Client) {
        mailFrom := "remote@example.org"
        rcptTo := "mjl@mox.example"
        if err == nil {
            err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false)
        }
        tcheck(t, err, "deliver to remote")

        err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false)
        var cerr smtpclient.Error
        if err == nil || !errors.As(err, &cerr) || cerr.Code != smtp.C452StorageFull {
            t.Fatalf("got err %v, expected smtpclient error with code 452 for storage full", err)
        }
    })

    limitIPMasked1MessagesPerMinute = orig

    origSize := limitIPMasked1SizePerMinute
    // Message was already delivered once. We'll do another one. But the 3rd will fail.
    // We need the actual size with prepended headers, since that is used in the
    // calculations.
    msg, err := bstore.QueryDB[store.Message](ts.acc.DB).Get()
    if err != nil {
        t.Fatalf("getting delivered message for its size: %v", err)
    }
    limitIPMasked1SizePerMinute = 2*msg.Size + int64(len(deliverMessage)/2)
    defer func() {
        limitIPMasked1SizePerMinute = origSize
    }()
    ts.run(func(err error, client *smtpclient.Client) {
        mailFrom := "remote@example.org"
        rcptTo := "mjl@mox.example"
        if err == nil {
            err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false)
        }
        tcheck(t, err, "deliver to remote")

        err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false)
        var cerr smtpclient.Error
        if err == nil || !errors.As(err, &cerr) || cerr.Code != smtp.C452StorageFull {
            t.Fatalf("got err %v, expected smtpclient error with code 452 for storage full", err)
        }
    })
}