Mirror of https://github.com/caddyserver/caddy.git
caddytls: Sync distributed storage cleaning (#5940)

* caddytls: Log out remote addr to detect abuse
* caddytls: Sync distributed storage cleaning
* Handle errors
* Update certmagic to fix tiny bug
* Split off port when logging remote IP
* Upgrade CertMagic

parent df5edf6bdb
commit 6d9a83376b

5 changed files with 36 additions and 21 deletions
go.mod (+1 −1)

@@ -7,7 +7,7 @@ require (
     github.com/Masterminds/sprig/v3 v3.2.3
     github.com/alecthomas/chroma/v2 v2.9.1
     github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b
-    github.com/caddyserver/certmagic v0.19.3-0.20231030175448-e8e6167a2a51
+    github.com/caddyserver/certmagic v0.20.0
     github.com/dustin/go-humanize v1.0.1
     github.com/go-chi/chi/v5 v5.0.10
     github.com/google/cel-go v0.15.1
go.sum (+2 −4)

@@ -61,10 +61,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/caddyserver/certmagic v0.19.2 h1:HZd1AKLx4592MalEGQS39DKs2ZOAJCEM/xYPMQ2/ui0=
-github.com/caddyserver/certmagic v0.19.2/go.mod h1:fsL01NomQ6N+kE2j37ZCnig2MFosG+MIO4ztnmG/zz8=
-github.com/caddyserver/certmagic v0.19.3-0.20231030175448-e8e6167a2a51 h1:9gmY8uzVBgUqJPjGBHMikvrxoyA+Ctu1u5WeVk5ktQ0=
-github.com/caddyserver/certmagic v0.19.3-0.20231030175448-e8e6167a2a51/go.mod h1:N4sXgpICQUskEWpj7zVzvWD41p3NYacrNoZYiRM2jTg=
+github.com/caddyserver/certmagic v0.20.0 h1:bTw7LcEZAh9ucYCRXyCpIrSAGplplI0vGYJ4BpCQ/Fc=
+github.com/caddyserver/certmagic v0.20.0/go.mod h1:N4sXgpICQUskEWpj7zVzvWD41p3NYacrNoZYiRM2jTg=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
modules/caddytls/acmeissuer.go (+13 −1)

@@ -16,9 +16,11 @@ package caddytls

 import (
     "context"
+    "crypto/tls"
     "crypto/x509"
     "errors"
     "fmt"
+    "net"
     "net/url"
     "os"
     "strconv"

@@ -496,7 +498,7 @@ func (iss *ACMEIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 // to see if a certificate can be obtained for name.
 // The certificate request should be denied if this
 // returns an error.
-func onDemandAskRequest(logger *zap.Logger, ask string, name string) error {
+func onDemandAskRequest(ctx context.Context, logger *zap.Logger, ask string, name string) error {
     askURL, err := url.Parse(ask)
     if err != nil {
         return fmt.Errorf("parsing ask URL: %v", err)

@@ -513,7 +515,17 @@ func onDemandAskRequest(logger *zap.Logger, ask string, name string) error {
     }
     resp.Body.Close()

+    // logging out the client IP can be useful for servers that want to count
+    // attempts from clients to detect patterns of abuse
+    var clientIP string
+    if hello, ok := ctx.Value(certmagic.ClientHelloInfoCtxKey).(*tls.ClientHelloInfo); ok && hello != nil {
+        if remote := hello.Conn.RemoteAddr(); remote != nil {
+            clientIP, _, _ = net.SplitHostPort(remote.String())
+        }
+    }
+
     logger.Debug("response from ask endpoint",
+        zap.String("client_ip", clientIP),
         zap.String("domain", name),
         zap.String("url", askURLString),
         zap.Int("status", resp.StatusCode))
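One of the commit-message bullets is "Split off port when logging remote IP": the remote address string from the TLS connection includes a port (and brackets for IPv6), so the code above runs it through net.SplitHostPort before logging. A minimal standalone sketch of just that splitting step, using made-up example addresses:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Sample values shaped like net.Conn.RemoteAddr().String(); these
	// addresses are illustrative only, not taken from the commit.
	remotes := []string{"203.0.113.7:54321", "[2001:db8::1]:443"}

	for _, remote := range remotes {
		// SplitHostPort strips the port and any IPv6 brackets, leaving
		// just the client IP, which is what gets logged as client_ip.
		clientIP, port, err := net.SplitHostPort(remote)
		if err != nil {
			fmt.Println("could not split remote address:", err)
			continue
		}
		fmt.Printf("remote=%s client_ip=%s port=%s\n", remote, clientIP, port)
	}
}
```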
modules/caddytls/automation.go (+1 −1)

@@ -256,7 +256,7 @@ func (ap *AutomationPolicy) Provision(tlsApp *TLS) error {
             if tlsApp.Automation == nil || tlsApp.Automation.OnDemand == nil {
                 return nil
             }
-            if err := onDemandAskRequest(tlsApp.logger, tlsApp.Automation.OnDemand.Ask, name); err != nil {
+            if err := onDemandAskRequest(ctx, tlsApp.logger, tlsApp.Automation.OnDemand.Ask, name); err != nil {
                 // distinguish true errors from denials, because it's important to elevate actual errors
                 if errors.Is(err, errAskDenied) {
                     tlsApp.logger.Debug("certificate issuance denied",
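For context on what onDemandAskRequest talks to: the `ask` URL points at an endpoint the operator controls, and, per Caddy's documented on-demand TLS behavior, the requested name is passed as a `domain` query parameter; a 2xx response permits issuance and any other status denies it. A minimal sketch of such an endpoint follows; the /check-domain path, port, and allow-list are hypothetical:

```go
package main

import (
	"log"
	"net/http"
)

// allowed is a hypothetical allow-list of names this deployment serves.
var allowed = map[string]bool{
	"example.com":     true,
	"www.example.com": true,
}

func main() {
	// Caddy is expected to call this endpoint with ?domain=<name> before
	// obtaining a certificate on demand; 2xx allows, anything else denies.
	http.HandleFunc("/check-domain", func(w http.ResponseWriter, r *http.Request) {
		domain := r.URL.Query().Get("domain")
		if allowed[domain] {
			w.WriteHeader(http.StatusOK)
			return
		}
		http.Error(w, "domain not allowed", http.StatusForbidden)
	})

	log.Fatal(http.ListenAndServe(":9123", nil))
}
```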
modules/caddytls/tls.go (+19 −14)

@@ -551,6 +551,10 @@ func (t *TLS) cleanStorageUnits() {
     storageCleanMu.Lock()
     defer storageCleanMu.Unlock()

+    // TODO: This check might not be needed anymore now that CertMagic syncs
+    // and throttles storage cleaning globally across the cluster.
+    // The original comment below might be outdated:
+    //
     // If storage was cleaned recently, don't do it again for now. Although the ticker
     // calling this function drops missed ticks for us, config reloads discard the old
     // ticker and replace it with a new one, possibly invoking a cleaning to happen again

@@ -563,21 +567,26 @@ func (t *TLS) cleanStorageUnits() {
         return
     }

+    id, err := caddy.InstanceID()
+    if err != nil {
+        t.logger.Warn("unable to get instance ID; storage clean stamps will be incomplete", zap.Error(err))
+    }
     options := certmagic.CleanStorageOptions{
+        Logger:                 t.logger,
+        InstanceID:             id.String(),
+        Interval:               t.storageCleanInterval(),
         OCSPStaples:            true,
         ExpiredCerts:           true,
         ExpiredCertGracePeriod: 24 * time.Hour * 14,
     }

-    // avoid cleaning same storage more than once per cleaning cycle
-    storagesCleaned := make(map[string]struct{})
-
     // start with the default/global storage
-    storage := t.ctx.Storage()
-    storageStr := fmt.Sprintf("%v", storage)
-    t.logger.Info("cleaning storage unit", zap.String("description", storageStr))
-    certmagic.CleanStorage(t.ctx, storage, options)
-    storagesCleaned[storageStr] = struct{}{}
+    err = certmagic.CleanStorage(t.ctx, t.ctx.Storage(), options)
+    if err != nil {
+        // probably don't want to return early, since we should still
+        // see if any other storages can get cleaned up
+        t.logger.Error("could not clean default/global storage", zap.Error(err))
+    }

     // then clean each storage defined in ACME automation policies
     if t.Automation != nil {

@@ -585,13 +594,9 @@ func (t *TLS) cleanStorageUnits() {
             if ap.storage == nil {
                 continue
             }
-            storageStr := fmt.Sprintf("%v", ap.storage)
-            if _, ok := storagesCleaned[storageStr]; ok {
-                continue
+            if err := certmagic.CleanStorage(t.ctx, ap.storage, options); err != nil {
+                t.logger.Error("could not clean storage configured in automation policy", zap.Error(err))
             }
-            t.logger.Info("cleaning storage unit", zap.String("description", storageStr))
-            certmagic.CleanStorage(t.ctx, ap.storage, options)
-            storagesCleaned[storageStr] = struct{}{}
         }
     }
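The storage-cleaning change leans on the new fields this diff sets in CertMagic v0.20.0's CleanStorageOptions: per the commit message, Interval and InstanceID let CertMagic record clean stamps in storage and skip redundant cleanings across instances that share it. Below is a standalone sketch of the same call outside Caddy, using only the fields visible in the diff; the file-storage path and instance ID are placeholders, and a real cluster would pass its shared storage module and caddy.InstanceID() instead:

```go
package main

import (
	"context"
	"time"

	"github.com/caddyserver/certmagic"
	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()

	// Placeholder storage; in Caddy this is the configured (often shared)
	// storage module returned by ctx.Storage().
	storage := &certmagic.FileStorage{Path: "/tmp/certmagic-data"}

	options := certmagic.CleanStorageOptions{
		Logger:                 logger,
		InstanceID:             "example-instance-id", // placeholder for caddy.InstanceID()
		Interval:               24 * time.Hour,        // how often cleaning should be allowed to run
		OCSPStaples:            true,
		ExpiredCerts:           true,
		ExpiredCertGracePeriod: 24 * time.Hour * 14,
	}

	// With Interval and InstanceID set, CleanStorage can decline to clean
	// again if an instance sharing this storage cleaned recently.
	if err := certmagic.CleanStorage(context.Background(), storage, options); err != nil {
		logger.Error("storage cleaning failed", zap.Error(err))
	}
}
```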