Merge branch 'master' into produce-csr

This commit is contained in:
Matt Holt 2024-03-06 18:35:43 -07:00 committed by GitHub
commit 434d4bba24
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
311 changed files with 14395 additions and 5983 deletions

View file

@ -1,5 +1,5 @@
[*] [*]
end_of_line = lf end_of_line = lf
[caddytest/integration/caddyfile_adapt/*.txt] [caddytest/integration/caddyfile_adapt/*.caddyfiletest]
indent_style = tab indent_style = tab

View file

@ -19,45 +19,49 @@ jobs:
fail-fast: false fail-fast: false
matrix: matrix:
os: os:
- ubuntu-latest - linux
- macos-latest - mac
- windows-latest - windows
go: go:
- '1.20'
- '1.21' - '1.21'
- '1.22'
include: include:
# Set the minimum Go patch version for the given Go minor # Set the minimum Go patch version for the given Go minor
# Usable via ${{ matrix.GO_SEMVER }} # Usable via ${{ matrix.GO_SEMVER }}
- go: '1.20'
GO_SEMVER: '~1.20.6'
- go: '1.21' - go: '1.21'
GO_SEMVER: '~1.21.0' GO_SEMVER: '~1.21.0'
- go: '1.22'
GO_SEMVER: '~1.22.1'
# Set some variables per OS, usable via ${{ matrix.VAR }} # Set some variables per OS, usable via ${{ matrix.VAR }}
# OS_LABEL: the VM label from GitHub Actions (see https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories)
# CADDY_BIN_PATH: the path to the compiled Caddy binary, for artifact publishing # CADDY_BIN_PATH: the path to the compiled Caddy binary, for artifact publishing
# SUCCESS: the typical value for $? per OS (Windows/pwsh returns 'True') # SUCCESS: the typical value for $? per OS (Windows/pwsh returns 'True')
- os: ubuntu-latest - os: linux
OS_LABEL: ubuntu-latest
CADDY_BIN_PATH: ./cmd/caddy/caddy CADDY_BIN_PATH: ./cmd/caddy/caddy
SUCCESS: 0 SUCCESS: 0
- os: macos-latest - os: mac
OS_LABEL: macos-14
CADDY_BIN_PATH: ./cmd/caddy/caddy CADDY_BIN_PATH: ./cmd/caddy/caddy
SUCCESS: 0 SUCCESS: 0
- os: windows-latest - os: windows
OS_LABEL: windows-latest
CADDY_BIN_PATH: ./cmd/caddy/caddy.exe CADDY_BIN_PATH: ./cmd/caddy/caddy.exe
SUCCESS: 'True' SUCCESS: 'True'
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.OS_LABEL }}
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Install Go - name: Install Go
uses: actions/setup-go@v4 uses: actions/setup-go@v5
with: with:
go-version: ${{ matrix.GO_SEMVER }} go-version: ${{ matrix.GO_SEMVER }}
check-latest: true check-latest: true
@ -95,13 +99,14 @@ jobs:
env: env:
CGO_ENABLED: 0 CGO_ENABLED: 0
run: | run: |
go build -trimpath -ldflags="-w -s" -v go build -tags nobadger -trimpath -ldflags="-w -s" -v
- name: Publish Build Artifact - name: Publish Build Artifact
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v4
with: with:
name: caddy_${{ runner.os }}_go${{ matrix.go }}_${{ steps.vars.outputs.short_sha }} name: caddy_${{ runner.os }}_go${{ matrix.go }}_${{ steps.vars.outputs.short_sha }}
path: ${{ matrix.CADDY_BIN_PATH }} path: ${{ matrix.CADDY_BIN_PATH }}
compression-level: 0
# Commented bits below were useful to allow the job to continue # Commented bits below were useful to allow the job to continue
# even if the tests fail, so we can publish the report separately # even if the tests fail, so we can publish the report separately
@ -111,7 +116,7 @@ jobs:
# continue-on-error: true # continue-on-error: true
run: | run: |
# (go test -v -coverprofile=cover-profile.out -race ./... 2>&1) > test-results/test-result.out # (go test -v -coverprofile=cover-profile.out -race ./... 2>&1) > test-results/test-result.out
go test -v -coverprofile="cover-profile.out" -short -race ./... go test -tags nobadger -v -coverprofile="cover-profile.out" -short -race ./...
# echo "status=$?" >> $GITHUB_OUTPUT # echo "status=$?" >> $GITHUB_OUTPUT
# Relevant step if we reinvestigate publishing test/coverage reports # Relevant step if we reinvestigate publishing test/coverage reports
@ -124,7 +129,7 @@ jobs:
# To return the correct result even though we set 'continue-on-error: true' # To return the correct result even though we set 'continue-on-error: true'
# - name: Coerce correct build result # - name: Coerce correct build result
# if: matrix.os != 'windows-latest' && steps.step_test.outputs.status != ${{ matrix.SUCCESS }} # if: matrix.os != 'windows' && steps.step_test.outputs.status != ${{ matrix.SUCCESS }}
# run: | # run: |
# echo "step_test ${{ steps.step_test.outputs.status }}\n" # echo "step_test ${{ steps.step_test.outputs.status }}\n"
# exit 1 # exit 1
@ -136,7 +141,7 @@ jobs:
continue-on-error: true # August 2020: s390x VM is down due to weather and power issues continue-on-error: true # August 2020: s390x VM is down due to weather and power issues
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Run Tests - name: Run Tests
run: | run: |
mkdir -p ~/.ssh && echo -e "${SSH_KEY//_/\\n}" > ~/.ssh/id_ecdsa && chmod og-rwx ~/.ssh/id_ecdsa mkdir -p ~/.ssh && echo -e "${SSH_KEY//_/\\n}" > ~/.ssh/id_ecdsa && chmod og-rwx ~/.ssh/id_ecdsa
@ -146,7 +151,7 @@ jobs:
# The environment is fresh, so there's no point in keeping accepting and adding the key. # The environment is fresh, so there's no point in keeping accepting and adding the key.
rsync -arz -e "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress --delete --exclude '.git' . "$CI_USER"@ci-s390x.caddyserver.com:/var/tmp/"$short_sha" rsync -arz -e "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress --delete --exclude '.git' . "$CI_USER"@ci-s390x.caddyserver.com:/var/tmp/"$short_sha"
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t "$CI_USER"@ci-s390x.caddyserver.com "cd /var/tmp/$short_sha; go version; go env; printf "\n\n";CGO_ENABLED=0 go test -v ./..." ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t "$CI_USER"@ci-s390x.caddyserver.com "cd /var/tmp/$short_sha; go version; go env; printf "\n\n";CGO_ENABLED=0 go test -tags nobadger -v ./..."
test_result=$? test_result=$?
# There's no need leaving the files around # There's no need leaving the files around
@ -162,11 +167,9 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v3 uses: actions/checkout@v4
- uses: goreleaser/goreleaser-action@v4 - uses: goreleaser/goreleaser-action@v5
with: with:
version: latest version: latest
args: check args: check
env:
TAG: ${{ steps.vars.outputs.version_tag }}

View file

@ -11,11 +11,12 @@ on:
- 2.* - 2.*
jobs: jobs:
cross-build-test: build:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
goos: goos:
- 'aix'
- 'android' - 'android'
- 'linux' - 'linux'
- 'solaris' - 'solaris'
@ -28,22 +29,22 @@ jobs:
- 'darwin' - 'darwin'
- 'netbsd' - 'netbsd'
go: go:
- '1.21' - '1.22'
include: include:
# Set the minimum Go patch version for the given Go minor # Set the minimum Go patch version for the given Go minor
# Usable via ${{ matrix.GO_SEMVER }} # Usable via ${{ matrix.GO_SEMVER }}
- go: '1.21' - go: '1.22'
GO_SEMVER: '~1.21.0' GO_SEMVER: '~1.22.1'
runs-on: ubuntu-latest runs-on: ubuntu-latest
continue-on-error: true continue-on-error: true
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Install Go - name: Install Go
uses: actions/setup-go@v4 uses: actions/setup-go@v5
with: with:
go-version: ${{ matrix.GO_SEMVER }} go-version: ${{ matrix.GO_SEMVER }}
check-latest: true check-latest: true
@ -62,11 +63,12 @@ jobs:
env: env:
CGO_ENABLED: 0 CGO_ENABLED: 0
GOOS: ${{ matrix.goos }} GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goos == 'aix' && 'ppc64' || 'amd64' }}
shell: bash shell: bash
continue-on-error: true continue-on-error: true
working-directory: ./cmd/caddy working-directory: ./cmd/caddy
run: | run: |
GOOS=$GOOS go build -trimpath -o caddy-"$GOOS"-amd64 2> /dev/null GOOS=$GOOS GOARCH=$GOARCH go build -tags nobadger -trimpath -o caddy-"$GOOS"-$GOARCH 2> /dev/null
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "::warning ::$GOOS Build Failed" echo "::warning ::$GOOS Build Failed"
exit 0 exit 0

View file

@ -23,24 +23,33 @@ jobs:
strategy: strategy:
matrix: matrix:
os: os:
- ubuntu-latest - linux
- macos-latest - mac
- windows-latest - windows
runs-on: ${{ matrix.os }}
include:
- os: linux
OS_LABEL: ubuntu-latest
- os: mac
OS_LABEL: macos-14
- os: windows
OS_LABEL: windows-latest
runs-on: ${{ matrix.OS_LABEL }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-go@v4 - uses: actions/setup-go@v5
with: with:
go-version: '~1.21.0' go-version: '~1.22.1'
check-latest: true check-latest: true
# Workaround for https://github.com/golangci/golangci-lint-action/issues/135
skip-pkg-cache: true
- name: golangci-lint - name: golangci-lint
uses: golangci/golangci-lint-action@v3 uses: golangci/golangci-lint-action@v4
with: with:
version: v1.54 version: v1.55
# Workaround for https://github.com/golangci/golangci-lint-action/issues/135 # Workaround for https://github.com/golangci/golangci-lint-action/issues/135
skip-pkg-cache: true skip-pkg-cache: true
@ -50,3 +59,12 @@ jobs:
# Optional: show only new issues if it's a pull request. The default value is `false`. # Optional: show only new issues if it's a pull request. The default value is `false`.
# only-new-issues: true # only-new-issues: true
govulncheck:
runs-on: ubuntu-latest
steps:
- name: govulncheck
uses: golang/govulncheck-action@v1
with:
go-version-input: '~1.22.1'
check-latest: true

View file

@ -32,18 +32,18 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v3 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Install Go - name: Install Go
uses: actions/setup-go@v4 uses: actions/setup-go@v5
with: with:
go-version: ${{ matrix.GO_SEMVER }} go-version: ${{ matrix.GO_SEMVER }}
check-latest: true check-latest: true
# Force fetch upstream tags -- because 65 minutes # Force fetch upstream tags -- because 65 minutes
# tl;dr: actions/checkout@v3 runs this line: # tl;dr: actions/checkout@v4 runs this line:
# git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +ebc278ec98bb24f2852b61fde2a9bf2e3d83818b:refs/tags/ # git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +ebc278ec98bb24f2852b61fde2a9bf2e3d83818b:refs/tags/
# which makes its own local lightweight tag, losing all the annotations in the process. Our earlier script ran: # which makes its own local lightweight tag, losing all the annotations in the process. Our earlier script ran:
# git fetch --prune --unshallow # git fetch --prune --unshallow
@ -106,7 +106,7 @@ jobs:
run: syft version run: syft version
# GoReleaser will take care of publishing those artifacts into the release # GoReleaser will take care of publishing those artifacts into the release
- name: Run GoReleaser - name: Run GoReleaser
uses: goreleaser/goreleaser-action@v4 uses: goreleaser/goreleaser-action@v5
with: with:
version: latest version: latest
args: release --clean --timeout 60m args: release --clean --timeout 60m

View file

@ -18,7 +18,7 @@ jobs:
# See https://github.com/peter-evans/repository-dispatch # See https://github.com/peter-evans/repository-dispatch
- name: Trigger event on caddyserver/dist - name: Trigger event on caddyserver/dist
uses: peter-evans/repository-dispatch@v2 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.REPO_DISPATCH_TOKEN }} token: ${{ secrets.REPO_DISPATCH_TOKEN }}
repository: caddyserver/dist repository: caddyserver/dist
@ -26,7 +26,7 @@ jobs:
client-payload: '{"tag": "${{ github.event.release.tag_name }}"}' client-payload: '{"tag": "${{ github.event.release.tag_name }}"}'
- name: Trigger event on caddyserver/caddy-docker - name: Trigger event on caddyserver/caddy-docker
uses: peter-evans/repository-dispatch@v2 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.REPO_DISPATCH_TOKEN }} token: ${{ secrets.REPO_DISPATCH_TOKEN }}
repository: caddyserver/caddy-docker repository: caddyserver/caddy-docker

1
.gitignore vendored
View file

@ -12,6 +12,7 @@ Caddyfile.*
cmd/caddy/caddy cmd/caddy/caddy
cmd/caddy/caddy.exe cmd/caddy/caddy.exe
cmd/caddy/tmp/*.exe cmd/caddy/tmp/*.exe
cmd/caddy/.env
# mac specific # mac specific
.DS_Store .DS_Store

View file

@ -15,35 +15,68 @@ linters-settings:
# If `true`, make the section order the same as the order of `sections`. # If `true`, make the section order the same as the order of `sections`.
# Default: false # Default: false
custom-order: true custom-order: true
exhaustive:
ignore-enum-types: reflect.Kind|svc.Cmd
linters: linters:
disable-all: true disable-all: true
enable: enable:
- asasalint
- asciicheck
- bidichk
- bodyclose - bodyclose
- decorder
- dogsled
- dupl
- dupword
- durationcheck
- errcheck - errcheck
- errname
- exhaustive
- exportloopref
- gci - gci
- gofmt
- goimports
- gofumpt - gofumpt
- gosec - gosec
- gosimple - gosimple
- govet - govet
- ineffassign - ineffassign
- importas
- misspell - misspell
- prealloc - prealloc
- promlinter
- sloglint
- sqlclosecheck
- staticcheck - staticcheck
- tenv
- testableexamples
- testifylint
- tparallel
- typecheck - typecheck
- unconvert - unconvert
- unused - unused
- wastedassign
- whitespace
- zerologlint
# these are implicitly disabled: # these are implicitly disabled:
# - asciicheck # - containedctx
# - contextcheck
# - cyclop
# - depguard # - depguard
# - dogsled # - errchkjson
# - dupl # - errorlint
# - exhaustive # - exhaustruct
# - exportloopref # - execinquery
# - exhaustruct
# - forbidigo
# - forcetypeassert
# - funlen # - funlen
# - gci # - ginkgolinter
# - gocheckcompilerdirectives
# - gochecknoglobals # - gochecknoglobals
# - gochecknoinits # - gochecknoinits
# - gochecksumtype
# - gocognit # - gocognit
# - goconst # - goconst
# - gocritic # - gocritic
@ -51,27 +84,47 @@ linters:
# - godot # - godot
# - godox # - godox
# - goerr113 # - goerr113
# - gofumpt
# - goheader # - goheader
# - golint
# - gomnd # - gomnd
# - gomoddirectives
# - gomodguard # - gomodguard
# - goprintffuncname # - goprintffuncname
# - interfacer # - gosmopolitan
# - grouper
# - inamedparam
# - interfacebloat
# - ireturn
# - lll # - lll
# - maligned # - loggercheck
# - maintidx
# - makezero
# - mirror
# - musttag
# - nakedret # - nakedret
# - nestif # - nestif
# - nilerr
# - nilnil
# - nlreturn # - nlreturn
# - noctx # - noctx
# - nolintlint # - nolintlint
# - nonamedreturns
# - nosprintfhostport
# - paralleltest
# - perfsprint
# - predeclared
# - protogetter
# - reassign
# - revive
# - rowserrcheck # - rowserrcheck
# - scopelint
# - sqlclosecheck
# - stylecheck # - stylecheck
# - tagalign
# - tagliatelle
# - testpackage # - testpackage
# - thelper
# - unparam # - unparam
# - whitespace # - usestdlibvars
# - varnamelen
# - wrapcheck
# - wsl # - wsl
run: run:
@ -110,3 +163,6 @@ issues:
text: 'G404' # G404: Insecure random number source (rand) text: 'G404' # G404: Insecure random number source (rand)
linters: linters:
- gosec - gosec
- path: modules/logging/filters.go
linters:
- dupl

View file

@ -77,6 +77,8 @@ builds:
- -mod=readonly - -mod=readonly
ldflags: ldflags:
- -s -w - -s -w
tags:
- nobadger
signs: signs:
- cmd: cosign - cmd: cosign

View file

@ -87,7 +87,7 @@ See [our online documentation](https://caddyserver.com/docs/install) for other i
Requirements: Requirements:
- [Go 1.20 or newer](https://golang.org/dl/) - [Go 1.21 or newer](https://golang.org/dl/)
### For development ### For development

View file

@ -1196,15 +1196,27 @@ traverseLoop:
} }
case http.MethodPut: case http.MethodPut:
if _, ok := v[part]; ok { if _, ok := v[part]; ok {
return fmt.Errorf("[%s] key already exists: %s", path, part) return APIError{
HTTPStatus: http.StatusConflict,
Err: fmt.Errorf("[%s] key already exists: %s", path, part),
}
} }
v[part] = val v[part] = val
case http.MethodPatch: case http.MethodPatch:
if _, ok := v[part]; !ok { if _, ok := v[part]; !ok {
return fmt.Errorf("[%s] key does not exist: %s", path, part) return APIError{
HTTPStatus: http.StatusNotFound,
Err: fmt.Errorf("[%s] key does not exist: %s", path, part),
}
} }
v[part] = val v[part] = val
case http.MethodDelete: case http.MethodDelete:
if _, ok := v[part]; !ok {
return APIError{
HTTPStatus: http.StatusNotFound,
Err: fmt.Errorf("[%s] key does not exist: %s", path, part),
}
}
delete(v, part) delete(v, part)
default: default:
return fmt.Errorf("unrecognized method %s", method) return fmt.Errorf("unrecognized method %s", method)

View file

@ -75,6 +75,12 @@ func TestUnsyncedConfigAccess(t *testing.T) {
path: "/bar/qq", path: "/bar/qq",
expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`, expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`,
}, },
{
method: "DELETE",
path: "/bar/qq",
expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`,
shouldErr: true,
},
{ {
method: "POST", method: "POST",
path: "/list", path: "/list",

View file

@ -22,6 +22,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/fs"
"log" "log"
"net/http" "net/http"
"os" "os"
@ -38,6 +39,7 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"go.uber.org/zap" "go.uber.org/zap"
"github.com/caddyserver/caddy/v2/internal/filesystems"
"github.com/caddyserver/caddy/v2/notify" "github.com/caddyserver/caddy/v2/notify"
) )
@ -83,6 +85,9 @@ type Config struct {
storage certmagic.Storage storage certmagic.Storage
cancelFunc context.CancelFunc cancelFunc context.CancelFunc
// filesystems is a dict of filesystems that will later be loaded from and added to.
filesystems FileSystems
} }
// App is a thing that Caddy runs. // App is a thing that Caddy runs.
@ -446,6 +451,9 @@ func run(newCfg *Config, start bool) (Context, error) {
} }
} }
// create the new filesystem map
newCfg.filesystems = &filesystems.FilesystemMap{}
// prepare the new config for use // prepare the new config for use
newCfg.apps = make(map[string]App) newCfg.apps = make(map[string]App)
@ -707,6 +715,7 @@ func exitProcess(ctx context.Context, logger *zap.Logger) {
logger.Warn("exiting; byeee!! 👋") logger.Warn("exiting; byeee!! 👋")
exitCode := ExitCodeSuccess exitCode := ExitCodeSuccess
lastContext := ActiveContext()
// stop all apps // stop all apps
if err := Stop(); err != nil { if err := Stop(); err != nil {
@ -728,6 +737,16 @@ func exitProcess(ctx context.Context, logger *zap.Logger) {
} }
} }
// execute any process-exit callbacks
for _, exitFunc := range lastContext.exitFuncs {
exitFunc(ctx)
}
exitFuncsMu.Lock()
for _, exitFunc := range exitFuncs {
exitFunc(ctx)
}
exitFuncsMu.Unlock()
// shut down admin endpoint(s) in goroutines so that // shut down admin endpoint(s) in goroutines so that
// if this function was called from an admin handler, // if this function was called from an admin handler,
// it has a chance to return gracefully // it has a chance to return gracefully
@ -766,6 +785,23 @@ var exiting = new(int32) // accessed atomically
// EXPERIMENTAL API: subject to change or removal. // EXPERIMENTAL API: subject to change or removal.
func Exiting() bool { return atomic.LoadInt32(exiting) == 1 } func Exiting() bool { return atomic.LoadInt32(exiting) == 1 }
// OnExit registers a callback to invoke during process exit.
// This registration is PROCESS-GLOBAL, meaning that each
// function should only be registered once forever, NOT once
// per config load (etc).
//
// EXPERIMENTAL API: subject to change or removal.
func OnExit(f func(context.Context)) {
exitFuncsMu.Lock()
exitFuncs = append(exitFuncs, f)
exitFuncsMu.Unlock()
}
var (
exitFuncs []func(context.Context)
exitFuncsMu sync.Mutex
)
// Duration can be an integer or a string. An integer is // Duration can be an integer or a string. An integer is
// interpreted as nanoseconds. If a string, it is a Go // interpreted as nanoseconds. If a string, it is a Go
// time.Duration value such as `300ms`, `1.5h`, or `2h45m`; // time.Duration value such as `300ms`, `1.5h`, or `2h45m`;
@ -825,13 +861,18 @@ func ParseDuration(s string) (time.Duration, error) {
// regardless of storage configuration, since each instance is intended to // regardless of storage configuration, since each instance is intended to
// have its own unique ID. // have its own unique ID.
func InstanceID() (uuid.UUID, error) { func InstanceID() (uuid.UUID, error) {
uuidFilePath := filepath.Join(AppDataDir(), "instance.uuid") appDataDir := AppDataDir()
uuidFilePath := filepath.Join(appDataDir, "instance.uuid")
uuidFileBytes, err := os.ReadFile(uuidFilePath) uuidFileBytes, err := os.ReadFile(uuidFilePath)
if os.IsNotExist(err) { if errors.Is(err, fs.ErrNotExist) {
uuid, err := uuid.NewRandom() uuid, err := uuid.NewRandom()
if err != nil { if err != nil {
return uuid, err return uuid, err
} }
err = os.MkdirAll(appDataDir, 0o600)
if err != nil {
return uuid, err
}
err = os.WriteFile(uuidFilePath, []byte(uuid.String()), 0o600) err = os.WriteFile(uuidFilePath, []byte(uuid.String()), 0o600)
return uuid, err return uuid, err
} else if err != nil { } else if err != nil {

View file

@ -52,7 +52,7 @@ func (a Adapter) Adapt(body []byte, options map[string]any) ([]byte, []caddyconf
return nil, warnings, err return nil, warnings, err
} }
// lint check: see if input was properly formatted; sometimes messy files files parse // lint check: see if input was properly formatted; sometimes messy files parse
// successfully but result in logical errors (the Caddyfile is a bad format, I'm sorry) // successfully but result in logical errors (the Caddyfile is a bad format, I'm sorry)
if warning, different := FormattingDifference(filename, body); different { if warning, different := FormattingDifference(filename, body); different {
warnings = append(warnings, warning) warnings = append(warnings, warning)
@ -92,30 +92,26 @@ func FormattingDifference(filename string, body []byte) (caddyconfig.Warning, bo
}, true }, true
} }
// Unmarshaler is a type that can unmarshal // Unmarshaler is a type that can unmarshal Caddyfile tokens to
// Caddyfile tokens to set itself up for a // set itself up for a JSON encoding. The goal of an unmarshaler
// JSON encoding. The goal of an unmarshaler // is not to set itself up for actual use, but to set itself up for
// is not to set itself up for actual use, // being marshaled into JSON. Caddyfile-unmarshaled values will not
// but to set itself up for being marshaled // be used directly; they will be encoded as JSON and then used from
// into JSON. Caddyfile-unmarshaled values // that. Implementations _may_ be able to support multiple segments
// will not be used directly; they will be // (instances of their directive or batch of tokens); typically this
// encoded as JSON and then used from that. // means wrapping parsing logic in a loop: `for d.Next() { ... }`.
// Implementations must be able to support // More commonly, only a single segment is supported, so a simple
// multiple segments (instances of their // `d.Next()` at the start should be used to consume the module
// directive or batch of tokens); typically // identifier token (directive name, etc).
// this means wrapping all token logic in
// a loop: `for d.Next() { ... }`.
type Unmarshaler interface { type Unmarshaler interface {
UnmarshalCaddyfile(d *Dispenser) error UnmarshalCaddyfile(d *Dispenser) error
} }
// ServerType is a type that can evaluate a Caddyfile and set up a caddy config. // ServerType is a type that can evaluate a Caddyfile and set up a caddy config.
type ServerType interface { type ServerType interface {
// Setup takes the server blocks which // Setup takes the server blocks which contain tokens,
// contain tokens, as well as options // as well as options (e.g. CLI flags) and creates a
// (e.g. CLI flags) and creates a Caddy // Caddy config, along with any warnings or an error.
// config, along with any warnings or
// an error.
Setup([]ServerBlock, map[string]any) (*caddy.Config, []caddyconfig.Warning, error) Setup([]ServerBlock, map[string]any) (*caddy.Config, []caddyconfig.Warning, error)
} }

View file

@ -305,7 +305,7 @@ func TestDispenser_ArgErr_Err(t *testing.T) {
t.Errorf("Expected error message with custom message in it ('foobar'); got '%v'", err) t.Errorf("Expected error message with custom message in it ('foobar'); got '%v'", err)
} }
var ErrBarIsFull = errors.New("bar is full") ErrBarIsFull := errors.New("bar is full")
bookingError := d.Errf("unable to reserve: %w", ErrBarIsFull) bookingError := d.Errf("unable to reserve: %w", ErrBarIsFull)
if !errors.Is(bookingError, ErrBarIsFull) { if !errors.Is(bookingError, ErrBarIsFull) {
t.Errorf("Errf(): should be able to unwrap the error chain") t.Errorf("Errf(): should be able to unwrap the error chain")

View file

@ -18,6 +18,8 @@ import (
"bytes" "bytes"
"io" "io"
"unicode" "unicode"
"golang.org/x/exp/slices"
) )
// Format formats the input Caddyfile to a standard, nice-looking // Format formats the input Caddyfile to a standard, nice-looking
@ -31,6 +33,14 @@ func Format(input []byte) []byte {
out := new(bytes.Buffer) out := new(bytes.Buffer)
rdr := bytes.NewReader(input) rdr := bytes.NewReader(input)
type heredocState int
const (
heredocClosed heredocState = 0
heredocOpening heredocState = 1
heredocOpened heredocState = 2
)
var ( var (
last rune // the last character that was written to the result last rune // the last character that was written to the result
@ -47,6 +57,11 @@ func Format(input []byte) []byte {
quoted bool // whether we're in a quoted segment quoted bool // whether we're in a quoted segment
escaped bool // whether current char is escaped escaped bool // whether current char is escaped
heredoc heredocState // whether we're in a heredoc
heredocEscaped bool // whether heredoc is escaped
heredocMarker []rune
heredocClosingMarker []rune
nesting int // indentation level nesting int // indentation level
) )
@ -75,6 +90,62 @@ func Format(input []byte) []byte {
panic(err) panic(err)
} }
// detect whether we have the start of a heredoc
if !quoted && !(heredoc != heredocClosed || heredocEscaped) &&
space && last == '<' && ch == '<' {
write(ch)
heredoc = heredocOpening
space = false
continue
}
if heredoc == heredocOpening {
if ch == '\n' {
if len(heredocMarker) > 0 && heredocMarkerRegexp.MatchString(string(heredocMarker)) {
heredoc = heredocOpened
} else {
heredocMarker = nil
heredoc = heredocClosed
nextLine()
continue
}
write(ch)
continue
}
if unicode.IsSpace(ch) {
// a space means it's just a regular token and not a heredoc
heredocMarker = nil
heredoc = heredocClosed
} else {
heredocMarker = append(heredocMarker, ch)
write(ch)
continue
}
}
// if we're in a heredoc, all characters are read&write as-is
if heredoc == heredocOpened {
heredocClosingMarker = append(heredocClosingMarker, ch)
if len(heredocClosingMarker) > len(heredocMarker)+1 { // We assert that the heredocClosingMarker is followed by a unicode.Space
heredocClosingMarker = heredocClosingMarker[1:]
}
// check if we're done
if unicode.IsSpace(ch) && slices.Equal(heredocClosingMarker[:len(heredocClosingMarker)-1], heredocMarker) {
heredocMarker = nil
heredocClosingMarker = nil
heredoc = heredocClosed
} else {
write(ch)
if ch == '\n' {
heredocClosingMarker = heredocClosingMarker[:0]
}
continue
}
}
if last == '<' && space {
space = false
}
if comment { if comment {
if ch == '\n' { if ch == '\n' {
comment = false comment = false
@ -98,6 +169,9 @@ func Format(input []byte) []byte {
} }
if escaped { if escaped {
if ch == '<' {
heredocEscaped = true
}
write(ch) write(ch)
escaped = false escaped = false
continue continue
@ -117,6 +191,7 @@ func Format(input []byte) []byte {
if unicode.IsSpace(ch) { if unicode.IsSpace(ch) {
space = true space = true
heredocEscaped = false
if ch == '\n' { if ch == '\n' {
newLines++ newLines++
} }
@ -205,6 +280,11 @@ func Format(input []byte) []byte {
write('{') write('{')
openBraceWritten = true openBraceWritten = true
} }
if spacePrior && ch == '<' {
space = true
}
write(ch) write(ch)
beginningOfLine = false beginningOfLine = false

View file

@ -362,6 +362,76 @@ block {
block { block {
} }
`,
},
{
description: "keep heredoc as-is",
input: `block {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
}
`,
expect: `block {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
}
`,
},
{
description: "Mixing heredoc with regular part",
input: `block {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
respond "More than one space will be eaten" 200
}
block2 {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
respond "More than one space will be eaten" 200
}
`,
expect: `block {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
respond "More than one space will be eaten" 200
}
block2 {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
respond "More than one space will be eaten" 200
}
`,
},
{
description: "Heredoc as regular token",
input: `block {
heredoc <<HEREDOC "More than one space will be eaten"
}
`,
expect: `block {
heredoc <<HEREDOC "More than one space will be eaten"
}
`,
},
{
description: "Escape heredoc",
input: `block {
heredoc \<<HEREDOC
respond "More than one space will be eaten" 200
}
`,
expect: `block {
heredoc \<<HEREDOC
respond "More than one space will be eaten" 200
}
`, `,
}, },
} { } {

View file

@ -52,6 +52,13 @@ func parseVariadic(token Token, argCount int) (bool, int, int) {
return false, 0, 0 return false, 0, 0
} }
// A valid token may contain several placeholders, and
// they may be separated by ":". It's not variadic.
// https://github.com/caddyserver/caddy/issues/5716
if strings.Contains(start, "}") || strings.Contains(end, "{") {
return false, 0, 0
}
var ( var (
startIndex = 0 startIndex = 0
endIndex = argCount endIndex = argCount

View file

@ -186,7 +186,7 @@ func (l *lexer) next() (bool, error) {
} }
// check if we're done, i.e. that the last few characters are the marker // check if we're done, i.e. that the last few characters are the marker
if len(val) > len(heredocMarker) && heredocMarker == string(val[len(val)-len(heredocMarker):]) { if len(val) >= len(heredocMarker) && heredocMarker == string(val[len(val)-len(heredocMarker):]) {
// set the final value // set the final value
val, err = l.finalizeHeredoc(val, heredocMarker) val, err = l.finalizeHeredoc(val, heredocMarker)
if err != nil { if err != nil {
@ -313,6 +313,11 @@ func (l *lexer) finalizeHeredoc(val []rune, marker string) ([]rune, error) {
// iterate over each line and strip the whitespace from the front // iterate over each line and strip the whitespace from the front
var out string var out string
for lineNum, lineText := range lines[:len(lines)-1] { for lineNum, lineText := range lines[:len(lines)-1] {
if lineText == "" || lineText == "\r" {
out += "\n"
continue
}
// find an exact match for the padding // find an exact match for the padding
index := strings.Index(lineText, paddingToStrip) index := strings.Index(lineText, paddingToStrip)

View file

@ -285,6 +285,18 @@ EOF same-line-arg
}, },
{ {
input: []byte(`heredoc <<EOF input: []byte(`heredoc <<EOF
EOF
HERE same-line-arg
`),
expected: []Token{
{Line: 1, Text: `heredoc`},
{Line: 1, Text: ``},
{Line: 3, Text: `HERE`},
{Line: 3, Text: `same-line-arg`},
},
},
{
input: []byte(`heredoc <<EOF
EOF same-line-arg EOF same-line-arg
`), `),
expected: []Token{ expected: []Token{
@ -445,6 +457,48 @@ EOF same-line-arg
expectErr: true, expectErr: true,
errorMessage: "mismatched leading whitespace in heredoc <<EOF on line #2 [ content], expected whitespace [\t\t] to match the closing marker", errorMessage: "mismatched leading whitespace in heredoc <<EOF on line #2 [ content], expected whitespace [\t\t] to match the closing marker",
}, },
{
input: []byte(`heredoc <<EOF
The next line is a blank line
The previous line is a blank line
EOF`),
expected: []Token{
{Line: 1, Text: "heredoc"},
{Line: 1, Text: "The next line is a blank line\n\nThe previous line is a blank line"},
},
},
{
input: []byte(`heredoc <<EOF
One tab indented heredoc with blank next line
One tab indented heredoc with blank previous line
EOF`),
expected: []Token{
{Line: 1, Text: "heredoc"},
{Line: 1, Text: "One tab indented heredoc with blank next line\n\nOne tab indented heredoc with blank previous line"},
},
},
{
input: []byte(`heredoc <<EOF
The next line is a blank line with one tab
The previous line is a blank line with one tab
EOF`),
expected: []Token{
{Line: 1, Text: "heredoc"},
{Line: 1, Text: "The next line is a blank line with one tab\n\t\nThe previous line is a blank line with one tab"},
},
},
{
input: []byte(`heredoc <<EOF
The next line is a blank line with one tab less than the correct indentation
The previous line is a blank line with one tab less than the correct indentation
EOF`),
expectErr: true,
errorMessage: "mismatched leading whitespace in heredoc <<EOF on line #3 [\t], expected whitespace [\t\t] to match the closing marker",
},
} }
for i, testCase := range testCases { for i, testCase := range testCases {

View file

@ -160,14 +160,14 @@ func (p *parser) begin() error {
} }
if ok, name := p.isNamedRoute(); ok { if ok, name := p.isNamedRoute(); ok {
// named routes only have one key, the route name
p.block.Keys = []string{name}
p.block.IsNamedRoute = true
// we just need a dummy leading token to ease parsing later // we just need a dummy leading token to ease parsing later
nameToken := p.Token() nameToken := p.Token()
nameToken.Text = name nameToken.Text = name
// named routes only have one key, the route name
p.block.Keys = []Token{nameToken}
p.block.IsNamedRoute = true
// get all the tokens from the block, including the braces // get all the tokens from the block, including the braces
tokens, err := p.blockTokens(true) tokens, err := p.blockTokens(true)
if err != nil { if err != nil {
@ -211,10 +211,11 @@ func (p *parser) addresses() error {
var expectingAnother bool var expectingAnother bool
for { for {
tkn := p.Val() value := p.Val()
token := p.Token()
// special case: import directive replaces tokens during parse-time // special case: import directive replaces tokens during parse-time
if tkn == "import" && p.isNewLine() { if value == "import" && p.isNewLine() {
err := p.doImport(0) err := p.doImport(0)
if err != nil { if err != nil {
return err return err
@ -223,9 +224,9 @@ func (p *parser) addresses() error {
} }
// Open brace definitely indicates end of addresses // Open brace definitely indicates end of addresses
if tkn == "{" { if value == "{" {
if expectingAnother { if expectingAnother {
return p.Errf("Expected another address but had '%s' - check for extra comma", tkn) return p.Errf("Expected another address but had '%s' - check for extra comma", value)
} }
// Mark this server block as being defined with braces. // Mark this server block as being defined with braces.
// This is used to provide a better error message when // This is used to provide a better error message when
@ -237,15 +238,15 @@ func (p *parser) addresses() error {
} }
// Users commonly forget to place a space between the address and the '{' // Users commonly forget to place a space between the address and the '{'
if strings.HasSuffix(tkn, "{") { if strings.HasSuffix(value, "{") {
return p.Errf("Site addresses cannot end with a curly brace: '%s' - put a space between the token and the brace", tkn) return p.Errf("Site addresses cannot end with a curly brace: '%s' - put a space between the token and the brace", value)
} }
if tkn != "" { // empty token possible if user typed "" if value != "" { // empty token possible if user typed ""
// Trailing comma indicates another address will follow, which // Trailing comma indicates another address will follow, which
// may possibly be on the next line // may possibly be on the next line
if tkn[len(tkn)-1] == ',' { if value[len(value)-1] == ',' {
tkn = tkn[:len(tkn)-1] value = value[:len(value)-1]
expectingAnother = true expectingAnother = true
} else { } else {
expectingAnother = false // but we may still see another one on this line expectingAnother = false // but we may still see another one on this line
@ -254,11 +255,12 @@ func (p *parser) addresses() error {
// If there's a comma here, it's probably because they didn't use a space // If there's a comma here, it's probably because they didn't use a space
// between their two domains, e.g. "foo.com,bar.com", which would not be // between their two domains, e.g. "foo.com,bar.com", which would not be
// parsed as two separate site addresses. // parsed as two separate site addresses.
if strings.Contains(tkn, ",") { if strings.Contains(value, ",") {
return p.Errf("Site addresses cannot contain a comma ',': '%s' - put a space after the comma to separate site addresses", tkn) return p.Errf("Site addresses cannot contain a comma ',': '%s' - put a space after the comma to separate site addresses", value)
} }
p.block.Keys = append(p.block.Keys, tkn) token.Text = value
p.block.Keys = append(p.block.Keys, token)
} }
// Advance token and possibly break out of loop or return error // Advance token and possibly break out of loop or return error
@ -637,8 +639,8 @@ func (p *parser) closeCurlyBrace() error {
func (p *parser) isNamedRoute() (bool, string) { func (p *parser) isNamedRoute() (bool, string) {
keys := p.block.Keys keys := p.block.Keys
// A named route block is a single key with parens, prefixed with &. // A named route block is a single key with parens, prefixed with &.
if len(keys) == 1 && strings.HasPrefix(keys[0], "&(") && strings.HasSuffix(keys[0], ")") { if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "&(") && strings.HasSuffix(keys[0].Text, ")") {
return true, strings.TrimSuffix(keys[0][2:], ")") return true, strings.TrimSuffix(keys[0].Text[2:], ")")
} }
return false, "" return false, ""
} }
@ -646,8 +648,8 @@ func (p *parser) isNamedRoute() (bool, string) {
func (p *parser) isSnippet() (bool, string) { func (p *parser) isSnippet() (bool, string) {
keys := p.block.Keys keys := p.block.Keys
// A snippet block is a single key with parens. Nothing else qualifies. // A snippet block is a single key with parens. Nothing else qualifies.
if len(keys) == 1 && strings.HasPrefix(keys[0], "(") && strings.HasSuffix(keys[0], ")") { if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "(") && strings.HasSuffix(keys[0].Text, ")") {
return true, strings.TrimSuffix(keys[0][1:], ")") return true, strings.TrimSuffix(keys[0].Text[1:], ")")
} }
return false, "" return false, ""
} }
@ -691,11 +693,19 @@ func (p *parser) blockTokens(retainCurlies bool) ([]Token, error) {
// grouped by segments. // grouped by segments.
type ServerBlock struct { type ServerBlock struct {
HasBraces bool HasBraces bool
Keys []string Keys []Token
Segments []Segment Segments []Segment
IsNamedRoute bool IsNamedRoute bool
} }
func (sb ServerBlock) GetKeysText() []string {
res := []string{}
for _, k := range sb.Keys {
res = append(res, k.Text)
}
return res
}
// DispenseDirective returns a dispenser that contains // DispenseDirective returns a dispenser that contains
// all the tokens in the server block. // all the tokens in the server block.
func (sb ServerBlock) DispenseDirective(dir string) *Dispenser { func (sb ServerBlock) DispenseDirective(dir string) *Dispenser {

View file

@ -22,7 +22,7 @@ import (
) )
func TestParseVariadic(t *testing.T) { func TestParseVariadic(t *testing.T) {
var args = make([]string, 10) args := make([]string, 10)
for i, tc := range []struct { for i, tc := range []struct {
input string input string
result bool result bool
@ -91,6 +91,10 @@ func TestParseVariadic(t *testing.T) {
input: "{args[0:10]}", input: "{args[0:10]}",
result: true, result: true,
}, },
{
input: "{args[0]}:{args[1]}:{args[2]}",
result: false,
},
} { } {
token := Token{ token := Token{
File: "test", File: "test",
@ -107,7 +111,6 @@ func TestAllTokens(t *testing.T) {
input := []byte("a b c\nd e") input := []byte("a b c\nd e")
expected := []string{"a", "b", "c", "d", "e"} expected := []string{"a", "b", "c", "d", "e"}
tokens, err := allTokens("TestAllTokens", input) tokens, err := allTokens("TestAllTokens", input)
if err != nil { if err != nil {
t.Fatalf("Expected no error, got %v", err) t.Fatalf("Expected no error, got %v", err)
} }
@ -145,7 +148,8 @@ func TestParseOneAndImport(t *testing.T) {
"localhost", "localhost",
}, []int{1}}, }, []int{1}},
{`localhost:1234 {
`localhost:1234
dir1 foo bar`, false, []string{ dir1 foo bar`, false, []string{
"localhost:1234", "localhost:1234",
}, []int{3}, }, []int{3},
@ -343,7 +347,7 @@ func TestParseOneAndImport(t *testing.T) {
i, len(test.keys), len(result.Keys)) i, len(test.keys), len(result.Keys))
continue continue
} }
for j, addr := range result.Keys { for j, addr := range result.GetKeysText() {
if addr != test.keys[j] { if addr != test.keys[j] {
t.Errorf("Test %d, key %d: Expected '%s', but was '%s'", t.Errorf("Test %d, key %d: Expected '%s', but was '%s'",
i, j, test.keys[j], addr) i, j, test.keys[j], addr)
@ -375,8 +379,9 @@ func TestRecursiveImport(t *testing.T) {
} }
isExpected := func(got ServerBlock) bool { isExpected := func(got ServerBlock) bool {
if len(got.Keys) != 1 || got.Keys[0] != "localhost" { textKeys := got.GetKeysText()
t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys) if len(textKeys) != 1 || textKeys[0] != "localhost" {
t.Errorf("got keys unexpected: expect localhost, got %v", textKeys)
return false return false
} }
if len(got.Segments) != 2 { if len(got.Segments) != 2 {
@ -403,13 +408,13 @@ func TestRecursiveImport(t *testing.T) {
err = os.WriteFile(recursiveFile1, []byte( err = os.WriteFile(recursiveFile1, []byte(
`localhost `localhost
dir1 dir1
import recursive_import_test2`), 0644) import recursive_import_test2`), 0o644)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer os.Remove(recursiveFile1) defer os.Remove(recursiveFile1)
err = os.WriteFile(recursiveFile2, []byte("dir2 1"), 0644) err = os.WriteFile(recursiveFile2, []byte("dir2 1"), 0o644)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -437,7 +442,7 @@ func TestRecursiveImport(t *testing.T) {
err = os.WriteFile(recursiveFile1, []byte( err = os.WriteFile(recursiveFile1, []byte(
`localhost `localhost
dir1 dir1
import `+recursiveFile2), 0644) import `+recursiveFile2), 0o644)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -470,8 +475,9 @@ func TestDirectiveImport(t *testing.T) {
} }
isExpected := func(got ServerBlock) bool { isExpected := func(got ServerBlock) bool {
if len(got.Keys) != 1 || got.Keys[0] != "localhost" { textKeys := got.GetKeysText()
t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys) if len(textKeys) != 1 || textKeys[0] != "localhost" {
t.Errorf("got keys unexpected: expect localhost, got %v", textKeys)
return false return false
} }
if len(got.Segments) != 2 { if len(got.Segments) != 2 {
@ -491,7 +497,7 @@ func TestDirectiveImport(t *testing.T) {
} }
err = os.WriteFile(directiveFile, []byte(`prop1 1 err = os.WriteFile(directiveFile, []byte(`prop1 1
prop2 2`), 0644) prop2 2`), 0o644)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -612,7 +618,7 @@ func TestParseAll(t *testing.T) {
i, len(test.keys[j]), j, len(block.Keys)) i, len(test.keys[j]), j, len(block.Keys))
continue continue
} }
for k, addr := range block.Keys { for k, addr := range block.GetKeysText() {
if addr != test.keys[j][k] { if addr != test.keys[j][k] {
t.Errorf("Test %d, block %d, key %d: Expected '%s', but got '%s'", t.Errorf("Test %d, block %d, key %d: Expected '%s', but got '%s'",
i, j, k, test.keys[j][k], addr) i, j, k, test.keys[j][k], addr)
@ -765,7 +771,7 @@ func TestSnippets(t *testing.T) {
if len(blocks) != 1 { if len(blocks) != 1 {
t.Fatalf("Expect exactly one server block. Got %d.", len(blocks)) t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
} }
if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual { if actual, expected := blocks[0].GetKeysText()[0], "http://example.com"; expected != actual {
t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual) t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
} }
if len(blocks[0].Segments) != 2 { if len(blocks[0].Segments) != 2 {
@ -797,7 +803,7 @@ func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) {
fileName := writeStringToTempFileOrDie(t, ` fileName := writeStringToTempFileOrDie(t, `
http://example.com { http://example.com {
# This isn't an import directive, it's just an arg with value 'import' # This isn't an import directive, it's just an arg with value 'import'
basicauth / import password basic_auth / import password
} }
`) `)
// Parse the root file that imports the other one. // Parse the root file that imports the other one.
@ -808,12 +814,12 @@ func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) {
} }
auth := blocks[0].Segments[0] auth := blocks[0].Segments[0]
line := auth[0].Text + " " + auth[1].Text + " " + auth[2].Text + " " + auth[3].Text line := auth[0].Text + " " + auth[1].Text + " " + auth[2].Text + " " + auth[3].Text
if line != "basicauth / import password" { if line != "basic_auth / import password" {
// Previously, it would be changed to: // Previously, it would be changed to:
// basicauth / import /path/to/test/dir/password // basic_auth / import /path/to/test/dir/password
// referencing a file that (probably) doesn't exist and changing the // referencing a file that (probably) doesn't exist and changing the
// password! // password!
t.Errorf("Expected basicauth tokens to be 'basicauth / import password' but got %#q", line) t.Errorf("Expected basic_auth tokens to be 'basic_auth / import password' but got %#q", line)
} }
} }
@ -840,7 +846,7 @@ func TestSnippetAcrossMultipleFiles(t *testing.T) {
if len(blocks) != 1 { if len(blocks) != 1 {
t.Fatalf("Expect exactly one server block. Got %d.", len(blocks)) t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
} }
if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual { if actual, expected := blocks[0].GetKeysText()[0], "http://example.com"; expected != actual {
t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual) t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
} }
if len(blocks[0].Segments) != 1 { if len(blocks[0].Segments) != 1 {

View file

@ -88,15 +88,15 @@ func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBloc
// will be served by them; this has the effect of treating each // will be served by them; this has the effect of treating each
// key of a server block as its own, but without having to repeat its // key of a server block as its own, but without having to repeat its
// contents in cases where multiple keys really can be served together // contents in cases where multiple keys really can be served together
addrToKeys := make(map[string][]string) addrToKeys := make(map[string][]caddyfile.Token)
for j, key := range sblock.block.Keys { for j, key := range sblock.block.Keys {
// a key can have multiple listener addresses if there are multiple // a key can have multiple listener addresses if there are multiple
// arguments to the 'bind' directive (although they will all have // arguments to the 'bind' directive (although they will all have
// the same port, since the port is defined by the key or is implicit // the same port, since the port is defined by the key or is implicit
// through automatic HTTPS) // through automatic HTTPS)
addrs, err := st.listenerAddrsForServerBlockKey(sblock, key, options) addrs, err := st.listenerAddrsForServerBlockKey(sblock, key.Text, options)
if err != nil { if err != nil {
return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key, err) return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key.Text, err)
} }
// associate this key with each listener address it is served on // associate this key with each listener address it is served on
@ -122,9 +122,9 @@ func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBloc
// parse keys so that we only have to do it once // parse keys so that we only have to do it once
parsedKeys := make([]Address, 0, len(keys)) parsedKeys := make([]Address, 0, len(keys))
for _, key := range keys { for _, key := range keys {
addr, err := ParseAddress(key) addr, err := ParseAddress(key.Text)
if err != nil { if err != nil {
return nil, fmt.Errorf("parsing key '%s': %v", key, err) return nil, fmt.Errorf("parsing key '%s': %v", key.Text, err)
} }
parsedKeys = append(parsedKeys, addr.Normalize()) parsedKeys = append(parsedKeys, addr.Normalize())
} }

View file

@ -15,12 +15,9 @@
package httpcaddyfile package httpcaddyfile
import ( import (
"encoding/base64"
"encoding/pem"
"fmt" "fmt"
"html" "html"
"net/http" "net/http"
"os"
"reflect" "reflect"
"strconv" "strconv"
"strings" "strings"
@ -40,7 +37,8 @@ import (
func init() { func init() {
RegisterDirective("bind", parseBind) RegisterDirective("bind", parseBind)
RegisterDirective("tls", parseTLS) RegisterDirective("tls", parseTLS)
RegisterHandlerDirective("root", parseRoot) RegisterHandlerDirective("fs", parseFilesystem)
RegisterDirective("root", parseRoot)
RegisterHandlerDirective("vars", parseVars) RegisterHandlerDirective("vars", parseVars)
RegisterHandlerDirective("redir", parseRedir) RegisterHandlerDirective("redir", parseRedir)
RegisterHandlerDirective("respond", parseRespond) RegisterHandlerDirective("respond", parseRespond)
@ -51,18 +49,16 @@ func init() {
RegisterDirective("handle_errors", parseHandleErrors) RegisterDirective("handle_errors", parseHandleErrors)
RegisterHandlerDirective("invoke", parseInvoke) RegisterHandlerDirective("invoke", parseInvoke)
RegisterDirective("log", parseLog) RegisterDirective("log", parseLog)
RegisterHandlerDirective("skip_log", parseSkipLog) RegisterHandlerDirective("skip_log", parseLogSkip)
RegisterHandlerDirective("log_skip", parseLogSkip)
} }
// parseBind parses the bind directive. Syntax: // parseBind parses the bind directive. Syntax:
// //
// bind <addresses...> // bind <addresses...>
func parseBind(h Helper) ([]ConfigValue, error) { func parseBind(h Helper) ([]ConfigValue, error) {
var lnHosts []string h.Next() // consume directive name
for h.Next() { return []ConfigValue{{Class: "bind", Value: h.RemainingArgs()}}, nil
lnHosts = append(lnHosts, h.RemainingArgs()...)
}
return h.NewBindAddresses(lnHosts), nil
} }
// parseTLS parses the tls directive. Syntax: // parseTLS parses the tls directive. Syntax:
@ -90,12 +86,15 @@ func parseBind(h Helper) ([]ConfigValue, error) {
// dns_ttl <duration> // dns_ttl <duration>
// dns_challenge_override_domain <domain> // dns_challenge_override_domain <domain>
// on_demand // on_demand
// reuse_private_keys
// eab <key_id> <mac_key> // eab <key_id> <mac_key>
// issuer <module_name> [...] // issuer <module_name> [...]
// get_certificate <module_name> [...] // get_certificate <module_name> [...]
// insecure_secrets_log <log_file> // insecure_secrets_log <log_file>
// } // }
func parseTLS(h Helper) ([]ConfigValue, error) { func parseTLS(h Helper) ([]ConfigValue, error) {
h.Next() // consume directive name
cp := new(caddytls.ConnectionPolicy) cp := new(caddytls.ConnectionPolicy)
var fileLoader caddytls.FileLoader var fileLoader caddytls.FileLoader
var folderLoader caddytls.FolderLoader var folderLoader caddytls.FolderLoader
@ -106,8 +105,8 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
var issuers []certmagic.Issuer var issuers []certmagic.Issuer
var certManagers []certmagic.Manager var certManagers []certmagic.Manager
var onDemand bool var onDemand bool
var reusePrivateKeys bool
for h.Next() {
// file certificate loader // file certificate loader
firstLine := h.RemainingArgs() firstLine := h.RemainingArgs()
switch len(firstLine) { switch len(firstLine) {
@ -174,7 +173,7 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
} }
var hasBlock bool var hasBlock bool
for nesting := h.Nesting(); h.NextBlock(nesting); { for h.NextBlock(0) {
hasBlock = true hasBlock = true
switch h.Val() { switch h.Val() {
@ -214,55 +213,9 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
case "client_auth": case "client_auth":
cp.ClientAuthentication = &caddytls.ClientAuthentication{} cp.ClientAuthentication = &caddytls.ClientAuthentication{}
for nesting := h.Nesting(); h.NextBlock(nesting); { if err := cp.ClientAuthentication.UnmarshalCaddyfile(h.NewFromNextSegment()); err != nil {
subdir := h.Val()
switch subdir {
case "mode":
if !h.Args(&cp.ClientAuthentication.Mode) {
return nil, h.ArgErr()
}
if h.NextArg() {
return nil, h.ArgErr()
}
case "trusted_ca_cert",
"trusted_leaf_cert":
if !h.NextArg() {
return nil, h.ArgErr()
}
if subdir == "trusted_ca_cert" {
cp.ClientAuthentication.TrustedCACerts = append(cp.ClientAuthentication.TrustedCACerts, h.Val())
} else {
cp.ClientAuthentication.TrustedLeafCerts = append(cp.ClientAuthentication.TrustedLeafCerts, h.Val())
}
case "trusted_ca_cert_file",
"trusted_leaf_cert_file":
if !h.NextArg() {
return nil, h.ArgErr()
}
filename := h.Val()
certDataPEM, err := os.ReadFile(filename)
if err != nil {
return nil, err return nil, err
} }
block, _ := pem.Decode(certDataPEM)
if block == nil || block.Type != "CERTIFICATE" {
return nil, h.Errf("no CERTIFICATE pem block found in %s", h.Val())
}
if subdir == "trusted_ca_cert_file" {
cp.ClientAuthentication.TrustedCACerts = append(cp.ClientAuthentication.TrustedCACerts,
base64.StdEncoding.EncodeToString(block.Bytes))
} else {
cp.ClientAuthentication.TrustedLeafCerts = append(cp.ClientAuthentication.TrustedLeafCerts,
base64.StdEncoding.EncodeToString(block.Bytes))
}
default:
return nil, h.Errf("unknown subdirective for client_auth: %s", subdir)
}
}
case "alpn": case "alpn":
args := h.RemainingArgs() args := h.RemainingArgs()
if len(args) == 0 { if len(args) == 0 {
@ -473,6 +426,12 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
} }
onDemand = true onDemand = true
case "reuse_private_keys":
if h.NextArg() {
return nil, h.ArgErr()
}
reusePrivateKeys = true
case "insecure_secrets_log": case "insecure_secrets_log":
if !h.NextArg() { if !h.NextArg() {
return nil, h.ArgErr() return nil, h.ArgErr()
@ -488,7 +447,6 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
if len(firstLine) == 0 && !hasBlock { if len(firstLine) == 0 && !hasBlock {
return nil, h.ArgErr() return nil, h.ArgErr()
} }
}
// begin building the final config values // begin building the final config values
configVals := []ConfigValue{} configVals := []ConfigValue{}
@ -579,6 +537,14 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
}) })
} }
// reuse private keys TLS
if reusePrivateKeys {
configVals = append(configVals, ConfigValue{
Class: "tls.reuse_private_keys",
Value: true,
})
}
// custom certificate selection // custom certificate selection
if len(certSelector.AnyTag) > 0 { if len(certSelector.AnyTag) > 0 {
cp.CertSelection = &certSelector cp.CertSelection = &certSelector
@ -600,18 +566,53 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
// parseRoot parses the root directive. Syntax: // parseRoot parses the root directive. Syntax:
// //
// root [<matcher>] <path> // root [<matcher>] <path>
func parseRoot(h Helper) (caddyhttp.MiddlewareHandler, error) { func parseRoot(h Helper) ([]ConfigValue, error) {
var root string h.Next() // consume directive name
for h.Next() {
// count the tokens to determine what to do
argsCount := h.CountRemainingArgs()
if argsCount == 0 {
return nil, h.Errf("too few arguments; must have at least a root path")
}
if argsCount > 2 {
return nil, h.Errf("too many arguments; should only be a matcher and a path")
}
// with only one arg, assume it's a root path with no matcher token
if argsCount == 1 {
if !h.NextArg() {
return nil, h.ArgErr()
}
return h.NewRoute(nil, caddyhttp.VarsMiddleware{"root": h.Val()}), nil
}
// parse the matcher token into a matcher set
userMatcherSet, err := h.ExtractMatcherSet()
if err != nil {
return nil, err
}
h.Next() // consume directive name again, matcher parsing does a reset
// advance to the root path
if !h.NextArg() {
return nil, h.ArgErr()
}
// make the route with the matcher
return h.NewRoute(userMatcherSet, caddyhttp.VarsMiddleware{"root": h.Val()}), nil
}
// parseFilesystem parses the fs directive. Syntax:
//
// fs <filesystem>
func parseFilesystem(h Helper) (caddyhttp.MiddlewareHandler, error) {
h.Next() // consume directive name
if !h.NextArg() { if !h.NextArg() {
return nil, h.ArgErr() return nil, h.ArgErr()
} }
root = h.Val()
if h.NextArg() { if h.NextArg() {
return nil, h.ArgErr() return nil, h.ArgErr()
} }
} return caddyhttp.VarsMiddleware{"fs": h.Val()}, nil
return caddyhttp.VarsMiddleware{"root": root}, nil
} }
// parseVars parses the vars directive. See its UnmarshalCaddyfile method for syntax. // parseVars parses the vars directive. See its UnmarshalCaddyfile method for syntax.
@ -631,10 +632,7 @@ func parseVars(h Helper) (caddyhttp.MiddlewareHandler, error) {
// respond with HTTP 200 and no Location header; redirect is performed // respond with HTTP 200 and no Location header; redirect is performed
// with JS and a meta tag). // with JS and a meta tag).
func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) { func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
if !h.Next() { h.Next() // consume directive name
return nil, h.ArgErr()
}
if !h.NextArg() { if !h.NextArg() {
return nil, h.ArgErr() return nil, h.ArgErr()
} }
@ -650,8 +648,10 @@ func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
switch code { switch code {
case "permanent": case "permanent":
code = "301" code = "301"
case "temporary", "": case "temporary", "":
code = "302" code = "302"
case "html": case "html":
// Script tag comes first since that will better imitate a redirect in the browser's // Script tag comes first since that will better imitate a redirect in the browser's
// history, but the meta tag is a fallback for most non-JS clients. // history, but the meta tag is a fallback for most non-JS clients.
@ -667,7 +667,9 @@ func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
` `
safeTo := html.EscapeString(to) safeTo := html.EscapeString(to)
body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo) body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
hdr = http.Header{"Content-Type": []string{"text/html; charset=utf-8"}}
code = "200" // don't redirect non-browser clients code = "200" // don't redirect non-browser clients
default: default:
// Allow placeholders for the code // Allow placeholders for the code
if strings.HasPrefix(code, "{") { if strings.HasPrefix(code, "{") {
@ -706,10 +708,7 @@ func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
func parseRespond(h Helper) (caddyhttp.MiddlewareHandler, error) { func parseRespond(h Helper) (caddyhttp.MiddlewareHandler, error) {
sr := new(caddyhttp.StaticResponse) sr := new(caddyhttp.StaticResponse)
err := sr.UnmarshalCaddyfile(h.Dispenser) err := sr.UnmarshalCaddyfile(h.Dispenser)
if err != nil { return sr, err
return nil, err
}
return sr, nil
} }
// parseAbort parses the abort directive. // parseAbort parses the abort directive.
@ -725,10 +724,7 @@ func parseAbort(h Helper) (caddyhttp.MiddlewareHandler, error) {
func parseError(h Helper) (caddyhttp.MiddlewareHandler, error) { func parseError(h Helper) (caddyhttp.MiddlewareHandler, error) {
se := new(caddyhttp.StaticError) se := new(caddyhttp.StaticError)
err := se.UnmarshalCaddyfile(h.Dispenser) err := se.UnmarshalCaddyfile(h.Dispenser)
if err != nil { return se, err
return nil, err
}
return se, nil
} }
// parseRoute parses the route directive. // parseRoute parses the route directive.
@ -754,10 +750,67 @@ func parseHandle(h Helper) (caddyhttp.MiddlewareHandler, error) {
} }
func parseHandleErrors(h Helper) ([]ConfigValue, error) { func parseHandleErrors(h Helper) ([]ConfigValue, error) {
subroute, err := ParseSegmentAsSubroute(h) h.Next() // consume directive name
expression := ""
args := h.RemainingArgs()
if len(args) > 0 {
codes := []string{}
for _, val := range args {
if len(val) != 3 {
return nil, h.Errf("bad status value '%s'", val)
}
if strings.HasSuffix(val, "xx") {
val = val[:1]
_, err := strconv.Atoi(val)
if err != nil {
return nil, h.Errf("bad status value '%s': %v", val, err)
}
if expression != "" {
expression += " || "
}
expression += fmt.Sprintf("{http.error.status_code} >= %s00 && {http.error.status_code} <= %s99", val, val)
continue
}
_, err := strconv.Atoi(val)
if err != nil {
return nil, h.Errf("bad status value '%s': %v", val, err)
}
codes = append(codes, val)
}
if len(codes) > 0 {
if expression != "" {
expression += " || "
}
expression += "{http.error.status_code} in [" + strings.Join(codes, ", ") + "]"
}
// Reset cursor position to get ready for ParseSegmentAsSubroute
h.Reset()
h.Next()
h.RemainingArgs()
h.Prev()
} else {
// If no arguments present reset the cursor position to get ready for ParseSegmentAsSubroute
h.Prev()
}
handler, err := ParseSegmentAsSubroute(h)
if err != nil { if err != nil {
return nil, err return nil, err
} }
subroute, ok := handler.(*caddyhttp.Subroute)
if !ok {
return nil, h.Errf("segment was not parsed as a subroute")
}
if expression != "" {
statusMatcher := caddy.ModuleMap{
"expression": h.JSON(caddyhttp.MatchExpression{Expr: expression}),
}
for i := range subroute.Routes {
subroute.Routes[i].MatcherSetsRaw = []caddy.ModuleMap{statusMatcher}
}
}
return []ConfigValue{ return []ConfigValue{
{ {
Class: "error_route", Class: "error_route",
@ -804,12 +857,14 @@ func parseLog(h Helper) ([]ConfigValue, error) {
// level. The parseAsGlobalOption parameter is used to distinguish any differing logic // level. The parseAsGlobalOption parameter is used to distinguish any differing logic
// between the two. // between the two.
func parseLogHelper(h Helper, globalLogNames map[string]struct{}) ([]ConfigValue, error) { func parseLogHelper(h Helper, globalLogNames map[string]struct{}) ([]ConfigValue, error) {
h.Next() // consume option name
// When the globalLogNames parameter is passed in, we make // When the globalLogNames parameter is passed in, we make
// modifications to the parsing behavior. // modifications to the parsing behavior.
parseAsGlobalOption := globalLogNames != nil parseAsGlobalOption := globalLogNames != nil
var configValues []ConfigValue var configValues []ConfigValue
for h.Next() {
// Logic below expects that a name is always present when a // Logic below expects that a name is always present when a
// global option is being parsed; or an optional override // global option is being parsed; or an optional override
// is supported for access logs. // is supported for access logs.
@ -981,18 +1036,22 @@ func parseLogHelper(h Helper, globalLogNames map[string]struct{}) ([]ConfigValue
Class: "custom_log", Class: "custom_log",
Value: val, Value: val,
}) })
}
return configValues, nil return configValues, nil
} }
// parseSkipLog parses the skip_log directive. Syntax: // parseLogSkip parses the log_skip directive. Syntax:
// //
// skip_log [<matcher>] // log_skip [<matcher>]
func parseSkipLog(h Helper) (caddyhttp.MiddlewareHandler, error) { func parseLogSkip(h Helper) (caddyhttp.MiddlewareHandler, error) {
for h.Next() { h.Next() // consume directive name
// "skip_log" is deprecated, replaced by "log_skip"
if h.Val() == "skip_log" {
caddy.Log().Named("config.adapter.caddyfile").Warn("the 'skip_log' directive is deprecated, please use 'log_skip' instead!")
}
if h.NextArg() { if h.NextArg() {
return nil, h.ArgErr() return nil, h.ArgErr()
} }
} return caddyhttp.VarsMiddleware{"log_skip": true}, nil
return caddyhttp.VarsMiddleware{"skip_log": true}, nil
} }

View file

@ -27,22 +27,31 @@ import (
"github.com/caddyserver/caddy/v2/modules/caddyhttp" "github.com/caddyserver/caddy/v2/modules/caddyhttp"
) )
// directiveOrder specifies the order // defaultDirectiveOrder specifies the default order
// to apply directives in HTTP routes. // to apply directives in HTTP routes. This must only
// consist of directives that are included in Caddy's
// standard distribution.
// //
// The root directive goes first in case rewrites or // e.g. The 'root' directive goes near the start in
// redirects depend on existence of files, i.e. the // case rewrites or redirects depend on existence of
// file matcher, which must know the root first. // files, i.e. the file matcher, which must know the
// root first.
// //
// The header directive goes second so that headers // e.g. The 'header' directive goes before 'redir' so
// can be manipulated before doing redirects. // that headers can be manipulated before doing redirects.
var directiveOrder = []string{ //
// e.g. The 'respond' directive is near the end because it
// writes a response and terminates the middleware chain.
var defaultDirectiveOrder = []string{
"tracing", "tracing",
// set variables that may be used by other directives
"map", "map",
"vars", "vars",
"fs",
"root", "root",
"skip_log", "log_append",
"log_skip",
"header", "header",
"copy_response_headers", // only in reverse_proxy's handle_response "copy_response_headers", // only in reverse_proxy's handle_response
@ -57,7 +66,8 @@ var directiveOrder = []string{
"try_files", "try_files",
// middleware handlers; some wrap responses // middleware handlers; some wrap responses
"basicauth", "basicauth", // TODO: deprecated, renamed to basic_auth
"basic_auth",
"forward_auth", "forward_auth",
"request_header", "request_header",
"encode", "encode",
@ -82,6 +92,11 @@ var directiveOrder = []string{
"acme_server", "acme_server",
} }
// directiveOrder specifies the order to apply directives
// in HTTP routes, after being modified by either the
// plugins or by the user via the "order" global option.
var directiveOrder = defaultDirectiveOrder
// directiveIsOrdered returns true if dir is // directiveIsOrdered returns true if dir is
// a known, ordered (sorted) directive. // a known, ordered (sorted) directive.
func directiveIsOrdered(dir string) bool { func directiveIsOrdered(dir string) bool {
@ -128,6 +143,58 @@ func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) {
}) })
} }
// RegisterDirectiveOrder registers the default order for a
// directive from a plugin.
//
// This is useful when a plugin has a well-understood place
// it should run in the middleware pipeline, and it allows
// users to avoid having to define the order themselves.
//
// The directive dir may be placed in the position relative
// to ('before' or 'after') a directive included in Caddy's
// standard distribution. It cannot be relative to another
// plugin's directive.
//
// EXPERIMENTAL: This API may change or be removed.
func RegisterDirectiveOrder(dir string, position Positional, standardDir string) {
// check if directive was already ordered
if directiveIsOrdered(dir) {
panic("directive '" + dir + "' already ordered")
}
if position != Before && position != After {
panic("the 2nd argument must be either 'before' or 'after', got '" + position + "'")
}
// check if directive exists in standard distribution, since
// we can't allow plugins to depend on one another; we can't
// guarantee the order that plugins are loaded in.
foundStandardDir := false
for _, d := range defaultDirectiveOrder {
if d == standardDir {
foundStandardDir = true
}
}
if !foundStandardDir {
panic("the 3rd argument '" + standardDir + "' must be a directive that exists in the standard distribution of Caddy")
}
// insert directive into proper position
newOrder := directiveOrder
for i, d := range newOrder {
if d != standardDir {
continue
}
if position == Before {
newOrder = append(newOrder[:i], append([]string{dir}, newOrder[i:]...)...)
} else if position == After {
newOrder = append(newOrder[:i+1], append([]string{dir}, newOrder[i+1:]...)...)
}
break
}
directiveOrder = newOrder
}
// RegisterGlobalOption registers a unique global option opt with // RegisterGlobalOption registers a unique global option opt with
// an associated unmarshaling (setup) function. When the global // an associated unmarshaling (setup) function. When the global
// option opt is encountered in a Caddyfile, setupFunc will be // option opt is encountered in a Caddyfile, setupFunc will be
@ -270,12 +337,6 @@ func (h Helper) GroupRoutes(vals []ConfigValue) {
} }
} }
// NewBindAddresses returns config values relevant to adding
// listener bind addresses to the config.
func (h Helper) NewBindAddresses(addrs []string) []ConfigValue {
return []ConfigValue{{Class: "bind", Value: addrs}}
}
// WithDispenser returns a new instance based on d. All others Helper // WithDispenser returns a new instance based on d. All others Helper
// fields are copied, so typically maps are shared with this new instance. // fields are copied, so typically maps are shared with this new instance.
func (h Helper) WithDispenser(d *caddyfile.Dispenser) Helper { func (h Helper) WithDispenser(d *caddyfile.Dispenser) Helper {
@ -558,6 +619,16 @@ func (sb serverBlock) isAllHTTP() bool {
return true return true
} }
// Positional are the supported modes for ordering directives.
type Positional string
const (
Before Positional = "before"
After Positional = "after"
First Positional = "first"
Last Positional = "last"
)
type ( type (
// UnmarshalFunc is a function which can unmarshal Caddyfile // UnmarshalFunc is a function which can unmarshal Caddyfile
// tokens into zero or more config values using a Helper type. // tokens into zero or more config values using a Helper type.

View file

@ -31,20 +31,23 @@ func TestHostsFromKeys(t *testing.T) {
[]Address{ []Address{
{Original: ":2015", Port: "2015"}, {Original: ":2015", Port: "2015"},
}, },
[]string{}, []string{}, []string{},
[]string{},
}, },
{ {
[]Address{ []Address{
{Original: ":443", Port: "443"}, {Original: ":443", Port: "443"},
}, },
[]string{}, []string{}, []string{},
[]string{},
}, },
{ {
[]Address{ []Address{
{Original: "foo", Host: "foo"}, {Original: "foo", Host: "foo"},
{Original: ":2015", Port: "2015"}, {Original: ":2015", Port: "2015"},
}, },
[]string{}, []string{"foo"}, []string{},
[]string{"foo"},
}, },
{ {
[]Address{ []Address{

View file

@ -17,8 +17,8 @@ package httpcaddyfile
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net"
"reflect" "reflect"
"regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@ -65,8 +65,11 @@ func (st ServerType) Setup(
originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks)) originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks))
for _, sblock := range inputServerBlocks { for _, sblock := range inputServerBlocks {
for j, k := range sblock.Keys { for j, k := range sblock.Keys {
if j == 0 && strings.HasPrefix(k, "@") { if j == 0 && strings.HasPrefix(k.Text, "@") {
return nil, warnings, fmt.Errorf("cannot define a matcher outside of a site block: '%s'", k) return nil, warnings, fmt.Errorf("%s:%d: cannot define a matcher outside of a site block: '%s'", k.File, k.Line, k.Text)
}
if _, ok := registeredDirectives[k.Text]; ok {
return nil, warnings, fmt.Errorf("%s:%d: parsed '%s' as a site address, but it is a known directive; directives must appear in a site block", k.File, k.Line, k.Text)
} }
} }
originalServerBlocks = append(originalServerBlocks, serverBlock{ originalServerBlocks = append(originalServerBlocks, serverBlock{
@ -82,46 +85,18 @@ func (st ServerType) Setup(
return nil, warnings, err return nil, warnings, err
} }
originalServerBlocks, err = st.extractNamedRoutes(originalServerBlocks, options, &warnings) // this will replace both static and user-defined placeholder shorthands
// with actual identifiers used by Caddy
replacer := NewShorthandReplacer()
originalServerBlocks, err = st.extractNamedRoutes(originalServerBlocks, options, &warnings, replacer)
if err != nil { if err != nil {
return nil, warnings, err return nil, warnings, err
} }
// replace shorthand placeholders (which are convenient
// when writing a Caddyfile) with their actual placeholder
// identifiers or variable names
replacer := strings.NewReplacer(placeholderShorthands()...)
// these are placeholders that allow a user-defined final
// parameters, but we still want to provide a shorthand
// for those, so we use a regexp to replace
regexpReplacements := []struct {
search *regexp.Regexp
replace string
}{
{regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"},
{regexp.MustCompile(`{cookie\.([\w-]*)}`), "{http.request.cookie.$1}"},
{regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"},
{regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"},
{regexp.MustCompile(`{file\.([\w-]*)}`), "{http.request.uri.path.file.$1}"},
{regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"},
{regexp.MustCompile(`{re\.([\w-]*)\.([\w-]*)}`), "{http.regexp.$1.$2}"},
{regexp.MustCompile(`{vars\.([\w-]*)}`), "{http.vars.$1}"},
{regexp.MustCompile(`{rp\.([\w-\.]*)}`), "{http.reverse_proxy.$1}"},
{regexp.MustCompile(`{err\.([\w-\.]*)}`), "{http.error.$1}"},
{regexp.MustCompile(`{file_match\.([\w-]*)}`), "{http.matchers.file.$1}"},
}
for _, sb := range originalServerBlocks { for _, sb := range originalServerBlocks {
for _, segment := range sb.block.Segments { for i := range sb.block.Segments {
for i := 0; i < len(segment); i++ { replacer.ApplyToSegment(&sb.block.Segments[i])
// simple string replacements
segment[i].Text = replacer.Replace(segment[i].Text)
// complex regexp replacements
for _, r := range regexpReplacements {
segment[i].Text = r.search.ReplaceAllString(segment[i].Text, r.replace)
}
}
} }
if len(sb.block.Keys) == 0 { if len(sb.block.Keys) == 0 {
@ -299,6 +274,12 @@ func (st ServerType) Setup(
if !reflect.DeepEqual(pkiApp, &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}) { if !reflect.DeepEqual(pkiApp, &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}) {
cfg.AppsRaw["pki"] = caddyconfig.JSON(pkiApp, &warnings) cfg.AppsRaw["pki"] = caddyconfig.JSON(pkiApp, &warnings)
} }
if filesystems, ok := options["filesystem"].(caddy.Module); ok {
cfg.AppsRaw["caddy.filesystems"] = caddyconfig.JSON(
filesystems,
&warnings)
}
if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok { if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok {
cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr, cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr,
"module", "module",
@ -308,7 +289,6 @@ func (st ServerType) Setup(
if adminConfig, ok := options["admin"].(*caddy.AdminConfig); ok && adminConfig != nil { if adminConfig, ok := options["admin"].(*caddy.AdminConfig); ok && adminConfig != nil {
cfg.Admin = adminConfig cfg.Admin = adminConfig
} }
if pc, ok := options["persist_config"].(string); ok && pc == "off" { if pc, ok := options["persist_config"].(string); ok && pc == "off" {
if cfg.Admin == nil { if cfg.Admin == nil {
cfg.Admin = new(caddy.AdminConfig) cfg.Admin = new(caddy.AdminConfig)
@ -452,6 +432,7 @@ func (ServerType) extractNamedRoutes(
serverBlocks []serverBlock, serverBlocks []serverBlock,
options map[string]any, options map[string]any,
warnings *[]caddyconfig.Warning, warnings *[]caddyconfig.Warning,
replacer ShorthandReplacer,
) ([]serverBlock, error) { ) ([]serverBlock, error) {
namedRoutes := map[string]*caddyhttp.Route{} namedRoutes := map[string]*caddyhttp.Route{}
@ -477,11 +458,14 @@ func (ServerType) extractNamedRoutes(
continue continue
} }
wholeSegment := caddyfile.Segment{}
for i := range sb.block.Segments {
// replace user-defined placeholder shorthands in extracted named routes
replacer.ApplyToSegment(&sb.block.Segments[i])
// zip up all the segments since ParseSegmentAsSubroute // zip up all the segments since ParseSegmentAsSubroute
// was designed to take a directive+ // was designed to take a directive+
wholeSegment := caddyfile.Segment{} wholeSegment = append(wholeSegment, sb.block.Segments[i]...)
for _, segment := range sb.block.Segments {
wholeSegment = append(wholeSegment, segment...)
} }
h := Helper{ h := Helper{
@ -509,7 +493,7 @@ func (ServerType) extractNamedRoutes(
route.HandlersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", subroute.CaddyModule().ID.Name(), h.warnings)} route.HandlersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", subroute.CaddyModule().ID.Name(), h.warnings)}
} }
namedRoutes[sb.block.Keys[0]] = &route namedRoutes[sb.block.GetKeysText()[0]] = &route
} }
options["named_routes"] = namedRoutes options["named_routes"] = namedRoutes
@ -547,12 +531,12 @@ func (st *ServerType) serversFromPairings(
// address), otherwise their routes will improperly be added // address), otherwise their routes will improperly be added
// to the same server (see issue #4635) // to the same server (see issue #4635)
for j, sblock1 := range p.serverBlocks { for j, sblock1 := range p.serverBlocks {
for _, key := range sblock1.block.Keys { for _, key := range sblock1.block.GetKeysText() {
for k, sblock2 := range p.serverBlocks { for k, sblock2 := range p.serverBlocks {
if k == j { if k == j {
continue continue
} }
if sliceContains(sblock2.block.Keys, key) { if sliceContains(sblock2.block.GetKeysText(), key) {
return nil, fmt.Errorf("ambiguous site definition: %s", key) return nil, fmt.Errorf("ambiguous site definition: %s", key)
} }
} }
@ -710,6 +694,7 @@ func (st *ServerType) serversFromPairings(
} }
if len(hosts) > 0 { if len(hosts) > 0 {
slices.Sort(hosts) // for deterministic JSON output
cp.MatchersRaw = caddy.ModuleMap{ cp.MatchersRaw = caddy.ModuleMap{
"sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones "sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
} }
@ -741,10 +726,20 @@ func (st *ServerType) serversFromPairings(
} }
} }
// If TLS is specified as directive, it will also result in 1 or more connection policy being created
// Thus, catch-all address with non-standard port, e.g. :8443, can have TLS enabled without
// specifying prefix "https://"
// Second part of the condition is to allow creating TLS conn policy even though `auto_https` has been disabled
// ensuring compatibility with behavior described in below link
// https://caddy.community/t/making-sense-of-auto-https-and-why-disabling-it-still-serves-https-instead-of-http/9761
createdTLSConnPolicies, ok := sblock.pile["tls.connection_policy"]
hasTLSEnabled := (ok && len(createdTLSConnPolicies) > 0) ||
(addr.Host != "" && srv.AutoHTTPS != nil && !sliceContains(srv.AutoHTTPS.Skip, addr.Host))
// we'll need to remember if the address qualifies for auto-HTTPS, so we // we'll need to remember if the address qualifies for auto-HTTPS, so we
// can add a TLS conn policy if necessary // can add a TLS conn policy if necessary
if addr.Scheme == "https" || if addr.Scheme == "https" ||
(addr.Scheme != "http" && addr.Host != "" && addr.Port != httpPort) { (addr.Scheme != "http" && addr.Port != httpPort && hasTLSEnabled) {
addressQualifiesForTLS = true addressQualifiesForTLS = true
} }
// predict whether auto-HTTPS will add the conn policy for us; if so, we // predict whether auto-HTTPS will add the conn policy for us; if so, we
@ -782,10 +777,19 @@ func (st *ServerType) serversFromPairings(
if srv.Errors == nil { if srv.Errors == nil {
srv.Errors = new(caddyhttp.HTTPErrorConfig) srv.Errors = new(caddyhttp.HTTPErrorConfig)
} }
sort.SliceStable(errorSubrouteVals, func(i, j int) bool {
sri, srj := errorSubrouteVals[i].Value.(*caddyhttp.Subroute), errorSubrouteVals[j].Value.(*caddyhttp.Subroute)
if len(sri.Routes[0].MatcherSetsRaw) == 0 && len(srj.Routes[0].MatcherSetsRaw) != 0 {
return false
}
return true
})
errorsSubroute := &caddyhttp.Subroute{}
for _, val := range errorSubrouteVals { for _, val := range errorSubrouteVals {
sr := val.Value.(*caddyhttp.Subroute) sr := val.Value.(*caddyhttp.Subroute)
srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, sr, matcherSetsEnc, p, warnings) errorsSubroute.Routes = append(errorsSubroute.Routes, sr.Routes...)
} }
srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, errorsSubroute, matcherSetsEnc, p, warnings)
} }
// add log associations // add log associations
@ -811,7 +815,12 @@ func (st *ServerType) serversFromPairings(
if srv.Logs.LoggerNames == nil { if srv.Logs.LoggerNames == nil {
srv.Logs.LoggerNames = make(map[string]string) srv.Logs.LoggerNames = make(map[string]string)
} }
srv.Logs.LoggerNames[h] = ncl.name // strip the port from the host, if any
host, _, err := net.SplitHostPort(h)
if err != nil {
host = h
}
srv.Logs.LoggerNames[host] = ncl.name
} }
} }
} }
@ -828,6 +837,11 @@ func (st *ServerType) serversFromPairings(
} }
} }
// sort for deterministic JSON output
if srv.Logs != nil {
slices.Sort(srv.Logs.SkipHosts)
}
// a server cannot (natively) serve both HTTP and HTTPS at the // a server cannot (natively) serve both HTTP and HTTPS at the
// same time, so make sure the configuration isn't in conflict // same time, so make sure the configuration isn't in conflict
err := detectConflictingSchemes(srv, p.serverBlocks, options) err := detectConflictingSchemes(srv, p.serverBlocks, options)
@ -1370,7 +1384,8 @@ func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.Mod
} }
func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error { func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
for d.Next() { d.Next() // advance to the first token
// this is the "name" for "named matchers" // this is the "name" for "named matchers"
definitionName := d.Val() definitionName := d.Val()
@ -1406,11 +1421,16 @@ func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.M
// and that it's probably an 'expression' matcher // and that it's probably an 'expression' matcher
if d.NextArg() { if d.NextArg() {
if d.Token().Quoted() { if d.Token().Quoted() {
err := makeMatcher("expression", []caddyfile.Token{d.Token()}) // since it was missing the matcher name, we insert a token
// in front of the expression token itself
err := makeMatcher("expression", []caddyfile.Token{
{Text: "expression", File: d.File(), Line: d.Line()},
d.Token(),
})
if err != nil { if err != nil {
return err return err
} }
continue return nil
} }
// if it wasn't quoted, then we need to rewind after calling // if it wasn't quoted, then we need to rewind after calling
@ -1433,7 +1453,6 @@ func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.M
return err return err
} }
} }
}
return nil return nil
} }
@ -1449,37 +1468,6 @@ func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (caddy.Modul
return msEncoded, nil return msEncoded, nil
} }
// placeholderShorthands returns a slice of old-new string pairs,
// where the left of the pair is a placeholder shorthand that may
// be used in the Caddyfile, and the right is the replacement.
func placeholderShorthands() []string {
return []string{
"{dir}", "{http.request.uri.path.dir}",
"{file}", "{http.request.uri.path.file}",
"{host}", "{http.request.host}",
"{hostport}", "{http.request.hostport}",
"{port}", "{http.request.port}",
"{method}", "{http.request.method}",
"{path}", "{http.request.uri.path}",
"{query}", "{http.request.uri.query}",
"{remote}", "{http.request.remote}",
"{remote_host}", "{http.request.remote.host}",
"{remote_port}", "{http.request.remote.port}",
"{scheme}", "{http.request.scheme}",
"{uri}", "{http.request.uri}",
"{tls_cipher}", "{http.request.tls.cipher_suite}",
"{tls_version}", "{http.request.tls.version}",
"{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
"{tls_client_issuer}", "{http.request.tls.client.issuer}",
"{tls_client_serial}", "{http.request.tls.client.serial}",
"{tls_client_subject}", "{http.request.tls.client.subject}",
"{tls_client_certificate_pem}", "{http.request.tls.client.certificate_pem}",
"{tls_client_certificate_der_base64}", "{http.request.tls.client.certificate_der_base64}",
"{upstream_hostport}", "{http.reverse_proxy.upstream.hostport}",
"{client_ip}", "{http.vars.client_ip}",
}
}
// WasReplacedPlaceholderShorthand checks if a token string was // WasReplacedPlaceholderShorthand checks if a token string was
// likely a replaced shorthand of the known Caddyfile placeholder // likely a replaced shorthand of the known Caddyfile placeholder
// replacement outputs. Useful to prevent some user-defined map // replacement outputs. Useful to prevent some user-defined map

View file

@ -62,8 +62,8 @@ func init() {
func parseOptTrue(d *caddyfile.Dispenser, _ any) (any, error) { return true, nil } func parseOptTrue(d *caddyfile.Dispenser, _ any) (any, error) { return true, nil }
func parseOptHTTPPort(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptHTTPPort(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
var httpPort int var httpPort int
for d.Next() {
var httpPortStr string var httpPortStr string
if !d.AllArgs(&httpPortStr) { if !d.AllArgs(&httpPortStr) {
return 0, d.ArgErr() return 0, d.ArgErr()
@ -73,13 +73,12 @@ func parseOptHTTPPort(d *caddyfile.Dispenser, _ any) (any, error) {
if err != nil { if err != nil {
return 0, d.Errf("converting port '%s' to integer value: %v", httpPortStr, err) return 0, d.Errf("converting port '%s' to integer value: %v", httpPortStr, err)
} }
}
return httpPort, nil return httpPort, nil
} }
func parseOptHTTPSPort(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptHTTPSPort(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
var httpsPort int var httpsPort int
for d.Next() {
var httpsPortStr string var httpsPortStr string
if !d.AllArgs(&httpsPortStr) { if !d.AllArgs(&httpsPortStr) {
return 0, d.ArgErr() return 0, d.ArgErr()
@ -89,14 +88,12 @@ func parseOptHTTPSPort(d *caddyfile.Dispenser, _ any) (any, error) {
if err != nil { if err != nil {
return 0, d.Errf("converting port '%s' to integer value: %v", httpsPortStr, err) return 0, d.Errf("converting port '%s' to integer value: %v", httpsPortStr, err)
} }
}
return httpsPort, nil return httpsPort, nil
} }
func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) {
newOrder := directiveOrder d.Next() // consume option name
for d.Next() {
// get directive name // get directive name
if !d.Next() { if !d.Next() {
return nil, d.ArgErr() return nil, d.ArgErr()
@ -110,7 +107,9 @@ func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) {
if !d.Next() { if !d.Next() {
return nil, d.ArgErr() return nil, d.ArgErr()
} }
pos := d.Val() pos := Positional(d.Val())
newOrder := directiveOrder
// if directive exists, first remove it // if directive exists, first remove it
for i, d := range newOrder { for i, d := range newOrder {
@ -122,22 +121,22 @@ func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) {
// act on the positional // act on the positional
switch pos { switch pos {
case "first": case First:
newOrder = append([]string{dirName}, newOrder...) newOrder = append([]string{dirName}, newOrder...)
if d.NextArg() { if d.NextArg() {
return nil, d.ArgErr() return nil, d.ArgErr()
} }
directiveOrder = newOrder directiveOrder = newOrder
return newOrder, nil return newOrder, nil
case "last": case Last:
newOrder = append(newOrder, dirName) newOrder = append(newOrder, dirName)
if d.NextArg() { if d.NextArg() {
return nil, d.ArgErr() return nil, d.ArgErr()
} }
directiveOrder = newOrder directiveOrder = newOrder
return newOrder, nil return newOrder, nil
case "before": case Before:
case "after": case After:
default: default:
return nil, d.Errf("unknown positional '%s'", pos) return nil, d.Errf("unknown positional '%s'", pos)
} }
@ -154,15 +153,14 @@ func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) {
// insert directive into proper position // insert directive into proper position
for i, d := range newOrder { for i, d := range newOrder {
if d == otherDir { if d == otherDir {
if pos == "before" { if pos == Before {
newOrder = append(newOrder[:i], append([]string{dirName}, newOrder[i:]...)...) newOrder = append(newOrder[:i], append([]string{dirName}, newOrder[i:]...)...)
} else if pos == "after" { } else if pos == After {
newOrder = append(newOrder[:i+1], append([]string{dirName}, newOrder[i+1:]...)...) newOrder = append(newOrder[:i+1], append([]string{dirName}, newOrder[i+1:]...)...)
} }
break break
} }
} }
}
directiveOrder = newOrder directiveOrder = newOrder
@ -223,11 +221,11 @@ func parseOptACMEDNS(d *caddyfile.Dispenser, _ any) (any, error) {
func parseOptACMEEAB(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptACMEEAB(d *caddyfile.Dispenser, _ any) (any, error) {
eab := new(acme.EAB) eab := new(acme.EAB)
for d.Next() { d.Next() // consume option name
if d.NextArg() { if d.NextArg() {
return nil, d.ArgErr() return nil, d.ArgErr()
} }
for nesting := d.Nesting(); d.NextBlock(nesting); { for d.NextBlock(0) {
switch d.Val() { switch d.Val() {
case "key_id": case "key_id":
if !d.NextArg() { if !d.NextArg() {
@ -245,17 +243,19 @@ func parseOptACMEEAB(d *caddyfile.Dispenser, _ any) (any, error) {
return nil, d.Errf("unrecognized parameter '%s'", d.Val()) return nil, d.Errf("unrecognized parameter '%s'", d.Val())
} }
} }
}
return eab, nil return eab, nil
} }
func parseOptCertIssuer(d *caddyfile.Dispenser, existing any) (any, error) { func parseOptCertIssuer(d *caddyfile.Dispenser, existing any) (any, error) {
d.Next() // consume option name
var issuers []certmagic.Issuer var issuers []certmagic.Issuer
if existing != nil { if existing != nil {
issuers = existing.([]certmagic.Issuer) issuers = existing.([]certmagic.Issuer)
} }
for d.Next() { // consume option name
if !d.Next() { // get issuer module name // get issuer module name
if !d.Next() {
return nil, d.ArgErr() return nil, d.ArgErr()
} }
modID := "tls.issuance." + d.Val() modID := "tls.issuance." + d.Val()
@ -268,12 +268,11 @@ func parseOptCertIssuer(d *caddyfile.Dispenser, existing any) (any, error) {
return nil, d.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm) return nil, d.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm)
} }
issuers = append(issuers, iss) issuers = append(issuers, iss)
}
return issuers, nil return issuers, nil
} }
func parseOptSingleString(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptSingleString(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume parameter name d.Next() // consume option name
if !d.Next() { if !d.Next() {
return "", d.ArgErr() return "", d.ArgErr()
} }
@ -285,7 +284,7 @@ func parseOptSingleString(d *caddyfile.Dispenser, _ any) (any, error) {
} }
func parseOptStringList(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptStringList(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume parameter name d.Next() // consume option name
val := d.RemainingArgs() val := d.RemainingArgs()
if len(val) == 0 { if len(val) == 0 {
return "", d.ArgErr() return "", d.ArgErr()
@ -294,8 +293,9 @@ func parseOptStringList(d *caddyfile.Dispenser, _ any) (any, error) {
} }
func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
adminCfg := new(caddy.AdminConfig) adminCfg := new(caddy.AdminConfig)
for d.Next() {
if d.NextArg() { if d.NextArg() {
listenAddress := d.Val() listenAddress := d.Val()
if listenAddress == "off" { if listenAddress == "off" {
@ -310,7 +310,7 @@ func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) {
} }
} }
} }
for nesting := d.Nesting(); d.NextBlock(nesting); { for d.NextBlock(0) {
switch d.Val() { switch d.Val() {
case "enforce_origin": case "enforce_origin":
adminCfg.EnforceOrigin = true adminCfg.EnforceOrigin = true
@ -322,7 +322,6 @@ func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) {
return nil, d.Errf("unrecognized parameter '%s'", d.Val()) return nil, d.Errf("unrecognized parameter '%s'", d.Val())
} }
} }
}
if adminCfg.Listen == "" && !adminCfg.Disabled { if adminCfg.Listen == "" && !adminCfg.Disabled {
adminCfg.Listen = caddy.DefaultAdminListen adminCfg.Listen = caddy.DefaultAdminListen
} }
@ -330,11 +329,13 @@ func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) {
} }
func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) {
var ond *caddytls.OnDemandConfig d.Next() // consume option name
for d.Next() {
if d.NextArg() { if d.NextArg() {
return nil, d.ArgErr() return nil, d.ArgErr()
} }
var ond *caddytls.OnDemandConfig
for nesting := d.Nesting(); d.NextBlock(nesting); { for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() { switch d.Val() {
case "ask": case "ask":
@ -344,7 +345,8 @@ func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) {
if ond == nil { if ond == nil {
ond = new(caddytls.OnDemandConfig) ond = new(caddytls.OnDemandConfig)
} }
ond.Ask = d.Val() perm := caddytls.PermissionByHTTP{Endpoint: d.Val()}
ond.PermissionRaw = caddyconfig.JSONModuleObject(perm, "module", "http", nil)
case "interval": case "interval":
if !d.NextArg() { if !d.NextArg() {
@ -382,7 +384,6 @@ func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) {
return nil, d.Errf("unrecognized parameter '%s'", d.Val()) return nil, d.Errf("unrecognized parameter '%s'", d.Val())
} }
} }
}
if ond == nil { if ond == nil {
return nil, d.Err("expected at least one config parameter for on_demand_tls") return nil, d.Err("expected at least one config parameter for on_demand_tls")
} }
@ -390,7 +391,7 @@ func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) {
} }
func parseOptPersistConfig(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptPersistConfig(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume parameter name d.Next() // consume option name
if !d.Next() { if !d.Next() {
return "", d.ArgErr() return "", d.ArgErr()
} }
@ -405,7 +406,7 @@ func parseOptPersistConfig(d *caddyfile.Dispenser, _ any) (any, error) {
} }
func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume parameter name d.Next() // consume option name
if !d.Next() { if !d.Next() {
return "", d.ArgErr() return "", d.ArgErr()
} }

View file

@ -48,10 +48,12 @@ func init() {
// //
// When the CA ID is unspecified, 'local' is assumed. // When the CA ID is unspecified, 'local' is assumed.
func parsePKIApp(d *caddyfile.Dispenser, existingVal any) (any, error) { func parsePKIApp(d *caddyfile.Dispenser, existingVal any) (any, error) {
pki := &caddypki.PKI{CAs: make(map[string]*caddypki.CA)} d.Next() // consume app name
for d.Next() { pki := &caddypki.PKI{
for nesting := d.Nesting(); d.NextBlock(nesting); { CAs: make(map[string]*caddypki.CA),
}
for d.NextBlock(0) {
switch d.Val() { switch d.Val() {
case "ca": case "ca":
pkiCa := new(caddypki.CA) pkiCa := new(caddypki.CA)
@ -164,8 +166,6 @@ func parsePKIApp(d *caddyfile.Dispenser, existingVal any) (any, error) {
return nil, d.Errf("unrecognized pki option '%s'", d.Val()) return nil, d.Errf("unrecognized pki option '%s'", d.Val())
} }
} }
}
return pki, nil return pki, nil
} }

View file

@ -46,21 +46,23 @@ type serverOptions struct {
Protocols []string Protocols []string
StrictSNIHost *bool StrictSNIHost *bool
TrustedProxiesRaw json.RawMessage TrustedProxiesRaw json.RawMessage
TrustedProxiesStrict int
ClientIPHeaders []string ClientIPHeaders []string
ShouldLogCredentials bool ShouldLogCredentials bool
Metrics *caddyhttp.Metrics Metrics *caddyhttp.Metrics
} }
func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) { func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) {
d.Next() // consume option name
serverOpts := serverOptions{} serverOpts := serverOptions{}
for d.Next() {
if d.NextArg() { if d.NextArg() {
serverOpts.ListenerAddress = d.Val() serverOpts.ListenerAddress = d.Val()
if d.NextArg() { if d.NextArg() {
return nil, d.ArgErr() return nil, d.ArgErr()
} }
} }
for nesting := d.Nesting(); d.NextBlock(nesting); { for d.NextBlock(0) {
switch d.Val() { switch d.Val() {
case "name": case "name":
if serverOpts.ListenerAddress == "" { if serverOpts.ListenerAddress == "" {
@ -217,6 +219,12 @@ func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) {
) )
serverOpts.TrustedProxiesRaw = jsonSource serverOpts.TrustedProxiesRaw = jsonSource
case "trusted_proxies_strict":
if d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.TrustedProxiesStrict = 1
case "client_ip_headers": case "client_ip_headers":
headers := d.RemainingArgs() headers := d.RemainingArgs()
for _, header := range headers { for _, header := range headers {
@ -276,7 +284,6 @@ func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) {
return nil, d.Errf("unrecognized servers option '%s'", d.Val()) return nil, d.Errf("unrecognized servers option '%s'", d.Val())
} }
} }
}
return serverOpts, nil return serverOpts, nil
} }
@ -340,6 +347,7 @@ func applyServerOptions(
server.StrictSNIHost = opts.StrictSNIHost server.StrictSNIHost = opts.StrictSNIHost
server.TrustedProxiesRaw = opts.TrustedProxiesRaw server.TrustedProxiesRaw = opts.TrustedProxiesRaw
server.ClientIPHeaders = opts.ClientIPHeaders server.ClientIPHeaders = opts.ClientIPHeaders
server.TrustedProxiesStrict = opts.TrustedProxiesStrict
server.Metrics = opts.Metrics server.Metrics = opts.Metrics
if opts.ShouldLogCredentials { if opts.ShouldLogCredentials {
if server.Logs == nil { if server.Logs == nil {

View file

@ -0,0 +1,93 @@
package httpcaddyfile
import (
"regexp"
"strings"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
type ComplexShorthandReplacer struct {
search *regexp.Regexp
replace string
}
type ShorthandReplacer struct {
complex []ComplexShorthandReplacer
simple *strings.Replacer
}
func NewShorthandReplacer() ShorthandReplacer {
// replace shorthand placeholders (which are convenient
// when writing a Caddyfile) with their actual placeholder
// identifiers or variable names
replacer := strings.NewReplacer(placeholderShorthands()...)
// these are placeholders that allow a user-defined final
// parameters, but we still want to provide a shorthand
// for those, so we use a regexp to replace
regexpReplacements := []ComplexShorthandReplacer{
{regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"},
{regexp.MustCompile(`{cookie\.([\w-]*)}`), "{http.request.cookie.$1}"},
{regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"},
{regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"},
{regexp.MustCompile(`{file\.([\w-]*)}`), "{http.request.uri.path.file.$1}"},
{regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"},
{regexp.MustCompile(`{re\.([\w-\.]*)}`), "{http.regexp.$1}"},
{regexp.MustCompile(`{vars\.([\w-]*)}`), "{http.vars.$1}"},
{regexp.MustCompile(`{rp\.([\w-\.]*)}`), "{http.reverse_proxy.$1}"},
{regexp.MustCompile(`{err\.([\w-\.]*)}`), "{http.error.$1}"},
{regexp.MustCompile(`{file_match\.([\w-]*)}`), "{http.matchers.file.$1}"},
}
return ShorthandReplacer{
complex: regexpReplacements,
simple: replacer,
}
}
// placeholderShorthands returns a slice of old-new string pairs,
// where the left of the pair is a placeholder shorthand that may
// be used in the Caddyfile, and the right is the replacement.
func placeholderShorthands() []string {
return []string{
"{dir}", "{http.request.uri.path.dir}",
"{file}", "{http.request.uri.path.file}",
"{host}", "{http.request.host}",
"{hostport}", "{http.request.hostport}",
"{port}", "{http.request.port}",
"{method}", "{http.request.method}",
"{path}", "{http.request.uri.path}",
"{query}", "{http.request.uri.query}",
"{remote}", "{http.request.remote}",
"{remote_host}", "{http.request.remote.host}",
"{remote_port}", "{http.request.remote.port}",
"{scheme}", "{http.request.scheme}",
"{uri}", "{http.request.uri}",
"{uuid}", "{http.request.uuid}",
"{tls_cipher}", "{http.request.tls.cipher_suite}",
"{tls_version}", "{http.request.tls.version}",
"{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
"{tls_client_issuer}", "{http.request.tls.client.issuer}",
"{tls_client_serial}", "{http.request.tls.client.serial}",
"{tls_client_subject}", "{http.request.tls.client.subject}",
"{tls_client_certificate_pem}", "{http.request.tls.client.certificate_pem}",
"{tls_client_certificate_der_base64}", "{http.request.tls.client.certificate_der_base64}",
"{upstream_hostport}", "{http.reverse_proxy.upstream.hostport}",
"{client_ip}", "{http.vars.client_ip}",
}
}
// ApplyToSegment replaces shorthand placeholder to its full placeholder, understandable by Caddy.
func (s ShorthandReplacer) ApplyToSegment(segment *caddyfile.Segment) {
if segment != nil {
for i := 0; i < len(*segment); i++ {
// simple string replacements
(*segment)[i].Text = s.simple.Replace((*segment)[i].Text)
// complex regexp replacements
for _, r := range s.complex {
(*segment)[i].Text = r.search.ReplaceAllString((*segment)[i].Text, r.replace)
}
}
}
}

View file

@ -118,6 +118,11 @@ func (st ServerType) buildTLSApp(
ap.OnDemand = true ap.OnDemand = true
} }
// reuse private keys tls
if _, ok := sblock.pile["tls.reuse_private_keys"]; ok {
ap.ReusePrivateKeys = true
}
if keyTypeVals, ok := sblock.pile["tls.key_type"]; ok { if keyTypeVals, ok := sblock.pile["tls.key_type"]; ok {
ap.KeyType = keyTypeVals[0].Value.(string) ap.KeyType = keyTypeVals[0].Value.(string)
} }
@ -582,10 +587,12 @@ outer:
// eaten up by the one with subjects; and if both have subjects, we // eaten up by the one with subjects; and if both have subjects, we
// need to combine their lists // need to combine their lists
if reflect.DeepEqual(aps[i].IssuersRaw, aps[j].IssuersRaw) && if reflect.DeepEqual(aps[i].IssuersRaw, aps[j].IssuersRaw) &&
reflect.DeepEqual(aps[i].ManagersRaw, aps[j].ManagersRaw) &&
bytes.Equal(aps[i].StorageRaw, aps[j].StorageRaw) && bytes.Equal(aps[i].StorageRaw, aps[j].StorageRaw) &&
aps[i].MustStaple == aps[j].MustStaple && aps[i].MustStaple == aps[j].MustStaple &&
aps[i].KeyType == aps[j].KeyType && aps[i].KeyType == aps[j].KeyType &&
aps[i].OnDemand == aps[j].OnDemand && aps[i].OnDemand == aps[j].OnDemand &&
aps[i].ReusePrivateKeys == aps[j].ReusePrivateKeys &&
aps[i].RenewalWindowRatio == aps[j].RenewalWindowRatio { aps[i].RenewalWindowRatio == aps[j].RenewalWindowRatio {
if len(aps[i].SubjectsRaw) > 0 && len(aps[j].SubjectsRaw) == 0 { if len(aps[i].SubjectsRaw) > 0 && len(aps[j].SubjectsRaw) == 0 {
// later policy (at j) has no subjects ("catch-all"), so we can // later policy (at j) has no subjects ("catch-all"), so we can

View file

@ -8,6 +8,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/fs"
"log" "log"
"net" "net"
"net/http" "net/http"
@ -59,11 +60,11 @@ var (
type Tester struct { type Tester struct {
Client *http.Client Client *http.Client
configLoaded bool configLoaded bool
t *testing.T t testing.TB
} }
// NewTester will create a new testing client with an attached cookie jar // NewTester will create a new testing client with an attached cookie jar
func NewTester(t *testing.T) *Tester { func NewTester(t testing.TB) *Tester {
jar, err := cookiejar.New(nil) jar, err := cookiejar.New(nil)
if err != nil { if err != nil {
t.Fatalf("failed to create cookiejar: %s", err) t.Fatalf("failed to create cookiejar: %s", err)
@ -120,7 +121,6 @@ func (tc *Tester) initServer(rawConfig string, configType string) error {
tc.t.Cleanup(func() { tc.t.Cleanup(func() {
if tc.t.Failed() && tc.configLoaded { if tc.t.Failed() && tc.configLoaded {
res, err := http.Get(fmt.Sprintf("http://localhost:%d/config/", Default.AdminPort)) res, err := http.Get(fmt.Sprintf("http://localhost:%d/config/", Default.AdminPort))
if err != nil { if err != nil {
tc.t.Log("unable to read the current config") tc.t.Log("unable to read the current config")
@ -229,10 +229,10 @@ const initConfig = `{
// validateTestPrerequisites ensures the certificates are available in the // validateTestPrerequisites ensures the certificates are available in the
// designated path and Caddy sub-process is running. // designated path and Caddy sub-process is running.
func validateTestPrerequisites(t *testing.T) error { func validateTestPrerequisites(t testing.TB) error {
// check certificates are found // check certificates are found
for _, certName := range Default.Certifcates { for _, certName := range Default.Certifcates {
if _, err := os.Stat(getIntegrationDir() + certName); os.IsNotExist(err) { if _, err := os.Stat(getIntegrationDir() + certName); errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("caddy integration test certificates (%s) not found", certName) return fmt.Errorf("caddy integration test certificates (%s) not found", certName)
} }
} }
@ -373,7 +373,7 @@ func (tc *Tester) AssertRedirect(requestURI string, expectedToLocation string, e
} }
// CompareAdapt adapts a config and then compares it against an expected result // CompareAdapt adapts a config and then compares it against an expected result
func CompareAdapt(t *testing.T, filename, rawConfig string, adapterName string, expectedResponse string) bool { func CompareAdapt(t testing.TB, filename, rawConfig string, adapterName string, expectedResponse string) bool {
cfgAdapter := caddyconfig.GetAdapter(adapterName) cfgAdapter := caddyconfig.GetAdapter(adapterName)
if cfgAdapter == nil { if cfgAdapter == nil {
t.Logf("unrecognized config adapter '%s'", adapterName) t.Logf("unrecognized config adapter '%s'", adapterName)
@ -432,7 +432,7 @@ func CompareAdapt(t *testing.T, filename, rawConfig string, adapterName string,
} }
// AssertAdapt adapts a config and then tests it against an expected result // AssertAdapt adapts a config and then tests it against an expected result
func AssertAdapt(t *testing.T, rawConfig string, adapterName string, expectedResponse string) { func AssertAdapt(t testing.TB, rawConfig string, adapterName string, expectedResponse string) {
ok := CompareAdapt(t, "Caddyfile", rawConfig, adapterName, expectedResponse) ok := CompareAdapt(t, "Caddyfile", rawConfig, adapterName, expectedResponse)
if !ok { if !ok {
t.Fail() t.Fail()
@ -441,7 +441,7 @@ func AssertAdapt(t *testing.T, rawConfig string, adapterName string, expectedRes
// Generic request functions // Generic request functions
func applyHeaders(t *testing.T, req *http.Request, requestHeaders []string) { func applyHeaders(t testing.TB, req *http.Request, requestHeaders []string) {
requestContentType := "" requestContentType := ""
for _, requestHeader := range requestHeaders { for _, requestHeader := range requestHeaders {
arr := strings.SplitAfterN(requestHeader, ":", 2) arr := strings.SplitAfterN(requestHeader, ":", 2)
@ -467,7 +467,7 @@ func (tc *Tester) AssertResponseCode(req *http.Request, expectedStatusCode int)
} }
if expectedStatusCode != resp.StatusCode { if expectedStatusCode != resp.StatusCode {
tc.t.Errorf("requesting \"%s\" expected status code: %d but got %d", req.RequestURI, expectedStatusCode, resp.StatusCode) tc.t.Errorf("requesting \"%s\" expected status code: %d but got %d", req.URL.RequestURI(), expectedStatusCode, resp.StatusCode)
} }
return resp return resp

View file

@ -0,0 +1,206 @@
package integration
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"fmt"
"net"
"net/http"
"strings"
"testing"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddytest"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
smallstepacme "github.com/smallstep/certificates/acme"
"go.uber.org/zap"
)
const acmeChallengePort = 9081
// Test the basic functionality of Caddy's ACME server
func TestACMEServerWithDefaults(t *testing.T) {
ctx := context.Background()
logger, err := zap.NewDevelopment()
if err != nil {
t.Error(err)
return
}
tester := caddytest.NewTester(t)
tester.InitServer(`
{
skip_install_trust
admin localhost:2999
http_port 9080
https_port 9443
local_certs
}
acme.localhost {
acme_server
}
`, "caddyfile")
client := acmez.Client{
Client: &acme.Client{
Directory: "https://acme.localhost:9443/acme/local/directory",
HTTPClient: tester.Client,
Logger: logger,
},
ChallengeSolvers: map[string]acmez.Solver{
acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
},
}
accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Errorf("generating account key: %v", err)
}
account := acme.Account{
Contact: []string{"mailto:you@example.com"},
TermsOfServiceAgreed: true,
PrivateKey: accountPrivateKey,
}
account, err = client.NewAccount(ctx, account)
if err != nil {
t.Errorf("new account: %v", err)
return
}
// Every certificate needs a key.
certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Errorf("generating certificate key: %v", err)
return
}
certs, err := client.ObtainCertificate(ctx, account, certPrivateKey, []string{"localhost"})
if err != nil {
t.Errorf("obtaining certificate: %v", err)
return
}
// ACME servers should usually give you the entire certificate chain
// in PEM format, and sometimes even alternate chains! It's up to you
// which one(s) to store and use, but whatever you do, be sure to
// store the certificate and key somewhere safe and secure, i.e. don't
// lose them!
for _, cert := range certs {
t.Logf("Certificate %q:\n%s\n\n", cert.URL, cert.ChainPEM)
}
}
func TestACMEServerWithMismatchedChallenges(t *testing.T) {
ctx := context.Background()
logger := caddy.Log().Named("acmez")
tester := caddytest.NewTester(t)
tester.InitServer(`
{
skip_install_trust
admin localhost:2999
http_port 9080
https_port 9443
local_certs
}
acme.localhost {
acme_server {
challenges tls-alpn-01
}
}
`, "caddyfile")
client := acmez.Client{
Client: &acme.Client{
Directory: "https://acme.localhost:9443/acme/local/directory",
HTTPClient: tester.Client,
Logger: logger,
},
ChallengeSolvers: map[string]acmez.Solver{
acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
},
}
accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Errorf("generating account key: %v", err)
}
account := acme.Account{
Contact: []string{"mailto:you@example.com"},
TermsOfServiceAgreed: true,
PrivateKey: accountPrivateKey,
}
account, err = client.NewAccount(ctx, account)
if err != nil {
t.Errorf("new account: %v", err)
return
}
// Every certificate needs a key.
certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Errorf("generating certificate key: %v", err)
return
}
certs, err := client.ObtainCertificate(ctx, account, certPrivateKey, []string{"localhost"})
if len(certs) > 0 {
t.Errorf("expected '0' certificates, but received '%d'", len(certs))
}
if err == nil {
t.Error("expected errors, but received none")
}
const expectedErrMsg = "no solvers available for remaining challenges (configured=[http-01] offered=[tls-alpn-01] remaining=[tls-alpn-01])"
if !strings.Contains(err.Error(), expectedErrMsg) {
t.Errorf(`received error message does not match expectation: expected="%s" received="%s"`, expectedErrMsg, err.Error())
}
}
// naiveHTTPSolver is a no-op acmez.Solver for example purposes only.
type naiveHTTPSolver struct {
srv *http.Server
logger *zap.Logger
}
func (s *naiveHTTPSolver) Present(ctx context.Context, challenge acme.Challenge) error {
smallstepacme.InsecurePortHTTP01 = acmeChallengePort
s.srv = &http.Server{
Addr: fmt.Sprintf(":%d", acmeChallengePort),
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
host, _, err := net.SplitHostPort(r.Host)
if err != nil {
host = r.Host
}
s.logger.Info("received request on challenge server", zap.String("path", r.URL.Path))
if r.Method == "GET" && r.URL.Path == challenge.HTTP01ResourcePath() && strings.EqualFold(host, challenge.Identifier.Value) {
w.Header().Add("Content-Type", "text/plain")
w.Write([]byte(challenge.KeyAuthorization))
r.Close = true
s.logger.Info("served key authentication",
zap.String("identifier", challenge.Identifier.Value),
zap.String("challenge", "http-01"),
zap.String("remote", r.RemoteAddr),
)
}
}),
}
l, err := net.Listen("tcp", fmt.Sprintf(":%d", acmeChallengePort))
if err != nil {
return err
}
s.logger.Info("present challenge", zap.Any("challenge", challenge))
go s.srv.Serve(l)
return nil
}
func (s naiveHTTPSolver) CleanUp(ctx context.Context, challenge acme.Challenge) error {
smallstepacme.InsecurePortHTTP01 = 0
s.logger.Info("cleanup", zap.Any("challenge", challenge))
if s.srv != nil {
s.srv.Close()
}
return nil
}

View file

@ -0,0 +1,209 @@
package integration
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"strings"
"testing"
"github.com/caddyserver/caddy/v2/caddytest"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
)
func TestACMEServerDirectory(t *testing.T) {
tester := caddytest.NewTester(t)
tester.InitServer(`
{
skip_install_trust
local_certs
admin localhost:2999
http_port 9080
https_port 9443
pki {
ca local {
name "Caddy Local Authority"
}
}
}
acme.localhost:9443 {
acme_server
}
`, "caddyfile")
tester.AssertGetResponse(
"https://acme.localhost:9443/acme/local/directory",
200,
`{"newNonce":"https://acme.localhost:9443/acme/local/new-nonce","newAccount":"https://acme.localhost:9443/acme/local/new-account","newOrder":"https://acme.localhost:9443/acme/local/new-order","revokeCert":"https://acme.localhost:9443/acme/local/revoke-cert","keyChange":"https://acme.localhost:9443/acme/local/key-change"}
`)
}
func TestACMEServerAllowPolicy(t *testing.T) {
tester := caddytest.NewTester(t)
tester.InitServer(`
{
skip_install_trust
local_certs
admin localhost:2999
http_port 9080
https_port 9443
pki {
ca local {
name "Caddy Local Authority"
}
}
}
acme.localhost {
acme_server {
challenges http-01
allow {
domains localhost
}
}
}
`, "caddyfile")
ctx := context.Background()
logger, err := zap.NewDevelopment()
if err != nil {
t.Error(err)
return
}
client := acmez.Client{
Client: &acme.Client{
Directory: "https://acme.localhost:9443/acme/local/directory",
HTTPClient: tester.Client,
Logger: logger,
},
ChallengeSolvers: map[string]acmez.Solver{
acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
},
}
accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Errorf("generating account key: %v", err)
}
account := acme.Account{
Contact: []string{"mailto:you@example.com"},
TermsOfServiceAgreed: true,
PrivateKey: accountPrivateKey,
}
account, err = client.NewAccount(ctx, account)
if err != nil {
t.Errorf("new account: %v", err)
return
}
// Every certificate needs a key.
certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Errorf("generating certificate key: %v", err)
return
}
{
certs, err := client.ObtainCertificate(
ctx,
account,
certPrivateKey,
[]string{"localhost"},
)
if err != nil {
t.Errorf("obtaining certificate for allowed domain: %v", err)
return
}
// ACME servers should usually give you the entire certificate chain
// in PEM format, and sometimes even alternate chains! It's up to you
// which one(s) to store and use, but whatever you do, be sure to
// store the certificate and key somewhere safe and secure, i.e. don't
// lose them!
for _, cert := range certs {
t.Logf("Certificate %q:\n%s\n\n", cert.URL, cert.ChainPEM)
}
}
{
_, err := client.ObtainCertificate(ctx, account, certPrivateKey, []string{"not-matching.localhost"})
if err == nil {
t.Errorf("obtaining certificate for 'not-matching.localhost' domain")
} else if err != nil && !strings.Contains(err.Error(), "urn:ietf:params:acme:error:rejectedIdentifier") {
t.Logf("unexpected error: %v", err)
}
}
}
func TestACMEServerDenyPolicy(t *testing.T) {
tester := caddytest.NewTester(t)
tester.InitServer(`
{
skip_install_trust
local_certs
admin localhost:2999
http_port 9080
https_port 9443
pki {
ca local {
name "Caddy Local Authority"
}
}
}
acme.localhost {
acme_server {
deny {
domains deny.localhost
}
}
}
`, "caddyfile")
ctx := context.Background()
logger, err := zap.NewDevelopment()
if err != nil {
t.Error(err)
return
}
client := acmez.Client{
Client: &acme.Client{
Directory: "https://acme.localhost:9443/acme/local/directory",
HTTPClient: tester.Client,
Logger: logger,
},
ChallengeSolvers: map[string]acmez.Solver{
acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
},
}
accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Errorf("generating account key: %v", err)
}
account := acme.Account{
Contact: []string{"mailto:you@example.com"},
TermsOfServiceAgreed: true,
PrivateKey: accountPrivateKey,
}
account, err = client.NewAccount(ctx, account)
if err != nil {
t.Errorf("new account: %v", err)
return
}
// Every certificate needs a key.
certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Errorf("generating certificate key: %v", err)
return
}
{
_, err := client.ObtainCertificate(ctx, account, certPrivateKey, []string{"deny.localhost"})
if err == nil {
t.Errorf("obtaining certificate for 'deny.localhost' domain")
} else if err != nil && !strings.Contains(err.Error(), "urn:ietf:params:acme:error:rejectedIdentifier") {
t.Logf("unexpected error: %v", err)
}
}
}

View file

@ -0,0 +1,65 @@
{
pki {
ca custom-ca {
name "Custom CA"
}
}
}
acme.example.com {
acme_server {
ca custom-ca
challenges dns-01
}
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":443"
],
"routes": [
{
"match": [
{
"host": [
"acme.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"ca": "custom-ca",
"challenges": [
"dns-01"
],
"handler": "acme_server"
}
]
}
]
}
],
"terminal": true
}
]
}
}
},
"pki": {
"certificate_authorities": {
"custom-ca": {
"name": "Custom CA"
}
}
}
}
}

View file

@ -0,0 +1,62 @@
{
pki {
ca custom-ca {
name "Custom CA"
}
}
}
acme.example.com {
acme_server {
ca custom-ca
challenges
}
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":443"
],
"routes": [
{
"match": [
{
"host": [
"acme.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"ca": "custom-ca",
"handler": "acme_server"
}
]
}
]
}
],
"terminal": true
}
]
}
}
},
"pki": {
"certificate_authorities": {
"custom-ca": {
"name": "Custom CA"
}
}
}
}
}

View file

@ -0,0 +1,66 @@
{
pki {
ca custom-ca {
name "Custom CA"
}
}
}
acme.example.com {
acme_server {
ca custom-ca
challenges dns-01 http-01
}
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":443"
],
"routes": [
{
"match": [
{
"host": [
"acme.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"ca": "custom-ca",
"challenges": [
"dns-01",
"http-01"
],
"handler": "acme_server"
}
]
}
]
}
],
"terminal": true
}
]
}
}
},
"pki": {
"certificate_authorities": {
"custom-ca": {
"name": "Custom CA"
}
}
}
}
}

View file

@ -0,0 +1,37 @@
:8443 {
tls internal {
on_demand
}
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":8443"
],
"tls_connection_policies": [
{}
]
}
}
},
"tls": {
"automation": {
"policies": [
{
"issuers": [
{
"module": "internal"
}
],
"on_demand": true
}
]
}
}
}
}

View file

@ -11,6 +11,7 @@ encode gzip zstd {
header Content-Type application/xhtml+xml* header Content-Type application/xhtml+xml*
header Content-Type application/atom+xml* header Content-Type application/atom+xml*
header Content-Type application/rss+xml* header Content-Type application/rss+xml*
header Content-Type application/wasm*
header Content-Type image/svg+xml* header Content-Type image/svg+xml*
} }
} }
@ -47,6 +48,7 @@ encode {
"application/xhtml+xml*", "application/xhtml+xml*",
"application/atom+xml*", "application/atom+xml*",
"application/rss+xml*", "application/rss+xml*",
"application/wasm*",
"image/svg+xml*" "image/svg+xml*"
] ]
}, },

View file

@ -0,0 +1,245 @@
foo.localhost {
root * /srv
error /private* "Unauthorized" 410
error /fivehundred* "Internal Server Error" 500
handle_errors 5xx {
respond "Error In range [500 .. 599]"
}
handle_errors 410 {
respond "404 or 410 error"
}
}
bar.localhost {
root * /srv
error /private* "Unauthorized" 410
error /fivehundred* "Internal Server Error" 500
handle_errors 5xx {
respond "Error In range [500 .. 599] from second site"
}
handle_errors 410 {
respond "404 or 410 error from second site"
}
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":443"
],
"routes": [
{
"match": [
{
"host": [
"foo.localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Internal Server Error",
"handler": "error",
"status_code": 500
}
],
"match": [
{
"path": [
"/fivehundred*"
]
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
}
]
}
],
"terminal": true
},
{
"match": [
{
"host": [
"bar.localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Internal Server Error",
"handler": "error",
"status_code": 500
}
],
"match": [
{
"path": [
"/fivehundred*"
]
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
}
]
}
],
"terminal": true
}
],
"errors": {
"routes": [
{
"match": [
{
"host": [
"foo.localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "404 or 410 error",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} in [410]"
}
]
},
{
"handle": [
{
"body": "Error In range [500 .. 599]",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 500 \u0026\u0026 {http.error.status_code} \u003c= 599"
}
]
}
]
}
],
"terminal": true
},
{
"match": [
{
"host": [
"bar.localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "404 or 410 error from second site",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} in [410]"
}
]
},
{
"handle": [
{
"body": "Error In range [500 .. 599] from second site",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 500 \u0026\u0026 {http.error.status_code} \u003c= 599"
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}
}

View file

@ -0,0 +1,120 @@
{
http_port 3010
}
localhost:3010 {
root * /srv
error /private* "Unauthorized" 410
error /hidden* "Not found" 404
handle_errors 4xx {
respond "Error in the [400 .. 499] range"
}
}
----------
{
"apps": {
"http": {
"http_port": 3010,
"servers": {
"srv0": {
"listen": [
":3010"
],
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
},
{
"handle": [
{
"error": "Not found",
"handler": "error",
"status_code": 404
}
],
"match": [
{
"path": [
"/hidden*"
]
}
]
}
]
}
],
"terminal": true
}
],
"errors": {
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "Error in the [400 .. 499] range",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499"
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}
}

View file

@ -0,0 +1,153 @@
{
http_port 2099
}
localhost:2099 {
root * /srv
error /private* "Unauthorized" 410
error /threehundred* "Moved Permanently" 301
error /internalerr* "Internal Server Error" 500
handle_errors 500 3xx {
respond "Error code is equal to 500 or in the [300..399] range"
}
handle_errors 4xx {
respond "Error in the [400 .. 499] range"
}
}
----------
{
"apps": {
"http": {
"http_port": 2099,
"servers": {
"srv0": {
"listen": [
":2099"
],
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Moved Permanently",
"handler": "error",
"status_code": 301
}
],
"match": [
{
"path": [
"/threehundred*"
]
}
]
},
{
"handle": [
{
"error": "Internal Server Error",
"handler": "error",
"status_code": 500
}
],
"match": [
{
"path": [
"/internalerr*"
]
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
}
]
}
],
"terminal": true
}
],
"errors": {
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "Error in the [400 .. 499] range",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499"
}
]
},
{
"handle": [
{
"body": "Error code is equal to 500 or in the [300..399] range",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 300 \u0026\u0026 {http.error.status_code} \u003c= 399 || {http.error.status_code} in [500]"
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}
}

View file

@ -0,0 +1,120 @@
{
http_port 3010
}
localhost:3010 {
root * /srv
error /private* "Unauthorized" 410
error /hidden* "Not found" 404
handle_errors 404 410 {
respond "404 or 410 error"
}
}
----------
{
"apps": {
"http": {
"http_port": 3010,
"servers": {
"srv0": {
"listen": [
":3010"
],
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
},
{
"handle": [
{
"error": "Not found",
"handler": "error",
"status_code": 404
}
],
"match": [
{
"path": [
"/hidden*"
]
}
]
}
]
}
],
"terminal": true
}
],
"errors": {
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "404 or 410 error",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} in [404, 410]"
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}
}

View file

@ -0,0 +1,148 @@
{
http_port 2099
}
localhost:2099 {
root * /srv
error /private* "Unauthorized" 410
error /hidden* "Not found" 404
error /internalerr* "Internal Server Error" 500
handle_errors {
respond "Fallback route: code outside the [400..499] range"
}
handle_errors 4xx {
respond "Error in the [400 .. 499] range"
}
}
----------
{
"apps": {
"http": {
"http_port": 2099,
"servers": {
"srv0": {
"listen": [
":2099"
],
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Internal Server Error",
"handler": "error",
"status_code": 500
}
],
"match": [
{
"path": [
"/internalerr*"
]
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
},
{
"handle": [
{
"error": "Not found",
"handler": "error",
"status_code": 404
}
],
"match": [
{
"path": [
"/hidden*"
]
}
]
}
]
}
],
"terminal": true
}
],
"errors": {
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "Error in the [400 .. 499] range",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499"
}
]
},
{
"handle": [
{
"body": "Fallback route: code outside the [400..499] range",
"handler": "static_response"
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}
}

View file

@ -69,7 +69,10 @@
} }
], ],
"on_demand": { "on_demand": {
"ask": "https://example.com", "permission": {
"endpoint": "https://example.com",
"module": "http"
},
"rate_limit": { "rate_limit": {
"interval": 30000000000, "interval": 30000000000,
"burst": 20 "burst": 20

View file

@ -78,7 +78,10 @@
} }
], ],
"on_demand": { "on_demand": {
"ask": "https://example.com", "permission": {
"endpoint": "https://example.com",
"module": "http"
},
"rate_limit": { "rate_limit": {
"interval": 30000000000, "interval": 30000000000,
"burst": 20 "burst": 20

View file

@ -71,7 +71,10 @@
} }
], ],
"on_demand": { "on_demand": {
"ask": "https://example.com", "permission": {
"endpoint": "https://example.com",
"module": "http"
},
"rate_limit": { "rate_limit": {
"interval": 30000000000, "interval": 30000000000,
"burst": 20 "burst": 20

View file

@ -0,0 +1,46 @@
http://handle {
file_server
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":80"
],
"routes": [
{
"match": [
{
"host": [
"handle"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "file_server",
"hide": [
"./Caddyfile"
]
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}

View file

@ -0,0 +1,71 @@
:80 {
log
vars foo foo
log_append const bar
log_append vars foo
log_append placeholder {path}
log_append /only-for-this-path secret value
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":80"
],
"routes": [
{
"handle": [
{
"foo": "foo",
"handler": "vars"
}
]
},
{
"match": [
{
"path": [
"/only-for-this-path"
]
}
],
"handle": [
{
"handler": "log_append",
"key": "secret",
"value": "value"
}
]
},
{
"handle": [
{
"handler": "log_append",
"key": "const",
"value": "bar"
},
{
"handler": "log_append",
"key": "vars",
"value": "foo"
},
{
"handler": "log_append",
"key": "placeholder",
"value": "{http.request.uri.path}"
}
]
}
],
"logs": {}
}
}
}
}
}

View file

@ -0,0 +1,63 @@
{
log {
format append {
wrap json
fields {
wrap "foo"
}
env {env.EXAMPLE}
int 1
float 1.1
bool true
string "string"
}
}
}
:80 {
respond "Hello, World!"
}
----------
{
"logging": {
"logs": {
"default": {
"encoder": {
"fields": {
"bool": true,
"env": "{env.EXAMPLE}",
"float": 1.1,
"int": 1,
"string": "string",
"wrap": "foo"
},
"format": "append",
"wrap": {
"format": "json"
}
}
}
}
},
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":80"
],
"routes": [
{
"handle": [
{
"body": "Hello, World!",
"handler": "static_response"
}
]
}
]
}
}
}
}
}

View file

@ -1,7 +1,7 @@
http://localhost:2020 { http://localhost:2020 {
log log
skip_log /first-hidden* log_skip /first-hidden*
skip_log /second-hidden* log_skip /second-hidden*
respond 200 respond 200
} }
@ -34,7 +34,7 @@ http://localhost:2020 {
"handle": [ "handle": [
{ {
"handler": "vars", "handler": "vars",
"skip_log": true "log_skip": true
} }
], ],
"match": [ "match": [
@ -49,7 +49,7 @@ http://localhost:2020 {
"handle": [ "handle": [
{ {
"handler": "vars", "handler": "vars",
"skip_log": true "log_skip": true
} }
], ],
"match": [ "match": [
@ -99,7 +99,7 @@ http://localhost:2020 {
}, },
"logs": { "logs": {
"logger_names": { "logger_names": {
"localhost:2020": "" "localhost": ""
}, },
"skip_unmapped_hosts": true "skip_unmapped_hosts": true
} }

View file

@ -0,0 +1,52 @@
:80
log {
output stdout
format filter {
fields {
request>headers>Server delete
}
}
}
----------
{
"logging": {
"logs": {
"default": {
"exclude": [
"http.log.access.log0"
]
},
"log0": {
"writer": {
"output": "stdout"
},
"encoder": {
"fields": {
"request\u003eheaders\u003eServer": {
"filter": "delete"
}
},
"format": "filter"
},
"include": [
"http.log.access.log0"
]
}
}
},
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":80"
],
"logs": {
"default_logger_name": "log0"
}
}
}
}
}
}

View file

@ -4,12 +4,17 @@ log {
output stdout output stdout
format filter { format filter {
wrap console wrap console
# long form, with "fields" wrapper
fields { fields {
uri query { uri query {
replace foo REDACTED replace foo REDACTED
delete bar delete bar
hash baz hash baz
} }
}
# short form, flatter structure
request>headers>Authorization replace REDACTED request>headers>Authorization replace REDACTED
request>headers>Server delete request>headers>Server delete
request>headers>Cookie cookie { request>headers>Cookie cookie {
@ -21,11 +26,11 @@ log {
ipv4 24 ipv4 24
ipv6 32 ipv6 32
} }
request>client_ip ip_mask 16 32
request>headers>Regexp regexp secret REDACTED request>headers>Regexp regexp secret REDACTED
request>headers>Hash hash request>headers>Hash hash
} }
} }
}
---------- ----------
{ {
"logging": { "logging": {
@ -41,6 +46,11 @@ log {
}, },
"encoder": { "encoder": {
"fields": { "fields": {
"request\u003eclient_ip": {
"filter": "ip_mask",
"ipv4_cidr": 16,
"ipv6_cidr": 32
},
"request\u003eheaders\u003eAuthorization": { "request\u003eheaders\u003eAuthorization": {
"filter": "replace", "filter": "replace",
"value": "REDACTED" "value": "REDACTED"

View file

@ -8,6 +8,12 @@
output file /baz.txt output file /baz.txt
} }
} }
example.com:8443 {
log {
output file /port.txt
}
}
---------- ----------
{ {
"logging": { "logging": {
@ -15,7 +21,8 @@
"default": { "default": {
"exclude": [ "exclude": [
"http.log.access.log0", "http.log.access.log0",
"http.log.access.log1" "http.log.access.log1",
"http.log.access.log2"
] ]
}, },
"log0": { "log0": {
@ -35,6 +42,15 @@
"include": [ "include": [
"http.log.access.log1" "http.log.access.log1"
] ]
},
"log2": {
"writer": {
"filename": "/port.txt",
"output": "file"
},
"include": [
"http.log.access.log2"
]
} }
} }
}, },
@ -64,6 +80,28 @@
"foo.example.com": "log0" "foo.example.com": "log0"
} }
} }
},
"srv1": {
"listen": [
":8443"
],
"routes": [
{
"match": [
{
"host": [
"example.com"
]
}
],
"terminal": true
}
],
"logs": {
"logger_names": {
"example.com": "log2"
}
}
} }
} }
} }

View file

@ -76,7 +76,7 @@ http://localhost:8881 {
}, },
"logs": { "logs": {
"logger_names": { "logger_names": {
"localhost:8881": "foo" "localhost": "foo"
} }
} }
} }

View file

@ -81,7 +81,7 @@ http://localhost:8881 {
}, },
"logs": { "logs": {
"logger_names": { "logger_names": {
"localhost:8881": "foo" "localhost": "foo"
} }
} }
} }

View file

@ -66,9 +66,9 @@ example.com {
"one.example.com": "" "one.example.com": ""
}, },
"skip_hosts": [ "skip_hosts": [
"example.com",
"three.example.com", "three.example.com",
"two.example.com", "two.example.com"
"example.com"
] ]
} }
} }

Some files were not shown because too many files have changed in this diff Show more