Merge branch 'master' into produce-csr

This commit is contained in:
Matt Holt 2024-03-06 18:35:43 -07:00 committed by GitHub
commit 434d4bba24
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
311 changed files with 14395 additions and 5983 deletions

View file

@ -1,5 +1,5 @@
[*] [*]
end_of_line = lf end_of_line = lf
[caddytest/integration/caddyfile_adapt/*.txt] [caddytest/integration/caddyfile_adapt/*.caddyfiletest]
indent_style = tab indent_style = tab

View file

@ -19,45 +19,49 @@ jobs:
fail-fast: false fail-fast: false
matrix: matrix:
os: os:
- ubuntu-latest - linux
- macos-latest - mac
- windows-latest - windows
go: go:
- '1.20'
- '1.21' - '1.21'
- '1.22'
include: include:
# Set the minimum Go patch version for the given Go minor # Set the minimum Go patch version for the given Go minor
# Usable via ${{ matrix.GO_SEMVER }} # Usable via ${{ matrix.GO_SEMVER }}
- go: '1.20'
GO_SEMVER: '~1.20.6'
- go: '1.21' - go: '1.21'
GO_SEMVER: '~1.21.0' GO_SEMVER: '~1.21.0'
- go: '1.22'
GO_SEMVER: '~1.22.1'
# Set some variables per OS, usable via ${{ matrix.VAR }} # Set some variables per OS, usable via ${{ matrix.VAR }}
# OS_LABEL: the VM label from GitHub Actions (see https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories)
# CADDY_BIN_PATH: the path to the compiled Caddy binary, for artifact publishing # CADDY_BIN_PATH: the path to the compiled Caddy binary, for artifact publishing
# SUCCESS: the typical value for $? per OS (Windows/pwsh returns 'True') # SUCCESS: the typical value for $? per OS (Windows/pwsh returns 'True')
- os: ubuntu-latest - os: linux
OS_LABEL: ubuntu-latest
CADDY_BIN_PATH: ./cmd/caddy/caddy CADDY_BIN_PATH: ./cmd/caddy/caddy
SUCCESS: 0 SUCCESS: 0
- os: macos-latest - os: mac
OS_LABEL: macos-14
CADDY_BIN_PATH: ./cmd/caddy/caddy CADDY_BIN_PATH: ./cmd/caddy/caddy
SUCCESS: 0 SUCCESS: 0
- os: windows-latest - os: windows
OS_LABEL: windows-latest
CADDY_BIN_PATH: ./cmd/caddy/caddy.exe CADDY_BIN_PATH: ./cmd/caddy/caddy.exe
SUCCESS: 'True' SUCCESS: 'True'
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.OS_LABEL }}
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Install Go - name: Install Go
uses: actions/setup-go@v4 uses: actions/setup-go@v5
with: with:
go-version: ${{ matrix.GO_SEMVER }} go-version: ${{ matrix.GO_SEMVER }}
check-latest: true check-latest: true
@ -95,13 +99,14 @@ jobs:
env: env:
CGO_ENABLED: 0 CGO_ENABLED: 0
run: | run: |
go build -trimpath -ldflags="-w -s" -v go build -tags nobadger -trimpath -ldflags="-w -s" -v
- name: Publish Build Artifact - name: Publish Build Artifact
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v4
with: with:
name: caddy_${{ runner.os }}_go${{ matrix.go }}_${{ steps.vars.outputs.short_sha }} name: caddy_${{ runner.os }}_go${{ matrix.go }}_${{ steps.vars.outputs.short_sha }}
path: ${{ matrix.CADDY_BIN_PATH }} path: ${{ matrix.CADDY_BIN_PATH }}
compression-level: 0
# Commented bits below were useful to allow the job to continue # Commented bits below were useful to allow the job to continue
# even if the tests fail, so we can publish the report separately # even if the tests fail, so we can publish the report separately
@ -111,7 +116,7 @@ jobs:
# continue-on-error: true # continue-on-error: true
run: | run: |
# (go test -v -coverprofile=cover-profile.out -race ./... 2>&1) > test-results/test-result.out # (go test -v -coverprofile=cover-profile.out -race ./... 2>&1) > test-results/test-result.out
go test -v -coverprofile="cover-profile.out" -short -race ./... go test -tags nobadger -v -coverprofile="cover-profile.out" -short -race ./...
# echo "status=$?" >> $GITHUB_OUTPUT # echo "status=$?" >> $GITHUB_OUTPUT
# Relevant step if we reinvestigate publishing test/coverage reports # Relevant step if we reinvestigate publishing test/coverage reports
@ -124,7 +129,7 @@ jobs:
# To return the correct result even though we set 'continue-on-error: true' # To return the correct result even though we set 'continue-on-error: true'
# - name: Coerce correct build result # - name: Coerce correct build result
# if: matrix.os != 'windows-latest' && steps.step_test.outputs.status != ${{ matrix.SUCCESS }} # if: matrix.os != 'windows' && steps.step_test.outputs.status != ${{ matrix.SUCCESS }}
# run: | # run: |
# echo "step_test ${{ steps.step_test.outputs.status }}\n" # echo "step_test ${{ steps.step_test.outputs.status }}\n"
# exit 1 # exit 1
@ -136,7 +141,7 @@ jobs:
continue-on-error: true # August 2020: s390x VM is down due to weather and power issues continue-on-error: true # August 2020: s390x VM is down due to weather and power issues
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Run Tests - name: Run Tests
run: | run: |
mkdir -p ~/.ssh && echo -e "${SSH_KEY//_/\\n}" > ~/.ssh/id_ecdsa && chmod og-rwx ~/.ssh/id_ecdsa mkdir -p ~/.ssh && echo -e "${SSH_KEY//_/\\n}" > ~/.ssh/id_ecdsa && chmod og-rwx ~/.ssh/id_ecdsa
@ -146,7 +151,7 @@ jobs:
# The environment is fresh, so there's no point in keeping accepting and adding the key. # The environment is fresh, so there's no point in keeping accepting and adding the key.
rsync -arz -e "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress --delete --exclude '.git' . "$CI_USER"@ci-s390x.caddyserver.com:/var/tmp/"$short_sha" rsync -arz -e "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress --delete --exclude '.git' . "$CI_USER"@ci-s390x.caddyserver.com:/var/tmp/"$short_sha"
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t "$CI_USER"@ci-s390x.caddyserver.com "cd /var/tmp/$short_sha; go version; go env; printf "\n\n";CGO_ENABLED=0 go test -v ./..." ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t "$CI_USER"@ci-s390x.caddyserver.com "cd /var/tmp/$short_sha; go version; go env; printf "\n\n";CGO_ENABLED=0 go test -tags nobadger -v ./..."
test_result=$? test_result=$?
# There's no need leaving the files around # There's no need leaving the files around
@ -162,11 +167,9 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v3 uses: actions/checkout@v4
- uses: goreleaser/goreleaser-action@v4 - uses: goreleaser/goreleaser-action@v5
with: with:
version: latest version: latest
args: check args: check
env:
TAG: ${{ steps.vars.outputs.version_tag }}

View file

@ -11,11 +11,12 @@ on:
- 2.* - 2.*
jobs: jobs:
cross-build-test: build:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
goos: goos:
- 'aix'
- 'android' - 'android'
- 'linux' - 'linux'
- 'solaris' - 'solaris'
@ -28,22 +29,22 @@ jobs:
- 'darwin' - 'darwin'
- 'netbsd' - 'netbsd'
go: go:
- '1.21' - '1.22'
include: include:
# Set the minimum Go patch version for the given Go minor # Set the minimum Go patch version for the given Go minor
# Usable via ${{ matrix.GO_SEMVER }} # Usable via ${{ matrix.GO_SEMVER }}
- go: '1.21' - go: '1.22'
GO_SEMVER: '~1.21.0' GO_SEMVER: '~1.22.1'
runs-on: ubuntu-latest runs-on: ubuntu-latest
continue-on-error: true continue-on-error: true
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Install Go - name: Install Go
uses: actions/setup-go@v4 uses: actions/setup-go@v5
with: with:
go-version: ${{ matrix.GO_SEMVER }} go-version: ${{ matrix.GO_SEMVER }}
check-latest: true check-latest: true
@ -62,11 +63,12 @@ jobs:
env: env:
CGO_ENABLED: 0 CGO_ENABLED: 0
GOOS: ${{ matrix.goos }} GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goos == 'aix' && 'ppc64' || 'amd64' }}
shell: bash shell: bash
continue-on-error: true continue-on-error: true
working-directory: ./cmd/caddy working-directory: ./cmd/caddy
run: | run: |
GOOS=$GOOS go build -trimpath -o caddy-"$GOOS"-amd64 2> /dev/null GOOS=$GOOS GOARCH=$GOARCH go build -tags nobadger -trimpath -o caddy-"$GOOS"-$GOARCH 2> /dev/null
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "::warning ::$GOOS Build Failed" echo "::warning ::$GOOS Build Failed"
exit 0 exit 0

View file

@ -23,24 +23,33 @@ jobs:
strategy: strategy:
matrix: matrix:
os: os:
- ubuntu-latest - linux
- macos-latest - mac
- windows-latest - windows
runs-on: ${{ matrix.os }}
include:
- os: linux
OS_LABEL: ubuntu-latest
- os: mac
OS_LABEL: macos-14
- os: windows
OS_LABEL: windows-latest
runs-on: ${{ matrix.OS_LABEL }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-go@v4 - uses: actions/setup-go@v5
with: with:
go-version: '~1.21.0' go-version: '~1.22.1'
check-latest: true check-latest: true
# Workaround for https://github.com/golangci/golangci-lint-action/issues/135
skip-pkg-cache: true
- name: golangci-lint - name: golangci-lint
uses: golangci/golangci-lint-action@v3 uses: golangci/golangci-lint-action@v4
with: with:
version: v1.54 version: v1.55
# Workaround for https://github.com/golangci/golangci-lint-action/issues/135 # Workaround for https://github.com/golangci/golangci-lint-action/issues/135
skip-pkg-cache: true skip-pkg-cache: true
@ -50,3 +59,12 @@ jobs:
# Optional: show only new issues if it's a pull request. The default value is `false`. # Optional: show only new issues if it's a pull request. The default value is `false`.
# only-new-issues: true # only-new-issues: true
govulncheck:
runs-on: ubuntu-latest
steps:
- name: govulncheck
uses: golang/govulncheck-action@v1
with:
go-version-input: '~1.22.1'
check-latest: true

View file

@ -32,18 +32,18 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v3 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Install Go - name: Install Go
uses: actions/setup-go@v4 uses: actions/setup-go@v5
with: with:
go-version: ${{ matrix.GO_SEMVER }} go-version: ${{ matrix.GO_SEMVER }}
check-latest: true check-latest: true
# Force fetch upstream tags -- because 65 minutes # Force fetch upstream tags -- because 65 minutes
# tl;dr: actions/checkout@v3 runs this line: # tl;dr: actions/checkout@v4 runs this line:
# git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +ebc278ec98bb24f2852b61fde2a9bf2e3d83818b:refs/tags/ # git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +ebc278ec98bb24f2852b61fde2a9bf2e3d83818b:refs/tags/
# which makes its own local lightweight tag, losing all the annotations in the process. Our earlier script ran: # which makes its own local lightweight tag, losing all the annotations in the process. Our earlier script ran:
# git fetch --prune --unshallow # git fetch --prune --unshallow
@ -106,7 +106,7 @@ jobs:
run: syft version run: syft version
# GoReleaser will take care of publishing those artifacts into the release # GoReleaser will take care of publishing those artifacts into the release
- name: Run GoReleaser - name: Run GoReleaser
uses: goreleaser/goreleaser-action@v4 uses: goreleaser/goreleaser-action@v5
with: with:
version: latest version: latest
args: release --clean --timeout 60m args: release --clean --timeout 60m

View file

@ -18,7 +18,7 @@ jobs:
# See https://github.com/peter-evans/repository-dispatch # See https://github.com/peter-evans/repository-dispatch
- name: Trigger event on caddyserver/dist - name: Trigger event on caddyserver/dist
uses: peter-evans/repository-dispatch@v2 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.REPO_DISPATCH_TOKEN }} token: ${{ secrets.REPO_DISPATCH_TOKEN }}
repository: caddyserver/dist repository: caddyserver/dist
@ -26,7 +26,7 @@ jobs:
client-payload: '{"tag": "${{ github.event.release.tag_name }}"}' client-payload: '{"tag": "${{ github.event.release.tag_name }}"}'
- name: Trigger event on caddyserver/caddy-docker - name: Trigger event on caddyserver/caddy-docker
uses: peter-evans/repository-dispatch@v2 uses: peter-evans/repository-dispatch@v3
with: with:
token: ${{ secrets.REPO_DISPATCH_TOKEN }} token: ${{ secrets.REPO_DISPATCH_TOKEN }}
repository: caddyserver/caddy-docker repository: caddyserver/caddy-docker

1
.gitignore vendored
View file

@ -12,6 +12,7 @@ Caddyfile.*
cmd/caddy/caddy cmd/caddy/caddy
cmd/caddy/caddy.exe cmd/caddy/caddy.exe
cmd/caddy/tmp/*.exe cmd/caddy/tmp/*.exe
cmd/caddy/.env
# mac specific # mac specific
.DS_Store .DS_Store

View file

@ -15,35 +15,68 @@ linters-settings:
# If `true`, make the section order the same as the order of `sections`. # If `true`, make the section order the same as the order of `sections`.
# Default: false # Default: false
custom-order: true custom-order: true
exhaustive:
ignore-enum-types: reflect.Kind|svc.Cmd
linters: linters:
disable-all: true disable-all: true
enable: enable:
- asasalint
- asciicheck
- bidichk
- bodyclose - bodyclose
- decorder
- dogsled
- dupl
- dupword
- durationcheck
- errcheck - errcheck
- errname
- exhaustive
- exportloopref
- gci - gci
- gofmt
- goimports
- gofumpt - gofumpt
- gosec - gosec
- gosimple - gosimple
- govet - govet
- ineffassign - ineffassign
- importas
- misspell - misspell
- prealloc - prealloc
- promlinter
- sloglint
- sqlclosecheck
- staticcheck - staticcheck
- tenv
- testableexamples
- testifylint
- tparallel
- typecheck - typecheck
- unconvert - unconvert
- unused - unused
- wastedassign
- whitespace
- zerologlint
# these are implicitly disabled: # these are implicitly disabled:
# - asciicheck # - containedctx
# - contextcheck
# - cyclop
# - depguard # - depguard
# - dogsled # - errchkjson
# - dupl # - errorlint
# - exhaustive # - exhaustruct
# - exportloopref # - execinquery
# - exhaustruct
# - forbidigo
# - forcetypeassert
# - funlen # - funlen
# - gci # - ginkgolinter
# - gocheckcompilerdirectives
# - gochecknoglobals # - gochecknoglobals
# - gochecknoinits # - gochecknoinits
# - gochecksumtype
# - gocognit # - gocognit
# - goconst # - goconst
# - gocritic # - gocritic
@ -51,27 +84,47 @@ linters:
# - godot # - godot
# - godox # - godox
# - goerr113 # - goerr113
# - gofumpt
# - goheader # - goheader
# - golint
# - gomnd # - gomnd
# - gomoddirectives
# - gomodguard # - gomodguard
# - goprintffuncname # - goprintffuncname
# - interfacer # - gosmopolitan
# - grouper
# - inamedparam
# - interfacebloat
# - ireturn
# - lll # - lll
# - maligned # - loggercheck
# - maintidx
# - makezero
# - mirror
# - musttag
# - nakedret # - nakedret
# - nestif # - nestif
# - nilerr
# - nilnil
# - nlreturn # - nlreturn
# - noctx # - noctx
# - nolintlint # - nolintlint
# - nonamedreturns
# - nosprintfhostport
# - paralleltest
# - perfsprint
# - predeclared
# - protogetter
# - reassign
# - revive
# - rowserrcheck # - rowserrcheck
# - scopelint
# - sqlclosecheck
# - stylecheck # - stylecheck
# - tagalign
# - tagliatelle
# - testpackage # - testpackage
# - thelper
# - unparam # - unparam
# - whitespace # - usestdlibvars
# - varnamelen
# - wrapcheck
# - wsl # - wsl
run: run:
@ -110,3 +163,6 @@ issues:
text: 'G404' # G404: Insecure random number source (rand) text: 'G404' # G404: Insecure random number source (rand)
linters: linters:
- gosec - gosec
- path: modules/logging/filters.go
linters:
- dupl

View file

@ -77,6 +77,8 @@ builds:
- -mod=readonly - -mod=readonly
ldflags: ldflags:
- -s -w - -s -w
tags:
- nobadger
signs: signs:
- cmd: cosign - cmd: cosign

View file

@ -87,7 +87,7 @@ See [our online documentation](https://caddyserver.com/docs/install) for other i
Requirements: Requirements:
- [Go 1.20 or newer](https://golang.org/dl/) - [Go 1.21 or newer](https://golang.org/dl/)
### For development ### For development

View file

@ -1196,15 +1196,27 @@ traverseLoop:
} }
case http.MethodPut: case http.MethodPut:
if _, ok := v[part]; ok { if _, ok := v[part]; ok {
return fmt.Errorf("[%s] key already exists: %s", path, part) return APIError{
HTTPStatus: http.StatusConflict,
Err: fmt.Errorf("[%s] key already exists: %s", path, part),
}
} }
v[part] = val v[part] = val
case http.MethodPatch: case http.MethodPatch:
if _, ok := v[part]; !ok { if _, ok := v[part]; !ok {
return fmt.Errorf("[%s] key does not exist: %s", path, part) return APIError{
HTTPStatus: http.StatusNotFound,
Err: fmt.Errorf("[%s] key does not exist: %s", path, part),
}
} }
v[part] = val v[part] = val
case http.MethodDelete: case http.MethodDelete:
if _, ok := v[part]; !ok {
return APIError{
HTTPStatus: http.StatusNotFound,
Err: fmt.Errorf("[%s] key does not exist: %s", path, part),
}
}
delete(v, part) delete(v, part)
default: default:
return fmt.Errorf("unrecognized method %s", method) return fmt.Errorf("unrecognized method %s", method)

View file

@ -75,6 +75,12 @@ func TestUnsyncedConfigAccess(t *testing.T) {
path: "/bar/qq", path: "/bar/qq",
expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`, expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`,
}, },
{
method: "DELETE",
path: "/bar/qq",
expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`,
shouldErr: true,
},
{ {
method: "POST", method: "POST",
path: "/list", path: "/list",

View file

@ -22,6 +22,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/fs"
"log" "log"
"net/http" "net/http"
"os" "os"
@ -38,6 +39,7 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"go.uber.org/zap" "go.uber.org/zap"
"github.com/caddyserver/caddy/v2/internal/filesystems"
"github.com/caddyserver/caddy/v2/notify" "github.com/caddyserver/caddy/v2/notify"
) )
@ -83,6 +85,9 @@ type Config struct {
storage certmagic.Storage storage certmagic.Storage
cancelFunc context.CancelFunc cancelFunc context.CancelFunc
// filesystems is a dict of filesystems that will later be loaded from and added to.
filesystems FileSystems
} }
// App is a thing that Caddy runs. // App is a thing that Caddy runs.
@ -446,6 +451,9 @@ func run(newCfg *Config, start bool) (Context, error) {
} }
} }
// create the new filesystem map
newCfg.filesystems = &filesystems.FilesystemMap{}
// prepare the new config for use // prepare the new config for use
newCfg.apps = make(map[string]App) newCfg.apps = make(map[string]App)
@ -707,6 +715,7 @@ func exitProcess(ctx context.Context, logger *zap.Logger) {
logger.Warn("exiting; byeee!! 👋") logger.Warn("exiting; byeee!! 👋")
exitCode := ExitCodeSuccess exitCode := ExitCodeSuccess
lastContext := ActiveContext()
// stop all apps // stop all apps
if err := Stop(); err != nil { if err := Stop(); err != nil {
@ -728,6 +737,16 @@ func exitProcess(ctx context.Context, logger *zap.Logger) {
} }
} }
// execute any process-exit callbacks
for _, exitFunc := range lastContext.exitFuncs {
exitFunc(ctx)
}
exitFuncsMu.Lock()
for _, exitFunc := range exitFuncs {
exitFunc(ctx)
}
exitFuncsMu.Unlock()
// shut down admin endpoint(s) in goroutines so that // shut down admin endpoint(s) in goroutines so that
// if this function was called from an admin handler, // if this function was called from an admin handler,
// it has a chance to return gracefully // it has a chance to return gracefully
@ -766,6 +785,23 @@ var exiting = new(int32) // accessed atomically
// EXPERIMENTAL API: subject to change or removal. // EXPERIMENTAL API: subject to change or removal.
func Exiting() bool { return atomic.LoadInt32(exiting) == 1 } func Exiting() bool { return atomic.LoadInt32(exiting) == 1 }
// OnExit registers a callback to invoke during process exit.
// This registration is PROCESS-GLOBAL, meaning that each
// function should only be registered once forever, NOT once
// per config load (etc).
//
// EXPERIMENTAL API: subject to change or removal.
func OnExit(f func(context.Context)) {
exitFuncsMu.Lock()
exitFuncs = append(exitFuncs, f)
exitFuncsMu.Unlock()
}
var (
exitFuncs []func(context.Context)
exitFuncsMu sync.Mutex
)
// Duration can be an integer or a string. An integer is // Duration can be an integer or a string. An integer is
// interpreted as nanoseconds. If a string, it is a Go // interpreted as nanoseconds. If a string, it is a Go
// time.Duration value such as `300ms`, `1.5h`, or `2h45m`; // time.Duration value such as `300ms`, `1.5h`, or `2h45m`;
@ -825,13 +861,18 @@ func ParseDuration(s string) (time.Duration, error) {
// regardless of storage configuration, since each instance is intended to // regardless of storage configuration, since each instance is intended to
// have its own unique ID. // have its own unique ID.
func InstanceID() (uuid.UUID, error) { func InstanceID() (uuid.UUID, error) {
uuidFilePath := filepath.Join(AppDataDir(), "instance.uuid") appDataDir := AppDataDir()
uuidFilePath := filepath.Join(appDataDir, "instance.uuid")
uuidFileBytes, err := os.ReadFile(uuidFilePath) uuidFileBytes, err := os.ReadFile(uuidFilePath)
if os.IsNotExist(err) { if errors.Is(err, fs.ErrNotExist) {
uuid, err := uuid.NewRandom() uuid, err := uuid.NewRandom()
if err != nil { if err != nil {
return uuid, err return uuid, err
} }
err = os.MkdirAll(appDataDir, 0o600)
if err != nil {
return uuid, err
}
err = os.WriteFile(uuidFilePath, []byte(uuid.String()), 0o600) err = os.WriteFile(uuidFilePath, []byte(uuid.String()), 0o600)
return uuid, err return uuid, err
} else if err != nil { } else if err != nil {

View file

@ -52,7 +52,7 @@ func (a Adapter) Adapt(body []byte, options map[string]any) ([]byte, []caddyconf
return nil, warnings, err return nil, warnings, err
} }
// lint check: see if input was properly formatted; sometimes messy files files parse // lint check: see if input was properly formatted; sometimes messy files parse
// successfully but result in logical errors (the Caddyfile is a bad format, I'm sorry) // successfully but result in logical errors (the Caddyfile is a bad format, I'm sorry)
if warning, different := FormattingDifference(filename, body); different { if warning, different := FormattingDifference(filename, body); different {
warnings = append(warnings, warning) warnings = append(warnings, warning)
@ -92,30 +92,26 @@ func FormattingDifference(filename string, body []byte) (caddyconfig.Warning, bo
}, true }, true
} }
// Unmarshaler is a type that can unmarshal // Unmarshaler is a type that can unmarshal Caddyfile tokens to
// Caddyfile tokens to set itself up for a // set itself up for a JSON encoding. The goal of an unmarshaler
// JSON encoding. The goal of an unmarshaler // is not to set itself up for actual use, but to set itself up for
// is not to set itself up for actual use, // being marshaled into JSON. Caddyfile-unmarshaled values will not
// but to set itself up for being marshaled // be used directly; they will be encoded as JSON and then used from
// into JSON. Caddyfile-unmarshaled values // that. Implementations _may_ be able to support multiple segments
// will not be used directly; they will be // (instances of their directive or batch of tokens); typically this
// encoded as JSON and then used from that. // means wrapping parsing logic in a loop: `for d.Next() { ... }`.
// Implementations must be able to support // More commonly, only a single segment is supported, so a simple
// multiple segments (instances of their // `d.Next()` at the start should be used to consume the module
// directive or batch of tokens); typically // identifier token (directive name, etc).
// this means wrapping all token logic in
// a loop: `for d.Next() { ... }`.
type Unmarshaler interface { type Unmarshaler interface {
UnmarshalCaddyfile(d *Dispenser) error UnmarshalCaddyfile(d *Dispenser) error
} }
// ServerType is a type that can evaluate a Caddyfile and set up a caddy config. // ServerType is a type that can evaluate a Caddyfile and set up a caddy config.
type ServerType interface { type ServerType interface {
// Setup takes the server blocks which // Setup takes the server blocks which contain tokens,
// contain tokens, as well as options // as well as options (e.g. CLI flags) and creates a
// (e.g. CLI flags) and creates a Caddy // Caddy config, along with any warnings or an error.
// config, along with any warnings or
// an error.
Setup([]ServerBlock, map[string]any) (*caddy.Config, []caddyconfig.Warning, error) Setup([]ServerBlock, map[string]any) (*caddy.Config, []caddyconfig.Warning, error)
} }

View file

@ -305,7 +305,7 @@ func TestDispenser_ArgErr_Err(t *testing.T) {
t.Errorf("Expected error message with custom message in it ('foobar'); got '%v'", err) t.Errorf("Expected error message with custom message in it ('foobar'); got '%v'", err)
} }
var ErrBarIsFull = errors.New("bar is full") ErrBarIsFull := errors.New("bar is full")
bookingError := d.Errf("unable to reserve: %w", ErrBarIsFull) bookingError := d.Errf("unable to reserve: %w", ErrBarIsFull)
if !errors.Is(bookingError, ErrBarIsFull) { if !errors.Is(bookingError, ErrBarIsFull) {
t.Errorf("Errf(): should be able to unwrap the error chain") t.Errorf("Errf(): should be able to unwrap the error chain")

View file

@ -18,6 +18,8 @@ import (
"bytes" "bytes"
"io" "io"
"unicode" "unicode"
"golang.org/x/exp/slices"
) )
// Format formats the input Caddyfile to a standard, nice-looking // Format formats the input Caddyfile to a standard, nice-looking
@ -31,6 +33,14 @@ func Format(input []byte) []byte {
out := new(bytes.Buffer) out := new(bytes.Buffer)
rdr := bytes.NewReader(input) rdr := bytes.NewReader(input)
type heredocState int
const (
heredocClosed heredocState = 0
heredocOpening heredocState = 1
heredocOpened heredocState = 2
)
var ( var (
last rune // the last character that was written to the result last rune // the last character that was written to the result
@ -47,6 +57,11 @@ func Format(input []byte) []byte {
quoted bool // whether we're in a quoted segment quoted bool // whether we're in a quoted segment
escaped bool // whether current char is escaped escaped bool // whether current char is escaped
heredoc heredocState // whether we're in a heredoc
heredocEscaped bool // whether heredoc is escaped
heredocMarker []rune
heredocClosingMarker []rune
nesting int // indentation level nesting int // indentation level
) )
@ -75,6 +90,62 @@ func Format(input []byte) []byte {
panic(err) panic(err)
} }
// detect whether we have the start of a heredoc
if !quoted && !(heredoc != heredocClosed || heredocEscaped) &&
space && last == '<' && ch == '<' {
write(ch)
heredoc = heredocOpening
space = false
continue
}
if heredoc == heredocOpening {
if ch == '\n' {
if len(heredocMarker) > 0 && heredocMarkerRegexp.MatchString(string(heredocMarker)) {
heredoc = heredocOpened
} else {
heredocMarker = nil
heredoc = heredocClosed
nextLine()
continue
}
write(ch)
continue
}
if unicode.IsSpace(ch) {
// a space means it's just a regular token and not a heredoc
heredocMarker = nil
heredoc = heredocClosed
} else {
heredocMarker = append(heredocMarker, ch)
write(ch)
continue
}
}
// if we're in a heredoc, all characters are read&write as-is
if heredoc == heredocOpened {
heredocClosingMarker = append(heredocClosingMarker, ch)
if len(heredocClosingMarker) > len(heredocMarker)+1 { // We assert that the heredocClosingMarker is followed by a unicode.Space
heredocClosingMarker = heredocClosingMarker[1:]
}
// check if we're done
if unicode.IsSpace(ch) && slices.Equal(heredocClosingMarker[:len(heredocClosingMarker)-1], heredocMarker) {
heredocMarker = nil
heredocClosingMarker = nil
heredoc = heredocClosed
} else {
write(ch)
if ch == '\n' {
heredocClosingMarker = heredocClosingMarker[:0]
}
continue
}
}
if last == '<' && space {
space = false
}
if comment { if comment {
if ch == '\n' { if ch == '\n' {
comment = false comment = false
@ -98,6 +169,9 @@ func Format(input []byte) []byte {
} }
if escaped { if escaped {
if ch == '<' {
heredocEscaped = true
}
write(ch) write(ch)
escaped = false escaped = false
continue continue
@ -117,6 +191,7 @@ func Format(input []byte) []byte {
if unicode.IsSpace(ch) { if unicode.IsSpace(ch) {
space = true space = true
heredocEscaped = false
if ch == '\n' { if ch == '\n' {
newLines++ newLines++
} }
@ -205,6 +280,11 @@ func Format(input []byte) []byte {
write('{') write('{')
openBraceWritten = true openBraceWritten = true
} }
if spacePrior && ch == '<' {
space = true
}
write(ch) write(ch)
beginningOfLine = false beginningOfLine = false

View file

@ -362,6 +362,76 @@ block {
block { block {
} }
`,
},
{
description: "keep heredoc as-is",
input: `block {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
}
`,
expect: `block {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
}
`,
},
{
description: "Mixing heredoc with regular part",
input: `block {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
respond "More than one space will be eaten" 200
}
block2 {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
respond "More than one space will be eaten" 200
}
`,
expect: `block {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
respond "More than one space will be eaten" 200
}
block2 {
heredoc <<HEREDOC
Here's more than one space Here's more than one space
HEREDOC
respond "More than one space will be eaten" 200
}
`,
},
{
description: "Heredoc as regular token",
input: `block {
heredoc <<HEREDOC "More than one space will be eaten"
}
`,
expect: `block {
heredoc <<HEREDOC "More than one space will be eaten"
}
`,
},
{
description: "Escape heredoc",
input: `block {
heredoc \<<HEREDOC
respond "More than one space will be eaten" 200
}
`,
expect: `block {
heredoc \<<HEREDOC
respond "More than one space will be eaten" 200
}
`, `,
}, },
} { } {

View file

@ -52,6 +52,13 @@ func parseVariadic(token Token, argCount int) (bool, int, int) {
return false, 0, 0 return false, 0, 0
} }
// A valid token may contain several placeholders, and
// they may be separated by ":". It's not variadic.
// https://github.com/caddyserver/caddy/issues/5716
if strings.Contains(start, "}") || strings.Contains(end, "{") {
return false, 0, 0
}
var ( var (
startIndex = 0 startIndex = 0
endIndex = argCount endIndex = argCount

View file

@ -186,7 +186,7 @@ func (l *lexer) next() (bool, error) {
} }
// check if we're done, i.e. that the last few characters are the marker // check if we're done, i.e. that the last few characters are the marker
if len(val) > len(heredocMarker) && heredocMarker == string(val[len(val)-len(heredocMarker):]) { if len(val) >= len(heredocMarker) && heredocMarker == string(val[len(val)-len(heredocMarker):]) {
// set the final value // set the final value
val, err = l.finalizeHeredoc(val, heredocMarker) val, err = l.finalizeHeredoc(val, heredocMarker)
if err != nil { if err != nil {
@ -313,6 +313,11 @@ func (l *lexer) finalizeHeredoc(val []rune, marker string) ([]rune, error) {
// iterate over each line and strip the whitespace from the front // iterate over each line and strip the whitespace from the front
var out string var out string
for lineNum, lineText := range lines[:len(lines)-1] { for lineNum, lineText := range lines[:len(lines)-1] {
if lineText == "" || lineText == "\r" {
out += "\n"
continue
}
// find an exact match for the padding // find an exact match for the padding
index := strings.Index(lineText, paddingToStrip) index := strings.Index(lineText, paddingToStrip)

View file

@ -285,6 +285,18 @@ EOF same-line-arg
}, },
{ {
input: []byte(`heredoc <<EOF input: []byte(`heredoc <<EOF
EOF
HERE same-line-arg
`),
expected: []Token{
{Line: 1, Text: `heredoc`},
{Line: 1, Text: ``},
{Line: 3, Text: `HERE`},
{Line: 3, Text: `same-line-arg`},
},
},
{
input: []byte(`heredoc <<EOF
EOF same-line-arg EOF same-line-arg
`), `),
expected: []Token{ expected: []Token{
@ -445,6 +457,48 @@ EOF same-line-arg
expectErr: true, expectErr: true,
errorMessage: "mismatched leading whitespace in heredoc <<EOF on line #2 [ content], expected whitespace [\t\t] to match the closing marker", errorMessage: "mismatched leading whitespace in heredoc <<EOF on line #2 [ content], expected whitespace [\t\t] to match the closing marker",
}, },
{
input: []byte(`heredoc <<EOF
The next line is a blank line
The previous line is a blank line
EOF`),
expected: []Token{
{Line: 1, Text: "heredoc"},
{Line: 1, Text: "The next line is a blank line\n\nThe previous line is a blank line"},
},
},
{
input: []byte(`heredoc <<EOF
One tab indented heredoc with blank next line
One tab indented heredoc with blank previous line
EOF`),
expected: []Token{
{Line: 1, Text: "heredoc"},
{Line: 1, Text: "One tab indented heredoc with blank next line\n\nOne tab indented heredoc with blank previous line"},
},
},
{
input: []byte(`heredoc <<EOF
The next line is a blank line with one tab
The previous line is a blank line with one tab
EOF`),
expected: []Token{
{Line: 1, Text: "heredoc"},
{Line: 1, Text: "The next line is a blank line with one tab\n\t\nThe previous line is a blank line with one tab"},
},
},
{
input: []byte(`heredoc <<EOF
The next line is a blank line with one tab less than the correct indentation
The previous line is a blank line with one tab less than the correct indentation
EOF`),
expectErr: true,
errorMessage: "mismatched leading whitespace in heredoc <<EOF on line #3 [\t], expected whitespace [\t\t] to match the closing marker",
},
} }
for i, testCase := range testCases { for i, testCase := range testCases {

View file

@ -160,14 +160,14 @@ func (p *parser) begin() error {
} }
if ok, name := p.isNamedRoute(); ok { if ok, name := p.isNamedRoute(); ok {
// named routes only have one key, the route name
p.block.Keys = []string{name}
p.block.IsNamedRoute = true
// we just need a dummy leading token to ease parsing later // we just need a dummy leading token to ease parsing later
nameToken := p.Token() nameToken := p.Token()
nameToken.Text = name nameToken.Text = name
// named routes only have one key, the route name
p.block.Keys = []Token{nameToken}
p.block.IsNamedRoute = true
// get all the tokens from the block, including the braces // get all the tokens from the block, including the braces
tokens, err := p.blockTokens(true) tokens, err := p.blockTokens(true)
if err != nil { if err != nil {
@ -211,10 +211,11 @@ func (p *parser) addresses() error {
var expectingAnother bool var expectingAnother bool
for { for {
tkn := p.Val() value := p.Val()
token := p.Token()
// special case: import directive replaces tokens during parse-time // special case: import directive replaces tokens during parse-time
if tkn == "import" && p.isNewLine() { if value == "import" && p.isNewLine() {
err := p.doImport(0) err := p.doImport(0)
if err != nil { if err != nil {
return err return err
@ -223,9 +224,9 @@ func (p *parser) addresses() error {
} }
// Open brace definitely indicates end of addresses // Open brace definitely indicates end of addresses
if tkn == "{" { if value == "{" {
if expectingAnother { if expectingAnother {
return p.Errf("Expected another address but had '%s' - check for extra comma", tkn) return p.Errf("Expected another address but had '%s' - check for extra comma", value)
} }
// Mark this server block as being defined with braces. // Mark this server block as being defined with braces.
// This is used to provide a better error message when // This is used to provide a better error message when
@ -237,15 +238,15 @@ func (p *parser) addresses() error {
} }
// Users commonly forget to place a space between the address and the '{' // Users commonly forget to place a space between the address and the '{'
if strings.HasSuffix(tkn, "{") { if strings.HasSuffix(value, "{") {
return p.Errf("Site addresses cannot end with a curly brace: '%s' - put a space between the token and the brace", tkn) return p.Errf("Site addresses cannot end with a curly brace: '%s' - put a space between the token and the brace", value)
} }
if tkn != "" { // empty token possible if user typed "" if value != "" { // empty token possible if user typed ""
// Trailing comma indicates another address will follow, which // Trailing comma indicates another address will follow, which
// may possibly be on the next line // may possibly be on the next line
if tkn[len(tkn)-1] == ',' { if value[len(value)-1] == ',' {
tkn = tkn[:len(tkn)-1] value = value[:len(value)-1]
expectingAnother = true expectingAnother = true
} else { } else {
expectingAnother = false // but we may still see another one on this line expectingAnother = false // but we may still see another one on this line
@ -254,11 +255,12 @@ func (p *parser) addresses() error {
// If there's a comma here, it's probably because they didn't use a space // If there's a comma here, it's probably because they didn't use a space
// between their two domains, e.g. "foo.com,bar.com", which would not be // between their two domains, e.g. "foo.com,bar.com", which would not be
// parsed as two separate site addresses. // parsed as two separate site addresses.
if strings.Contains(tkn, ",") { if strings.Contains(value, ",") {
return p.Errf("Site addresses cannot contain a comma ',': '%s' - put a space after the comma to separate site addresses", tkn) return p.Errf("Site addresses cannot contain a comma ',': '%s' - put a space after the comma to separate site addresses", value)
} }
p.block.Keys = append(p.block.Keys, tkn) token.Text = value
p.block.Keys = append(p.block.Keys, token)
} }
// Advance token and possibly break out of loop or return error // Advance token and possibly break out of loop or return error
@ -637,8 +639,8 @@ func (p *parser) closeCurlyBrace() error {
func (p *parser) isNamedRoute() (bool, string) { func (p *parser) isNamedRoute() (bool, string) {
keys := p.block.Keys keys := p.block.Keys
// A named route block is a single key with parens, prefixed with &. // A named route block is a single key with parens, prefixed with &.
if len(keys) == 1 && strings.HasPrefix(keys[0], "&(") && strings.HasSuffix(keys[0], ")") { if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "&(") && strings.HasSuffix(keys[0].Text, ")") {
return true, strings.TrimSuffix(keys[0][2:], ")") return true, strings.TrimSuffix(keys[0].Text[2:], ")")
} }
return false, "" return false, ""
} }
@ -646,8 +648,8 @@ func (p *parser) isNamedRoute() (bool, string) {
func (p *parser) isSnippet() (bool, string) { func (p *parser) isSnippet() (bool, string) {
keys := p.block.Keys keys := p.block.Keys
// A snippet block is a single key with parens. Nothing else qualifies. // A snippet block is a single key with parens. Nothing else qualifies.
if len(keys) == 1 && strings.HasPrefix(keys[0], "(") && strings.HasSuffix(keys[0], ")") { if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "(") && strings.HasSuffix(keys[0].Text, ")") {
return true, strings.TrimSuffix(keys[0][1:], ")") return true, strings.TrimSuffix(keys[0].Text[1:], ")")
} }
return false, "" return false, ""
} }
@ -691,11 +693,19 @@ func (p *parser) blockTokens(retainCurlies bool) ([]Token, error) {
// grouped by segments. // grouped by segments.
type ServerBlock struct { type ServerBlock struct {
HasBraces bool HasBraces bool
Keys []string Keys []Token
Segments []Segment Segments []Segment
IsNamedRoute bool IsNamedRoute bool
} }
func (sb ServerBlock) GetKeysText() []string {
res := []string{}
for _, k := range sb.Keys {
res = append(res, k.Text)
}
return res
}
// DispenseDirective returns a dispenser that contains // DispenseDirective returns a dispenser that contains
// all the tokens in the server block. // all the tokens in the server block.
func (sb ServerBlock) DispenseDirective(dir string) *Dispenser { func (sb ServerBlock) DispenseDirective(dir string) *Dispenser {

View file

@ -22,7 +22,7 @@ import (
) )
func TestParseVariadic(t *testing.T) { func TestParseVariadic(t *testing.T) {
var args = make([]string, 10) args := make([]string, 10)
for i, tc := range []struct { for i, tc := range []struct {
input string input string
result bool result bool
@ -91,6 +91,10 @@ func TestParseVariadic(t *testing.T) {
input: "{args[0:10]}", input: "{args[0:10]}",
result: true, result: true,
}, },
{
input: "{args[0]}:{args[1]}:{args[2]}",
result: false,
},
} { } {
token := Token{ token := Token{
File: "test", File: "test",
@ -107,7 +111,6 @@ func TestAllTokens(t *testing.T) {
input := []byte("a b c\nd e") input := []byte("a b c\nd e")
expected := []string{"a", "b", "c", "d", "e"} expected := []string{"a", "b", "c", "d", "e"}
tokens, err := allTokens("TestAllTokens", input) tokens, err := allTokens("TestAllTokens", input)
if err != nil { if err != nil {
t.Fatalf("Expected no error, got %v", err) t.Fatalf("Expected no error, got %v", err)
} }
@ -145,10 +148,11 @@ func TestParseOneAndImport(t *testing.T) {
"localhost", "localhost",
}, []int{1}}, }, []int{1}},
{`localhost:1234 {
`localhost:1234
dir1 foo bar`, false, []string{ dir1 foo bar`, false, []string{
"localhost:1234", "localhost:1234",
}, []int{3}, }, []int{3},
}, },
{`localhost { {`localhost {
@ -343,7 +347,7 @@ func TestParseOneAndImport(t *testing.T) {
i, len(test.keys), len(result.Keys)) i, len(test.keys), len(result.Keys))
continue continue
} }
for j, addr := range result.Keys { for j, addr := range result.GetKeysText() {
if addr != test.keys[j] { if addr != test.keys[j] {
t.Errorf("Test %d, key %d: Expected '%s', but was '%s'", t.Errorf("Test %d, key %d: Expected '%s', but was '%s'",
i, j, test.keys[j], addr) i, j, test.keys[j], addr)
@ -375,8 +379,9 @@ func TestRecursiveImport(t *testing.T) {
} }
isExpected := func(got ServerBlock) bool { isExpected := func(got ServerBlock) bool {
if len(got.Keys) != 1 || got.Keys[0] != "localhost" { textKeys := got.GetKeysText()
t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys) if len(textKeys) != 1 || textKeys[0] != "localhost" {
t.Errorf("got keys unexpected: expect localhost, got %v", textKeys)
return false return false
} }
if len(got.Segments) != 2 { if len(got.Segments) != 2 {
@ -403,13 +408,13 @@ func TestRecursiveImport(t *testing.T) {
err = os.WriteFile(recursiveFile1, []byte( err = os.WriteFile(recursiveFile1, []byte(
`localhost `localhost
dir1 dir1
import recursive_import_test2`), 0644) import recursive_import_test2`), 0o644)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer os.Remove(recursiveFile1) defer os.Remove(recursiveFile1)
err = os.WriteFile(recursiveFile2, []byte("dir2 1"), 0644) err = os.WriteFile(recursiveFile2, []byte("dir2 1"), 0o644)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -437,7 +442,7 @@ func TestRecursiveImport(t *testing.T) {
err = os.WriteFile(recursiveFile1, []byte( err = os.WriteFile(recursiveFile1, []byte(
`localhost `localhost
dir1 dir1
import `+recursiveFile2), 0644) import `+recursiveFile2), 0o644)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -470,8 +475,9 @@ func TestDirectiveImport(t *testing.T) {
} }
isExpected := func(got ServerBlock) bool { isExpected := func(got ServerBlock) bool {
if len(got.Keys) != 1 || got.Keys[0] != "localhost" { textKeys := got.GetKeysText()
t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys) if len(textKeys) != 1 || textKeys[0] != "localhost" {
t.Errorf("got keys unexpected: expect localhost, got %v", textKeys)
return false return false
} }
if len(got.Segments) != 2 { if len(got.Segments) != 2 {
@ -491,7 +497,7 @@ func TestDirectiveImport(t *testing.T) {
} }
err = os.WriteFile(directiveFile, []byte(`prop1 1 err = os.WriteFile(directiveFile, []byte(`prop1 1
prop2 2`), 0644) prop2 2`), 0o644)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -612,7 +618,7 @@ func TestParseAll(t *testing.T) {
i, len(test.keys[j]), j, len(block.Keys)) i, len(test.keys[j]), j, len(block.Keys))
continue continue
} }
for k, addr := range block.Keys { for k, addr := range block.GetKeysText() {
if addr != test.keys[j][k] { if addr != test.keys[j][k] {
t.Errorf("Test %d, block %d, key %d: Expected '%s', but got '%s'", t.Errorf("Test %d, block %d, key %d: Expected '%s', but got '%s'",
i, j, k, test.keys[j][k], addr) i, j, k, test.keys[j][k], addr)
@ -765,7 +771,7 @@ func TestSnippets(t *testing.T) {
if len(blocks) != 1 { if len(blocks) != 1 {
t.Fatalf("Expect exactly one server block. Got %d.", len(blocks)) t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
} }
if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual { if actual, expected := blocks[0].GetKeysText()[0], "http://example.com"; expected != actual {
t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual) t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
} }
if len(blocks[0].Segments) != 2 { if len(blocks[0].Segments) != 2 {
@ -797,7 +803,7 @@ func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) {
fileName := writeStringToTempFileOrDie(t, ` fileName := writeStringToTempFileOrDie(t, `
http://example.com { http://example.com {
# This isn't an import directive, it's just an arg with value 'import' # This isn't an import directive, it's just an arg with value 'import'
basicauth / import password basic_auth / import password
} }
`) `)
// Parse the root file that imports the other one. // Parse the root file that imports the other one.
@ -808,12 +814,12 @@ func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) {
} }
auth := blocks[0].Segments[0] auth := blocks[0].Segments[0]
line := auth[0].Text + " " + auth[1].Text + " " + auth[2].Text + " " + auth[3].Text line := auth[0].Text + " " + auth[1].Text + " " + auth[2].Text + " " + auth[3].Text
if line != "basicauth / import password" { if line != "basic_auth / import password" {
// Previously, it would be changed to: // Previously, it would be changed to:
// basicauth / import /path/to/test/dir/password // basic_auth / import /path/to/test/dir/password
// referencing a file that (probably) doesn't exist and changing the // referencing a file that (probably) doesn't exist and changing the
// password! // password!
t.Errorf("Expected basicauth tokens to be 'basicauth / import password' but got %#q", line) t.Errorf("Expected basic_auth tokens to be 'basic_auth / import password' but got %#q", line)
} }
} }
@ -840,7 +846,7 @@ func TestSnippetAcrossMultipleFiles(t *testing.T) {
if len(blocks) != 1 { if len(blocks) != 1 {
t.Fatalf("Expect exactly one server block. Got %d.", len(blocks)) t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
} }
if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual { if actual, expected := blocks[0].GetKeysText()[0], "http://example.com"; expected != actual {
t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual) t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
} }
if len(blocks[0].Segments) != 1 { if len(blocks[0].Segments) != 1 {

View file

@ -88,15 +88,15 @@ func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBloc
// will be served by them; this has the effect of treating each // will be served by them; this has the effect of treating each
// key of a server block as its own, but without having to repeat its // key of a server block as its own, but without having to repeat its
// contents in cases where multiple keys really can be served together // contents in cases where multiple keys really can be served together
addrToKeys := make(map[string][]string) addrToKeys := make(map[string][]caddyfile.Token)
for j, key := range sblock.block.Keys { for j, key := range sblock.block.Keys {
// a key can have multiple listener addresses if there are multiple // a key can have multiple listener addresses if there are multiple
// arguments to the 'bind' directive (although they will all have // arguments to the 'bind' directive (although they will all have
// the same port, since the port is defined by the key or is implicit // the same port, since the port is defined by the key or is implicit
// through automatic HTTPS) // through automatic HTTPS)
addrs, err := st.listenerAddrsForServerBlockKey(sblock, key, options) addrs, err := st.listenerAddrsForServerBlockKey(sblock, key.Text, options)
if err != nil { if err != nil {
return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key, err) return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key.Text, err)
} }
// associate this key with each listener address it is served on // associate this key with each listener address it is served on
@ -122,9 +122,9 @@ func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBloc
// parse keys so that we only have to do it once // parse keys so that we only have to do it once
parsedKeys := make([]Address, 0, len(keys)) parsedKeys := make([]Address, 0, len(keys))
for _, key := range keys { for _, key := range keys {
addr, err := ParseAddress(key) addr, err := ParseAddress(key.Text)
if err != nil { if err != nil {
return nil, fmt.Errorf("parsing key '%s': %v", key, err) return nil, fmt.Errorf("parsing key '%s': %v", key.Text, err)
} }
parsedKeys = append(parsedKeys, addr.Normalize()) parsedKeys = append(parsedKeys, addr.Normalize())
} }

File diff suppressed because it is too large Load diff

View file

@ -27,22 +27,31 @@ import (
"github.com/caddyserver/caddy/v2/modules/caddyhttp" "github.com/caddyserver/caddy/v2/modules/caddyhttp"
) )
// directiveOrder specifies the order // defaultDirectiveOrder specifies the default order
// to apply directives in HTTP routes. // to apply directives in HTTP routes. This must only
// consist of directives that are included in Caddy's
// standard distribution.
// //
// The root directive goes first in case rewrites or // e.g. The 'root' directive goes near the start in
// redirects depend on existence of files, i.e. the // case rewrites or redirects depend on existence of
// file matcher, which must know the root first. // files, i.e. the file matcher, which must know the
// root first.
// //
// The header directive goes second so that headers // e.g. The 'header' directive goes before 'redir' so
// can be manipulated before doing redirects. // that headers can be manipulated before doing redirects.
var directiveOrder = []string{ //
// e.g. The 'respond' directive is near the end because it
// writes a response and terminates the middleware chain.
var defaultDirectiveOrder = []string{
"tracing", "tracing",
// set variables that may be used by other directives
"map", "map",
"vars", "vars",
"fs",
"root", "root",
"skip_log", "log_append",
"log_skip",
"header", "header",
"copy_response_headers", // only in reverse_proxy's handle_response "copy_response_headers", // only in reverse_proxy's handle_response
@ -57,7 +66,8 @@ var directiveOrder = []string{
"try_files", "try_files",
// middleware handlers; some wrap responses // middleware handlers; some wrap responses
"basicauth", "basicauth", // TODO: deprecated, renamed to basic_auth
"basic_auth",
"forward_auth", "forward_auth",
"request_header", "request_header",
"encode", "encode",
@ -82,6 +92,11 @@ var directiveOrder = []string{
"acme_server", "acme_server",
} }
// directiveOrder specifies the order to apply directives
// in HTTP routes, after being modified by either the
// plugins or by the user via the "order" global option.
var directiveOrder = defaultDirectiveOrder
// directiveIsOrdered returns true if dir is // directiveIsOrdered returns true if dir is
// a known, ordered (sorted) directive. // a known, ordered (sorted) directive.
func directiveIsOrdered(dir string) bool { func directiveIsOrdered(dir string) bool {
@ -128,6 +143,58 @@ func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) {
}) })
} }
// RegisterDirectiveOrder registers the default order for a
// directive from a plugin.
//
// This is useful when a plugin has a well-understood place
// it should run in the middleware pipeline, and it allows
// users to avoid having to define the order themselves.
//
// The directive dir may be placed in the position relative
// to ('before' or 'after') a directive included in Caddy's
// standard distribution. It cannot be relative to another
// plugin's directive.
//
// EXPERIMENTAL: This API may change or be removed.
func RegisterDirectiveOrder(dir string, position Positional, standardDir string) {
// check if directive was already ordered
if directiveIsOrdered(dir) {
panic("directive '" + dir + "' already ordered")
}
if position != Before && position != After {
panic("the 2nd argument must be either 'before' or 'after', got '" + position + "'")
}
// check if directive exists in standard distribution, since
// we can't allow plugins to depend on one another; we can't
// guarantee the order that plugins are loaded in.
foundStandardDir := false
for _, d := range defaultDirectiveOrder {
if d == standardDir {
foundStandardDir = true
}
}
if !foundStandardDir {
panic("the 3rd argument '" + standardDir + "' must be a directive that exists in the standard distribution of Caddy")
}
// insert directive into proper position
newOrder := directiveOrder
for i, d := range newOrder {
if d != standardDir {
continue
}
if position == Before {
newOrder = append(newOrder[:i], append([]string{dir}, newOrder[i:]...)...)
} else if position == After {
newOrder = append(newOrder[:i+1], append([]string{dir}, newOrder[i+1:]...)...)
}
break
}
directiveOrder = newOrder
}
// RegisterGlobalOption registers a unique global option opt with // RegisterGlobalOption registers a unique global option opt with
// an associated unmarshaling (setup) function. When the global // an associated unmarshaling (setup) function. When the global
// option opt is encountered in a Caddyfile, setupFunc will be // option opt is encountered in a Caddyfile, setupFunc will be
@ -270,12 +337,6 @@ func (h Helper) GroupRoutes(vals []ConfigValue) {
} }
} }
// NewBindAddresses returns config values relevant to adding
// listener bind addresses to the config.
func (h Helper) NewBindAddresses(addrs []string) []ConfigValue {
return []ConfigValue{{Class: "bind", Value: addrs}}
}
// WithDispenser returns a new instance based on d. All others Helper // WithDispenser returns a new instance based on d. All others Helper
// fields are copied, so typically maps are shared with this new instance. // fields are copied, so typically maps are shared with this new instance.
func (h Helper) WithDispenser(d *caddyfile.Dispenser) Helper { func (h Helper) WithDispenser(d *caddyfile.Dispenser) Helper {
@ -558,6 +619,16 @@ func (sb serverBlock) isAllHTTP() bool {
return true return true
} }
// Positional are the supported modes for ordering directives.
type Positional string
const (
Before Positional = "before"
After Positional = "after"
First Positional = "first"
Last Positional = "last"
)
type ( type (
// UnmarshalFunc is a function which can unmarshal Caddyfile // UnmarshalFunc is a function which can unmarshal Caddyfile
// tokens into zero or more config values using a Helper type. // tokens into zero or more config values using a Helper type.

View file

@ -31,20 +31,23 @@ func TestHostsFromKeys(t *testing.T) {
[]Address{ []Address{
{Original: ":2015", Port: "2015"}, {Original: ":2015", Port: "2015"},
}, },
[]string{}, []string{}, []string{},
[]string{},
}, },
{ {
[]Address{ []Address{
{Original: ":443", Port: "443"}, {Original: ":443", Port: "443"},
}, },
[]string{}, []string{}, []string{},
[]string{},
}, },
{ {
[]Address{ []Address{
{Original: "foo", Host: "foo"}, {Original: "foo", Host: "foo"},
{Original: ":2015", Port: "2015"}, {Original: ":2015", Port: "2015"},
}, },
[]string{}, []string{"foo"}, []string{},
[]string{"foo"},
}, },
{ {
[]Address{ []Address{

View file

@ -17,8 +17,8 @@ package httpcaddyfile
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net"
"reflect" "reflect"
"regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@ -65,8 +65,11 @@ func (st ServerType) Setup(
originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks)) originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks))
for _, sblock := range inputServerBlocks { for _, sblock := range inputServerBlocks {
for j, k := range sblock.Keys { for j, k := range sblock.Keys {
if j == 0 && strings.HasPrefix(k, "@") { if j == 0 && strings.HasPrefix(k.Text, "@") {
return nil, warnings, fmt.Errorf("cannot define a matcher outside of a site block: '%s'", k) return nil, warnings, fmt.Errorf("%s:%d: cannot define a matcher outside of a site block: '%s'", k.File, k.Line, k.Text)
}
if _, ok := registeredDirectives[k.Text]; ok {
return nil, warnings, fmt.Errorf("%s:%d: parsed '%s' as a site address, but it is a known directive; directives must appear in a site block", k.File, k.Line, k.Text)
} }
} }
originalServerBlocks = append(originalServerBlocks, serverBlock{ originalServerBlocks = append(originalServerBlocks, serverBlock{
@ -82,46 +85,18 @@ func (st ServerType) Setup(
return nil, warnings, err return nil, warnings, err
} }
originalServerBlocks, err = st.extractNamedRoutes(originalServerBlocks, options, &warnings) // this will replace both static and user-defined placeholder shorthands
// with actual identifiers used by Caddy
replacer := NewShorthandReplacer()
originalServerBlocks, err = st.extractNamedRoutes(originalServerBlocks, options, &warnings, replacer)
if err != nil { if err != nil {
return nil, warnings, err return nil, warnings, err
} }
// replace shorthand placeholders (which are convenient
// when writing a Caddyfile) with their actual placeholder
// identifiers or variable names
replacer := strings.NewReplacer(placeholderShorthands()...)
// these are placeholders that allow a user-defined final
// parameters, but we still want to provide a shorthand
// for those, so we use a regexp to replace
regexpReplacements := []struct {
search *regexp.Regexp
replace string
}{
{regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"},
{regexp.MustCompile(`{cookie\.([\w-]*)}`), "{http.request.cookie.$1}"},
{regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"},
{regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"},
{regexp.MustCompile(`{file\.([\w-]*)}`), "{http.request.uri.path.file.$1}"},
{regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"},
{regexp.MustCompile(`{re\.([\w-]*)\.([\w-]*)}`), "{http.regexp.$1.$2}"},
{regexp.MustCompile(`{vars\.([\w-]*)}`), "{http.vars.$1}"},
{regexp.MustCompile(`{rp\.([\w-\.]*)}`), "{http.reverse_proxy.$1}"},
{regexp.MustCompile(`{err\.([\w-\.]*)}`), "{http.error.$1}"},
{regexp.MustCompile(`{file_match\.([\w-]*)}`), "{http.matchers.file.$1}"},
}
for _, sb := range originalServerBlocks { for _, sb := range originalServerBlocks {
for _, segment := range sb.block.Segments { for i := range sb.block.Segments {
for i := 0; i < len(segment); i++ { replacer.ApplyToSegment(&sb.block.Segments[i])
// simple string replacements
segment[i].Text = replacer.Replace(segment[i].Text)
// complex regexp replacements
for _, r := range regexpReplacements {
segment[i].Text = r.search.ReplaceAllString(segment[i].Text, r.replace)
}
}
} }
if len(sb.block.Keys) == 0 { if len(sb.block.Keys) == 0 {
@ -299,6 +274,12 @@ func (st ServerType) Setup(
if !reflect.DeepEqual(pkiApp, &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}) { if !reflect.DeepEqual(pkiApp, &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}) {
cfg.AppsRaw["pki"] = caddyconfig.JSON(pkiApp, &warnings) cfg.AppsRaw["pki"] = caddyconfig.JSON(pkiApp, &warnings)
} }
if filesystems, ok := options["filesystem"].(caddy.Module); ok {
cfg.AppsRaw["caddy.filesystems"] = caddyconfig.JSON(
filesystems,
&warnings)
}
if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok { if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok {
cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr, cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr,
"module", "module",
@ -308,7 +289,6 @@ func (st ServerType) Setup(
if adminConfig, ok := options["admin"].(*caddy.AdminConfig); ok && adminConfig != nil { if adminConfig, ok := options["admin"].(*caddy.AdminConfig); ok && adminConfig != nil {
cfg.Admin = adminConfig cfg.Admin = adminConfig
} }
if pc, ok := options["persist_config"].(string); ok && pc == "off" { if pc, ok := options["persist_config"].(string); ok && pc == "off" {
if cfg.Admin == nil { if cfg.Admin == nil {
cfg.Admin = new(caddy.AdminConfig) cfg.Admin = new(caddy.AdminConfig)
@ -452,6 +432,7 @@ func (ServerType) extractNamedRoutes(
serverBlocks []serverBlock, serverBlocks []serverBlock,
options map[string]any, options map[string]any,
warnings *[]caddyconfig.Warning, warnings *[]caddyconfig.Warning,
replacer ShorthandReplacer,
) ([]serverBlock, error) { ) ([]serverBlock, error) {
namedRoutes := map[string]*caddyhttp.Route{} namedRoutes := map[string]*caddyhttp.Route{}
@ -477,11 +458,14 @@ func (ServerType) extractNamedRoutes(
continue continue
} }
// zip up all the segments since ParseSegmentAsSubroute
// was designed to take a directive+
wholeSegment := caddyfile.Segment{} wholeSegment := caddyfile.Segment{}
for _, segment := range sb.block.Segments { for i := range sb.block.Segments {
wholeSegment = append(wholeSegment, segment...) // replace user-defined placeholder shorthands in extracted named routes
replacer.ApplyToSegment(&sb.block.Segments[i])
// zip up all the segments since ParseSegmentAsSubroute
// was designed to take a directive+
wholeSegment = append(wholeSegment, sb.block.Segments[i]...)
} }
h := Helper{ h := Helper{
@ -509,7 +493,7 @@ func (ServerType) extractNamedRoutes(
route.HandlersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", subroute.CaddyModule().ID.Name(), h.warnings)} route.HandlersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", subroute.CaddyModule().ID.Name(), h.warnings)}
} }
namedRoutes[sb.block.Keys[0]] = &route namedRoutes[sb.block.GetKeysText()[0]] = &route
} }
options["named_routes"] = namedRoutes options["named_routes"] = namedRoutes
@ -547,12 +531,12 @@ func (st *ServerType) serversFromPairings(
// address), otherwise their routes will improperly be added // address), otherwise their routes will improperly be added
// to the same server (see issue #4635) // to the same server (see issue #4635)
for j, sblock1 := range p.serverBlocks { for j, sblock1 := range p.serverBlocks {
for _, key := range sblock1.block.Keys { for _, key := range sblock1.block.GetKeysText() {
for k, sblock2 := range p.serverBlocks { for k, sblock2 := range p.serverBlocks {
if k == j { if k == j {
continue continue
} }
if sliceContains(sblock2.block.Keys, key) { if sliceContains(sblock2.block.GetKeysText(), key) {
return nil, fmt.Errorf("ambiguous site definition: %s", key) return nil, fmt.Errorf("ambiguous site definition: %s", key)
} }
} }
@ -710,6 +694,7 @@ func (st *ServerType) serversFromPairings(
} }
if len(hosts) > 0 { if len(hosts) > 0 {
slices.Sort(hosts) // for deterministic JSON output
cp.MatchersRaw = caddy.ModuleMap{ cp.MatchersRaw = caddy.ModuleMap{
"sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones "sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
} }
@ -741,10 +726,20 @@ func (st *ServerType) serversFromPairings(
} }
} }
// If TLS is specified as directive, it will also result in 1 or more connection policy being created
// Thus, catch-all address with non-standard port, e.g. :8443, can have TLS enabled without
// specifying prefix "https://"
// Second part of the condition is to allow creating TLS conn policy even though `auto_https` has been disabled
// ensuring compatibility with behavior described in below link
// https://caddy.community/t/making-sense-of-auto-https-and-why-disabling-it-still-serves-https-instead-of-http/9761
createdTLSConnPolicies, ok := sblock.pile["tls.connection_policy"]
hasTLSEnabled := (ok && len(createdTLSConnPolicies) > 0) ||
(addr.Host != "" && srv.AutoHTTPS != nil && !sliceContains(srv.AutoHTTPS.Skip, addr.Host))
// we'll need to remember if the address qualifies for auto-HTTPS, so we // we'll need to remember if the address qualifies for auto-HTTPS, so we
// can add a TLS conn policy if necessary // can add a TLS conn policy if necessary
if addr.Scheme == "https" || if addr.Scheme == "https" ||
(addr.Scheme != "http" && addr.Host != "" && addr.Port != httpPort) { (addr.Scheme != "http" && addr.Port != httpPort && hasTLSEnabled) {
addressQualifiesForTLS = true addressQualifiesForTLS = true
} }
// predict whether auto-HTTPS will add the conn policy for us; if so, we // predict whether auto-HTTPS will add the conn policy for us; if so, we
@ -782,10 +777,19 @@ func (st *ServerType) serversFromPairings(
if srv.Errors == nil { if srv.Errors == nil {
srv.Errors = new(caddyhttp.HTTPErrorConfig) srv.Errors = new(caddyhttp.HTTPErrorConfig)
} }
sort.SliceStable(errorSubrouteVals, func(i, j int) bool {
sri, srj := errorSubrouteVals[i].Value.(*caddyhttp.Subroute), errorSubrouteVals[j].Value.(*caddyhttp.Subroute)
if len(sri.Routes[0].MatcherSetsRaw) == 0 && len(srj.Routes[0].MatcherSetsRaw) != 0 {
return false
}
return true
})
errorsSubroute := &caddyhttp.Subroute{}
for _, val := range errorSubrouteVals { for _, val := range errorSubrouteVals {
sr := val.Value.(*caddyhttp.Subroute) sr := val.Value.(*caddyhttp.Subroute)
srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, sr, matcherSetsEnc, p, warnings) errorsSubroute.Routes = append(errorsSubroute.Routes, sr.Routes...)
} }
srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, errorsSubroute, matcherSetsEnc, p, warnings)
} }
// add log associations // add log associations
@ -811,7 +815,12 @@ func (st *ServerType) serversFromPairings(
if srv.Logs.LoggerNames == nil { if srv.Logs.LoggerNames == nil {
srv.Logs.LoggerNames = make(map[string]string) srv.Logs.LoggerNames = make(map[string]string)
} }
srv.Logs.LoggerNames[h] = ncl.name // strip the port from the host, if any
host, _, err := net.SplitHostPort(h)
if err != nil {
host = h
}
srv.Logs.LoggerNames[host] = ncl.name
} }
} }
} }
@ -828,6 +837,11 @@ func (st *ServerType) serversFromPairings(
} }
} }
// sort for deterministic JSON output
if srv.Logs != nil {
slices.Sort(srv.Logs.SkipHosts)
}
// a server cannot (natively) serve both HTTP and HTTPS at the // a server cannot (natively) serve both HTTP and HTTPS at the
// same time, so make sure the configuration isn't in conflict // same time, so make sure the configuration isn't in conflict
err := detectConflictingSchemes(srv, p.serverBlocks, options) err := detectConflictingSchemes(srv, p.serverBlocks, options)
@ -1370,68 +1384,73 @@ func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.Mod
} }
func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error { func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
for d.Next() { d.Next() // advance to the first token
// this is the "name" for "named matchers"
definitionName := d.Val()
if _, ok := matchers[definitionName]; ok { // this is the "name" for "named matchers"
return fmt.Errorf("matcher is defined more than once: %s", definitionName) definitionName := d.Val()
if _, ok := matchers[definitionName]; ok {
return fmt.Errorf("matcher is defined more than once: %s", definitionName)
}
matchers[definitionName] = make(caddy.ModuleMap)
// given a matcher name and the tokens following it, parse
// the tokens as a matcher module and record it
makeMatcher := func(matcherName string, tokens []caddyfile.Token) error {
mod, err := caddy.GetModule("http.matchers." + matcherName)
if err != nil {
return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
} }
matchers[definitionName] = make(caddy.ModuleMap) unm, ok := mod.New().(caddyfile.Unmarshaler)
if !ok {
return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
}
err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
if err != nil {
return err
}
rm, ok := unm.(caddyhttp.RequestMatcher)
if !ok {
return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
}
matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
return nil
}
// given a matcher name and the tokens following it, parse // if the next token is quoted, we can assume it's not a matcher name
// the tokens as a matcher module and record it // and that it's probably an 'expression' matcher
makeMatcher := func(matcherName string, tokens []caddyfile.Token) error { if d.NextArg() {
mod, err := caddy.GetModule("http.matchers." + matcherName) if d.Token().Quoted() {
if err != nil { // since it was missing the matcher name, we insert a token
return fmt.Errorf("getting matcher module '%s': %v", matcherName, err) // in front of the expression token itself
} err := makeMatcher("expression", []caddyfile.Token{
unm, ok := mod.New().(caddyfile.Unmarshaler) {Text: "expression", File: d.File(), Line: d.Line()},
if !ok { d.Token(),
return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName) })
}
err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
if err != nil { if err != nil {
return err return err
} }
rm, ok := unm.(caddyhttp.RequestMatcher)
if !ok {
return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
}
matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
return nil return nil
} }
// if the next token is quoted, we can assume it's not a matcher name // if it wasn't quoted, then we need to rewind after calling
// and that it's probably an 'expression' matcher // d.NextArg() so the below properly grabs the matcher name
if d.NextArg() { d.Prev()
if d.Token().Quoted() { }
err := makeMatcher("expression", []caddyfile.Token{d.Token()})
if err != nil {
return err
}
continue
}
// if it wasn't quoted, then we need to rewind after calling // in case there are multiple instances of the same matcher, concatenate
// d.NextArg() so the below properly grabs the matcher name // their tokens (we expect that UnmarshalCaddyfile should be able to
d.Prev() // handle more than one segment); otherwise, we'd overwrite other
} // instances of the matcher in this set
tokensByMatcherName := make(map[string][]caddyfile.Token)
// in case there are multiple instances of the same matcher, concatenate for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
// their tokens (we expect that UnmarshalCaddyfile should be able to matcherName := d.Val()
// handle more than one segment); otherwise, we'd overwrite other tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
// instances of the matcher in this set }
tokensByMatcherName := make(map[string][]caddyfile.Token) for matcherName, tokens := range tokensByMatcherName {
for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); { err := makeMatcher(matcherName, tokens)
matcherName := d.Val() if err != nil {
tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...) return err
}
for matcherName, tokens := range tokensByMatcherName {
err := makeMatcher(matcherName, tokens)
if err != nil {
return err
}
} }
} }
return nil return nil
@ -1449,37 +1468,6 @@ func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (caddy.Modul
return msEncoded, nil return msEncoded, nil
} }
// placeholderShorthands returns a slice of old-new string pairs,
// where the left of the pair is a placeholder shorthand that may
// be used in the Caddyfile, and the right is the replacement.
func placeholderShorthands() []string {
return []string{
"{dir}", "{http.request.uri.path.dir}",
"{file}", "{http.request.uri.path.file}",
"{host}", "{http.request.host}",
"{hostport}", "{http.request.hostport}",
"{port}", "{http.request.port}",
"{method}", "{http.request.method}",
"{path}", "{http.request.uri.path}",
"{query}", "{http.request.uri.query}",
"{remote}", "{http.request.remote}",
"{remote_host}", "{http.request.remote.host}",
"{remote_port}", "{http.request.remote.port}",
"{scheme}", "{http.request.scheme}",
"{uri}", "{http.request.uri}",
"{tls_cipher}", "{http.request.tls.cipher_suite}",
"{tls_version}", "{http.request.tls.version}",
"{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
"{tls_client_issuer}", "{http.request.tls.client.issuer}",
"{tls_client_serial}", "{http.request.tls.client.serial}",
"{tls_client_subject}", "{http.request.tls.client.subject}",
"{tls_client_certificate_pem}", "{http.request.tls.client.certificate_pem}",
"{tls_client_certificate_der_base64}", "{http.request.tls.client.certificate_der_base64}",
"{upstream_hostport}", "{http.reverse_proxy.upstream.hostport}",
"{client_ip}", "{http.vars.client_ip}",
}
}
// WasReplacedPlaceholderShorthand checks if a token string was // WasReplacedPlaceholderShorthand checks if a token string was
// likely a replaced shorthand of the known Caddyfile placeholder // likely a replaced shorthand of the known Caddyfile placeholder
// replacement outputs. Useful to prevent some user-defined map // replacement outputs. Useful to prevent some user-defined map

View file

@ -62,105 +62,103 @@ func init() {
func parseOptTrue(d *caddyfile.Dispenser, _ any) (any, error) { return true, nil } func parseOptTrue(d *caddyfile.Dispenser, _ any) (any, error) { return true, nil }
func parseOptHTTPPort(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptHTTPPort(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
var httpPort int var httpPort int
for d.Next() { var httpPortStr string
var httpPortStr string if !d.AllArgs(&httpPortStr) {
if !d.AllArgs(&httpPortStr) { return 0, d.ArgErr()
return 0, d.ArgErr() }
} var err error
var err error httpPort, err = strconv.Atoi(httpPortStr)
httpPort, err = strconv.Atoi(httpPortStr) if err != nil {
if err != nil { return 0, d.Errf("converting port '%s' to integer value: %v", httpPortStr, err)
return 0, d.Errf("converting port '%s' to integer value: %v", httpPortStr, err)
}
} }
return httpPort, nil return httpPort, nil
} }
func parseOptHTTPSPort(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptHTTPSPort(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
var httpsPort int var httpsPort int
for d.Next() { var httpsPortStr string
var httpsPortStr string if !d.AllArgs(&httpsPortStr) {
if !d.AllArgs(&httpsPortStr) { return 0, d.ArgErr()
return 0, d.ArgErr() }
} var err error
var err error httpsPort, err = strconv.Atoi(httpsPortStr)
httpsPort, err = strconv.Atoi(httpsPortStr) if err != nil {
if err != nil { return 0, d.Errf("converting port '%s' to integer value: %v", httpsPortStr, err)
return 0, d.Errf("converting port '%s' to integer value: %v", httpsPortStr, err)
}
} }
return httpsPort, nil return httpsPort, nil
} }
func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
// get directive name
if !d.Next() {
return nil, d.ArgErr()
}
dirName := d.Val()
if _, ok := registeredDirectives[dirName]; !ok {
return nil, d.Errf("%s is not a registered directive", dirName)
}
// get positional token
if !d.Next() {
return nil, d.ArgErr()
}
pos := Positional(d.Val())
newOrder := directiveOrder newOrder := directiveOrder
for d.Next() { // if directive exists, first remove it
// get directive name for i, d := range newOrder {
if !d.Next() { if d == dirName {
return nil, d.ArgErr() newOrder = append(newOrder[:i], newOrder[i+1:]...)
} break
dirName := d.Val()
if _, ok := registeredDirectives[dirName]; !ok {
return nil, d.Errf("%s is not a registered directive", dirName)
} }
}
// get positional token // act on the positional
if !d.Next() { switch pos {
return nil, d.ArgErr() case First:
} newOrder = append([]string{dirName}, newOrder...)
pos := d.Val()
// if directive exists, first remove it
for i, d := range newOrder {
if d == dirName {
newOrder = append(newOrder[:i], newOrder[i+1:]...)
break
}
}
// act on the positional
switch pos {
case "first":
newOrder = append([]string{dirName}, newOrder...)
if d.NextArg() {
return nil, d.ArgErr()
}
directiveOrder = newOrder
return newOrder, nil
case "last":
newOrder = append(newOrder, dirName)
if d.NextArg() {
return nil, d.ArgErr()
}
directiveOrder = newOrder
return newOrder, nil
case "before":
case "after":
default:
return nil, d.Errf("unknown positional '%s'", pos)
}
// get name of other directive
if !d.NextArg() {
return nil, d.ArgErr()
}
otherDir := d.Val()
if d.NextArg() { if d.NextArg() {
return nil, d.ArgErr() return nil, d.ArgErr()
} }
directiveOrder = newOrder
return newOrder, nil
case Last:
newOrder = append(newOrder, dirName)
if d.NextArg() {
return nil, d.ArgErr()
}
directiveOrder = newOrder
return newOrder, nil
case Before:
case After:
default:
return nil, d.Errf("unknown positional '%s'", pos)
}
// insert directive into proper position // get name of other directive
for i, d := range newOrder { if !d.NextArg() {
if d == otherDir { return nil, d.ArgErr()
if pos == "before" { }
newOrder = append(newOrder[:i], append([]string{dirName}, newOrder[i:]...)...) otherDir := d.Val()
} else if pos == "after" { if d.NextArg() {
newOrder = append(newOrder[:i+1], append([]string{dirName}, newOrder[i+1:]...)...) return nil, d.ArgErr()
} }
break
// insert directive into proper position
for i, d := range newOrder {
if d == otherDir {
if pos == Before {
newOrder = append(newOrder[:i], append([]string{dirName}, newOrder[i:]...)...)
} else if pos == After {
newOrder = append(newOrder[:i+1], append([]string{dirName}, newOrder[i+1:]...)...)
} }
break
} }
} }
@ -223,57 +221,58 @@ func parseOptACMEDNS(d *caddyfile.Dispenser, _ any) (any, error) {
func parseOptACMEEAB(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptACMEEAB(d *caddyfile.Dispenser, _ any) (any, error) {
eab := new(acme.EAB) eab := new(acme.EAB)
for d.Next() { d.Next() // consume option name
if d.NextArg() { if d.NextArg() {
return nil, d.ArgErr() return nil, d.ArgErr()
} }
for nesting := d.Nesting(); d.NextBlock(nesting); { for d.NextBlock(0) {
switch d.Val() { switch d.Val() {
case "key_id": case "key_id":
if !d.NextArg() { if !d.NextArg() {
return nil, d.ArgErr() return nil, d.ArgErr()
}
eab.KeyID = d.Val()
case "mac_key":
if !d.NextArg() {
return nil, d.ArgErr()
}
eab.MACKey = d.Val()
default:
return nil, d.Errf("unrecognized parameter '%s'", d.Val())
} }
eab.KeyID = d.Val()
case "mac_key":
if !d.NextArg() {
return nil, d.ArgErr()
}
eab.MACKey = d.Val()
default:
return nil, d.Errf("unrecognized parameter '%s'", d.Val())
} }
} }
return eab, nil return eab, nil
} }
func parseOptCertIssuer(d *caddyfile.Dispenser, existing any) (any, error) { func parseOptCertIssuer(d *caddyfile.Dispenser, existing any) (any, error) {
d.Next() // consume option name
var issuers []certmagic.Issuer var issuers []certmagic.Issuer
if existing != nil { if existing != nil {
issuers = existing.([]certmagic.Issuer) issuers = existing.([]certmagic.Issuer)
} }
for d.Next() { // consume option name
if !d.Next() { // get issuer module name // get issuer module name
return nil, d.ArgErr() if !d.Next() {
} return nil, d.ArgErr()
modID := "tls.issuance." + d.Val()
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return nil, err
}
iss, ok := unm.(certmagic.Issuer)
if !ok {
return nil, d.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm)
}
issuers = append(issuers, iss)
} }
modID := "tls.issuance." + d.Val()
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return nil, err
}
iss, ok := unm.(certmagic.Issuer)
if !ok {
return nil, d.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm)
}
issuers = append(issuers, iss)
return issuers, nil return issuers, nil
} }
func parseOptSingleString(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptSingleString(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume parameter name d.Next() // consume option name
if !d.Next() { if !d.Next() {
return "", d.ArgErr() return "", d.ArgErr()
} }
@ -285,7 +284,7 @@ func parseOptSingleString(d *caddyfile.Dispenser, _ any) (any, error) {
} }
func parseOptStringList(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptStringList(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume parameter name d.Next() // consume option name
val := d.RemainingArgs() val := d.RemainingArgs()
if len(val) == 0 { if len(val) == 0 {
return "", d.ArgErr() return "", d.ArgErr()
@ -294,33 +293,33 @@ func parseOptStringList(d *caddyfile.Dispenser, _ any) (any, error) {
} }
func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
adminCfg := new(caddy.AdminConfig) adminCfg := new(caddy.AdminConfig)
for d.Next() { if d.NextArg() {
if d.NextArg() { listenAddress := d.Val()
listenAddress := d.Val() if listenAddress == "off" {
if listenAddress == "off" { adminCfg.Disabled = true
adminCfg.Disabled = true if d.Next() { // Do not accept any remaining options including block
if d.Next() { // Do not accept any remaining options including block return nil, d.Err("No more option is allowed after turning off admin config")
return nil, d.Err("No more option is allowed after turning off admin config") }
} } else {
} else { adminCfg.Listen = listenAddress
adminCfg.Listen = listenAddress if d.NextArg() { // At most 1 arg is allowed
if d.NextArg() { // At most 1 arg is allowed return nil, d.ArgErr()
return nil, d.ArgErr()
}
} }
} }
for nesting := d.Nesting(); d.NextBlock(nesting); { }
switch d.Val() { for d.NextBlock(0) {
case "enforce_origin": switch d.Val() {
adminCfg.EnforceOrigin = true case "enforce_origin":
adminCfg.EnforceOrigin = true
case "origins": case "origins":
adminCfg.Origins = d.RemainingArgs() adminCfg.Origins = d.RemainingArgs()
default: default:
return nil, d.Errf("unrecognized parameter '%s'", d.Val()) return nil, d.Errf("unrecognized parameter '%s'", d.Val())
}
} }
} }
if adminCfg.Listen == "" && !adminCfg.Disabled { if adminCfg.Listen == "" && !adminCfg.Disabled {
@ -330,57 +329,59 @@ func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) {
} }
func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
if d.NextArg() {
return nil, d.ArgErr()
}
var ond *caddytls.OnDemandConfig var ond *caddytls.OnDemandConfig
for d.Next() {
if d.NextArg() {
return nil, d.ArgErr()
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "ask":
if !d.NextArg() {
return nil, d.ArgErr()
}
if ond == nil {
ond = new(caddytls.OnDemandConfig)
}
ond.Ask = d.Val()
case "interval": for nesting := d.Nesting(); d.NextBlock(nesting); {
if !d.NextArg() { switch d.Val() {
return nil, d.ArgErr() case "ask":
} if !d.NextArg() {
dur, err := caddy.ParseDuration(d.Val()) return nil, d.ArgErr()
if err != nil {
return nil, err
}
if ond == nil {
ond = new(caddytls.OnDemandConfig)
}
if ond.RateLimit == nil {
ond.RateLimit = new(caddytls.RateLimit)
}
ond.RateLimit.Interval = caddy.Duration(dur)
case "burst":
if !d.NextArg() {
return nil, d.ArgErr()
}
burst, err := strconv.Atoi(d.Val())
if err != nil {
return nil, err
}
if ond == nil {
ond = new(caddytls.OnDemandConfig)
}
if ond.RateLimit == nil {
ond.RateLimit = new(caddytls.RateLimit)
}
ond.RateLimit.Burst = burst
default:
return nil, d.Errf("unrecognized parameter '%s'", d.Val())
} }
if ond == nil {
ond = new(caddytls.OnDemandConfig)
}
perm := caddytls.PermissionByHTTP{Endpoint: d.Val()}
ond.PermissionRaw = caddyconfig.JSONModuleObject(perm, "module", "http", nil)
case "interval":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, err
}
if ond == nil {
ond = new(caddytls.OnDemandConfig)
}
if ond.RateLimit == nil {
ond.RateLimit = new(caddytls.RateLimit)
}
ond.RateLimit.Interval = caddy.Duration(dur)
case "burst":
if !d.NextArg() {
return nil, d.ArgErr()
}
burst, err := strconv.Atoi(d.Val())
if err != nil {
return nil, err
}
if ond == nil {
ond = new(caddytls.OnDemandConfig)
}
if ond.RateLimit == nil {
ond.RateLimit = new(caddytls.RateLimit)
}
ond.RateLimit.Burst = burst
default:
return nil, d.Errf("unrecognized parameter '%s'", d.Val())
} }
} }
if ond == nil { if ond == nil {
@ -390,7 +391,7 @@ func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) {
} }
func parseOptPersistConfig(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptPersistConfig(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume parameter name d.Next() // consume option name
if !d.Next() { if !d.Next() {
return "", d.ArgErr() return "", d.ArgErr()
} }
@ -405,7 +406,7 @@ func parseOptPersistConfig(d *caddyfile.Dispenser, _ any) (any, error) {
} }
func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ any) (any, error) { func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume parameter name d.Next() // consume option name
if !d.Next() { if !d.Next() {
return "", d.ArgErr() return "", d.ArgErr()
} }

View file

@ -48,124 +48,124 @@ func init() {
// //
// When the CA ID is unspecified, 'local' is assumed. // When the CA ID is unspecified, 'local' is assumed.
func parsePKIApp(d *caddyfile.Dispenser, existingVal any) (any, error) { func parsePKIApp(d *caddyfile.Dispenser, existingVal any) (any, error) {
pki := &caddypki.PKI{CAs: make(map[string]*caddypki.CA)} d.Next() // consume app name
for d.Next() { pki := &caddypki.PKI{
for nesting := d.Nesting(); d.NextBlock(nesting); { CAs: make(map[string]*caddypki.CA),
switch d.Val() { }
case "ca": for d.NextBlock(0) {
pkiCa := new(caddypki.CA) switch d.Val() {
case "ca":
pkiCa := new(caddypki.CA)
if d.NextArg() {
pkiCa.ID = d.Val()
if d.NextArg() { if d.NextArg() {
pkiCa.ID = d.Val() return nil, d.ArgErr()
if d.NextArg() { }
}
if pkiCa.ID == "" {
pkiCa.ID = caddypki.DefaultCAID
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "name":
if !d.NextArg() {
return nil, d.ArgErr() return nil, d.ArgErr()
} }
} pkiCa.Name = d.Val()
if pkiCa.ID == "" {
pkiCa.ID = caddypki.DefaultCAID
}
for nesting := d.Nesting(); d.NextBlock(nesting); { case "root_cn":
switch d.Val() { if !d.NextArg() {
case "name": return nil, d.ArgErr()
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Name = d.Val()
case "root_cn":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.RootCommonName = d.Val()
case "intermediate_cn":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.IntermediateCommonName = d.Val()
case "intermediate_lifetime":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, err
}
pkiCa.IntermediateLifetime = caddy.Duration(dur)
case "root":
if pkiCa.Root == nil {
pkiCa.Root = new(caddypki.KeyPair)
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "cert":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Root.Certificate = d.Val()
case "key":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Root.PrivateKey = d.Val()
case "format":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Root.Format = d.Val()
default:
return nil, d.Errf("unrecognized pki ca root option '%s'", d.Val())
}
}
case "intermediate":
if pkiCa.Intermediate == nil {
pkiCa.Intermediate = new(caddypki.KeyPair)
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "cert":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Intermediate.Certificate = d.Val()
case "key":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Intermediate.PrivateKey = d.Val()
case "format":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Intermediate.Format = d.Val()
default:
return nil, d.Errf("unrecognized pki ca intermediate option '%s'", d.Val())
}
}
default:
return nil, d.Errf("unrecognized pki ca option '%s'", d.Val())
} }
pkiCa.RootCommonName = d.Val()
case "intermediate_cn":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.IntermediateCommonName = d.Val()
case "intermediate_lifetime":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, err
}
pkiCa.IntermediateLifetime = caddy.Duration(dur)
case "root":
if pkiCa.Root == nil {
pkiCa.Root = new(caddypki.KeyPair)
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "cert":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Root.Certificate = d.Val()
case "key":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Root.PrivateKey = d.Val()
case "format":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Root.Format = d.Val()
default:
return nil, d.Errf("unrecognized pki ca root option '%s'", d.Val())
}
}
case "intermediate":
if pkiCa.Intermediate == nil {
pkiCa.Intermediate = new(caddypki.KeyPair)
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "cert":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Intermediate.Certificate = d.Val()
case "key":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Intermediate.PrivateKey = d.Val()
case "format":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Intermediate.Format = d.Val()
default:
return nil, d.Errf("unrecognized pki ca intermediate option '%s'", d.Val())
}
}
default:
return nil, d.Errf("unrecognized pki ca option '%s'", d.Val())
} }
pki.CAs[pkiCa.ID] = pkiCa
default:
return nil, d.Errf("unrecognized pki option '%s'", d.Val())
} }
pki.CAs[pkiCa.ID] = pkiCa
default:
return nil, d.Errf("unrecognized pki option '%s'", d.Val())
} }
} }
return pki, nil return pki, nil
} }

View file

@ -46,235 +46,242 @@ type serverOptions struct {
Protocols []string Protocols []string
StrictSNIHost *bool StrictSNIHost *bool
TrustedProxiesRaw json.RawMessage TrustedProxiesRaw json.RawMessage
TrustedProxiesStrict int
ClientIPHeaders []string ClientIPHeaders []string
ShouldLogCredentials bool ShouldLogCredentials bool
Metrics *caddyhttp.Metrics Metrics *caddyhttp.Metrics
} }
func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) { func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) {
d.Next() // consume option name
serverOpts := serverOptions{} serverOpts := serverOptions{}
for d.Next() { if d.NextArg() {
serverOpts.ListenerAddress = d.Val()
if d.NextArg() { if d.NextArg() {
serverOpts.ListenerAddress = d.Val() return nil, d.ArgErr()
if d.NextArg() { }
}
for d.NextBlock(0) {
switch d.Val() {
case "name":
if serverOpts.ListenerAddress == "" {
return nil, d.Errf("cannot set a name for a server without a listener address")
}
if !d.NextArg() {
return nil, d.ArgErr() return nil, d.ArgErr()
} }
} serverOpts.Name = d.Val()
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "name":
if serverOpts.ListenerAddress == "" {
return nil, d.Errf("cannot set a name for a server without a listener address")
}
if !d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.Name = d.Val()
case "listener_wrappers": case "listener_wrappers":
for nesting := d.Nesting(); d.NextBlock(nesting); { for nesting := d.Nesting(); d.NextBlock(nesting); {
modID := "caddy.listeners." + d.Val() modID := "caddy.listeners." + d.Val()
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return nil, err
}
listenerWrapper, ok := unm.(caddy.ListenerWrapper)
if !ok {
return nil, fmt.Errorf("module %s (%T) is not a listener wrapper", modID, unm)
}
jsonListenerWrapper := caddyconfig.JSONModuleObject(
listenerWrapper,
"wrapper",
listenerWrapper.(caddy.Module).CaddyModule().ID.Name(),
nil,
)
serverOpts.ListenerWrappersRaw = append(serverOpts.ListenerWrappersRaw, jsonListenerWrapper)
}
case "timeouts":
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "read_body":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing read_body timeout duration: %v", err)
}
serverOpts.ReadTimeout = caddy.Duration(dur)
case "read_header":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing read_header timeout duration: %v", err)
}
serverOpts.ReadHeaderTimeout = caddy.Duration(dur)
case "write":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing write timeout duration: %v", err)
}
serverOpts.WriteTimeout = caddy.Duration(dur)
case "idle":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing idle timeout duration: %v", err)
}
serverOpts.IdleTimeout = caddy.Duration(dur)
default:
return nil, d.Errf("unrecognized timeouts option '%s'", d.Val())
}
}
case "keepalive_interval":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing keepalive interval duration: %v", err)
}
serverOpts.KeepAliveInterval = caddy.Duration(dur)
case "max_header_size":
var sizeStr string
if !d.AllArgs(&sizeStr) {
return nil, d.ArgErr()
}
size, err := humanize.ParseBytes(sizeStr)
if err != nil {
return nil, d.Errf("parsing max_header_size: %v", err)
}
serverOpts.MaxHeaderBytes = int(size)
case "enable_full_duplex":
if d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.EnableFullDuplex = true
case "log_credentials":
if d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.ShouldLogCredentials = true
case "protocols":
protos := d.RemainingArgs()
for _, proto := range protos {
if proto != "h1" && proto != "h2" && proto != "h2c" && proto != "h3" {
return nil, d.Errf("unknown protocol '%s': expected h1, h2, h2c, or h3", proto)
}
if sliceContains(serverOpts.Protocols, proto) {
return nil, d.Errf("protocol %s specified more than once", proto)
}
serverOpts.Protocols = append(serverOpts.Protocols, proto)
}
if nesting := d.Nesting(); d.NextBlock(nesting) {
return nil, d.ArgErr()
}
case "strict_sni_host":
if d.NextArg() && d.Val() != "insecure_off" && d.Val() != "on" {
return nil, d.Errf("strict_sni_host only supports 'on' or 'insecure_off', got '%s'", d.Val())
}
boolVal := true
if d.Val() == "insecure_off" {
boolVal = false
}
serverOpts.StrictSNIHost = &boolVal
case "trusted_proxies":
if !d.NextArg() {
return nil, d.Err("trusted_proxies expects an IP range source module name as its first argument")
}
modID := "http.ip_sources." + d.Val()
unm, err := caddyfile.UnmarshalModule(d, modID) unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
source, ok := unm.(caddyhttp.IPRangeSource) listenerWrapper, ok := unm.(caddy.ListenerWrapper)
if !ok { if !ok {
return nil, fmt.Errorf("module %s (%T) is not an IP range source", modID, unm) return nil, fmt.Errorf("module %s (%T) is not a listener wrapper", modID, unm)
} }
jsonSource := caddyconfig.JSONModuleObject( jsonListenerWrapper := caddyconfig.JSONModuleObject(
source, listenerWrapper,
"source", "wrapper",
source.(caddy.Module).CaddyModule().ID.Name(), listenerWrapper.(caddy.Module).CaddyModule().ID.Name(),
nil, nil,
) )
serverOpts.TrustedProxiesRaw = jsonSource serverOpts.ListenerWrappersRaw = append(serverOpts.ListenerWrappersRaw, jsonListenerWrapper)
case "client_ip_headers":
headers := d.RemainingArgs()
for _, header := range headers {
if sliceContains(serverOpts.ClientIPHeaders, header) {
return nil, d.Errf("client IP header %s specified more than once", header)
}
serverOpts.ClientIPHeaders = append(serverOpts.ClientIPHeaders, header)
}
if nesting := d.Nesting(); d.NextBlock(nesting) {
return nil, d.ArgErr()
}
case "metrics":
if d.NextArg() {
return nil, d.ArgErr()
}
if nesting := d.Nesting(); d.NextBlock(nesting) {
return nil, d.ArgErr()
}
serverOpts.Metrics = new(caddyhttp.Metrics)
// TODO: DEPRECATED. (August 2022)
case "protocol":
caddy.Log().Named("caddyfile").Warn("DEPRECATED: protocol sub-option will be removed soon")
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "allow_h2c":
caddy.Log().Named("caddyfile").Warn("DEPRECATED: allow_h2c will be removed soon; use protocols option instead")
if d.NextArg() {
return nil, d.ArgErr()
}
if sliceContains(serverOpts.Protocols, "h2c") {
return nil, d.Errf("protocol h2c already specified")
}
serverOpts.Protocols = append(serverOpts.Protocols, "h2c")
case "strict_sni_host":
caddy.Log().Named("caddyfile").Warn("DEPRECATED: protocol > strict_sni_host in this position will be removed soon; move up to the servers block instead")
if d.NextArg() && d.Val() != "insecure_off" && d.Val() != "on" {
return nil, d.Errf("strict_sni_host only supports 'on' or 'insecure_off', got '%s'", d.Val())
}
boolVal := true
if d.Val() == "insecure_off" {
boolVal = false
}
serverOpts.StrictSNIHost = &boolVal
default:
return nil, d.Errf("unrecognized protocol option '%s'", d.Val())
}
}
default:
return nil, d.Errf("unrecognized servers option '%s'", d.Val())
} }
case "timeouts":
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "read_body":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing read_body timeout duration: %v", err)
}
serverOpts.ReadTimeout = caddy.Duration(dur)
case "read_header":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing read_header timeout duration: %v", err)
}
serverOpts.ReadHeaderTimeout = caddy.Duration(dur)
case "write":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing write timeout duration: %v", err)
}
serverOpts.WriteTimeout = caddy.Duration(dur)
case "idle":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing idle timeout duration: %v", err)
}
serverOpts.IdleTimeout = caddy.Duration(dur)
default:
return nil, d.Errf("unrecognized timeouts option '%s'", d.Val())
}
}
case "keepalive_interval":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing keepalive interval duration: %v", err)
}
serverOpts.KeepAliveInterval = caddy.Duration(dur)
case "max_header_size":
var sizeStr string
if !d.AllArgs(&sizeStr) {
return nil, d.ArgErr()
}
size, err := humanize.ParseBytes(sizeStr)
if err != nil {
return nil, d.Errf("parsing max_header_size: %v", err)
}
serverOpts.MaxHeaderBytes = int(size)
case "enable_full_duplex":
if d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.EnableFullDuplex = true
case "log_credentials":
if d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.ShouldLogCredentials = true
case "protocols":
protos := d.RemainingArgs()
for _, proto := range protos {
if proto != "h1" && proto != "h2" && proto != "h2c" && proto != "h3" {
return nil, d.Errf("unknown protocol '%s': expected h1, h2, h2c, or h3", proto)
}
if sliceContains(serverOpts.Protocols, proto) {
return nil, d.Errf("protocol %s specified more than once", proto)
}
serverOpts.Protocols = append(serverOpts.Protocols, proto)
}
if nesting := d.Nesting(); d.NextBlock(nesting) {
return nil, d.ArgErr()
}
case "strict_sni_host":
if d.NextArg() && d.Val() != "insecure_off" && d.Val() != "on" {
return nil, d.Errf("strict_sni_host only supports 'on' or 'insecure_off', got '%s'", d.Val())
}
boolVal := true
if d.Val() == "insecure_off" {
boolVal = false
}
serverOpts.StrictSNIHost = &boolVal
case "trusted_proxies":
if !d.NextArg() {
return nil, d.Err("trusted_proxies expects an IP range source module name as its first argument")
}
modID := "http.ip_sources." + d.Val()
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return nil, err
}
source, ok := unm.(caddyhttp.IPRangeSource)
if !ok {
return nil, fmt.Errorf("module %s (%T) is not an IP range source", modID, unm)
}
jsonSource := caddyconfig.JSONModuleObject(
source,
"source",
source.(caddy.Module).CaddyModule().ID.Name(),
nil,
)
serverOpts.TrustedProxiesRaw = jsonSource
case "trusted_proxies_strict":
if d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.TrustedProxiesStrict = 1
case "client_ip_headers":
headers := d.RemainingArgs()
for _, header := range headers {
if sliceContains(serverOpts.ClientIPHeaders, header) {
return nil, d.Errf("client IP header %s specified more than once", header)
}
serverOpts.ClientIPHeaders = append(serverOpts.ClientIPHeaders, header)
}
if nesting := d.Nesting(); d.NextBlock(nesting) {
return nil, d.ArgErr()
}
case "metrics":
if d.NextArg() {
return nil, d.ArgErr()
}
if nesting := d.Nesting(); d.NextBlock(nesting) {
return nil, d.ArgErr()
}
serverOpts.Metrics = new(caddyhttp.Metrics)
// TODO: DEPRECATED. (August 2022)
case "protocol":
caddy.Log().Named("caddyfile").Warn("DEPRECATED: protocol sub-option will be removed soon")
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "allow_h2c":
caddy.Log().Named("caddyfile").Warn("DEPRECATED: allow_h2c will be removed soon; use protocols option instead")
if d.NextArg() {
return nil, d.ArgErr()
}
if sliceContains(serverOpts.Protocols, "h2c") {
return nil, d.Errf("protocol h2c already specified")
}
serverOpts.Protocols = append(serverOpts.Protocols, "h2c")
case "strict_sni_host":
caddy.Log().Named("caddyfile").Warn("DEPRECATED: protocol > strict_sni_host in this position will be removed soon; move up to the servers block instead")
if d.NextArg() && d.Val() != "insecure_off" && d.Val() != "on" {
return nil, d.Errf("strict_sni_host only supports 'on' or 'insecure_off', got '%s'", d.Val())
}
boolVal := true
if d.Val() == "insecure_off" {
boolVal = false
}
serverOpts.StrictSNIHost = &boolVal
default:
return nil, d.Errf("unrecognized protocol option '%s'", d.Val())
}
}
default:
return nil, d.Errf("unrecognized servers option '%s'", d.Val())
} }
} }
return serverOpts, nil return serverOpts, nil
@ -340,6 +347,7 @@ func applyServerOptions(
server.StrictSNIHost = opts.StrictSNIHost server.StrictSNIHost = opts.StrictSNIHost
server.TrustedProxiesRaw = opts.TrustedProxiesRaw server.TrustedProxiesRaw = opts.TrustedProxiesRaw
server.ClientIPHeaders = opts.ClientIPHeaders server.ClientIPHeaders = opts.ClientIPHeaders
server.TrustedProxiesStrict = opts.TrustedProxiesStrict
server.Metrics = opts.Metrics server.Metrics = opts.Metrics
if opts.ShouldLogCredentials { if opts.ShouldLogCredentials {
if server.Logs == nil { if server.Logs == nil {

View file

@ -0,0 +1,93 @@
package httpcaddyfile
import (
"regexp"
"strings"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
// ComplexShorthandReplacer pairs a regular expression that matches a
// parameterized Caddyfile shorthand (e.g. "{header.Foo}") with the
// replacement pattern it expands to; the replacement may reference
// capture groups from the search pattern (e.g. "$1").
type ComplexShorthandReplacer struct {
	search  *regexp.Regexp // pattern matching the shorthand form
	replace string         // expansion text, may use $1-style group refs
}
// ShorthandReplacer expands convenient Caddyfile placeholder shorthands
// into their full placeholder identifiers. Fixed shorthands are handled
// by a strings.Replacer; shorthands with a user-defined suffix are
// handled by regular expressions.
type ShorthandReplacer struct {
	complex []ComplexShorthandReplacer // regexp-based, parameterized shorthands
	simple  *strings.Replacer          // fixed one-to-one shorthand pairs
}
// NewShorthandReplacer builds the replacer that expands Caddyfile
// placeholder shorthands (convenient when writing a Caddyfile) into
// their actual placeholder identifiers or variable names.
func NewShorthandReplacer() ShorthandReplacer {
	// Fixed shorthands are expanded in a single pass by a strings.Replacer.
	simple := strings.NewReplacer(placeholderShorthands()...)

	// These shorthands carry a user-defined final component (such as a
	// header or cookie name), so they cannot be fixed pairs; they are
	// expanded via regular expressions with capture-group substitution.
	patterns := []ComplexShorthandReplacer{
		{regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"},
		{regexp.MustCompile(`{cookie\.([\w-]*)}`), "{http.request.cookie.$1}"},
		{regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"},
		{regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"},
		{regexp.MustCompile(`{file\.([\w-]*)}`), "{http.request.uri.path.file.$1}"},
		{regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"},
		{regexp.MustCompile(`{re\.([\w-\.]*)}`), "{http.regexp.$1}"},
		{regexp.MustCompile(`{vars\.([\w-]*)}`), "{http.vars.$1}"},
		{regexp.MustCompile(`{rp\.([\w-\.]*)}`), "{http.reverse_proxy.$1}"},
		{regexp.MustCompile(`{err\.([\w-\.]*)}`), "{http.error.$1}"},
		{regexp.MustCompile(`{file_match\.([\w-]*)}`), "{http.matchers.file.$1}"},
	}

	return ShorthandReplacer{
		complex: patterns,
		simple:  simple,
	}
}
// placeholderShorthands returns a slice of old-new string pairs,
// where the left of the pair is a placeholder shorthand that may
// be used in the Caddyfile, and the right is the replacement.
// placeholderShorthands returns a slice of old-new string pairs suitable
// for strings.NewReplacer: each even-indexed element is a shorthand that
// may be used in a Caddyfile, and the element after it is the full
// placeholder it expands to.
func placeholderShorthands() []string {
	shorthands := [][2]string{
		{"{dir}", "{http.request.uri.path.dir}"},
		{"{file}", "{http.request.uri.path.file}"},
		{"{host}", "{http.request.host}"},
		{"{hostport}", "{http.request.hostport}"},
		{"{port}", "{http.request.port}"},
		{"{method}", "{http.request.method}"},
		{"{path}", "{http.request.uri.path}"},
		{"{query}", "{http.request.uri.query}"},
		{"{remote}", "{http.request.remote}"},
		{"{remote_host}", "{http.request.remote.host}"},
		{"{remote_port}", "{http.request.remote.port}"},
		{"{scheme}", "{http.request.scheme}"},
		{"{uri}", "{http.request.uri}"},
		{"{uuid}", "{http.request.uuid}"},
		{"{tls_cipher}", "{http.request.tls.cipher_suite}"},
		{"{tls_version}", "{http.request.tls.version}"},
		{"{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}"},
		{"{tls_client_issuer}", "{http.request.tls.client.issuer}"},
		{"{tls_client_serial}", "{http.request.tls.client.serial}"},
		{"{tls_client_subject}", "{http.request.tls.client.subject}"},
		{"{tls_client_certificate_pem}", "{http.request.tls.client.certificate_pem}"},
		{"{tls_client_certificate_der_base64}", "{http.request.tls.client.certificate_der_base64}"},
		{"{upstream_hostport}", "{http.reverse_proxy.upstream.hostport}"},
		{"{client_ip}", "{http.vars.client_ip}"},
	}
	pairs := make([]string, 0, len(shorthands)*2)
	for _, sh := range shorthands {
		pairs = append(pairs, sh[0], sh[1])
	}
	return pairs
}
// ApplyToSegment replaces shorthand placeholder to its full placeholder, understandable by Caddy.
func (s ShorthandReplacer) ApplyToSegment(segment *caddyfile.Segment) {
if segment != nil {
for i := 0; i < len(*segment); i++ {
// simple string replacements
(*segment)[i].Text = s.simple.Replace((*segment)[i].Text)
// complex regexp replacements
for _, r := range s.complex {
(*segment)[i].Text = r.search.ReplaceAllString((*segment)[i].Text, r.replace)
}
}
}
}

View file

@ -118,6 +118,11 @@ func (st ServerType) buildTLSApp(
ap.OnDemand = true ap.OnDemand = true
} }
// reuse private keys tls
if _, ok := sblock.pile["tls.reuse_private_keys"]; ok {
ap.ReusePrivateKeys = true
}
if keyTypeVals, ok := sblock.pile["tls.key_type"]; ok { if keyTypeVals, ok := sblock.pile["tls.key_type"]; ok {
ap.KeyType = keyTypeVals[0].Value.(string) ap.KeyType = keyTypeVals[0].Value.(string)
} }
@ -582,10 +587,12 @@ outer:
// eaten up by the one with subjects; and if both have subjects, we // eaten up by the one with subjects; and if both have subjects, we
// need to combine their lists // need to combine their lists
if reflect.DeepEqual(aps[i].IssuersRaw, aps[j].IssuersRaw) && if reflect.DeepEqual(aps[i].IssuersRaw, aps[j].IssuersRaw) &&
reflect.DeepEqual(aps[i].ManagersRaw, aps[j].ManagersRaw) &&
bytes.Equal(aps[i].StorageRaw, aps[j].StorageRaw) && bytes.Equal(aps[i].StorageRaw, aps[j].StorageRaw) &&
aps[i].MustStaple == aps[j].MustStaple && aps[i].MustStaple == aps[j].MustStaple &&
aps[i].KeyType == aps[j].KeyType && aps[i].KeyType == aps[j].KeyType &&
aps[i].OnDemand == aps[j].OnDemand && aps[i].OnDemand == aps[j].OnDemand &&
aps[i].ReusePrivateKeys == aps[j].ReusePrivateKeys &&
aps[i].RenewalWindowRatio == aps[j].RenewalWindowRatio { aps[i].RenewalWindowRatio == aps[j].RenewalWindowRatio {
if len(aps[i].SubjectsRaw) > 0 && len(aps[j].SubjectsRaw) == 0 { if len(aps[i].SubjectsRaw) > 0 && len(aps[j].SubjectsRaw) == 0 {
// later policy (at j) has no subjects ("catch-all"), so we can // later policy (at j) has no subjects ("catch-all"), so we can

View file

@ -8,6 +8,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/fs"
"log" "log"
"net" "net"
"net/http" "net/http"
@ -59,11 +60,11 @@ var (
type Tester struct { type Tester struct {
Client *http.Client Client *http.Client
configLoaded bool configLoaded bool
t *testing.T t testing.TB
} }
// NewTester will create a new testing client with an attached cookie jar // NewTester will create a new testing client with an attached cookie jar
func NewTester(t *testing.T) *Tester { func NewTester(t testing.TB) *Tester {
jar, err := cookiejar.New(nil) jar, err := cookiejar.New(nil)
if err != nil { if err != nil {
t.Fatalf("failed to create cookiejar: %s", err) t.Fatalf("failed to create cookiejar: %s", err)
@ -120,7 +121,6 @@ func (tc *Tester) initServer(rawConfig string, configType string) error {
tc.t.Cleanup(func() { tc.t.Cleanup(func() {
if tc.t.Failed() && tc.configLoaded { if tc.t.Failed() && tc.configLoaded {
res, err := http.Get(fmt.Sprintf("http://localhost:%d/config/", Default.AdminPort)) res, err := http.Get(fmt.Sprintf("http://localhost:%d/config/", Default.AdminPort))
if err != nil { if err != nil {
tc.t.Log("unable to read the current config") tc.t.Log("unable to read the current config")
@ -229,10 +229,10 @@ const initConfig = `{
// validateTestPrerequisites ensures the certificates are available in the // validateTestPrerequisites ensures the certificates are available in the
// designated path and Caddy sub-process is running. // designated path and Caddy sub-process is running.
func validateTestPrerequisites(t *testing.T) error { func validateTestPrerequisites(t testing.TB) error {
// check certificates are found // check certificates are found
for _, certName := range Default.Certifcates { for _, certName := range Default.Certifcates {
if _, err := os.Stat(getIntegrationDir() + certName); os.IsNotExist(err) { if _, err := os.Stat(getIntegrationDir() + certName); errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("caddy integration test certificates (%s) not found", certName) return fmt.Errorf("caddy integration test certificates (%s) not found", certName)
} }
} }
@ -373,7 +373,7 @@ func (tc *Tester) AssertRedirect(requestURI string, expectedToLocation string, e
} }
// CompareAdapt adapts a config and then compares it against an expected result // CompareAdapt adapts a config and then compares it against an expected result
func CompareAdapt(t *testing.T, filename, rawConfig string, adapterName string, expectedResponse string) bool { func CompareAdapt(t testing.TB, filename, rawConfig string, adapterName string, expectedResponse string) bool {
cfgAdapter := caddyconfig.GetAdapter(adapterName) cfgAdapter := caddyconfig.GetAdapter(adapterName)
if cfgAdapter == nil { if cfgAdapter == nil {
t.Logf("unrecognized config adapter '%s'", adapterName) t.Logf("unrecognized config adapter '%s'", adapterName)
@ -432,7 +432,7 @@ func CompareAdapt(t *testing.T, filename, rawConfig string, adapterName string,
} }
// AssertAdapt adapts a config and then tests it against an expected result // AssertAdapt adapts a config and then tests it against an expected result
func AssertAdapt(t *testing.T, rawConfig string, adapterName string, expectedResponse string) { func AssertAdapt(t testing.TB, rawConfig string, adapterName string, expectedResponse string) {
ok := CompareAdapt(t, "Caddyfile", rawConfig, adapterName, expectedResponse) ok := CompareAdapt(t, "Caddyfile", rawConfig, adapterName, expectedResponse)
if !ok { if !ok {
t.Fail() t.Fail()
@ -441,7 +441,7 @@ func AssertAdapt(t *testing.T, rawConfig string, adapterName string, expectedRes
// Generic request functions // Generic request functions
func applyHeaders(t *testing.T, req *http.Request, requestHeaders []string) { func applyHeaders(t testing.TB, req *http.Request, requestHeaders []string) {
requestContentType := "" requestContentType := ""
for _, requestHeader := range requestHeaders { for _, requestHeader := range requestHeaders {
arr := strings.SplitAfterN(requestHeader, ":", 2) arr := strings.SplitAfterN(requestHeader, ":", 2)
@ -467,7 +467,7 @@ func (tc *Tester) AssertResponseCode(req *http.Request, expectedStatusCode int)
} }
if expectedStatusCode != resp.StatusCode { if expectedStatusCode != resp.StatusCode {
tc.t.Errorf("requesting \"%s\" expected status code: %d but got %d", req.RequestURI, expectedStatusCode, resp.StatusCode) tc.t.Errorf("requesting \"%s\" expected status code: %d but got %d", req.URL.RequestURI(), expectedStatusCode, resp.StatusCode)
} }
return resp return resp

View file

@ -0,0 +1,206 @@
package integration
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"fmt"
"net"
"net/http"
"strings"
"testing"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddytest"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
smallstepacme "github.com/smallstep/certificates/acme"
"go.uber.org/zap"
)
// acmeChallengePort is the port on which naiveHTTPSolver serves HTTP-01
// challenge responses, and the port the smallstep ACME validator is
// pointed at during these tests.
const acmeChallengePort = 9081
// Test the basic functionality of Caddy's ACME server: register an
// account, solve an HTTP-01 challenge, and obtain a certificate for
// "localhost" from a default-configured acme_server.
func TestACMEServerWithDefaults(t *testing.T) {
	ctx := context.Background()
	logger, err := zap.NewDevelopment()
	if err != nil {
		t.Error(err)
		return
	}

	tester := caddytest.NewTester(t)
	tester.InitServer(`
	{
		skip_install_trust
		admin localhost:2999
		http_port 9080
		https_port 9443
		local_certs
	}
	acme.localhost {
		acme_server
	}
	`, "caddyfile")

	client := acmez.Client{
		Client: &acme.Client{
			Directory:  "https://acme.localhost:9443/acme/local/directory",
			HTTPClient: tester.Client,
			Logger:     logger,
		},
		ChallengeSolvers: map[string]acmez.Solver{
			acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
		},
	}

	accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Errorf("generating account key: %v", err)
		// fix: must not continue with a nil account key
		return
	}
	account := acme.Account{
		Contact:              []string{"mailto:you@example.com"},
		TermsOfServiceAgreed: true,
		PrivateKey:           accountPrivateKey,
	}
	account, err = client.NewAccount(ctx, account)
	if err != nil {
		t.Errorf("new account: %v", err)
		return
	}

	// Every certificate needs a key.
	certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Errorf("generating certificate key: %v", err)
		return
	}

	certs, err := client.ObtainCertificate(ctx, account, certPrivateKey, []string{"localhost"})
	if err != nil {
		t.Errorf("obtaining certificate: %v", err)
		return
	}

	// ACME servers should usually give you the entire certificate chain
	// in PEM format, and sometimes even alternate chains! It's up to you
	// which one(s) to store and use, but whatever you do, be sure to
	// store the certificate and key somewhere safe and secure, i.e. don't
	// lose them!
	for _, cert := range certs {
		t.Logf("Certificate %q:\n%s\n\n", cert.URL, cert.ChainPEM)
	}
}
// TestACMEServerWithMismatchedChallenges verifies that issuance fails
// cleanly when the server offers only tls-alpn-01 but the client can
// only solve http-01, and that the error message names the mismatch.
func TestACMEServerWithMismatchedChallenges(t *testing.T) {
	ctx := context.Background()
	logger := caddy.Log().Named("acmez")

	tester := caddytest.NewTester(t)
	tester.InitServer(`
	{
		skip_install_trust
		admin localhost:2999
		http_port 9080
		https_port 9443
		local_certs
	}
	acme.localhost {
		acme_server {
			challenges tls-alpn-01
		}
	}
	`, "caddyfile")

	client := acmez.Client{
		Client: &acme.Client{
			Directory:  "https://acme.localhost:9443/acme/local/directory",
			HTTPClient: tester.Client,
			Logger:     logger,
		},
		ChallengeSolvers: map[string]acmez.Solver{
			acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
		},
	}

	accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Errorf("generating account key: %v", err)
		// fix: must not continue with a nil account key
		return
	}
	account := acme.Account{
		Contact:              []string{"mailto:you@example.com"},
		TermsOfServiceAgreed: true,
		PrivateKey:           accountPrivateKey,
	}
	account, err = client.NewAccount(ctx, account)
	if err != nil {
		t.Errorf("new account: %v", err)
		return
	}

	// Every certificate needs a key.
	certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Errorf("generating certificate key: %v", err)
		return
	}

	certs, err := client.ObtainCertificate(ctx, account, certPrivateKey, []string{"localhost"})
	if len(certs) > 0 {
		t.Errorf("expected '0' certificates, but received '%d'", len(certs))
	}
	if err == nil {
		t.Error("expected errors, but received none")
		// fix: err.Error() below would panic on a nil error
		return
	}
	const expectedErrMsg = "no solvers available for remaining challenges (configured=[http-01] offered=[tls-alpn-01] remaining=[tls-alpn-01])"
	if !strings.Contains(err.Error(), expectedErrMsg) {
		t.Errorf(`received error message does not match expectation: expected="%s" received="%s"`, expectedErrMsg, err.Error())
	}
}
// naiveHTTPSolver is a minimal acmez.Solver for example/test purposes
// only: Present starts a bare HTTP server that answers the HTTP-01
// challenge, and CleanUp tears it down.
type naiveHTTPSolver struct {
	srv    *http.Server // challenge server started by Present; nil until then
	logger *zap.Logger  // destination for challenge-lifecycle log lines
}
// Present starts a throwaway HTTP server on acmeChallengePort that
// answers the given HTTP-01 challenge with its key authorization, and
// points the smallstep ACME validator at that port. Serving happens in
// a background goroutine; any serve error is discarded.
func (s *naiveHTTPSolver) Present(ctx context.Context, challenge acme.Challenge) error {
	smallstepacme.InsecurePortHTTP01 = acmeChallengePort

	answer := func(w http.ResponseWriter, r *http.Request) {
		hostOnly, _, splitErr := net.SplitHostPort(r.Host)
		if splitErr != nil {
			// No port in the Host header; use it as-is.
			hostOnly = r.Host
		}
		s.logger.Info("received request on challenge server", zap.String("path", r.URL.Path))
		// Only respond to the exact challenge path for the challenged identifier.
		if r.Method == "GET" && r.URL.Path == challenge.HTTP01ResourcePath() && strings.EqualFold(hostOnly, challenge.Identifier.Value) {
			w.Header().Add("Content-Type", "text/plain")
			w.Write([]byte(challenge.KeyAuthorization))
			r.Close = true
			s.logger.Info("served key authentication",
				zap.String("identifier", challenge.Identifier.Value),
				zap.String("challenge", "http-01"),
				zap.String("remote", r.RemoteAddr),
			)
		}
	}

	s.srv = &http.Server{
		Addr:    fmt.Sprintf(":%d", acmeChallengePort),
		Handler: http.HandlerFunc(answer),
	}

	listener, err := net.Listen("tcp", fmt.Sprintf(":%d", acmeChallengePort))
	if err != nil {
		return err
	}
	s.logger.Info("present challenge", zap.Any("challenge", challenge))
	go s.srv.Serve(listener)
	return nil
}
// CleanUp restores the smallstep ACME HTTP-01 port override and shuts
// down the temporary challenge server started by Present, if any.
// It always returns nil.
func (s naiveHTTPSolver) CleanUp(ctx context.Context, challenge acme.Challenge) error {
	// 0 restores the validator's default challenge port.
	smallstepacme.InsecurePortHTTP01 = 0
	s.logger.Info("cleanup", zap.Any("challenge", challenge))
	if s.srv != nil {
		s.srv.Close()
	}
	return nil
}

View file

@ -0,0 +1,209 @@
package integration
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"strings"
"testing"
"github.com/caddyserver/caddy/v2/caddytest"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
)
// TestACMEServerDirectory asserts that the directory endpoint of a
// Caddyfile-configured acme_server advertises the standard ACME
// resource URLs (new-nonce, new-account, new-order, revoke-cert,
// key-change).
func TestACMEServerDirectory(t *testing.T) {
	tester := caddytest.NewTester(t)
	tester.InitServer(`
	{
		skip_install_trust
		local_certs
		admin localhost:2999
		http_port 9080
		https_port 9443
		pki {
			ca local {
				name "Caddy Local Authority"
			}
		}
	}
	acme.localhost:9443 {
		acme_server
	}
	`, "caddyfile")
	// The directory response is a fixed JSON document; it is compared
	// verbatim (note the trailing newline inside the expected string).
	tester.AssertGetResponse(
		"https://acme.localhost:9443/acme/local/directory",
		200,
		`{"newNonce":"https://acme.localhost:9443/acme/local/new-nonce","newAccount":"https://acme.localhost:9443/acme/local/new-account","newOrder":"https://acme.localhost:9443/acme/local/new-order","revokeCert":"https://acme.localhost:9443/acme/local/revoke-cert","keyChange":"https://acme.localhost:9443/acme/local/key-change"}
`)
}
// TestACMEServerAllowPolicy verifies the acme_server "allow" policy:
// issuance succeeds for an allowed domain (localhost) and is rejected
// with rejectedIdentifier for a domain outside the allow list.
func TestACMEServerAllowPolicy(t *testing.T) {
	tester := caddytest.NewTester(t)
	tester.InitServer(`
	{
		skip_install_trust
		local_certs
		admin localhost:2999
		http_port 9080
		https_port 9443
		pki {
			ca local {
				name "Caddy Local Authority"
			}
		}
	}
	acme.localhost {
		acme_server {
			challenges http-01
			allow {
				domains localhost
			}
		}
	}
	`, "caddyfile")

	ctx := context.Background()
	logger, err := zap.NewDevelopment()
	if err != nil {
		t.Error(err)
		return
	}

	client := acmez.Client{
		Client: &acme.Client{
			Directory:  "https://acme.localhost:9443/acme/local/directory",
			HTTPClient: tester.Client,
			Logger:     logger,
		},
		ChallengeSolvers: map[string]acmez.Solver{
			acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
		},
	}

	accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Errorf("generating account key: %v", err)
		// fix: must not continue with a nil account key
		return
	}
	account := acme.Account{
		Contact:              []string{"mailto:you@example.com"},
		TermsOfServiceAgreed: true,
		PrivateKey:           accountPrivateKey,
	}
	account, err = client.NewAccount(ctx, account)
	if err != nil {
		t.Errorf("new account: %v", err)
		return
	}

	// Every certificate needs a key.
	certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Errorf("generating certificate key: %v", err)
		return
	}

	{
		certs, err := client.ObtainCertificate(
			ctx,
			account,
			certPrivateKey,
			[]string{"localhost"},
		)
		if err != nil {
			t.Errorf("obtaining certificate for allowed domain: %v", err)
			return
		}

		// ACME servers should usually give you the entire certificate chain
		// in PEM format, and sometimes even alternate chains! It's up to you
		// which one(s) to store and use, but whatever you do, be sure to
		// store the certificate and key somewhere safe and secure, i.e. don't
		// lose them!
		for _, cert := range certs {
			t.Logf("Certificate %q:\n%s\n\n", cert.URL, cert.ChainPEM)
		}
	}
	{
		_, err := client.ObtainCertificate(ctx, account, certPrivateKey, []string{"not-matching.localhost"})
		if err == nil {
			t.Errorf("obtaining certificate for 'not-matching.localhost' domain")
		} else if !strings.Contains(err.Error(), "urn:ietf:params:acme:error:rejectedIdentifier") {
			// NOTE(review): this only logs; use t.Errorf if an unexpected
			// rejection reason should fail the test — confirm intent.
			t.Logf("unexpected error: %v", err)
		}
	}
}
// TestACMEServerDenyPolicy verifies the acme_server "deny" policy:
// issuance for a denied domain must be rejected with
// rejectedIdentifier.
func TestACMEServerDenyPolicy(t *testing.T) {
	tester := caddytest.NewTester(t)
	tester.InitServer(`
	{
		skip_install_trust
		local_certs
		admin localhost:2999
		http_port 9080
		https_port 9443
		pki {
			ca local {
				name "Caddy Local Authority"
			}
		}
	}
	acme.localhost {
		acme_server {
			deny {
				domains deny.localhost
			}
		}
	}
	`, "caddyfile")

	ctx := context.Background()
	logger, err := zap.NewDevelopment()
	if err != nil {
		t.Error(err)
		return
	}

	client := acmez.Client{
		Client: &acme.Client{
			Directory:  "https://acme.localhost:9443/acme/local/directory",
			HTTPClient: tester.Client,
			Logger:     logger,
		},
		ChallengeSolvers: map[string]acmez.Solver{
			acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
		},
	}

	accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Errorf("generating account key: %v", err)
		// fix: must not continue with a nil account key
		return
	}
	account := acme.Account{
		Contact:              []string{"mailto:you@example.com"},
		TermsOfServiceAgreed: true,
		PrivateKey:           accountPrivateKey,
	}
	account, err = client.NewAccount(ctx, account)
	if err != nil {
		t.Errorf("new account: %v", err)
		return
	}

	// Every certificate needs a key.
	certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Errorf("generating certificate key: %v", err)
		return
	}

	{
		_, err := client.ObtainCertificate(ctx, account, certPrivateKey, []string{"deny.localhost"})
		if err == nil {
			t.Errorf("obtaining certificate for 'deny.localhost' domain")
		} else if !strings.Contains(err.Error(), "urn:ietf:params:acme:error:rejectedIdentifier") {
			// NOTE(review): this only logs; use t.Errorf if an unexpected
			// rejection reason should fail the test — confirm intent.
			t.Logf("unexpected error: %v", err)
		}
	}
}

View file

@ -0,0 +1,65 @@
{
pki {
ca custom-ca {
name "Custom CA"
}
}
}
acme.example.com {
acme_server {
ca custom-ca
challenges dns-01
}
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":443"
],
"routes": [
{
"match": [
{
"host": [
"acme.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"ca": "custom-ca",
"challenges": [
"dns-01"
],
"handler": "acme_server"
}
]
}
]
}
],
"terminal": true
}
]
}
}
},
"pki": {
"certificate_authorities": {
"custom-ca": {
"name": "Custom CA"
}
}
}
}
}

View file

@ -0,0 +1,62 @@
{
pki {
ca custom-ca {
name "Custom CA"
}
}
}
acme.example.com {
acme_server {
ca custom-ca
challenges
}
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":443"
],
"routes": [
{
"match": [
{
"host": [
"acme.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"ca": "custom-ca",
"handler": "acme_server"
}
]
}
]
}
],
"terminal": true
}
]
}
}
},
"pki": {
"certificate_authorities": {
"custom-ca": {
"name": "Custom CA"
}
}
}
}
}

View file

@ -0,0 +1,66 @@
{
pki {
ca custom-ca {
name "Custom CA"
}
}
}
acme.example.com {
acme_server {
ca custom-ca
challenges dns-01 http-01
}
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":443"
],
"routes": [
{
"match": [
{
"host": [
"acme.example.com"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"ca": "custom-ca",
"challenges": [
"dns-01",
"http-01"
],
"handler": "acme_server"
}
]
}
]
}
],
"terminal": true
}
]
}
}
},
"pki": {
"certificate_authorities": {
"custom-ca": {
"name": "Custom CA"
}
}
}
}
}

View file

@ -0,0 +1,37 @@
:8443 {
tls internal {
on_demand
}
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":8443"
],
"tls_connection_policies": [
{}
]
}
}
},
"tls": {
"automation": {
"policies": [
{
"issuers": [
{
"module": "internal"
}
],
"on_demand": true
}
]
}
}
}
}

View file

@ -11,6 +11,7 @@ encode gzip zstd {
header Content-Type application/xhtml+xml* header Content-Type application/xhtml+xml*
header Content-Type application/atom+xml* header Content-Type application/atom+xml*
header Content-Type application/rss+xml* header Content-Type application/rss+xml*
header Content-Type application/wasm*
header Content-Type image/svg+xml* header Content-Type image/svg+xml*
} }
} }
@ -47,6 +48,7 @@ encode {
"application/xhtml+xml*", "application/xhtml+xml*",
"application/atom+xml*", "application/atom+xml*",
"application/rss+xml*", "application/rss+xml*",
"application/wasm*",
"image/svg+xml*" "image/svg+xml*"
] ]
}, },

View file

@ -0,0 +1,245 @@
foo.localhost {
root * /srv
error /private* "Unauthorized" 410
error /fivehundred* "Internal Server Error" 500
handle_errors 5xx {
respond "Error In range [500 .. 599]"
}
handle_errors 410 {
respond "404 or 410 error"
}
}
bar.localhost {
root * /srv
error /private* "Unauthorized" 410
error /fivehundred* "Internal Server Error" 500
handle_errors 5xx {
respond "Error In range [500 .. 599] from second site"
}
handle_errors 410 {
respond "404 or 410 error from second site"
}
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":443"
],
"routes": [
{
"match": [
{
"host": [
"foo.localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Internal Server Error",
"handler": "error",
"status_code": 500
}
],
"match": [
{
"path": [
"/fivehundred*"
]
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
}
]
}
],
"terminal": true
},
{
"match": [
{
"host": [
"bar.localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Internal Server Error",
"handler": "error",
"status_code": 500
}
],
"match": [
{
"path": [
"/fivehundred*"
]
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
}
]
}
],
"terminal": true
}
],
"errors": {
"routes": [
{
"match": [
{
"host": [
"foo.localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "404 or 410 error",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} in [410]"
}
]
},
{
"handle": [
{
"body": "Error In range [500 .. 599]",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 500 \u0026\u0026 {http.error.status_code} \u003c= 599"
}
]
}
]
}
],
"terminal": true
},
{
"match": [
{
"host": [
"bar.localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "404 or 410 error from second site",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} in [410]"
}
]
},
{
"handle": [
{
"body": "Error In range [500 .. 599] from second site",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 500 \u0026\u0026 {http.error.status_code} \u003c= 599"
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}
}

View file

@ -0,0 +1,120 @@
{
http_port 3010
}
localhost:3010 {
root * /srv
error /private* "Unauthorized" 410
error /hidden* "Not found" 404
handle_errors 4xx {
respond "Error in the [400 .. 499] range"
}
}
----------
{
"apps": {
"http": {
"http_port": 3010,
"servers": {
"srv0": {
"listen": [
":3010"
],
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
},
{
"handle": [
{
"error": "Not found",
"handler": "error",
"status_code": 404
}
],
"match": [
{
"path": [
"/hidden*"
]
}
]
}
]
}
],
"terminal": true
}
],
"errors": {
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "Error in the [400 .. 499] range",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499"
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}
}

View file

@ -0,0 +1,153 @@
{
http_port 2099
}
localhost:2099 {
root * /srv
error /private* "Unauthorized" 410
error /threehundred* "Moved Permanently" 301
error /internalerr* "Internal Server Error" 500
handle_errors 500 3xx {
respond "Error code is equal to 500 or in the [300..399] range"
}
handle_errors 4xx {
respond "Error in the [400 .. 499] range"
}
}
----------
{
"apps": {
"http": {
"http_port": 2099,
"servers": {
"srv0": {
"listen": [
":2099"
],
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Moved Permanently",
"handler": "error",
"status_code": 301
}
],
"match": [
{
"path": [
"/threehundred*"
]
}
]
},
{
"handle": [
{
"error": "Internal Server Error",
"handler": "error",
"status_code": 500
}
],
"match": [
{
"path": [
"/internalerr*"
]
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
}
]
}
],
"terminal": true
}
],
"errors": {
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "Error in the [400 .. 499] range",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499"
}
]
},
{
"handle": [
{
"body": "Error code is equal to 500 or in the [300..399] range",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 300 \u0026\u0026 {http.error.status_code} \u003c= 399 || {http.error.status_code} in [500]"
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}
}

View file

@ -0,0 +1,120 @@
{
http_port 3010
}
localhost:3010 {
root * /srv
error /private* "Unauthorized" 410
error /hidden* "Not found" 404
handle_errors 404 410 {
respond "404 or 410 error"
}
}
----------
{
"apps": {
"http": {
"http_port": 3010,
"servers": {
"srv0": {
"listen": [
":3010"
],
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
},
{
"handle": [
{
"error": "Not found",
"handler": "error",
"status_code": 404
}
],
"match": [
{
"path": [
"/hidden*"
]
}
]
}
]
}
],
"terminal": true
}
],
"errors": {
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "404 or 410 error",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} in [404, 410]"
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}
}

View file

@ -0,0 +1,148 @@
{
http_port 2099
}
localhost:2099 {
root * /srv
error /private* "Unauthorized" 410
error /hidden* "Not found" 404
error /internalerr* "Internal Server Error" 500
handle_errors {
respond "Fallback route: code outside the [400..499] range"
}
handle_errors 4xx {
respond "Error in the [400 .. 499] range"
}
}
----------
{
"apps": {
"http": {
"http_port": 2099,
"servers": {
"srv0": {
"listen": [
":2099"
],
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "vars",
"root": "/srv"
}
]
},
{
"handle": [
{
"error": "Internal Server Error",
"handler": "error",
"status_code": 500
}
],
"match": [
{
"path": [
"/internalerr*"
]
}
]
},
{
"handle": [
{
"error": "Unauthorized",
"handler": "error",
"status_code": 410
}
],
"match": [
{
"path": [
"/private*"
]
}
]
},
{
"handle": [
{
"error": "Not found",
"handler": "error",
"status_code": 404
}
],
"match": [
{
"path": [
"/hidden*"
]
}
]
}
]
}
],
"terminal": true
}
],
"errors": {
"routes": [
{
"match": [
{
"host": [
"localhost"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"body": "Error in the [400 .. 499] range",
"handler": "static_response"
}
],
"match": [
{
"expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499"
}
]
},
{
"handle": [
{
"body": "Fallback route: code outside the [400..499] range",
"handler": "static_response"
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}
}

View file

@ -69,7 +69,10 @@
} }
], ],
"on_demand": { "on_demand": {
"ask": "https://example.com", "permission": {
"endpoint": "https://example.com",
"module": "http"
},
"rate_limit": { "rate_limit": {
"interval": 30000000000, "interval": 30000000000,
"burst": 20 "burst": 20

View file

@ -78,7 +78,10 @@
} }
], ],
"on_demand": { "on_demand": {
"ask": "https://example.com", "permission": {
"endpoint": "https://example.com",
"module": "http"
},
"rate_limit": { "rate_limit": {
"interval": 30000000000, "interval": 30000000000,
"burst": 20 "burst": 20

View file

@ -71,7 +71,10 @@
} }
], ],
"on_demand": { "on_demand": {
"ask": "https://example.com", "permission": {
"endpoint": "https://example.com",
"module": "http"
},
"rate_limit": { "rate_limit": {
"interval": 30000000000, "interval": 30000000000,
"burst": 20 "burst": 20

View file

@ -0,0 +1,46 @@
http://handle {
file_server
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":80"
],
"routes": [
{
"match": [
{
"host": [
"handle"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "file_server",
"hide": [
"./Caddyfile"
]
}
]
}
]
}
],
"terminal": true
}
]
}
}
}
}
}

View file

@ -0,0 +1,71 @@
:80 {
log
vars foo foo
log_append const bar
log_append vars foo
log_append placeholder {path}
log_append /only-for-this-path secret value
}
----------
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":80"
],
"routes": [
{
"handle": [
{
"foo": "foo",
"handler": "vars"
}
]
},
{
"match": [
{
"path": [
"/only-for-this-path"
]
}
],
"handle": [
{
"handler": "log_append",
"key": "secret",
"value": "value"
}
]
},
{
"handle": [
{
"handler": "log_append",
"key": "const",
"value": "bar"
},
{
"handler": "log_append",
"key": "vars",
"value": "foo"
},
{
"handler": "log_append",
"key": "placeholder",
"value": "{http.request.uri.path}"
}
]
}
],
"logs": {}
}
}
}
}
}

View file

@ -0,0 +1,63 @@
{
log {
format append {
wrap json
fields {
wrap "foo"
}
env {env.EXAMPLE}
int 1
float 1.1
bool true
string "string"
}
}
}
:80 {
respond "Hello, World!"
}
----------
{
"logging": {
"logs": {
"default": {
"encoder": {
"fields": {
"bool": true,
"env": "{env.EXAMPLE}",
"float": 1.1,
"int": 1,
"string": "string",
"wrap": "foo"
},
"format": "append",
"wrap": {
"format": "json"
}
}
}
}
},
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":80"
],
"routes": [
{
"handle": [
{
"body": "Hello, World!",
"handler": "static_response"
}
]
}
]
}
}
}
}
}

View file

@ -1,7 +1,7 @@
http://localhost:2020 { http://localhost:2020 {
log log
skip_log /first-hidden* log_skip /first-hidden*
skip_log /second-hidden* log_skip /second-hidden*
respond 200 respond 200
} }
@ -34,7 +34,7 @@ http://localhost:2020 {
"handle": [ "handle": [
{ {
"handler": "vars", "handler": "vars",
"skip_log": true "log_skip": true
} }
], ],
"match": [ "match": [
@ -49,7 +49,7 @@ http://localhost:2020 {
"handle": [ "handle": [
{ {
"handler": "vars", "handler": "vars",
"skip_log": true "log_skip": true
} }
], ],
"match": [ "match": [
@ -99,7 +99,7 @@ http://localhost:2020 {
}, },
"logs": { "logs": {
"logger_names": { "logger_names": {
"localhost:2020": "" "localhost": ""
}, },
"skip_unmapped_hosts": true "skip_unmapped_hosts": true
} }

View file

@ -0,0 +1,52 @@
:80
log {
output stdout
format filter {
fields {
request>headers>Server delete
}
}
}
----------
{
"logging": {
"logs": {
"default": {
"exclude": [
"http.log.access.log0"
]
},
"log0": {
"writer": {
"output": "stdout"
},
"encoder": {
"fields": {
"request\u003eheaders\u003eServer": {
"filter": "delete"
}
},
"format": "filter"
},
"include": [
"http.log.access.log0"
]
}
}
},
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":80"
],
"logs": {
"default_logger_name": "log0"
}
}
}
}
}
}

View file

@ -4,26 +4,31 @@ log {
output stdout output stdout
format filter { format filter {
wrap console wrap console
# long form, with "fields" wrapper
fields { fields {
uri query { uri query {
replace foo REDACTED replace foo REDACTED
delete bar delete bar
hash baz hash baz
} }
request>headers>Authorization replace REDACTED
request>headers>Server delete
request>headers>Cookie cookie {
replace foo REDACTED
delete bar
hash baz
}
request>remote_ip ip_mask {
ipv4 24
ipv6 32
}
request>headers>Regexp regexp secret REDACTED
request>headers>Hash hash
} }
# short form, flatter structure
request>headers>Authorization replace REDACTED
request>headers>Server delete
request>headers>Cookie cookie {
replace foo REDACTED
delete bar
hash baz
}
request>remote_ip ip_mask {
ipv4 24
ipv6 32
}
request>client_ip ip_mask 16 32
request>headers>Regexp regexp secret REDACTED
request>headers>Hash hash
} }
} }
---------- ----------
@ -41,6 +46,11 @@ log {
}, },
"encoder": { "encoder": {
"fields": { "fields": {
"request\u003eclient_ip": {
"filter": "ip_mask",
"ipv4_cidr": 16,
"ipv6_cidr": 32
},
"request\u003eheaders\u003eAuthorization": { "request\u003eheaders\u003eAuthorization": {
"filter": "replace", "filter": "replace",
"value": "REDACTED" "value": "REDACTED"

View file

@ -8,6 +8,12 @@
output file /baz.txt output file /baz.txt
} }
} }
example.com:8443 {
log {
output file /port.txt
}
}
---------- ----------
{ {
"logging": { "logging": {
@ -15,7 +21,8 @@
"default": { "default": {
"exclude": [ "exclude": [
"http.log.access.log0", "http.log.access.log0",
"http.log.access.log1" "http.log.access.log1",
"http.log.access.log2"
] ]
}, },
"log0": { "log0": {
@ -35,6 +42,15 @@
"include": [ "include": [
"http.log.access.log1" "http.log.access.log1"
] ]
},
"log2": {
"writer": {
"filename": "/port.txt",
"output": "file"
},
"include": [
"http.log.access.log2"
]
} }
} }
}, },
@ -64,6 +80,28 @@
"foo.example.com": "log0" "foo.example.com": "log0"
} }
} }
},
"srv1": {
"listen": [
":8443"
],
"routes": [
{
"match": [
{
"host": [
"example.com"
]
}
],
"terminal": true
}
],
"logs": {
"logger_names": {
"example.com": "log2"
}
}
} }
} }
} }

View file

@ -76,7 +76,7 @@ http://localhost:8881 {
}, },
"logs": { "logs": {
"logger_names": { "logger_names": {
"localhost:8881": "foo" "localhost": "foo"
} }
} }
} }

View file

@ -81,7 +81,7 @@ http://localhost:8881 {
}, },
"logs": { "logs": {
"logger_names": { "logger_names": {
"localhost:8881": "foo" "localhost": "foo"
} }
} }
} }

View file

@ -66,9 +66,9 @@ example.com {
"one.example.com": "" "one.example.com": ""
}, },
"skip_hosts": [ "skip_hosts": [
"example.com",
"three.example.com", "three.example.com",
"two.example.com", "two.example.com"
"example.com"
] ]
} }
} }

Some files were not shown because too many files have changed in this diff Show more