Mirror of https://github.com/caddyserver/caddy.git (synced 2024-12-26 05:33:49 +03:00)

Commit ac8411d69a: Merge branch 'master' into proxy-res-templates
145 changed files with 5947 additions and 2768 deletions
.gitattributes (vendored, new file): 1 addition
@@ -0,0 +1 @@
+*.go text eol=lf
.github/workflows/ci.yml (vendored): 10 changes
@@ -19,16 +19,16 @@ jobs:
       fail-fast: false
       matrix:
         os: [ ubuntu-latest, macos-latest, windows-latest ]
-        go: [ '1.17', '1.18' ]
+        go: [ '1.18', '1.19' ]

         include:
           # Set the minimum Go patch version for the given Go minor
           # Usable via ${{ matrix.GO_SEMVER }}
-          - go: '1.17'
-            GO_SEMVER: '~1.17.9'
-
           - go: '1.18'
-            GO_SEMVER: '~1.18.1'
+            GO_SEMVER: '~1.18.4'
+
+          - go: '1.19'
+            GO_SEMVER: '~1.19.0'

           # Set some variables per OS, usable via ${{ matrix.VAR }}
           # CADDY_BIN_PATH: the path to the compiled Caddy binary, for artifact publishing
.github/workflows/cross-build.yml (vendored): 6 changes
@@ -16,13 +16,13 @@ jobs:
       fail-fast: false
       matrix:
         goos: ['android', 'linux', 'solaris', 'illumos', 'dragonfly', 'freebsd', 'openbsd', 'plan9', 'windows', 'darwin', 'netbsd']
-        go: [ '1.18' ]
+        go: [ '1.19' ]

         include:
           # Set the minimum Go patch version for the given Go minor
           # Usable via ${{ matrix.GO_SEMVER }}
-          - go: '1.18'
-            GO_SEMVER: '~1.18.1'
+          - go: '1.19'
+            GO_SEMVER: '~1.19.0'

     runs-on: ubuntu-latest
     continue-on-error: true
.github/workflows/lint.yml (vendored): 11 changes
@@ -14,17 +14,22 @@ jobs:
   # From https://github.com/golangci/golangci-lint-action
   golangci:
     name: lint
-    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
+    runs-on: ${{ matrix.os }}
     steps:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v3
         with:
-          go-version: '~1.17.9'
+          go-version: '~1.18.4'
           check-latest: true

       - name: golangci-lint
         uses: golangci/golangci-lint-action@v3
         with:
-          version: v1.44
+          version: v1.47
+          # Windows times out frequently after about 5m50s if we don't set a longer timeout.
+          args: --timeout 10m
           # Optional: show only new issues if it's a pull request. The default value is `false`.
           # only-new-issues: true
.github/workflows/release.yml (vendored): 23 changes
@@ -11,15 +11,22 @@ jobs:
     strategy:
       matrix:
         os: [ ubuntu-latest ]
-        go: [ '1.18' ]
+        go: [ '1.19' ]

         include:
           # Set the minimum Go patch version for the given Go minor
           # Usable via ${{ matrix.GO_SEMVER }}
-          - go: '1.18'
-            GO_SEMVER: '~1.18.1'
+          - go: '1.19'
+            GO_SEMVER: '~1.19.0'

     runs-on: ${{ matrix.os }}
+    # https://github.com/sigstore/cosign/issues/1258#issuecomment-1002251233
+    # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings
+    permissions:
+      id-token: write
+      # https://docs.github.com/en/rest/overview/permissions-required-for-github-apps#permission-on-contents
+      # "Releases" is part of `contents`, so it needs the `write`
+      contents: write

     steps:
       - name: Install Go

@@ -99,7 +106,14 @@ jobs:
           key: ${{ runner.os }}-go${{ matrix.go }}-release-${{ hashFiles('**/go.sum') }}
           restore-keys: |
             ${{ runner.os }}-go${{ matrix.go }}-release
+      - name: Install Cosign
+        uses: sigstore/cosign-installer@main
+      - name: Cosign version
+        run: cosign version
+      - name: Install Syft
+        uses: anchore/sbom-action/download-syft@main
+      - name: Syft version
+        run: syft version
       # GoReleaser will take care of publishing those artifacts into the release
       - name: Run GoReleaser
         uses: goreleaser/goreleaser-action@v2

@@ -109,6 +123,7 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           TAG: ${{ steps.vars.outputs.version_tag }}
+          COSIGN_EXPERIMENTAL: 1

       # Only publish on non-special tags (e.g. non-beta)
       # We will continue to push to Gemfury for the foreseeable future, although
(GoReleaser configuration; file name not shown in this extract)
@@ -14,7 +14,11 @@ before:
     # run `go mod tidy`. The `/bin/sh -c '...'` is because goreleaser can't find cd in PATH without shell invocation.
     - /bin/sh -c 'cd ./caddy-build && go mod tidy'
     - git clone --depth 1 https://github.com/caddyserver/dist caddy-dist
+    - mkdir -p caddy-dist/man
     - go mod download
+    - go run cmd/caddy/main.go manpage --directory ./caddy-dist/man
+    - gzip -r ./caddy-dist/man/
+    - /bin/sh -c 'go run cmd/caddy/main.go completion bash > ./caddy-dist/scripts/bash-completion'

 builds:
   - env:

@@ -58,9 +62,22 @@ builds:
     goarm: "5"
     flags:
       - -trimpath
+      - -mod=readonly
     ldflags:
       - -s -w
+signs:
+  - cmd: cosign
+    signature: "${artifact}.sig"
+    certificate: '{{ trimsuffix .Env.artifact ".tar.gz" }}.pem'
+    args: ["sign-blob", "--output-signature=${signature}", "--output-certificate", "${certificate}", "${artifact}"]
+    artifacts: all
+sboms:
+  - artifacts: binary
+    # defaults to
+    # documents:
+    #   - "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}.sbom"
+    cmd: syft
+    args: ["$artifact", "--file", "${document}", "--output", "cyclonedx-json"]
 archives:
   - format_overrides:
       - goos: windows

@@ -96,13 +113,16 @@ nfpms:
       - src: ./caddy-dist/welcome/index.html
         dst: /usr/share/caddy/index.html

-      - src: ./caddy-dist/scripts/completions/bash-completion
+      - src: ./caddy-dist/scripts/bash-completion
        dst: /etc/bash_completion.d/caddy

       - src: ./caddy-dist/config/Caddyfile
         dst: /etc/caddy/Caddyfile
         type: config

+      - src: ./caddy-dist/man/*
+        dst: /usr/share/man/man8/

     scripts:
       postinstall: ./caddy-dist/scripts/postinstall.sh
       preremove: ./caddy-dist/scripts/preremove.sh
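The new pre-build hooks above generate man pages and a bash completion script by invoking Caddy's own manpage and completion subcommands, and the packaging section then installs those files. As a rough illustration of how a CLI can expose that kind of functionality, here is a minimal sketch using the spf13/cobra library; this is a generic example under assumed names, not Caddy's actual implementation of those subcommands.

package main

import (
    "log"

    "github.com/spf13/cobra"
    "github.com/spf13/cobra/doc"
)

func main() {
    // root command of a hypothetical CLI (not Caddy's)
    root := &cobra.Command{Use: "mytool", Short: "example CLI"}

    // "manpage" subcommand: writes roff man pages into a directory
    root.AddCommand(&cobra.Command{
        Use:  "manpage",
        Args: cobra.ExactArgs(1), // target directory
        RunE: func(cmd *cobra.Command, args []string) error {
            header := &doc.GenManHeader{Title: "MYTOOL", Section: "8"}
            return doc.GenManTree(root, header, args[0])
        },
    })

    // "completion" subcommand: writes a bash completion script to stdout
    root.AddCommand(&cobra.Command{
        Use: "completion",
        RunE: func(cmd *cobra.Command, args []string) error {
            return root.GenBashCompletion(cmd.OutOrStdout())
        },
    })

    if err := root.Execute(); err != nil {
        log.Fatal(err)
    }
}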
README.md: 16 changes
@@ -57,25 +57,25 @@
 	- Multi-issuer fallback
 - **Stays up when other servers go down** due to TLS/OCSP/certificate-related issues
 - **Production-ready** after serving trillions of requests and managing millions of TLS certificates
-- **Scales to tens of thousands of sites** ... and probably more
-- **HTTP/1.1, HTTP/2, and experimental HTTP/3** support
+- **Scales to hundreds of thousands of sites** as proven in production
+- **HTTP/1.1, HTTP/2, and HTTP/3** supported all by default
 - **Highly extensible** [modular architecture](https://caddyserver.com/docs/architecture) lets Caddy do anything without bloat
 - **Runs anywhere** with **no external dependencies** (not even libc)
 - Written in Go, a language with higher **memory safety guarantees** than other servers
 - Actually **fun to use**
-- So, so much more to [discover](https://caddyserver.com/v2)
+- So much more to [discover](https://caddyserver.com/v2)

 ## Install

-The simplest, cross-platform way is to download from [GitHub Releases](https://github.com/caddyserver/caddy/releases) and place the executable file in your PATH.
+The simplest, cross-platform way to get started is to download Caddy from [GitHub Releases](https://github.com/caddyserver/caddy/releases) and place the executable file in your PATH.

-For other install options, see https://caddyserver.com/docs/install.
+See [our online documentation](https://caddyserver.com/docs/install) for other install instructions.

 ## Build from source

 Requirements:

-- [Go 1.17 or newer](https://golang.org/dl/)
+- [Go 1.18 or newer](https://golang.org/dl/)

 ### For development

@@ -164,9 +164,9 @@ The docs are also open source. You can contribute to them here: https://github.c

 ## Getting help

-- We **strongly recommend** that all professionals or companies using Caddy get a support contract through [Ardan Labs](https://www.ardanlabs.com/my/contact-us?dd=caddy) before help is needed.
+- We advise companies using Caddy to secure a support contract through [Ardan Labs](https://www.ardanlabs.com/my/contact-us?dd=caddy) before help is needed.

-- A [sponsorship](https://github.com/sponsors/mholt) goes a long way! If Caddy is benefitting your company, please consider a sponsorship! This not only helps fund full-time work to ensure the longevity of the project, it's also a great look for your company to your customers and potential customers!
+- A [sponsorship](https://github.com/sponsors/mholt) goes a long way! We can offer private help to sponsors. If Caddy is benefitting your company, please consider a sponsorship. This not only helps fund full-time work to ensure the longevity of the project, it provides your company the resources, support, and discounts you need; along with being a great look for your company to your customers and potential customers!

 - Individuals can exchange help for free on our community forum at https://caddy.community. Remember that people give help out of their spare time and good will. The best way to get help is to give it first!
admin.go: 50 changes
@@ -21,7 +21,6 @@ import (
     "crypto/tls"
     "crypto/x509"
     "encoding/base64"
-    "encoding/hex"
     "encoding/json"
     "errors"
     "expvar"

@@ -41,7 +40,6 @@ import (
     "sync"
     "time"

-    "github.com/caddyserver/caddy/v2/notify"
     "github.com/caddyserver/certmagic"
     "github.com/prometheus/client_golang/prometheus"
     "go.uber.org/zap"

@@ -341,17 +339,19 @@ func (admin AdminConfig) allowedOrigins(addr NetworkAddress) []*url.URL {
 // that there is always an admin server (unless it is explicitly
 // configured to be disabled).
 func replaceLocalAdminServer(cfg *Config) error {
-    // always be sure to close down the old admin endpoint
+    // always* be sure to close down the old admin endpoint
     // as gracefully as possible, even if the new one is
     // disabled -- careful to use reference to the current
     // (old) admin endpoint since it will be different
     // when the function returns
+    // (* except if the new one fails to start)
     oldAdminServer := localAdminServer
+    var err error
     defer func() {
         // do the shutdown asynchronously so that any
         // current API request gets a response; this
         // goroutine may last a few seconds
-        if oldAdminServer != nil {
+        if oldAdminServer != nil && err == nil {
             go func(oldAdminServer *http.Server) {
                 err := stopAdminServer(oldAdminServer)
                 if err != nil {

@@ -442,7 +442,7 @@ func manageIdentity(ctx Context, cfg *Config) error {
         if err != nil {
             return fmt.Errorf("loading identity issuer modules: %s", err)
         }
-        for _, issVal := range val.([]interface{}) {
+        for _, issVal := range val.([]any) {
             cfg.Admin.Identity.issuers = append(cfg.Admin.Identity.issuers, issVal.(certmagic.Issuer))
         }
     }

@@ -901,6 +901,12 @@ func (h adminHandler) originAllowed(origin *url.URL) bool {
 // produce and verify ETags.
 func etagHasher() hash.Hash32 { return fnv.New32a() }

+// makeEtag returns an Etag header value (including quotes) for
+// the given config path and hash of contents at that path.
+func makeEtag(path string, hash hash.Hash) string {
+    return fmt.Sprintf(`"%s %x"`, path, hash.Sum(nil))
+}
+
 func handleConfig(w http.ResponseWriter, r *http.Request) error {
     switch r.Method {
     case http.MethodGet:

@@ -919,7 +925,7 @@ func handleConfig(w http.ResponseWriter, r *http.Request) error {

         // we could consider setting up a sync.Pool for the summed
         // hashes to reduce GC pressure.
-        w.Header().Set("ETag", r.URL.Path+" "+hex.EncodeToString(hash.Sum(nil)))
+        w.Header().Set("Etag", makeEtag(r.URL.Path, hash))

         return nil

@@ -988,9 +994,9 @@ func handleConfigID(w http.ResponseWriter, r *http.Request) error {
     id := parts[2]

     // map the ID to the expanded path
-    currentCfgMu.RLock()
+    currentCtxMu.RLock()
     expanded, ok := rawCfgIndex[id]
-    defer currentCfgMu.RUnlock()
+    defer currentCtxMu.RUnlock()
     if !ok {
         return APIError{
             HTTPStatus: http.StatusNotFound,

@@ -1013,10 +1019,6 @@ func handleStop(w http.ResponseWriter, r *http.Request) error {
         }
     }

-    if err := notify.NotifyStopping(); err != nil {
-        Log().Error("unable to notify stopping to service manager", zap.Error(err))
-    }
-
     exitProcess(context.Background(), Log().Named("admin.api"))
     return nil
 }

@@ -1025,11 +1027,11 @@ func handleStop(w http.ResponseWriter, r *http.Request) error {
 // the operation at path according to method, using body and out as
 // needed. This is a low-level, unsynchronized function; most callers
 // will want to use changeConfig or readConfig instead. This requires a
-// read or write lock on currentCfgMu, depending on method (GET needs
+// read or write lock on currentCtxMu, depending on method (GET needs
 // only a read lock; all others need a write lock).
 func unsyncedConfigAccess(method, path string, body []byte, out io.Writer) error {
     var err error
-    var val interface{}
+    var val any

     // if there is a request body, decode it into the
     // variable that will be set in the config according

@@ -1066,16 +1068,16 @@ func unsyncedConfigAccess(method, path string, body []byte, out io.Writer) error
         parts = parts[:len(parts)-1]
     }

-    var ptr interface{} = rawCfg
+    var ptr any = rawCfg

 traverseLoop:
     for i, part := range parts {
         switch v := ptr.(type) {
-        case map[string]interface{}:
+        case map[string]any:
             // if the next part enters a slice, and the slice is our destination,
             // handle it specially (because appending to the slice copies the slice
             // header, which does not replace the original one like we want)
-            if arr, ok := v[part].([]interface{}); ok && i == len(parts)-2 {
+            if arr, ok := v[part].([]any); ok && i == len(parts)-2 {
                 var idx int
                 if method != http.MethodPost {
                     idxStr := parts[len(parts)-1]

@@ -1097,7 +1099,7 @@ traverseLoop:
                 }
             case http.MethodPost:
                 if ellipses {
-                    valArray, ok := val.([]interface{})
+                    valArray, ok := val.([]any)
                     if !ok {
                         return fmt.Errorf("final element is not an array")
                     }

@@ -1132,9 +1134,9 @@ traverseLoop:
             case http.MethodPost:
                 // if the part is an existing list, POST appends to
                 // it, otherwise it just sets or creates the value
-                if arr, ok := v[part].([]interface{}); ok {
+                if arr, ok := v[part].([]any); ok {
                     if ellipses {
-                        valArray, ok := val.([]interface{})
+                        valArray, ok := val.([]any)
                         if !ok {
                             return fmt.Errorf("final element is not an array")
                         }

@@ -1165,12 +1167,12 @@ traverseLoop:
             // might not exist yet; that's OK but we need to make them as
             // we go, while we still have a pointer from the level above
             if v[part] == nil && method == http.MethodPut {
-                v[part] = make(map[string]interface{})
+                v[part] = make(map[string]any)
             }
             ptr = v[part]
         }

-        case []interface{}:
+        case []any:
             partInt, err := strconv.Atoi(part)
             if err != nil {
                 return fmt.Errorf("[/%s] invalid array index '%s': %v",

@@ -1192,7 +1194,7 @@ traverseLoop:

 // RemoveMetaFields removes meta fields like "@id" from a JSON message
 // by using a simple regular expression. (An alternate way to do this
-// would be to delete them from the raw, map[string]interface{}
+// would be to delete them from the raw, map[string]any
 // representation as they are indexed, then iterate the index we made
 // and add them back after encoding as JSON, but this is simpler.)
 func RemoveMetaFields(rawJSON []byte) []byte {

@@ -1324,7 +1326,7 @@ const (
 )

 var bufPool = sync.Pool{
-    New: func() interface{} {
+    New: func() any {
         return new(bytes.Buffer)
     },
 }
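The new makeEtag helper above quotes the config path together with the hex digest so the admin API can emit a standards-shaped Etag value, using the same fnv-1a/32 hasher that etagHasher returns. A self-contained sketch of producing and comparing such a value (the makeEtag body mirrors the diff; the surrounding program is only illustrative):

package main

import (
    "fmt"
    "hash"
    "hash/fnv"
)

// makeEtag mirrors the helper added in the diff: a quoted
// "<path> <hex-of-hash>" string suitable for an Etag header.
func makeEtag(path string, h hash.Hash) string {
    return fmt.Sprintf(`"%s %x"`, path, h.Sum(nil))
}

func main() {
    body := []byte(`{"apps":{}}`)

    h := fnv.New32a() // same hasher family as etagHasher() in admin.go
    h.Write(body)

    etag := makeEtag("/config/", h)
    fmt.Println("Etag:", etag)

    // A client echoes this value back in If-Match; comparing a freshly
    // computed value with the presented one detects concurrent edits.
    presented := etag
    fmt.Println("matches:", presented == makeEtag("/config/", h))
}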
(admin API test file in package caddy; file name not shown in this extract)
@@ -15,8 +15,8 @@
 package caddy

 import (
-    "encoding/hex"
     "encoding/json"
+    "fmt"
     "net/http"
     "reflect"
     "sync"

@@ -115,7 +115,7 @@ func TestUnsyncedConfigAccess(t *testing.T) {
         }

         // decode the expected config so we can do a convenient DeepEqual
-        var expectedDecoded interface{}
+        var expectedDecoded any
         err = json.Unmarshal([]byte(tc.expect), &expectedDecoded)
         if err != nil {
             t.Fatalf("Test %d: Unmarshaling expected config: %v", i, err)

@@ -168,7 +168,7 @@ func TestETags(t *testing.T) {
     const key = "/" + rawConfigKey + "/apps/foo"

     // try update the config with the wrong etag
-    err := changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 1}}`), "/"+rawConfigKey+" not_an_etag", false)
+    err := changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 1}}`), fmt.Sprintf(`"/%s not_an_etag"`, rawConfigKey), false)
     if apiErr, ok := err.(APIError); !ok || apiErr.HTTPStatus != http.StatusPreconditionFailed {
         t.Fatalf("expected precondition failed; got %v", err)
     }

@@ -180,13 +180,13 @@ func TestETags(t *testing.T) {
     }

     // do the same update with the correct key
-    err = changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 1}`), key+" "+hex.EncodeToString(hash.Sum(nil)), false)
+    err = changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 1}`), makeEtag(key, hash), false)
     if err != nil {
         t.Fatalf("expected update to work; got %v", err)
     }

     // now try another update. The hash should no longer match and we should get precondition failed
-    err = changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 2}`), key+" "+hex.EncodeToString(hash.Sum(nil)), false)
+    err = changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 2}`), makeEtag(key, hash), false)
     if apiErr, ok := err.(APIError); !ok || apiErr.HTTPStatus != http.StatusPreconditionFailed {
         t.Fatalf("expected precondition failed; got %v", err)
     }
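Much of the mechanical churn in this commit is the rename of interface{} to any. Since Go 1.18, any is a predeclared alias for interface{}, so the rename is purely cosmetic and the two spellings are interchangeable, as this small sketch illustrates:

package main

import "fmt"

func describe(v any) string { // identical to func describe(v interface{}) string
    return fmt.Sprintf("%T: %v", v, v)
}

func main() {
    var a any = 42
    var i interface{} = a // assignable both ways; they are the same type
    fmt.Println(describe(i))          // int: 42
    fmt.Println(describe([]any{"x"})) // []interface {}: [x]
}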
caddy.go: 240 changes
@@ -102,20 +102,32 @@ func Run(cfg *Config) error {
 // if it is different from the current config or
 // forceReload is true.
 func Load(cfgJSON []byte, forceReload bool) error {
-    if err := notify.NotifyReloading(); err != nil {
-        Log().Error("unable to notify reloading to service manager", zap.Error(err))
+    if err := notify.Reloading(); err != nil {
+        Log().Error("unable to notify service manager of reloading state", zap.Error(err))
     }

+    // after reload, notify system of success or, if
+    // failure, update with status (error message)
+    var err error
     defer func() {
-        if err := notify.NotifyReadiness(); err != nil {
-            Log().Error("unable to notify readiness to service manager", zap.Error(err))
+        if err != nil {
+            if notifyErr := notify.Error(err, 0); notifyErr != nil {
+                Log().Error("unable to notify to service manager of reload error",
+                    zap.Error(notifyErr),
+                    zap.String("reload_err", err.Error()))
+            }
+            return
+        }
+        if err := notify.Ready(); err != nil {
+            Log().Error("unable to notify to service manager of ready state", zap.Error(err))
         }
     }()

-    err := changeConfig(http.MethodPost, "/"+rawConfigKey, cfgJSON, "", forceReload)
+    err = changeConfig(http.MethodPost, "/"+rawConfigKey, cfgJSON, "", forceReload)
     if errors.Is(err, errSameConfig) {
         err = nil // not really an error
     }

     return err
 }
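Load now reports the outcome of a reload to the service manager: it announces a reloading state up front and, from one deferred closure, reports either the error or readiness after changeConfig has run. Note that the diff also changes err := to err = so the deferred closure observes the final value of the outer variable. A minimal sketch of that defer-once-report pattern follows; the notifier interface here is hypothetical (Caddy's real calls are notify.Reloading, notify.Error, and notify.Ready from its notify package):

package main

import (
    "errors"
    "fmt"
)

// notifier is a stand-in for a service-manager notification API.
type notifier interface {
    Reloading() error
    Ready() error
    Error(err error) error
}

var errSameConfig = errors.New("config is unchanged")

func load(n notifier, apply func() error) error {
    _ = n.Reloading() // best effort; failures are only logged in Caddy

    // err must be the outer variable (assigned with =, not :=) so the
    // deferred closure sees the final result of apply().
    var err error
    defer func() {
        if err != nil {
            _ = n.Error(err)
            return
        }
        _ = n.Ready()
    }()

    err = apply() // e.g. changeConfig(...)
    if errors.Is(err, errSameConfig) {
        err = nil // not really an error
    }
    return err
}

type logNotifier struct{}

func (logNotifier) Reloading() error    { fmt.Println("RELOADING"); return nil }
func (logNotifier) Ready() error        { fmt.Println("READY"); return nil }
func (logNotifier) Error(e error) error { fmt.Println("ERROR:", e); return nil }

func main() {
    _ = load(logNotifier{}, func() error { return nil }) // prints RELOADING then READY
}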
@@ -127,7 +139,9 @@ func Load(cfgJSON []byte, forceReload bool) error {
 // forcefully reloaded, then errConfigUnchanged This function is safe for
 // concurrent use.
 // The ifMatchHeader can optionally be given a string of the format:
-// "<path> <hash>"
+//
+//    "<path> <hash>"
+//
 // where <path> is the absolute path in the config and <hash> is the expected hash of
 // the config at that path. If the hash in the ifMatchHeader doesn't match
 // the hash of the config, then an APIError with status 412 will be returned.

@@ -141,12 +155,20 @@ func changeConfig(method, path string, input []byte, ifMatchHeader string, force
         return fmt.Errorf("method not allowed")
     }

-    currentCfgMu.Lock()
-    defer currentCfgMu.Unlock()
+    currentCtxMu.Lock()
+    defer currentCtxMu.Unlock()

     if ifMatchHeader != "" {
+        // expect the first and last character to be quotes
+        if len(ifMatchHeader) < 2 || ifMatchHeader[0] != '"' || ifMatchHeader[len(ifMatchHeader)-1] != '"' {
+            return APIError{
+                HTTPStatus: http.StatusBadRequest,
+                Err:        fmt.Errorf("malformed If-Match header; expect quoted string"),
+            }
+        }
+
         // read out the parts
-        parts := strings.Fields(ifMatchHeader)
+        parts := strings.Fields(ifMatchHeader[1 : len(ifMatchHeader)-1])
         if len(parts) != 2 {
             return APIError{
                 HTTPStatus: http.StatusBadRequest,
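changeConfig now insists that the If-Match value is a quoted string before splitting it into its path and hash fields. A standalone sketch of that validation and parsing (the checks follow the hunk above; the program around them is illustrative):

package main

import (
    "fmt"
    "strings"
)

// parseIfMatch validates a quoted `"<path> <hash>"` value and returns
// its two fields, mirroring the checks added to changeConfig.
func parseIfMatch(ifMatch string) (path, hash string, err error) {
    // expect the first and last character to be quotes
    if len(ifMatch) < 2 || ifMatch[0] != '"' || ifMatch[len(ifMatch)-1] != '"' {
        return "", "", fmt.Errorf("malformed If-Match header; expect quoted string")
    }
    parts := strings.Fields(ifMatch[1 : len(ifMatch)-1])
    if len(parts) != 2 {
        return "", "", fmt.Errorf("malformed If-Match header; expect format \"<path> <hash>\"")
    }
    return parts[0], parts[1], nil
}

func main() {
    p, h, err := parseIfMatch(`"/config/apps/http 1a2b3c4d"`)
    fmt.Println(p, h, err) // /config/apps/http 1a2b3c4d <nil>

    _, _, err = parseIfMatch(`/config/apps/http 1a2b3c4d`) // missing quotes
    fmt.Println(err)
}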
@@ -209,7 +231,7 @@ func changeConfig(method, path string, input []byte, ifMatchHeader string, force
         // with what caddy is still running; we need to
         // unmarshal it again because it's likely that
         // pointers deep in our rawCfg map were modified
-        var oldCfg interface{}
+        var oldCfg any
         err2 := json.Unmarshal(rawCfgJSON, &oldCfg)
         if err2 != nil {
             err = fmt.Errorf("%v; additionally, restoring old config: %v", err, err2)

@@ -234,18 +256,18 @@ func changeConfig(method, path string, input []byte, ifMatchHeader string, force
 // readConfig traverses the current config to path
 // and writes its JSON encoding to out.
 func readConfig(path string, out io.Writer) error {
-    currentCfgMu.RLock()
-    defer currentCfgMu.RUnlock()
+    currentCtxMu.RLock()
+    defer currentCtxMu.RUnlock()
     return unsyncedConfigAccess(http.MethodGet, path, nil, out)
 }

 // indexConfigObjects recursively searches ptr for object fields named
 // "@id" and maps that ID value to the full configPath in the index.
 // This function is NOT safe for concurrent access; obtain a write lock
-// on currentCfgMu.
-func indexConfigObjects(ptr interface{}, configPath string, index map[string]string) error {
+// on currentCtxMu.
+func indexConfigObjects(ptr any, configPath string, index map[string]string) error {
     switch val := ptr.(type) {
-    case map[string]interface{}:
+    case map[string]any:
         for k, v := range val {
             if k == idKey {
                 switch idVal := v.(type) {

@@ -264,7 +286,7 @@ func indexConfigObjects(ptr interface{}, configPath string, index map[string]str
                 return err
             }
         }
-    case []interface{}:
+    case []any:
         // traverse each element of the array recursively
         for i := range val {
             err := indexConfigObjects(val[i], path.Join(configPath, strconv.Itoa(i)), index)

@@ -282,7 +304,7 @@ func indexConfigObjects(ptr interface{}, configPath string, index map[string]str
 // it as the new config, replacing any other current config.
 // It does NOT update the raw config state, as this is a
 // lower-level function; most callers will want to use Load
-// instead. A write lock on currentCfgMu is required! If
+// instead. A write lock on currentCtxMu is required! If
 // allowPersist is false, it will not be persisted to disk,
 // even if it is configured to.
 func unsyncedDecodeAndRun(cfgJSON []byte, allowPersist bool) error {

@@ -311,17 +333,17 @@ func unsyncedDecodeAndRun(cfgJSON []byte, allowPersist bool) error {
     }

     // run the new config and start all its apps
-    err = run(newCfg, true)
+    ctx, err := run(newCfg, true)
     if err != nil {
         return err
     }

-    // swap old config with the new one
-    oldCfg := currentCfg
-    currentCfg = newCfg
+    // swap old context (including its config) with the new one
+    oldCtx := currentCtx
+    currentCtx = ctx

     // Stop, Cleanup each old app
-    unsyncedStop(oldCfg)
+    unsyncedStop(oldCtx)

     // autosave a non-nil config, if not disabled
     if allowPersist &&

@@ -365,7 +387,7 @@ func unsyncedDecodeAndRun(cfgJSON []byte, allowPersist bool) error {
 // This is a low-level function; most callers
 // will want to use Run instead, which also
 // updates the config's raw state.
-func run(newCfg *Config, start bool) error {
+func run(newCfg *Config, start bool) (Context, error) {
     // because we will need to roll back any state
     // modifications if this function errors, we
     // keep a single error value and scope all
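The hunks above replace the *Config global with a Context value: run now returns the new Context, and unsyncedDecodeAndRun swaps it in under the write lock before stopping the old one. A compact sketch of that swap-then-stop pattern with a mutex-guarded global; all names here are illustrative, not Caddy's:

package main

import (
    "fmt"
    "sync"
)

type config struct{ name string }

// appContext bundles a config with the per-config state it owns,
// loosely mirroring how the diff stores a Context instead of a *Config.
type appContext struct{ cfg *config }

var (
    currentMu  sync.RWMutex
    currentCtx appContext
)

// swapIn installs the freshly started context and returns the old one,
// which the caller then stops outside of any request path.
func swapIn(newCtx appContext) (old appContext) {
    currentMu.Lock()
    defer currentMu.Unlock()
    old = currentCtx
    currentCtx = newCtx
    return old
}

func stop(ctx appContext) {
    if ctx.cfg == nil {
        return // nothing was running
    }
    fmt.Println("stopping", ctx.cfg.name)
}

func main() {
    old := swapIn(appContext{cfg: &config{name: "v2"}})
    stop(old) // no-op on first start; stops the previous config on reloads
}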
@@ -396,8 +418,8 @@ func run(newCfg *Config, start bool) error {
             cancel()

             // also undo any other state changes we made
-            if currentCfg != nil {
-                certmagic.Default.Storage = currentCfg.storage
+            if currentCtx.cfg != nil {
+                certmagic.Default.Storage = currentCtx.cfg.storage
             }
         }
     }()

@@ -409,14 +431,14 @@ func run(newCfg *Config, start bool) error {
     }
     err = newCfg.Logging.openLogs(ctx)
     if err != nil {
-        return err
+        return ctx, err
     }

     // start the admin endpoint (and stop any prior one)
     if start {
         err = replaceLocalAdminServer(newCfg)
         if err != nil {
-            return fmt.Errorf("starting caddy administration endpoint: %v", err)
+            return ctx, fmt.Errorf("starting caddy administration endpoint: %v", err)
         }
     }

@@ -445,7 +467,7 @@ func run(newCfg *Config, start bool) error {
         return nil
     }()
     if err != nil {
-        return err
+        return ctx, err
     }

     // Load and Provision each app and their submodules

@@ -458,18 +480,18 @@ func run(newCfg *Config, start bool) error {
         return nil
     }()
     if err != nil {
-        return err
+        return ctx, err
     }

     if !start {
-        return nil
+        return ctx, nil
     }

     // Provision any admin routers which may need to access
     // some of the other apps at runtime
     err = newCfg.Admin.provisionAdminRouters(ctx)
     if err != nil {
-        return err
+        return ctx, err
     }

     // Start

@@ -494,12 +516,12 @@ func run(newCfg *Config, start bool) error {
         return nil
     }()
     if err != nil {
-        return err
+        return ctx, err
     }

     // now that the user's config is running, finish setting up anything else,
     // such as remote admin endpoint, config loader, etc.
-    return finishSettingUp(ctx, newCfg)
+    return ctx, finishSettingUp(ctx, newCfg)
 }

 // finishSettingUp should be run after all apps have successfully started.

@@ -604,10 +626,10 @@ type ConfigLoader interface {
 // stop the others. Stop should only be called
 // if not replacing with a new config.
 func Stop() error {
-    currentCfgMu.Lock()
-    defer currentCfgMu.Unlock()
-    unsyncedStop(currentCfg)
-    currentCfg = nil
+    currentCtxMu.Lock()
+    defer currentCtxMu.Unlock()
+    unsyncedStop(currentCtx)
+    currentCtx = Context{}
     rawCfgJSON = nil
     rawCfgIndex = nil
     rawCfg[rawConfigKey] = nil

@@ -620,13 +642,13 @@ func Stop() error {
 // it is logged and the function continues stopping
 // the next app. This function assumes all apps in
 // cfg were successfully started first.
-func unsyncedStop(cfg *Config) {
-    if cfg == nil {
+func unsyncedStop(ctx Context) {
+    if ctx.cfg == nil {
         return
     }

     // stop each app
-    for name, a := range cfg.apps {
+    for name, a := range ctx.cfg.apps {
         err := a.Stop()
         if err != nil {
             log.Printf("[ERROR] stop %s: %v", name, err)

@@ -634,13 +656,13 @@ func unsyncedStop(cfg *Config) {
     }

     // clean up all modules
-    cfg.cancelFunc()
+    ctx.cfg.cancelFunc()
 }

 // Validate loads, provisions, and validates
 // cfg, but does not start running it.
 func Validate(cfg *Config) error {
-    err := run(cfg, false)
+    _, err := run(cfg, false)
     if err == nil {
         cfg.cancelFunc() // call Cleanup on all modules
     }

@@ -654,6 +676,10 @@ func Validate(cfg *Config) error {
 // Errors are logged along the way, and an appropriate exit
 // code is emitted.
 func exitProcess(ctx context.Context, logger *zap.Logger) {
+    if err := notify.Stopping(); err != nil {
+        Log().Error("unable to notify service manager of stopping state", zap.Error(err))
+    }
+
     if logger == nil {
         logger = Log()
     }
@@ -783,36 +809,106 @@ func InstanceID() (uuid.UUID, error) {
     return uuid.ParseBytes(uuidFileBytes)
 }

-// GoModule returns the build info of this Caddy
-// build from debug.BuildInfo (requires Go modules).
-// If no version information is available, a non-nil
-// value will still be returned, but with an
-// unknown version.
-func GoModule() *debug.Module {
-    var mod debug.Module
-    return goModule(&mod)
-}
-
-// goModule holds the actual implementation of GoModule.
-// Allocating debug.Module in GoModule() and passing a
-// reference to goModule enables mid-stack inlining.
-func goModule(mod *debug.Module) *debug.Module {
-    mod.Version = "unknown"
-    bi, ok := debug.ReadBuildInfo()
-    if ok {
-        mod.Path = bi.Main.Path
-        // The recommended way to build Caddy involves
-        // creating a separate main module, which
-        // TODO: track related Go issue: https://github.com/golang/go/issues/29228
-        // once that issue is fixed, we should just be able to use bi.Main... hopefully.
-        for _, dep := range bi.Deps {
-            if dep.Path == ImportPath {
-                return dep
-            }
-        }
-        return &bi.Main
-    }
-    return mod
-}
+// Version returns the Caddy version in a simple/short form, and
+// a full version string. The short form will not have spaces and
+// is intended for User-Agent strings and similar, but may be
+// omitting valuable information. Note that Caddy must be compiled
+// in a special way to properly embed complete version information.
+// First this function tries to get the version from the embedded
+// build info provided by go.mod dependencies; then it tries to
+// get info from embedded VCS information, which requires having
+// built Caddy from a git repository. If no version is available,
+// this function returns "(devel)" because Go uses that, but for
+// the simple form we change it to "unknown".
+//
+// See relevant Go issues: https://github.com/golang/go/issues/29228
+// and https://github.com/golang/go/issues/50603.
+//
+// This function is experimental and subject to change or removal.
+func Version() (simple, full string) {
+    // the currently-recommended way to build Caddy involves
+    // building it as a dependency so we can extract version
+    // information from go.mod tooling; once the upstream
+    // Go issues are fixed, we should just be able to use
+    // bi.Main... hopefully.
+    var module *debug.Module
+    bi, ok := debug.ReadBuildInfo()
+    if ok {
+        // find the Caddy module in the dependency list
+        for _, dep := range bi.Deps {
+            if dep.Path == ImportPath {
+                module = dep
+                break
+            }
+        }
+    }
+    if module != nil {
+        simple, full = module.Version, module.Version
+        if module.Sum != "" {
+            full += " " + module.Sum
+        }
+        if module.Replace != nil {
+            full += " => " + module.Replace.Path
+            if module.Replace.Version != "" {
+                simple = module.Replace.Version + "_custom"
+                full += "@" + module.Replace.Version
+            }
+            if module.Replace.Sum != "" {
+                full += " " + module.Replace.Sum
+            }
+        }
+    }
+
+    if full == "" {
+        var vcsRevision string
+        var vcsTime time.Time
+        var vcsModified bool
+        for _, setting := range bi.Settings {
+            switch setting.Key {
+            case "vcs.revision":
+                vcsRevision = setting.Value
+            case "vcs.time":
+                vcsTime, _ = time.Parse(time.RFC3339, setting.Value)
+            case "vcs.modified":
+                vcsModified, _ = strconv.ParseBool(setting.Value)
+            }
+        }
+
+        if vcsRevision != "" {
+            var modified string
+            if vcsModified {
+                modified = "+modified"
+            }
+            full = fmt.Sprintf("%s%s (%s)", vcsRevision, modified, vcsTime.Format(time.RFC822))
+            simple = vcsRevision
+
+            // use short checksum for simple, if hex-only
+            if _, err := hex.DecodeString(simple); err == nil {
+                simple = simple[:8]
+            }
+
+            // append date to simple since it can be convenient
+            // to know the commit date as part of the version
+            if !vcsTime.IsZero() {
+                simple += "-" + vcsTime.Format("20060102")
+            }
+        }
+    }
+
+    if simple == "" || simple == "(devel)" {
+        simple = "unknown"
+    }
+
+    return
+}
+
+// ActiveContext returns the currently-active context.
+// This function is experimental and might be changed
+// or removed in the future.
+func ActiveContext() Context {
+    currentCtxMu.RLock()
+    defer currentCtxMu.RUnlock()
+    return currentCtx
 }

 // CtxKey is a value type for use with context.WithValue.

@@ -820,18 +916,21 @@ type CtxKey string

 // This group of variables pertains to the current configuration.
 var (
-    // currentCfgMu protects everything in this var block.
-    currentCfgMu sync.RWMutex
+    // currentCtxMu protects everything in this var block.
+    currentCtxMu sync.RWMutex

-    // currentCfg is the currently-running configuration.
-    currentCfg *Config
+    // currentCtx is the root context for the currently-running
+    // configuration, which can be accessed through this value.
+    // If the Config contained in this value is not nil, then
+    // a config is currently active/running.
+    currentCtx Context

     // rawCfg is the current, generic-decoded configuration;
     // we initialize it as a map with one field ("config")
     // to maintain parity with the API endpoint and to avoid
     // the special case of having to access/mutate the variable
     // directly without traversing into it.
-    rawCfg = map[string]interface{}{
+    rawCfg = map[string]any{
         rawConfigKey: nil,
     }

@@ -850,4 +949,5 @@ var (
 var errSameConfig = errors.New("config is unchanged")

 // ImportPath is the package import path for Caddy core.
+// This identifier may be removed in the future.
 const ImportPath = "github.com/caddyserver/caddy/v2"
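The new Version function returns both a short form suitable for things like User-Agent strings and a verbose form for diagnostics. Assuming it is exported from the caddy package exactly as shown in the hunk above, usage looks roughly like this:

package main

import (
    "fmt"

    "github.com/caddyserver/caddy/v2"
)

func main() {
    simple, full := caddy.Version()

    // short form: no spaces, safe to embed in a User-Agent string
    fmt.Println("User-Agent: Caddy/" + simple)

    // full form: module version plus checksum, replace directive, or VCS info
    fmt.Println("build info:", full)
}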
(Caddyfile adapter source; file name not shown in this extract)
@@ -29,12 +29,12 @@ type Adapter struct {
 }

 // Adapt converts the Caddyfile config in body to Caddy JSON.
-func (a Adapter) Adapt(body []byte, options map[string]interface{}) ([]byte, []caddyconfig.Warning, error) {
+func (a Adapter) Adapt(body []byte, options map[string]any) ([]byte, []caddyconfig.Warning, error) {
     if a.ServerType == nil {
         return nil, nil, fmt.Errorf("no server type")
     }
     if options == nil {
-        options = make(map[string]interface{})
+        options = make(map[string]any)
     }

     filename, _ := options["filename"].(string)

@@ -116,7 +116,7 @@ type ServerType interface {
     // (e.g. CLI flags) and creates a Caddy
     // config, along with any warnings or
     // an error.
-    Setup([]ServerBlock, map[string]interface{}) (*caddy.Config, []caddyconfig.Warning, error)
+    Setup([]ServerBlock, map[string]any) (*caddy.Config, []caddyconfig.Warning, error)
 }

 // UnmarshalModule instantiates a module with the given ID and invokes
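The Adapt signature above now takes its options as map[string]any. For context, a minimal config adapter satisfying that interface could look like the following sketch; the adapter itself is hypothetical, and only the interface shape and the "filename" option key come from the diff:

package myadapter

import (
    "encoding/json"
    "fmt"

    "github.com/caddyserver/caddy/v2/caddyconfig"
)

// EchoAdapter is a toy adapter that expects the body to already be
// Caddy JSON and simply validates and re-emits it.
type EchoAdapter struct{}

// Adapt implements caddyconfig.Adapter.
func (EchoAdapter) Adapt(body []byte, options map[string]any) ([]byte, []caddyconfig.Warning, error) {
    var warnings []caddyconfig.Warning

    if !json.Valid(body) {
        return nil, nil, fmt.Errorf("body is not valid JSON")
    }
    if filename, _ := options["filename"].(string); filename == "" {
        warnings = append(warnings, caddyconfig.Warning{Message: "no filename provided in options"})
    }
    return body, warnings, nil
}

// Interface guard, in the style used throughout the Caddy codebase.
var _ caddyconfig.Adapter = (*EchoAdapter)(nil)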
(Caddyfile dispenser source; file name not shown in this extract)
@@ -146,15 +146,15 @@ func (d *Dispenser) NextLine() bool {
 //
 // Proper use of this method looks like this:
 //
 //	for nesting := d.Nesting(); d.NextBlock(nesting); {
 //	}
 //
 // However, in simple cases where it is known that the
 // Dispenser is new and has not already traversed state
 // by a loop over NextBlock(), this will do:
 //
 //	for d.NextBlock(0) {
 //	}
 //
 // As with other token parsing logic, a loop over
 // NextBlock() should be contained within a loop over
 (old and new lines in this hunk are textually identical in this extract; the difference is whitespace that was not preserved)

@@ -217,7 +217,7 @@ func (d *Dispenser) ValRaw() string {

 // ScalarVal gets value of the current token, converted to the closest
 // scalar type. If there is no token loaded, it returns nil.
-func (d *Dispenser) ScalarVal() interface{} {
+func (d *Dispenser) ScalarVal() any {
     if d.cursor < 0 || d.cursor >= len(d.tokens) {
         return nil
     }

@@ -412,7 +412,7 @@ func (d *Dispenser) Err(msg string) error {
 }

 // Errf is like Err, but for formatted error messages
-func (d *Dispenser) Errf(format string, args ...interface{}) error {
+func (d *Dispenser) Errf(format string, args ...any) error {
     return d.WrapErr(fmt.Errorf(format, args...))
 }
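The doc comment above spells out the canonical NextBlock loop. Here is a rough sketch of how a module's Caddyfile unmarshaler typically drives the Dispenser with that loop; the directive and fields are made up, and the Dispenser methods used are the documented ones (Next, NextArg, Val, Nesting, NextBlock, ArgErr, Errf):

package mymodule

import (
    "strconv"

    "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

// Gizmo is a hypothetical module configured like:
//
//	gizmo <name> {
//	    retries <n>
//	}
type Gizmo struct {
    Name    string `json:"name,omitempty"`
    Retries int    `json:"retries,omitempty"`
}

// UnmarshalCaddyfile populates Gizmo from Caddyfile tokens.
func (g *Gizmo) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
    for d.Next() { // consume the directive name ("gizmo")
        if d.NextArg() {
            g.Name = d.Val()
        }
        // the canonical nested-block loop from the comment above
        for nesting := d.Nesting(); d.NextBlock(nesting); {
            switch d.Val() {
            case "retries":
                if !d.NextArg() {
                    return d.ArgErr()
                }
                n, err := strconv.Atoi(d.Val())
                if err != nil {
                    return d.Errf("invalid retries value '%s': %v", d.Val(), err)
                }
                g.Retries = n
            default:
                return d.Errf("unrecognized subdirective '%s'", d.Val())
            }
        }
    }
    return nil
}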
(fuzz harness file in package caddyfile; file name not shown in this extract)
@@ -13,7 +13,6 @@
 // limitations under the License.

 //go:build gofuzz
-// +build gofuzz

 package caddyfile
(Caddyfile lexer source; file name not shown in this extract)
@@ -191,3 +191,7 @@ func Tokenize(input []byte, filename string) ([]Token, error) {
     }
     return tokens, nil
 }
+
+func (t Token) Quoted() bool {
+    return t.wasQuoted > 0
+}
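The new Quoted accessor simply exposes whether a token was written inside quotes in the source Caddyfile. One plausible use, sketched here as a hypothetical helper (only the Quoted method and the exported Text field are taken as given), is distinguishing an intentionally empty quoted argument from a bare empty token:

package mymodule

import "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"

// isExplicitEmpty reports whether tok is an intentionally empty
// argument, i.e. written as "" in the Caddyfile, rather than a
// token that merely happens to have no text.
func isExplicitEmpty(tok caddyfile.Token) bool {
    return tok.Text == "" && tok.Quoted()
}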
(second fuzz harness file in package caddyfile; file name not shown in this extract)
@@ -13,7 +13,6 @@
 // limitations under the License.

 //go:build gofuzz
-// +build gofuzz

 package caddyfile
(config adapter interface definitions; file name not shown in this extract)
@@ -24,7 +24,7 @@ import (
 // Adapter is a type which can adapt a configuration to Caddy JSON.
 // It returns the results and any warnings, or an error.
 type Adapter interface {
-    Adapt(body []byte, options map[string]interface{}) ([]byte, []Warning, error)
+    Adapt(body []byte, options map[string]any) ([]byte, []Warning, error)
 }

 // Warning represents a warning or notice related to conversion.

@@ -48,7 +48,7 @@ func (w Warning) String() string {
 // are converted to warnings. This is convenient when filling config
 // structs that require a json.RawMessage, without having to worry
 // about errors.
-func JSON(val interface{}, warnings *[]Warning) json.RawMessage {
+func JSON(val any, warnings *[]Warning) json.RawMessage {
     b, err := json.Marshal(val)
     if err != nil {
         if warnings != nil {

@@ -64,9 +64,9 @@ func JSON(val interface{}, warnings *[]Warning) json.RawMessage {
 // for encoding module values where the module name has to be described within
 // the object by a certain key; for example, `"handler": "file_server"` for a
 // file server HTTP handler (fieldName="handler" and fieldVal="file_server").
-// The val parameter must encode into a map[string]interface{} (i.e. it must be
+// The val parameter must encode into a map[string]any (i.e. it must be
 // a struct or map). Any errors are converted into warnings.
-func JSONModuleObject(val interface{}, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage {
+func JSONModuleObject(val any, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage {
     // encode to a JSON object first
     enc, err := json.Marshal(val)
     if err != nil {

@@ -77,7 +77,7 @@ func JSONModuleObject(val interface{}, fieldName, fieldVal string, warnings *[]W
     }

     // then decode the object
-    var tmp map[string]interface{}
+    var tmp map[string]any
     err = json.Unmarshal(enc, &tmp)
     if err != nil {
         if warnings != nil {
@ -17,6 +17,7 @@ package httpcaddyfile
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
|
"net/netip"
|
||||||
"reflect"
|
"reflect"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
@ -76,7 +77,7 @@ import (
|
||||||
// multiple addresses to the same lists of server blocks (a many:many mapping).
|
// multiple addresses to the same lists of server blocks (a many:many mapping).
|
||||||
// (Doing this is essentially a map-reduce technique.)
|
// (Doing this is essentially a map-reduce technique.)
|
||||||
func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBlock,
|
func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBlock,
|
||||||
options map[string]interface{}) (map[string][]serverBlock, error) {
|
options map[string]any) (map[string][]serverBlock, error) {
|
||||||
sbmap := make(map[string][]serverBlock)
|
sbmap := make(map[string][]serverBlock)
|
||||||
|
|
||||||
for i, sblock := range originalServerBlocks {
|
for i, sblock := range originalServerBlocks {
|
||||||
|
@ -183,8 +184,10 @@ func (st *ServerType) consolidateAddrMappings(addrToServerBlocks map[string][]se
|
||||||
return sbaddrs
|
return sbaddrs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// listenerAddrsForServerBlockKey essentially converts the Caddyfile
|
||||||
|
// site addresses to Caddy listener addresses for each server block.
|
||||||
func (st *ServerType) listenerAddrsForServerBlockKey(sblock serverBlock, key string,
|
func (st *ServerType) listenerAddrsForServerBlockKey(sblock serverBlock, key string,
|
||||||
options map[string]interface{}) ([]string, error) {
|
options map[string]any) ([]string, error) {
|
||||||
addr, err := ParseAddress(key)
|
addr, err := ParseAddress(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("parsing key: %v", err)
|
return nil, fmt.Errorf("parsing key: %v", err)
|
||||||
|
@ -232,12 +235,14 @@ func (st *ServerType) listenerAddrsForServerBlockKey(sblock serverBlock, key str
|
||||||
// use a map to prevent duplication
|
// use a map to prevent duplication
|
||||||
listeners := make(map[string]struct{})
|
listeners := make(map[string]struct{})
|
||||||
for _, host := range lnHosts {
|
for _, host := range lnHosts {
|
||||||
addr, err := caddy.ParseNetworkAddress(host)
|
// host can have network + host (e.g. "tcp6/localhost") but
|
||||||
if err == nil && addr.IsUnixNetwork() {
|
// will/should not have port information because this usually
|
||||||
listeners[host] = struct{}{}
|
// comes from the bind directive, so we append the port
|
||||||
} else {
|
addr, err := caddy.ParseNetworkAddress(host + ":" + lnPort)
|
||||||
listeners[host+":"+lnPort] = struct{}{}
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("parsing network address: %v", err)
|
||||||
}
|
}
|
||||||
|
listeners[addr.String()] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// now turn map into list
|
// now turn map into list
|
||||||
|
@ -350,9 +355,9 @@ func (a Address) Normalize() Address {
|
||||||
|
|
||||||
// ensure host is normalized if it's an IP address
|
// ensure host is normalized if it's an IP address
|
||||||
host := strings.TrimSpace(a.Host)
|
host := strings.TrimSpace(a.Host)
|
||||||
if ip := net.ParseIP(host); ip != nil {
|
if ip, err := netip.ParseAddr(host); err == nil {
|
||||||
if ipv6 := ip.To16(); ipv6 != nil && ipv6.DefaultMask() == nil {
|
if ip.Is6() && !ip.Is4() && !ip.Is4In6() {
|
||||||
host = ipv6.String()
|
host = ip.String()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -13,7 +13,6 @@
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
//go:build gofuzz
|
//go:build gofuzz
|
||||||
// +build gofuzz
|
|
||||||
|
|
||||||
package httpcaddyfile
|
package httpcaddyfile
|
||||||
|
|
||||||
|
|
|
@ -540,8 +540,13 @@ func parseVars(h Helper) (caddyhttp.MiddlewareHandler, error) {
|
||||||
|
|
||||||
// parseRedir parses the redir directive. Syntax:
|
// parseRedir parses the redir directive. Syntax:
|
||||||
//
|
//
|
||||||
// redir [<matcher>] <to> [<code>]
|
// redir [<matcher>] <to> [<code>]
|
||||||
//
|
//
|
||||||
|
// <code> can be "permanent" for 301, "temporary" for 302 (default),
|
||||||
|
// a placeholder, or any number in the 3xx range or 401. The special
|
||||||
|
// code "html" can be used to redirect only browser clients (will
|
||||||
|
// respond with HTTP 200 and no Location header; redirect is performed
|
||||||
|
// with JS and a meta tag).
|
||||||
func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
|
func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
|
||||||
if !h.Next() {
|
if !h.Next() {
|
||||||
return nil, h.ArgErr()
|
return nil, h.ArgErr()
|
||||||
|
@ -558,6 +563,7 @@ func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
var body string
|
var body string
|
||||||
|
var hdr http.Header
|
||||||
switch code {
|
switch code {
|
||||||
case "permanent":
|
case "permanent":
|
||||||
code = "301"
|
code = "301"
|
||||||
|
@ -578,7 +584,7 @@ func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
|
||||||
`
|
`
|
||||||
safeTo := html.EscapeString(to)
|
safeTo := html.EscapeString(to)
|
||||||
body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
|
body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
|
||||||
code = "302"
|
code = "200" // don't redirect non-browser clients
|
||||||
default:
|
default:
|
||||||
// Allow placeholders for the code
|
// Allow placeholders for the code
|
||||||
if strings.HasPrefix(code, "{") {
|
if strings.HasPrefix(code, "{") {
|
||||||
|
@ -601,9 +607,14 @@ func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// don't redirect non-browser clients
|
||||||
|
if code != "200" {
|
||||||
|
hdr = http.Header{"Location": []string{to}}
|
||||||
|
}
|
||||||
|
|
||||||
return caddyhttp.StaticResponse{
|
return caddyhttp.StaticResponse{
|
||||||
StatusCode: caddyhttp.WeakString(code),
|
StatusCode: caddyhttp.WeakString(code),
|
||||||
Headers: http.Header{"Location": []string{to}},
|
Headers: hdr,
|
||||||
Body: body,
|
Body: body,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -142,8 +142,8 @@ func RegisterGlobalOption(opt string, setupFunc UnmarshalGlobalFunc) {
|
||||||
type Helper struct {
|
type Helper struct {
|
||||||
*caddyfile.Dispenser
|
*caddyfile.Dispenser
|
||||||
// State stores intermediate variables during caddyfile adaptation.
|
// State stores intermediate variables during caddyfile adaptation.
|
||||||
State map[string]interface{}
|
State map[string]any
|
||||||
options map[string]interface{}
|
options map[string]any
|
||||||
warnings *[]caddyconfig.Warning
|
warnings *[]caddyconfig.Warning
|
||||||
matcherDefs map[string]caddy.ModuleMap
|
matcherDefs map[string]caddy.ModuleMap
|
||||||
parentBlock caddyfile.ServerBlock
|
parentBlock caddyfile.ServerBlock
|
||||||
|
@ -151,7 +151,7 @@ type Helper struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Option gets the option keyed by name.
|
// Option gets the option keyed by name.
|
||||||
func (h Helper) Option(name string) interface{} {
|
func (h Helper) Option(name string) any {
|
||||||
return h.options[name]
|
return h.options[name]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -175,7 +175,7 @@ func (h Helper) Caddyfiles() []string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// JSON converts val into JSON. Any errors are added to warnings.
|
// JSON converts val into JSON. Any errors are added to warnings.
|
||||||
func (h Helper) JSON(val interface{}) json.RawMessage {
|
func (h Helper) JSON(val any) json.RawMessage {
|
||||||
return caddyconfig.JSON(val, h.warnings)
|
return caddyconfig.JSON(val, h.warnings)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -375,7 +375,7 @@ type ConfigValue struct {
|
||||||
// The value to be used when building the config.
|
// The value to be used when building the config.
|
||||||
// Generally its type is associated with the
|
// Generally its type is associated with the
|
||||||
// name of the Class.
|
// name of the Class.
|
||||||
Value interface{}
|
Value any
|
||||||
|
|
||||||
directive string
|
directive string
|
||||||
}
|
}
|
||||||
|
@ -406,7 +406,7 @@ func sortRoutes(routes []ConfigValue) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// decode the path matchers, if there is just one of them
|
// decode the path matchers if there is just one matcher set
|
||||||
var iPM, jPM caddyhttp.MatchPath
|
var iPM, jPM caddyhttp.MatchPath
|
||||||
if len(iRoute.MatcherSetsRaw) == 1 {
|
if len(iRoute.MatcherSetsRaw) == 1 {
|
||||||
_ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &iPM)
|
_ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &iPM)
|
||||||
|
@ -415,38 +415,45 @@ func sortRoutes(routes []ConfigValue) {
|
||||||
_ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &jPM)
|
_ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &jPM)
|
||||||
}
|
}
|
||||||
|
|
||||||
// sort by longer path (more specific) first; missing path
|
// if there is only one path in the path matcher, sort by longer path
|
||||||
// matchers or multi-matchers are treated as zero-length paths
|
// (more specific) first; missing path matchers or multi-matchers are
|
||||||
|
// treated as zero-length paths
|
||||||
var iPathLen, jPathLen int
|
var iPathLen, jPathLen int
|
||||||
if len(iPM) > 0 {
|
if len(iPM) == 1 {
|
||||||
iPathLen = len(iPM[0])
|
iPathLen = len(iPM[0])
|
||||||
}
|
}
|
||||||
if len(jPM) > 0 {
|
if len(jPM) == 1 {
|
||||||
jPathLen = len(jPM[0])
|
jPathLen = len(jPM[0])
|
||||||
}
|
}
|
||||||
|
|
||||||
// some directives involve setting values which can overwrite
|
// some directives involve setting values which can overwrite
|
||||||
// eachother, so it makes most sense to reverse the order so
|
// each other, so it makes most sense to reverse the order so
|
||||||
// that the lease specific matcher is first; everything else
|
// that the lease specific matcher is first; everything else
|
||||||
// has most-specific matcher first
|
// has most-specific matcher first
|
||||||
if iDir == "vars" {
|
if iDir == "vars" {
|
||||||
// if both directives have no path matcher, use whichever one
|
// we can only confidently compare path lengths if both
|
||||||
// has no matcher first.
|
// directives have a single path to match (issue #5037)
|
||||||
if iPathLen == 0 && jPathLen == 0 {
|
if iPathLen > 0 && jPathLen > 0 {
|
||||||
return len(iRoute.MatcherSetsRaw) == 0 && len(jRoute.MatcherSetsRaw) > 0
|
// sort least-specific (shortest) path first
|
||||||
|
return iPathLen < jPathLen
|
||||||
}
|
}
|
||||||
|
|
||||||
// sort with the least-specific (shortest) path first
|
// if both directives don't have a single path to compare,
|
||||||
return iPathLen < jPathLen
|
// sort whichever one has no matcher first; if both have
|
||||||
|
// no matcher, sort equally (stable sort preserves order)
|
||||||
|
return len(iRoute.MatcherSetsRaw) == 0 && len(jRoute.MatcherSetsRaw) > 0
|
||||||
} else {
|
} else {
|
||||||
// if both directives have no path matcher, use whichever one
|
// we can only confidently compare path lengths if both
|
||||||
// has any kind of matcher defined first.
|
// directives have a single path to match (issue #5037)
|
||||||
if iPathLen == 0 && jPathLen == 0 {
|
if iPathLen > 0 && jPathLen > 0 {
|
||||||
return len(iRoute.MatcherSetsRaw) > 0 && len(jRoute.MatcherSetsRaw) == 0
|
// sort most-specific (longest) path first
|
||||||
|
return iPathLen > jPathLen
|
||||||
}
|
}
|
||||||
|
|
||||||
// sort with the most-specific (longest) path first
|
// if both directives don't have a single path to compare,
|
||||||
return iPathLen > jPathLen
|
// sort whichever one has a matcher first; if both have
|
||||||
|
// a matcher, sort equally (stable sort preserves order)
|
||||||
|
return len(iRoute.MatcherSetsRaw) > 0 && len(jRoute.MatcherSetsRaw) == 0
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -567,7 +574,7 @@ type (
|
||||||
// tokens from a global option. It is passed the tokens to parse and
|
// tokens from a global option. It is passed the tokens to parse and
|
||||||
// existing value from the previous instance of this global option
|
// existing value from the previous instance of this global option
|
||||||
// (if any). It returns the value to associate with this global option.
|
// (if any). It returns the value to associate with this global option.
|
||||||
UnmarshalGlobalFunc func(d *caddyfile.Dispenser, existingVal interface{}) (interface{}, error)
|
UnmarshalGlobalFunc func(d *caddyfile.Dispenser, existingVal any) (any, error)
|
||||||
)
|
)
|
||||||
|
|
||||||
var registeredDirectives = make(map[string]UnmarshalFunc)
|
var registeredDirectives = make(map[string]UnmarshalFunc)
|
||||||
|
|
|
@ -53,27 +53,18 @@ type ServerType struct {
|
||||||
|
|
||||||
// Setup makes a config from the tokens.
|
// Setup makes a config from the tokens.
|
||||||
func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
|
func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
|
||||||
options map[string]interface{}) (*caddy.Config, []caddyconfig.Warning, error) {
|
options map[string]any) (*caddy.Config, []caddyconfig.Warning, error) {
|
||||||
var warnings []caddyconfig.Warning
|
var warnings []caddyconfig.Warning
|
||||||
gc := counter{new(int)}
|
gc := counter{new(int)}
|
||||||
state := make(map[string]interface{})
|
state := make(map[string]any)
|
||||||
|
|
||||||
// load all the server blocks and associate them with a "pile"
|
// load all the server blocks and associate them with a "pile" of config values
|
||||||
// of config values; also prohibit duplicate keys because they
|
|
||||||
// can make a config confusing if more than one server block is
|
|
||||||
// chosen to handle a request - we actually will make each
|
|
||||||
// server block's route terminal so that only one will run
|
|
||||||
sbKeys := make(map[string]struct{})
|
|
||||||
originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks))
|
originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks))
|
||||||
for i, sblock := range inputServerBlocks {
|
for _, sblock := range inputServerBlocks {
|
||||||
for j, k := range sblock.Keys {
|
for j, k := range sblock.Keys {
|
||||||
if j == 0 && strings.HasPrefix(k, "@") {
|
if j == 0 && strings.HasPrefix(k, "@") {
|
||||||
return nil, warnings, fmt.Errorf("cannot define a matcher outside of a site block: '%s'", k)
|
return nil, warnings, fmt.Errorf("cannot define a matcher outside of a site block: '%s'", k)
|
||||||
}
|
}
|
||||||
if _, ok := sbKeys[k]; ok {
|
|
||||||
return nil, warnings, fmt.Errorf("duplicate site address not allowed: '%s' in %v (site block %d, key %d)", k, sblock.Keys, i, j)
|
|
||||||
}
|
|
||||||
sbKeys[k] = struct{}{}
|
|
||||||
}
|
}
|
||||||
originalServerBlocks = append(originalServerBlocks, serverBlock{
|
originalServerBlocks = append(originalServerBlocks, serverBlock{
|
||||||
block: sblock,
|
block: sblock,
|
||||||
|
@ -100,14 +91,17 @@ func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
|
||||||
search *regexp.Regexp
|
search *regexp.Regexp
|
||||||
replace string
|
replace string
|
||||||
}{
|
}{
|
||||||
{regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"},
|
|
||||||
{regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"},
|
|
||||||
{regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"},
|
{regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"},
|
||||||
|
{regexp.MustCompile(`{cookie\.([\w-]*)}`), "{http.request.cookie.$1}"},
|
||||||
|
{regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"},
|
||||||
{regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"},
|
{regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"},
|
||||||
|
{regexp.MustCompile(`{file\.([\w-]*)}`), "{http.request.uri.path.file.$1}"},
|
||||||
|
{regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"},
|
||||||
{regexp.MustCompile(`{re\.([\w-]*)\.([\w-]*)}`), "{http.regexp.$1.$2}"},
|
{regexp.MustCompile(`{re\.([\w-]*)\.([\w-]*)}`), "{http.regexp.$1.$2}"},
|
||||||
{regexp.MustCompile(`{vars\.([\w-]*)}`), "{http.vars.$1}"},
|
{regexp.MustCompile(`{vars\.([\w-]*)}`), "{http.vars.$1}"},
|
||||||
{regexp.MustCompile(`{rp\.([\w-\.]*)}`), "{http.reverse_proxy.$1}"},
|
{regexp.MustCompile(`{rp\.([\w-\.]*)}`), "{http.reverse_proxy.$1}"},
|
||||||
{regexp.MustCompile(`{err\.([\w-\.]*)}`), "{http.error.$1}"},
|
{regexp.MustCompile(`{err\.([\w-\.]*)}`), "{http.error.$1}"},
|
||||||
|
{regexp.MustCompile(`{file_match\.([\w-]*)}`), "{http.matchers.file.$1}"},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, sb := range originalServerBlocks {
|
for _, sb := range originalServerBlocks {
|
||||||
|
@ -199,10 +193,11 @@ func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
|
||||||
|
|
||||||
// now that each server is configured, make the HTTP app
|
// now that each server is configured, make the HTTP app
|
||||||
httpApp := caddyhttp.App{
|
httpApp := caddyhttp.App{
|
||||||
HTTPPort: tryInt(options["http_port"], &warnings),
|
HTTPPort: tryInt(options["http_port"], &warnings),
|
||||||
HTTPSPort: tryInt(options["https_port"], &warnings),
|
HTTPSPort: tryInt(options["https_port"], &warnings),
|
||||||
GracePeriod: tryDuration(options["grace_period"], &warnings),
|
GracePeriod: tryDuration(options["grace_period"], &warnings),
|
||||||
Servers: servers,
|
ShutdownDelay: tryDuration(options["shutdown_delay"], &warnings),
|
||||||
|
Servers: servers,
|
||||||
}
|
}
|
||||||
|
|
||||||
// then make the TLS app
|
// then make the TLS app
|
||||||
|
@ -322,14 +317,14 @@ func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
|
||||||
// which is expected to be the first server block if it has zero
|
// which is expected to be the first server block if it has zero
|
||||||
// keys. It returns the updated list of server blocks with the
|
// keys. It returns the updated list of server blocks with the
|
||||||
// global options block removed, and updates options accordingly.
|
// global options block removed, and updates options accordingly.
|
||||||
func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]interface{}) ([]serverBlock, error) {
|
func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]any) ([]serverBlock, error) {
|
||||||
if len(serverBlocks) == 0 || len(serverBlocks[0].block.Keys) > 0 {
|
if len(serverBlocks) == 0 || len(serverBlocks[0].block.Keys) > 0 {
|
||||||
return serverBlocks, nil
|
return serverBlocks, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, segment := range serverBlocks[0].block.Segments {
|
for _, segment := range serverBlocks[0].block.Segments {
|
||||||
opt := segment.Directive()
|
opt := segment.Directive()
|
||||||
var val interface{}
|
var val any
|
||||||
var err error
|
var err error
|
||||||
disp := caddyfile.NewDispenser(segment)
|
disp := caddyfile.NewDispenser(segment)
|
||||||
|
|
||||||
|
@ -399,7 +394,7 @@ func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options
|
||||||
// to server blocks. Each pairing is essentially a server definition.
|
// to server blocks. Each pairing is essentially a server definition.
|
||||||
func (st *ServerType) serversFromPairings(
|
func (st *ServerType) serversFromPairings(
|
||||||
pairings []sbAddrAssociation,
|
pairings []sbAddrAssociation,
|
||||||
options map[string]interface{},
|
options map[string]any,
|
||||||
warnings *[]caddyconfig.Warning,
|
warnings *[]caddyconfig.Warning,
|
||||||
groupCounter counter,
|
groupCounter counter,
|
||||||
) (map[string]*caddyhttp.Server, error) {
|
) (map[string]*caddyhttp.Server, error) {
|
||||||
|
@ -420,6 +415,23 @@ func (st *ServerType) serversFromPairings(
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, p := range pairings {
|
for i, p := range pairings {
|
||||||
|
// detect ambiguous site definitions: server blocks which
|
||||||
|
// have the same host bound to the same interface (listener
|
||||||
|
// address), otherwise their routes will improperly be added
|
||||||
|
// to the same server (see issue #4635)
|
||||||
|
for j, sblock1 := range p.serverBlocks {
|
||||||
|
for _, key := range sblock1.block.Keys {
|
||||||
|
for k, sblock2 := range p.serverBlocks {
|
||||||
|
if k == j {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if sliceContains(sblock2.block.Keys, key) {
|
||||||
|
return nil, fmt.Errorf("ambiguous site definition: %s", key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
srv := &caddyhttp.Server{
|
srv := &caddyhttp.Server{
|
||||||
Listen: p.addresses,
|
Listen: p.addresses,
|
||||||
}
|
}
|
||||||
|
@ -717,7 +729,7 @@ func (st *ServerType) serversFromPairings(
|
||||||
return servers, nil
|
return servers, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock, options map[string]interface{}) error {
|
func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock, options map[string]any) error {
|
||||||
httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
|
httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
|
||||||
if hp, ok := options["http_port"].(int); ok {
|
if hp, ok := options["http_port"].(int); ok {
|
||||||
httpPort = strconv.Itoa(hp)
|
httpPort = strconv.Itoa(hp)
|
||||||
|
@ -943,7 +955,7 @@ func appendSubrouteToRouteList(routeList caddyhttp.RouteList,
|
||||||
func buildSubroute(routes []ConfigValue, groupCounter counter) (*caddyhttp.Subroute, error) {
|
func buildSubroute(routes []ConfigValue, groupCounter counter) (*caddyhttp.Subroute, error) {
|
||||||
for _, val := range routes {
|
for _, val := range routes {
|
||||||
if !directiveIsOrdered(val.directive) {
|
if !directiveIsOrdered(val.directive) {
|
||||||
return nil, fmt.Errorf("directive '%s' is not ordered, so it cannot be used here", val.directive)
|
return nil, fmt.Errorf("directive '%s' is not an ordered HTTP handler, so it cannot be used here", val.directive)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1191,6 +1203,7 @@ func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.Mod
|
||||||
|
|
||||||
func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
|
func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
|
||||||
for d.Next() {
|
for d.Next() {
|
||||||
|
// this is the "name" for "named matchers"
|
||||||
definitionName := d.Val()
|
definitionName := d.Val()
|
||||||
|
|
||||||
if _, ok := matchers[definitionName]; ok {
|
if _, ok := matchers[definitionName]; ok {
|
||||||
|
@ -1198,16 +1211,9 @@ func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.M
|
||||||
}
|
}
|
||||||
matchers[definitionName] = make(caddy.ModuleMap)
|
matchers[definitionName] = make(caddy.ModuleMap)
|
||||||
|
|
||||||
// in case there are multiple instances of the same matcher, concatenate
|
// given a matcher name and the tokens following it, parse
|
||||||
// their tokens (we expect that UnmarshalCaddyfile should be able to
|
// the tokens as a matcher module and record it
|
||||||
// handle more than one segment); otherwise, we'd overwrite other
|
makeMatcher := func(matcherName string, tokens []caddyfile.Token) error {
|
||||||
// instances of the matcher in this set
|
|
||||||
tokensByMatcherName := make(map[string][]caddyfile.Token)
|
|
||||||
for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
|
|
||||||
matcherName := d.Val()
|
|
||||||
tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
|
|
||||||
}
|
|
||||||
for matcherName, tokens := range tokensByMatcherName {
|
|
||||||
mod, err := caddy.GetModule("http.matchers." + matcherName)
|
mod, err := caddy.GetModule("http.matchers." + matcherName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
|
return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
|
||||||
|
@ -1225,6 +1231,39 @@ func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.M
|
||||||
return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
|
return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
|
||||||
}
|
}
|
||||||
matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
|
matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the next token is quoted, we can assume it's not a matcher name
|
||||||
|
// and that it's probably an 'expression' matcher
|
||||||
|
if d.NextArg() {
|
||||||
|
if d.Token().Quoted() {
|
||||||
|
err := makeMatcher("expression", []caddyfile.Token{d.Token()})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// if it wasn't quoted, then we need to rewind after calling
|
||||||
|
// d.NextArg() so the below properly grabs the matcher name
|
||||||
|
d.Prev()
|
||||||
|
}
|
||||||
|
|
||||||
|
// in case there are multiple instances of the same matcher, concatenate
|
||||||
|
// their tokens (we expect that UnmarshalCaddyfile should be able to
|
||||||
|
// handle more than one segment); otherwise, we'd overwrite other
|
||||||
|
// instances of the matcher in this set
|
||||||
|
tokensByMatcherName := make(map[string][]caddyfile.Token)
|
||||||
|
for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
|
||||||
|
matcherName := d.Val()
|
||||||
|
tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
|
||||||
|
}
|
||||||
|
for matcherName, tokens := range tokensByMatcherName {
|
||||||
|
err := makeMatcher(matcherName, tokens)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -1296,7 +1335,7 @@ func WasReplacedPlaceholderShorthand(token string) string {
|
||||||
|
|
||||||
// tryInt tries to convert val to an integer. If it fails,
|
// tryInt tries to convert val to an integer. If it fails,
|
||||||
// it downgrades the error to a warning and returns 0.
|
// it downgrades the error to a warning and returns 0.
|
||||||
func tryInt(val interface{}, warnings *[]caddyconfig.Warning) int {
|
func tryInt(val any, warnings *[]caddyconfig.Warning) int {
|
||||||
intVal, ok := val.(int)
|
intVal, ok := val.(int)
|
||||||
if val != nil && !ok && warnings != nil {
|
if val != nil && !ok && warnings != nil {
|
||||||
*warnings = append(*warnings, caddyconfig.Warning{Message: "not an integer type"})
|
*warnings = append(*warnings, caddyconfig.Warning{Message: "not an integer type"})
|
||||||
|
@ -1304,7 +1343,7 @@ func tryInt(val interface{}, warnings *[]caddyconfig.Warning) int {
|
||||||
return intVal
|
return intVal
|
||||||
}
|
}
|
||||||
|
|
||||||
func tryString(val interface{}, warnings *[]caddyconfig.Warning) string {
|
func tryString(val any, warnings *[]caddyconfig.Warning) string {
|
||||||
stringVal, ok := val.(string)
|
stringVal, ok := val.(string)
|
||||||
if val != nil && !ok && warnings != nil {
|
if val != nil && !ok && warnings != nil {
|
||||||
*warnings = append(*warnings, caddyconfig.Warning{Message: "not a string type"})
|
*warnings = append(*warnings, caddyconfig.Warning{Message: "not a string type"})
|
||||||
|
@ -1312,7 +1351,7 @@ func tryString(val interface{}, warnings *[]caddyconfig.Warning) string {
|
||||||
return stringVal
|
return stringVal
|
||||||
}
|
}
|
||||||
|
|
||||||
func tryDuration(val interface{}, warnings *[]caddyconfig.Warning) caddy.Duration {
|
func tryDuration(val any, warnings *[]caddyconfig.Warning) caddy.Duration {
|
||||||
durationVal, ok := val.(caddy.Duration)
|
durationVal, ok := val.(caddy.Duration)
|
||||||
if val != nil && !ok && warnings != nil {
|
if val != nil && !ok && warnings != nil {
|
||||||
*warnings = append(*warnings, caddyconfig.Warning{Message: "not a duration type"})
|
*warnings = append(*warnings, caddyconfig.Warning{Message: "not a duration type"})
|
||||||
|
|
|
@ -31,11 +31,13 @@ func init() {
|
||||||
RegisterGlobalOption("https_port", parseOptHTTPSPort)
|
RegisterGlobalOption("https_port", parseOptHTTPSPort)
|
||||||
RegisterGlobalOption("default_bind", parseOptStringList)
|
RegisterGlobalOption("default_bind", parseOptStringList)
|
||||||
RegisterGlobalOption("grace_period", parseOptDuration)
|
RegisterGlobalOption("grace_period", parseOptDuration)
|
||||||
|
RegisterGlobalOption("shutdown_delay", parseOptDuration)
|
||||||
RegisterGlobalOption("default_sni", parseOptSingleString)
|
RegisterGlobalOption("default_sni", parseOptSingleString)
|
||||||
RegisterGlobalOption("order", parseOptOrder)
|
RegisterGlobalOption("order", parseOptOrder)
|
||||||
RegisterGlobalOption("storage", parseOptStorage)
|
RegisterGlobalOption("storage", parseOptStorage)
|
||||||
RegisterGlobalOption("storage_clean_interval", parseOptDuration)
|
RegisterGlobalOption("storage_clean_interval", parseOptDuration)
|
||||||
RegisterGlobalOption("renew_interval", parseOptDuration)
|
RegisterGlobalOption("renew_interval", parseOptDuration)
|
||||||
|
RegisterGlobalOption("ocsp_interval", parseOptDuration)
|
||||||
RegisterGlobalOption("acme_ca", parseOptSingleString)
|
RegisterGlobalOption("acme_ca", parseOptSingleString)
|
||||||
RegisterGlobalOption("acme_ca_root", parseOptSingleString)
|
RegisterGlobalOption("acme_ca_root", parseOptSingleString)
|
||||||
RegisterGlobalOption("acme_dns", parseOptACMEDNS)
|
RegisterGlobalOption("acme_dns", parseOptACMEDNS)
|
||||||
|
@ -54,9 +56,9 @@ func init() {
|
||||||
RegisterGlobalOption("preferred_chains", parseOptPreferredChains)
|
RegisterGlobalOption("preferred_chains", parseOptPreferredChains)
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptTrue(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { return true, nil }
|
func parseOptTrue(d *caddyfile.Dispenser, _ any) (any, error) { return true, nil }
|
||||||
|
|
||||||
func parseOptHTTPPort(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptHTTPPort(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
var httpPort int
|
var httpPort int
|
||||||
for d.Next() {
|
for d.Next() {
|
||||||
var httpPortStr string
|
var httpPortStr string
|
||||||
|
@ -72,7 +74,7 @@ func parseOptHTTPPort(d *caddyfile.Dispenser, _ interface{}) (interface{}, error
|
||||||
return httpPort, nil
|
return httpPort, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptHTTPSPort(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptHTTPSPort(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
var httpsPort int
|
var httpsPort int
|
||||||
for d.Next() {
|
for d.Next() {
|
||||||
var httpsPortStr string
|
var httpsPortStr string
|
||||||
|
@ -88,7 +90,7 @@ func parseOptHTTPSPort(d *caddyfile.Dispenser, _ interface{}) (interface{}, erro
|
||||||
return httpsPort, nil
|
return httpsPort, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptOrder(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
newOrder := directiveOrder
|
newOrder := directiveOrder
|
||||||
|
|
||||||
for d.Next() {
|
for d.Next() {
|
||||||
|
@ -164,7 +166,7 @@ func parseOptOrder(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
||||||
return newOrder, nil
|
return newOrder, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptStorage(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptStorage(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
if !d.Next() { // consume option name
|
if !d.Next() { // consume option name
|
||||||
return nil, d.ArgErr()
|
return nil, d.ArgErr()
|
||||||
}
|
}
|
||||||
|
@ -183,7 +185,7 @@ func parseOptStorage(d *caddyfile.Dispenser, _ interface{}) (interface{}, error)
|
||||||
return storage, nil
|
return storage, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptDuration(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptDuration(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
if !d.Next() { // consume option name
|
if !d.Next() { // consume option name
|
||||||
return nil, d.ArgErr()
|
return nil, d.ArgErr()
|
||||||
}
|
}
|
||||||
|
@ -197,7 +199,7 @@ func parseOptDuration(d *caddyfile.Dispenser, _ interface{}) (interface{}, error
|
||||||
return caddy.Duration(dur), nil
|
return caddy.Duration(dur), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptACMEDNS(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptACMEDNS(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
if !d.Next() { // consume option name
|
if !d.Next() { // consume option name
|
||||||
return nil, d.ArgErr()
|
return nil, d.ArgErr()
|
||||||
}
|
}
|
||||||
|
@ -216,7 +218,7 @@ func parseOptACMEDNS(d *caddyfile.Dispenser, _ interface{}) (interface{}, error)
|
||||||
return prov, nil
|
return prov, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptACMEEAB(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptACMEEAB(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
eab := new(acme.EAB)
|
eab := new(acme.EAB)
|
||||||
for d.Next() {
|
for d.Next() {
|
||||||
if d.NextArg() {
|
if d.NextArg() {
|
||||||
|
@ -244,7 +246,7 @@ func parseOptACMEEAB(d *caddyfile.Dispenser, _ interface{}) (interface{}, error)
|
||||||
return eab, nil
|
return eab, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptCertIssuer(d *caddyfile.Dispenser, existing interface{}) (interface{}, error) {
|
func parseOptCertIssuer(d *caddyfile.Dispenser, existing any) (any, error) {
|
||||||
var issuers []certmagic.Issuer
|
var issuers []certmagic.Issuer
|
||||||
if existing != nil {
|
if existing != nil {
|
||||||
issuers = existing.([]certmagic.Issuer)
|
issuers = existing.([]certmagic.Issuer)
|
||||||
|
@ -267,7 +269,7 @@ func parseOptCertIssuer(d *caddyfile.Dispenser, existing interface{}) (interface
|
||||||
return issuers, nil
|
return issuers, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptSingleString(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptSingleString(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
d.Next() // consume parameter name
|
d.Next() // consume parameter name
|
||||||
if !d.Next() {
|
if !d.Next() {
|
||||||
return "", d.ArgErr()
|
return "", d.ArgErr()
|
||||||
|
@ -279,7 +281,7 @@ func parseOptSingleString(d *caddyfile.Dispenser, _ interface{}) (interface{}, e
|
||||||
return val, nil
|
return val, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptStringList(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptStringList(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
d.Next() // consume parameter name
|
d.Next() // consume parameter name
|
||||||
val := d.RemainingArgs()
|
val := d.RemainingArgs()
|
||||||
if len(val) == 0 {
|
if len(val) == 0 {
|
||||||
|
@ -288,7 +290,7 @@ func parseOptStringList(d *caddyfile.Dispenser, _ interface{}) (interface{}, err
|
||||||
return val, nil
|
return val, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptAdmin(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
adminCfg := new(caddy.AdminConfig)
|
adminCfg := new(caddy.AdminConfig)
|
||||||
for d.Next() {
|
for d.Next() {
|
||||||
if d.NextArg() {
|
if d.NextArg() {
|
||||||
|
@ -324,7 +326,7 @@ func parseOptAdmin(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
||||||
return adminCfg, nil
|
return adminCfg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptOnDemand(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
var ond *caddytls.OnDemandConfig
|
var ond *caddytls.OnDemandConfig
|
||||||
for d.Next() {
|
for d.Next() {
|
||||||
if d.NextArg() {
|
if d.NextArg() {
|
||||||
|
@ -384,7 +386,7 @@ func parseOptOnDemand(d *caddyfile.Dispenser, _ interface{}) (interface{}, error
|
||||||
return ond, nil
|
return ond, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
d.Next() // consume parameter name
|
d.Next() // consume parameter name
|
||||||
if !d.Next() {
|
if !d.Next() {
|
||||||
return "", d.ArgErr()
|
return "", d.ArgErr()
|
||||||
|
@ -399,11 +401,11 @@ func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ interface{}) (interface{}, erro
|
||||||
return val, nil
|
return val, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseServerOptions(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseServerOptions(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
return unmarshalCaddyfileServerOptions(d)
|
return unmarshalCaddyfileServerOptions(d)
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOCSPStaplingOptions(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOCSPStaplingOptions(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
d.Next() // consume option name
|
d.Next() // consume option name
|
||||||
var val string
|
var val string
|
||||||
if !d.AllArgs(&val) {
|
if !d.AllArgs(&val) {
|
||||||
|
@ -429,8 +431,7 @@ func parseOCSPStaplingOptions(d *caddyfile.Dispenser, _ interface{}) (interface{
|
||||||
//
|
//
|
||||||
// When the name argument is unspecified, this directive modifies the default
|
// When the name argument is unspecified, this directive modifies the default
|
||||||
// logger.
|
// logger.
|
||||||
//
|
func parseLogOptions(d *caddyfile.Dispenser, existingVal any) (any, error) {
|
||||||
func parseLogOptions(d *caddyfile.Dispenser, existingVal interface{}) (interface{}, error) {
|
|
||||||
currentNames := make(map[string]struct{})
|
currentNames := make(map[string]struct{})
|
||||||
if existingVal != nil {
|
if existingVal != nil {
|
||||||
innerVals, ok := existingVal.([]ConfigValue)
|
innerVals, ok := existingVal.([]ConfigValue)
|
||||||
|
@ -465,7 +466,7 @@ func parseLogOptions(d *caddyfile.Dispenser, existingVal interface{}) (interface
|
||||||
return configValues, nil
|
return configValues, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptPreferredChains(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) {
|
func parseOptPreferredChains(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||||
d.Next()
|
d.Next()
|
||||||
return caddytls.ParseCaddyfilePreferredChainsOptions(d)
|
return caddytls.ParseCaddyfilePreferredChainsOptions(d)
|
||||||
}
|
}
|
||||||
|
|
|
@ -45,8 +45,7 @@ func init() {
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// When the CA ID is unspecified, 'local' is assumed.
|
// When the CA ID is unspecified, 'local' is assumed.
|
||||||
//
|
func parsePKIApp(d *caddyfile.Dispenser, existingVal any) (any, error) {
|
||||||
func parsePKIApp(d *caddyfile.Dispenser, existingVal interface{}) (interface{}, error) {
|
|
||||||
pki := &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}
|
pki := &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}
|
||||||
|
|
||||||
for d.Next() {
|
for d.Next() {
|
||||||
|
@ -160,7 +159,7 @@ func parsePKIApp(d *caddyfile.Dispenser, existingVal interface{}) (interface{},
|
||||||
|
|
||||||
func (st ServerType) buildPKIApp(
|
func (st ServerType) buildPKIApp(
|
||||||
pairings []sbAddrAssociation,
|
pairings []sbAddrAssociation,
|
||||||
options map[string]interface{},
|
options map[string]any,
|
||||||
warnings []caddyconfig.Warning,
|
warnings []caddyconfig.Warning,
|
||||||
) (*caddypki.PKI, []caddyconfig.Warning, error) {
|
) (*caddypki.PKI, []caddyconfig.Warning, error) {
|
||||||
|
|
||||||
|
|
|
@ -38,14 +38,14 @@ type serverOptions struct {
|
||||||
ReadHeaderTimeout caddy.Duration
|
ReadHeaderTimeout caddy.Duration
|
||||||
WriteTimeout caddy.Duration
|
WriteTimeout caddy.Duration
|
||||||
IdleTimeout caddy.Duration
|
IdleTimeout caddy.Duration
|
||||||
|
KeepAliveInterval caddy.Duration
|
||||||
MaxHeaderBytes int
|
MaxHeaderBytes int
|
||||||
AllowH2C bool
|
Protocols []string
|
||||||
ExperimentalHTTP3 bool
|
|
||||||
StrictSNIHost *bool
|
StrictSNIHost *bool
|
||||||
ShouldLogCredentials bool
|
ShouldLogCredentials bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (interface{}, error) {
|
func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) {
|
||||||
serverOpts := serverOptions{}
|
serverOpts := serverOptions{}
|
||||||
for d.Next() {
|
for d.Next() {
|
||||||
if d.NextArg() {
|
if d.NextArg() {
|
||||||
|
@ -123,6 +123,15 @@ func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (interface{}, error
|
||||||
return nil, d.Errf("unrecognized timeouts option '%s'", d.Val())
|
return nil, d.Errf("unrecognized timeouts option '%s'", d.Val())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
case "keepalive_interval":
|
||||||
|
if !d.NextArg() {
|
||||||
|
return nil, d.ArgErr()
|
||||||
|
}
|
||||||
|
dur, err := caddy.ParseDuration(d.Val())
|
||||||
|
if err != nil {
|
||||||
|
return nil, d.Errf("parsing keepalive interval duration: %v", err)
|
||||||
|
}
|
||||||
|
serverOpts.KeepAliveInterval = caddy.Duration(dur)
|
||||||
|
|
||||||
case "max_header_size":
|
case "max_header_size":
|
||||||
var sizeStr string
|
var sizeStr string
|
||||||
|
@ -141,22 +150,51 @@ func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (interface{}, error
|
||||||
}
|
}
|
||||||
serverOpts.ShouldLogCredentials = true
|
serverOpts.ShouldLogCredentials = true
|
||||||
|
|
||||||
|
case "protocols":
|
||||||
|
protos := d.RemainingArgs()
|
||||||
|
for _, proto := range protos {
|
||||||
|
if proto != "h1" && proto != "h2" && proto != "h2c" && proto != "h3" {
|
||||||
|
return nil, d.Errf("unknown protocol '%s': expected h1, h2, h2c, or h3", proto)
|
||||||
|
}
|
||||||
|
if sliceContains(serverOpts.Protocols, proto) {
|
||||||
|
return nil, d.Errf("protocol %s specified more than once", proto)
|
||||||
|
}
|
||||||
|
serverOpts.Protocols = append(serverOpts.Protocols, proto)
|
||||||
|
}
|
||||||
|
if d.NextBlock(0) {
|
||||||
|
return nil, d.ArgErr()
|
||||||
|
}
|
||||||
|
|
||||||
|
case "strict_sni_host":
|
||||||
|
if d.NextArg() && d.Val() != "insecure_off" && d.Val() != "on" {
|
||||||
|
return nil, d.Errf("strict_sni_host only supports 'on' or 'insecure_off', got '%s'", d.Val())
|
||||||
|
}
|
||||||
|
boolVal := true
|
||||||
|
if d.Val() == "insecure_off" {
|
||||||
|
boolVal = false
|
||||||
|
}
|
||||||
|
serverOpts.StrictSNIHost = &boolVal
|
||||||
|
|
||||||
|
// TODO: DEPRECATED. (August 2022)
|
||||||
case "protocol":
|
case "protocol":
|
||||||
|
caddy.Log().Named("caddyfile").Warn("DEPRECATED: protocol sub-option will be removed soon")
|
||||||
|
|
||||||
for nesting := d.Nesting(); d.NextBlock(nesting); {
|
for nesting := d.Nesting(); d.NextBlock(nesting); {
|
||||||
switch d.Val() {
|
switch d.Val() {
|
||||||
case "allow_h2c":
|
case "allow_h2c":
|
||||||
if d.NextArg() {
|
caddy.Log().Named("caddyfile").Warn("DEPRECATED: allow_h2c will be removed soon; use protocols option instead")
|
||||||
return nil, d.ArgErr()
|
|
||||||
}
|
|
||||||
serverOpts.AllowH2C = true
|
|
||||||
|
|
||||||
case "experimental_http3":
|
|
||||||
if d.NextArg() {
|
if d.NextArg() {
|
||||||
return nil, d.ArgErr()
|
return nil, d.ArgErr()
|
||||||
}
|
}
|
||||||
serverOpts.ExperimentalHTTP3 = true
|
if sliceContains(serverOpts.Protocols, "h2c") {
|
||||||
|
return nil, d.Errf("protocol h2c already specified")
|
||||||
|
}
|
||||||
|
serverOpts.Protocols = append(serverOpts.Protocols, "h2c")
|
||||||
|
|
||||||
case "strict_sni_host":
|
case "strict_sni_host":
|
||||||
|
caddy.Log().Named("caddyfile").Warn("DEPRECATED: protocol > strict_sni_host in this position will be removed soon; move up to the servers block instead")
|
||||||
|
|
||||||
if d.NextArg() && d.Val() != "insecure_off" && d.Val() != "on" {
|
if d.NextArg() && d.Val() != "insecure_off" && d.Val() != "on" {
|
||||||
return nil, d.Errf("strict_sni_host only supports 'on' or 'insecure_off', got '%s'", d.Val())
|
return nil, d.Errf("strict_sni_host only supports 'on' or 'insecure_off', got '%s'", d.Val())
|
||||||
}
|
}
|
||||||
|
@ -182,20 +220,9 @@ func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (interface{}, error
|
||||||
// applyServerOptions sets the server options on the appropriate servers
|
// applyServerOptions sets the server options on the appropriate servers
|
||||||
func applyServerOptions(
|
func applyServerOptions(
|
||||||
servers map[string]*caddyhttp.Server,
|
servers map[string]*caddyhttp.Server,
|
||||||
options map[string]interface{},
|
options map[string]any,
|
||||||
warnings *[]caddyconfig.Warning,
|
warnings *[]caddyconfig.Warning,
|
||||||
) error {
|
) error {
|
||||||
// If experimental HTTP/3 is enabled, enable it on each server.
|
|
||||||
// We already know there won't be a conflict with serverOptions because
|
|
||||||
// we validated earlier that "experimental_http3" cannot be set at the same
|
|
||||||
// time as "servers"
|
|
||||||
if enableH3, ok := options["experimental_http3"].(bool); ok && enableH3 {
|
|
||||||
*warnings = append(*warnings, caddyconfig.Warning{Message: "the 'experimental_http3' global option is deprecated, please use the 'servers > protocol > experimental_http3' option instead"})
|
|
||||||
for _, srv := range servers {
|
|
||||||
srv.ExperimentalHTTP3 = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
serverOpts, ok := options["servers"].([]serverOptions)
|
serverOpts, ok := options["servers"].([]serverOptions)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil
|
return nil
|
||||||
|
@ -228,9 +255,9 @@ func applyServerOptions(
|
||||||
server.ReadHeaderTimeout = opts.ReadHeaderTimeout
|
server.ReadHeaderTimeout = opts.ReadHeaderTimeout
|
||||||
server.WriteTimeout = opts.WriteTimeout
|
server.WriteTimeout = opts.WriteTimeout
|
||||||
server.IdleTimeout = opts.IdleTimeout
|
server.IdleTimeout = opts.IdleTimeout
|
||||||
|
server.KeepAliveInterval = opts.KeepAliveInterval
|
||||||
server.MaxHeaderBytes = opts.MaxHeaderBytes
|
server.MaxHeaderBytes = opts.MaxHeaderBytes
|
||||||
server.AllowH2C = opts.AllowH2C
|
server.Protocols = opts.Protocols
|
||||||
server.ExperimentalHTTP3 = opts.ExperimentalHTTP3
|
|
||||||
server.StrictSNIHost = opts.StrictSNIHost
|
server.StrictSNIHost = opts.StrictSNIHost
|
||||||
if opts.ShouldLogCredentials {
|
if opts.ShouldLogCredentials {
|
||||||
if server.Logs == nil {
|
if server.Logs == nil {
|
||||||
|
|
|
@ -33,7 +33,7 @@ import (
|
||||||
|
|
||||||
func (st ServerType) buildTLSApp(
|
func (st ServerType) buildTLSApp(
|
||||||
pairings []sbAddrAssociation,
|
pairings []sbAddrAssociation,
|
||||||
options map[string]interface{},
|
options map[string]any,
|
||||||
warnings []caddyconfig.Warning,
|
warnings []caddyconfig.Warning,
|
||||||
) (*caddytls.TLS, []caddyconfig.Warning, error) {
|
) (*caddytls.TLS, []caddyconfig.Warning, error) {
|
||||||
|
|
||||||
|
@ -307,6 +307,14 @@ func (st ServerType) buildTLSApp(
|
||||||
tlsApp.Automation.RenewCheckInterval = renewCheckInterval
|
tlsApp.Automation.RenewCheckInterval = renewCheckInterval
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// set the OCSP check interval if configured
|
||||||
|
if ocspCheckInterval, ok := options["ocsp_interval"].(caddy.Duration); ok {
|
||||||
|
if tlsApp.Automation == nil {
|
||||||
|
tlsApp.Automation = new(caddytls.AutomationConfig)
|
||||||
|
}
|
||||||
|
tlsApp.Automation.OCSPCheckInterval = ocspCheckInterval
|
||||||
|
}
|
||||||
|
|
||||||
// set whether OCSP stapling should be disabled for manually-managed certificates
|
// set whether OCSP stapling should be disabled for manually-managed certificates
|
||||||
if ocspConfig, ok := options["ocsp_stapling"].(certmagic.OCSPConfig); ok {
|
if ocspConfig, ok := options["ocsp_stapling"].(certmagic.OCSPConfig); ok {
|
||||||
tlsApp.DisableOCSPStapling = ocspConfig.DisableStapling
|
tlsApp.DisableOCSPStapling = ocspConfig.DisableStapling
|
||||||
|
@ -420,7 +428,7 @@ func (st ServerType) buildTLSApp(
|
||||||
|
|
||||||
type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer }
|
type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer }
|
||||||
|
|
||||||
func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]interface{}) error {
|
func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]any) error {
|
||||||
acmeWrapper, ok := issuer.(acmeCapable)
|
acmeWrapper, ok := issuer.(acmeCapable)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil
|
return nil
|
||||||
|
@ -467,7 +475,7 @@ func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]interf
|
||||||
// for any other automation policies. A nil policy (and no error) will be
|
// for any other automation policies. A nil policy (and no error) will be
|
||||||
// returned if there are no default/global options. However, if always is
|
// returned if there are no default/global options. However, if always is
|
||||||
// true, a non-nil value will always be returned (unless there is an error).
|
// true, a non-nil value will always be returned (unless there is an error).
|
||||||
func newBaseAutomationPolicy(options map[string]interface{}, warnings []caddyconfig.Warning, always bool) (*caddytls.AutomationPolicy, error) {
|
func newBaseAutomationPolicy(options map[string]any, warnings []caddyconfig.Warning, always bool) (*caddytls.AutomationPolicy, error) {
|
||||||
issuers, hasIssuers := options["cert_issuer"]
|
issuers, hasIssuers := options["cert_issuer"]
|
||||||
_, hasLocalCerts := options["local_certs"]
|
_, hasLocalCerts := options["local_certs"]
|
||||||
keyType, hasKeyType := options["key_type"]
|
keyType, hasKeyType := options["key_type"]
|
||||||
|
|
|
@ -189,12 +189,11 @@ func adaptByContentType(contentType string, body []byte) ([]byte, []Warning, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// adapter name should be suffix of MIME type
|
// adapter name should be suffix of MIME type
|
||||||
slashIdx := strings.Index(ct, "/")
|
_, adapterName, slashFound := strings.Cut(ct, "/")
|
||||||
if slashIdx < 0 {
|
if !slashFound {
|
||||||
return nil, nil, fmt.Errorf("malformed Content-Type")
|
return nil, nil, fmt.Errorf("malformed Content-Type")
|
||||||
}
|
}
|
||||||
|
|
||||||
adapterName := ct[slashIdx+1:]
|
|
||||||
cfgAdapter := GetAdapter(adapterName)
|
cfgAdapter := GetAdapter(adapterName)
|
||||||
if cfgAdapter == nil {
|
if cfgAdapter == nil {
|
||||||
return nil, nil, fmt.Errorf("unrecognized config adapter '%s'", adapterName)
|
return nil, nil, fmt.Errorf("unrecognized config adapter '%s'", adapterName)
|
||||||
|
@ -209,7 +208,7 @@ func adaptByContentType(contentType string, body []byte) ([]byte, []Warning, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var bufPool = sync.Pool{
|
var bufPool = sync.Pool{
|
||||||
New: func() interface{} {
|
New: func() any {
|
||||||
return new(bytes.Buffer)
|
return new(bytes.Buffer)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
|
@ -100,7 +100,7 @@ func (tc *Tester) InitServer(rawConfig string, configType string) {
|
||||||
tc.t.Fail()
|
tc.t.Fail()
|
||||||
}
|
}
|
||||||
if err := tc.ensureConfigRunning(rawConfig, configType); err != nil {
|
if err := tc.ensureConfigRunning(rawConfig, configType); err != nil {
|
||||||
tc.t.Logf("failed ensurng config is running: %s", err)
|
tc.t.Logf("failed ensuring config is running: %s", err)
|
||||||
tc.t.Fail()
|
tc.t.Fail()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -186,7 +186,7 @@ func (tc *Tester) ensureConfigRunning(rawConfig string, configType string) error
|
||||||
expectedBytes, _, _ = adapter.Adapt([]byte(rawConfig), nil)
|
expectedBytes, _, _ = adapter.Adapt([]byte(rawConfig), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
var expected interface{}
|
var expected any
|
||||||
err := json.Unmarshal(expectedBytes, &expected)
|
err := json.Unmarshal(expectedBytes, &expected)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -196,7 +196,7 @@ func (tc *Tester) ensureConfigRunning(rawConfig string, configType string) error
|
||||||
Timeout: Default.LoadRequestTimeout,
|
Timeout: Default.LoadRequestTimeout,
|
||||||
}
|
}
|
||||||
|
|
||||||
fetchConfig := func(client *http.Client) interface{} {
|
fetchConfig := func(client *http.Client) any {
|
||||||
resp, err := client.Get(fmt.Sprintf("http://localhost:%d/config/", Default.AdminPort))
|
resp, err := client.Get(fmt.Sprintf("http://localhost:%d/config/", Default.AdminPort))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
|
@ -206,7 +206,7 @@ func (tc *Tester) ensureConfigRunning(rawConfig string, configType string) error
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
var actual interface{}
|
var actual any
|
||||||
err = json.Unmarshal(actualBytes, &actual)
|
err = json.Unmarshal(actualBytes, &actual)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
|
@ -371,7 +371,7 @@ func CompareAdapt(t *testing.T, filename, rawConfig string, adapterName string,
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
options := make(map[string]interface{})
|
options := make(map[string]any)
|
||||||
|
|
||||||
result, warnings, err := cfgAdapter.Adapt([]byte(rawConfig), options)
|
result, warnings, err := cfgAdapter.Adapt([]byte(rawConfig), options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -3,6 +3,7 @@
|
||||||
http_port 8080
|
http_port 8080
|
||||||
https_port 8443
|
https_port 8443
|
||||||
grace_period 5s
|
grace_period 5s
|
||||||
|
shutdown_delay 10s
|
||||||
default_sni localhost
|
default_sni localhost
|
||||||
order root first
|
order root first
|
||||||
storage file_system {
|
storage file_system {
|
||||||
|
@ -45,6 +46,7 @@
|
||||||
"http_port": 8080,
|
"http_port": 8080,
|
||||||
"https_port": 8443,
|
"https_port": 8443,
|
||||||
"grace_period": 5000000000,
|
"grace_period": 5000000000,
|
||||||
|
"shutdown_delay": 10000000000,
|
||||||
"servers": {
|
"servers": {
|
||||||
"srv0": {
|
"srv0": {
|
||||||
"listen": [
|
"listen": [
|
||||||
|
|
|
@ -22,6 +22,7 @@
|
||||||
}
|
}
|
||||||
storage_clean_interval 7d
|
storage_clean_interval 7d
|
||||||
renew_interval 1d
|
renew_interval 1d
|
||||||
|
ocsp_interval 2d
|
||||||
|
|
||||||
key_type ed25519
|
key_type ed25519
|
||||||
}
|
}
|
||||||
|
@ -83,6 +84,7 @@
|
||||||
},
|
},
|
||||||
"ask": "https://example.com"
|
"ask": "https://example.com"
|
||||||
},
|
},
|
||||||
|
"ocsp_interval": 172800000000000,
|
||||||
"renew_interval": 86400000000000,
|
"renew_interval": 86400000000000,
|
||||||
"storage_clean_interval": 604800000000000
|
"storage_clean_interval": 604800000000000
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,11 +12,8 @@
|
||||||
}
|
}
|
||||||
max_header_size 100MB
|
max_header_size 100MB
|
||||||
log_credentials
|
log_credentials
|
||||||
protocol {
|
strict_sni_host
|
||||||
allow_h2c
|
protocols h1 h2 h2c h3
|
||||||
experimental_http3
|
|
||||||
strict_sni_host
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -61,8 +58,12 @@ foo.com {
|
||||||
"logs": {
|
"logs": {
|
||||||
"should_log_credentials": true
|
"should_log_credentials": true
|
||||||
},
|
},
|
||||||
"experimental_http3": true,
|
"protocols": [
|
||||||
"allow_h2c": true
|
"h1",
|
||||||
|
"h2",
|
||||||
|
"h2c",
|
||||||
|
"h3"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -19,27 +19,30 @@
|
||||||
@matcher6 vars_regexp "{http.request.uri}" `\.([a-f0-9]{6})\.(css|js)$`
|
@matcher6 vars_regexp "{http.request.uri}" `\.([a-f0-9]{6})\.(css|js)$`
|
||||||
respond @matcher6 "from vars_regexp matcher without name"
|
respond @matcher6 "from vars_regexp matcher without name"
|
||||||
|
|
||||||
@matcher7 {
|
@matcher7 `path('/foo*') && method('GET')`
|
||||||
|
respond @matcher7 "inline expression matcher shortcut"
|
||||||
|
|
||||||
|
@matcher8 {
|
||||||
header Foo bar
|
header Foo bar
|
||||||
header Foo foobar
|
header Foo foobar
|
||||||
header Bar foo
|
header Bar foo
|
||||||
}
|
}
|
||||||
respond @matcher7 "header matcher merging values of the same field"
|
respond @matcher8 "header matcher merging values of the same field"
|
||||||
|
|
||||||
@matcher8 {
|
@matcher9 {
|
||||||
query foo=bar foo=baz bar=foo
|
query foo=bar foo=baz bar=foo
|
||||||
query bar=baz
|
query bar=baz
|
||||||
}
|
}
|
||||||
respond @matcher8 "query matcher merging pairs with the same keys"
|
respond @matcher9 "query matcher merging pairs with the same keys"
|
||||||
|
|
||||||
@matcher9 {
|
@matcher10 {
|
||||||
header !Foo
|
header !Foo
|
||||||
header Bar foo
|
header Bar foo
|
||||||
}
|
}
|
||||||
respond @matcher9 "header matcher with null field matcher"
|
respond @matcher10 "header matcher with null field matcher"
|
||||||
|
|
||||||
@matcher10 remote_ip private_ranges
|
@matcher11 remote_ip private_ranges
|
||||||
respond @matcher10 "remote_ip matcher with private ranges"
|
respond @matcher11 "remote_ip matcher with private ranges"
|
||||||
}
|
}
|
||||||
----------
|
----------
|
||||||
{
|
{
|
||||||
|
@ -152,6 +155,19 @@
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"match": [
|
||||||
|
{
|
||||||
|
"expression": "path('/foo*') \u0026\u0026 method('GET')"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"handle": [
|
||||||
|
{
|
||||||
|
"body": "inline expression matcher shortcut",
|
||||||
|
"handler": "static_response"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"match": [
|
"match": [
|
||||||
{
|
{
|
||||||
|
|
|
@ -1,6 +1,8 @@
|
||||||
:8884
|
:8884
|
||||||
|
|
||||||
reverse_proxy h2c://localhost:8080
|
reverse_proxy h2c://localhost:8080
|
||||||
|
|
||||||
|
reverse_proxy unix+h2c//run/app.sock
|
||||||
----------
|
----------
|
||||||
{
|
{
|
||||||
"apps": {
|
"apps": {
|
||||||
|
@ -27,6 +29,21 @@ reverse_proxy h2c://localhost:8080
|
||||||
"dial": "localhost:8080"
|
"dial": "localhost:8080"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"handler": "reverse_proxy",
|
||||||
|
"transport": {
|
||||||
|
"protocol": "http",
|
||||||
|
"versions": [
|
||||||
|
"h2c",
|
||||||
|
"2"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"upstreams": [
|
||||||
|
{
|
||||||
|
"dial": "unix//run/app.sock"
|
||||||
|
}
|
||||||
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
|
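As a side note on what an h2c upstream implies for clients: the sketch below is an assumption added for illustration (it is not part of this change) and shows the usual way to build a Go client that speaks HTTP/2 without TLS, using golang.org/x/net/http2 with AllowHTTP and a plaintext dialer.

package sketch

import (
	"crypto/tls"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

// newH2CClient returns an *http.Client that sends HTTP/2 frames over
// plaintext TCP ("h2c"), which is what the h2c:// upstream scheme expects.
func newH2CClient() *http.Client {
	return &http.Client{
		Transport: &http2.Transport{
			AllowHTTP: true, // permit "http://" URLs
			// Dial plaintext even though the field is named DialTLS.
			DialTLS: func(network, addr string, _ *tls.Config) (net.Conn, error) {
				return net.Dial(network, addr)
			},
		},
	}
}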
@ -0,0 +1,64 @@
|
||||||
|
:8884
|
||||||
|
|
||||||
|
reverse_proxy 127.0.0.1:65535 {
|
||||||
|
lb_policy first
|
||||||
|
lb_retries 5
|
||||||
|
lb_try_duration 10s
|
||||||
|
lb_try_interval 500ms
|
||||||
|
lb_retry_match {
|
||||||
|
path /foo*
|
||||||
|
method POST
|
||||||
|
}
|
||||||
|
lb_retry_match path /bar*
|
||||||
|
}
|
||||||
|
----------
|
||||||
|
{
|
||||||
|
"apps": {
|
||||||
|
"http": {
|
||||||
|
"servers": {
|
||||||
|
"srv0": {
|
||||||
|
"listen": [
|
||||||
|
":8884"
|
||||||
|
],
|
||||||
|
"routes": [
|
||||||
|
{
|
||||||
|
"handle": [
|
||||||
|
{
|
||||||
|
"handler": "reverse_proxy",
|
||||||
|
"load_balancing": {
|
||||||
|
"retries": 5,
|
||||||
|
"retry_match": [
|
||||||
|
{
|
||||||
|
"method": [
|
||||||
|
"POST"
|
||||||
|
],
|
||||||
|
"path": [
|
||||||
|
"/foo*"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": [
|
||||||
|
"/bar*"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"selection_policy": {
|
||||||
|
"policy": "first"
|
||||||
|
},
|
||||||
|
"try_duration": 10000000000,
|
||||||
|
"try_interval": 500000000
|
||||||
|
},
|
||||||
|
"upstreams": [
|
||||||
|
{
|
||||||
|
"dial": "127.0.0.1:65535"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -24,8 +24,9 @@ https://example.com {
		max_conns_per_host 5
		keepalive_idle_conns_per_host 2
		keepalive_interval 30s
		renegotiation freely
		except_ports 8181 8182
		tls_renegotiation freely
		tls_except_ports 8181 8182
		}
	}
}
@ -68,7 +68,7 @@ func TestDuplicateHosts(t *testing.T) {
	}
	`,
	"caddyfile",
	"duplicate site address not allowed")
	"ambiguous site definition")
}

func TestReadCookie(t *testing.T) {
@ -60,7 +60,7 @@ func TestMapRespondWithDefault(t *testing.T) {
	tester.AssertPostResponseBody("http://localhost:9080/version", []string{}, bytes.NewBuffer([]byte{}), 200, "hello from localhost unknown")
}

func TestMapAsJson(t *testing.T) {
func TestMapAsJSON(t *testing.T) {
	// arrange
	tester := caddytest.NewTester(t)
	tester.InitServer(`

@ -85,7 +85,7 @@ func TestMapAsJson(t *testing.T) {
	{
		"handler": "map",
		"source": "{http.request.method}",
		"destinations": ["dest-name"],
		"destinations": ["{dest-name}"],
		"defaults": ["unknown"],
		"mappings": [
			{
@ -123,8 +123,8 @@ func TestH2ToH2CStream(t *testing.T) {
|
||||||
// Disable any compression method from server.
|
// Disable any compression method from server.
|
||||||
req.Header.Set("Accept-Encoding", "identity")
|
req.Header.Set("Accept-Encoding", "identity")
|
||||||
|
|
||||||
resp := tester.AssertResponseCode(req, 200)
|
resp := tester.AssertResponseCode(req, http.StatusOK)
|
||||||
if 200 != resp.StatusCode {
|
if resp.StatusCode != http.StatusOK {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
|
@ -143,7 +143,6 @@ func TestH2ToH2CStream(t *testing.T) {
|
||||||
if !strings.Contains(body, expectedBody) {
|
if !strings.Contains(body, expectedBody) {
|
||||||
t.Errorf("requesting \"%s\" expected response body \"%s\" but got \"%s\"", req.RequestURI, expectedBody, body)
|
t.Errorf("requesting \"%s\" expected response body \"%s\" but got \"%s\"", req.RequestURI, expectedBody, body)
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func testH2ToH2CStreamServeH2C(t *testing.T) *http.Server {
|
func testH2ToH2CStreamServeH2C(t *testing.T) *http.Server {
|
||||||
|
@ -335,8 +334,8 @@ func TestH2ToH1ChunkedResponse(t *testing.T) {
|
||||||
fmt.Fprint(w, expectedBody)
|
fmt.Fprint(w, expectedBody)
|
||||||
w.Close()
|
w.Close()
|
||||||
}()
|
}()
|
||||||
resp := tester.AssertResponseCode(req, 200)
|
resp := tester.AssertResponseCode(req, http.StatusOK)
|
||||||
if 200 != resp.StatusCode {
|
if resp.StatusCode != http.StatusOK {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -351,7 +350,6 @@ func TestH2ToH1ChunkedResponse(t *testing.T) {
|
||||||
if body != expectedBody {
|
if body != expectedBody {
|
||||||
t.Errorf("requesting \"%s\" expected response body \"%s\" but got \"%s\"", req.RequestURI, expectedBody, body)
|
t.Errorf("requesting \"%s\" expected response body \"%s\" but got \"%s\"", req.RequestURI, expectedBody, body)
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func testH2ToH1ChunkedResponseServeH1(t *testing.T) *http.Server {
|
func testH2ToH1ChunkedResponseServeH1(t *testing.T) *http.Server {
|
||||||
|
|
120
cmd/cobra.go
Normal file
|
@ -0,0 +1,120 @@
|
||||||
|
package caddycmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
var rootCmd = &cobra.Command{
|
||||||
|
Use: "caddy",
|
||||||
|
Long: `Caddy is an extensible server platform written in Go.
|
||||||
|
|
||||||
|
At its core, Caddy merely manages configuration. Modules are plugged
|
||||||
|
in statically at compile-time to provide useful functionality. Caddy's
|
||||||
|
standard distribution includes common modules to serve HTTP, TLS,
|
||||||
|
and PKI applications, including the automation of certificates.
|
||||||
|
|
||||||
|
To run Caddy, use:
|
||||||
|
|
||||||
|
- 'caddy run' to run Caddy in the foreground (recommended).
|
||||||
|
- 'caddy start' to start Caddy in the background; only do this
|
||||||
|
if you will be keeping the terminal window open until you run
|
||||||
|
'caddy stop' to close the server.
|
||||||
|
|
||||||
|
When Caddy is started, it opens a locally-bound administrative socket
|
||||||
|
to which configuration can be POSTed via a restful HTTP API (see
|
||||||
|
https://caddyserver.com/docs/api).
|
||||||
|
|
||||||
|
Caddy's native configuration format is JSON. However, config adapters
|
||||||
|
can be used to convert other config formats to JSON when Caddy receives
|
||||||
|
its configuration. The Caddyfile is a built-in config adapter that is
|
||||||
|
popular for hand-written configurations due to its straightforward
|
||||||
|
syntax (see https://caddyserver.com/docs/caddyfile). Many third-party
|
||||||
|
adapters are available (see https://caddyserver.com/docs/config-adapters).
|
||||||
|
Use 'caddy adapt' to see how a config translates to JSON.
|
||||||
|
|
||||||
|
For convenience, the CLI can act as an HTTP client to give Caddy its
|
||||||
|
initial configuration for you. If a file named Caddyfile is in the
|
||||||
|
current working directory, it will do this automatically. Otherwise,
|
||||||
|
you can use the --config flag to specify the path to a config file.
|
||||||
|
|
||||||
|
Some special-purpose subcommands build and load a configuration file
|
||||||
|
for you directly from command line input; for example:
|
||||||
|
|
||||||
|
- caddy file-server
|
||||||
|
- caddy reverse-proxy
|
||||||
|
- caddy respond
|
||||||
|
|
||||||
|
These commands disable the administration endpoint because their
|
||||||
|
configuration is specified solely on the command line.
|
||||||
|
|
||||||
|
In general, the most common way to run Caddy is simply:
|
||||||
|
|
||||||
|
$ caddy run
|
||||||
|
|
||||||
|
Or, with a configuration file:
|
||||||
|
|
||||||
|
$ caddy run --config caddy.json
|
||||||
|
|
||||||
|
If running interactively in a terminal, running Caddy in the
|
||||||
|
background may be more convenient:
|
||||||
|
|
||||||
|
$ caddy start
|
||||||
|
...
|
||||||
|
$ caddy stop
|
||||||
|
|
||||||
|
This allows you to run other commands while Caddy stays running.
|
||||||
|
Be sure to stop Caddy before you close the terminal!
|
||||||
|
|
||||||
|
Depending on the system, Caddy may need permission to bind to low
|
||||||
|
ports. One way to do this on Linux is to use setcap:
|
||||||
|
|
||||||
|
$ sudo setcap cap_net_bind_service=+ep $(which caddy)
|
||||||
|
|
||||||
|
Remember to run that command again after replacing the binary.
|
||||||
|
|
||||||
|
See the Caddy website for tutorials, configuration structure,
|
||||||
|
syntax, and module documentation: https://caddyserver.com/docs/
|
||||||
|
|
||||||
|
Custom Caddy builds are available on the Caddy download page at:
|
||||||
|
https://caddyserver.com/download
|
||||||
|
|
||||||
|
The xcaddy command can be used to build Caddy from source with or
|
||||||
|
without additional plugins: https://github.com/caddyserver/xcaddy
|
||||||
|
|
||||||
|
Where possible, Caddy should be installed using officially-supported
|
||||||
|
package installers: https://caddyserver.com/docs/install
|
||||||
|
|
||||||
|
Instructions for running Caddy in production are also available:
|
||||||
|
https://caddyserver.com/docs/running
|
||||||
|
`,
|
||||||
|
Example: ` $ caddy run
|
||||||
|
$ caddy run --config caddy.json
|
||||||
|
$ caddy reload --config caddy.json
|
||||||
|
$ caddy stop`,
|
||||||
|
|
||||||
|
// kind of annoying to have all the help text printed out if
|
||||||
|
// caddy has an error provisioning its modules, for instance...
|
||||||
|
SilenceUsage: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
const fullDocsFooter = `Full documentation is available at:
|
||||||
|
https://caddyserver.com/docs/command-line`
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.SetHelpTemplate(rootCmd.HelpTemplate() + "\n" + fullDocsFooter)
|
||||||
|
}
|
||||||
|
|
||||||
|
func caddyCmdToCoral(caddyCmd Command) *cobra.Command {
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: caddyCmd.Name,
|
||||||
|
Short: caddyCmd.Short,
|
||||||
|
Long: caddyCmd.Long,
|
||||||
|
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||||
|
fls := cmd.Flags()
|
||||||
|
_, err := caddyCmd.Func(Flags{fls})
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
cmd.Flags().AddGoFlagSet(caddyCmd.Flags)
|
||||||
|
return cmd
|
||||||
|
}
|
|
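Since plugin commands registered through RegisterCommand are now wrapped by caddyCmdToCoral into cobra commands, here is a hedged sketch of what a third-party plugin's registration still looks like on the caller's side; the command name and flag are made up for illustration.

package sketchplugin

import (
	"flag"
	"fmt"

	caddycmd "github.com/caddyserver/caddy/v2/cmd"
)

func init() {
	caddycmd.RegisterCommand(caddycmd.Command{
		Name:  "hello-world", // hypothetical subcommand name
		Usage: "[--name <who>]",
		Short: "Prints a greeting",
		Func: func(fl caddycmd.Flags) (int, error) {
			fmt.Println("hello,", fl.String("name"))
			return 0, nil
		},
		Flags: func() *flag.FlagSet {
			fs := flag.NewFlagSet("hello-world", flag.ExitOnError)
			fs.String("name", "world", "Who to greet")
			return fs
		}(),
	})
}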
@ -29,7 +29,6 @@ import (
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"runtime"
|
"runtime"
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"sort"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/aryann/difflib"
|
"github.com/aryann/difflib"
|
||||||
|
@ -280,7 +279,7 @@ func cmdStop(fl Flags) (int, error) {
|
||||||
configFlag := fl.String("config")
|
configFlag := fl.String("config")
|
||||||
configAdapterFlag := fl.String("adapter")
|
configAdapterFlag := fl.String("adapter")
|
||||||
|
|
||||||
adminAddr, err := DetermineAdminAPIAddress(addrFlag, configFlag, configAdapterFlag)
|
adminAddr, err := DetermineAdminAPIAddress(addrFlag, nil, configFlag, configAdapterFlag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
|
return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
|
||||||
}
|
}
|
||||||
|
@ -310,7 +309,7 @@ func cmdReload(fl Flags) (int, error) {
|
||||||
return caddy.ExitCodeFailedStartup, fmt.Errorf("no config file to load")
|
return caddy.ExitCodeFailedStartup, fmt.Errorf("no config file to load")
|
||||||
}
|
}
|
||||||
|
|
||||||
adminAddr, err := DetermineAdminAPIAddress(addrFlag, configFlag, configAdapterFlag)
|
adminAddr, err := DetermineAdminAPIAddress(addrFlag, config, configFlag, configAdapterFlag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
|
return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
|
||||||
}
|
}
|
||||||
|
@ -331,30 +330,17 @@ func cmdReload(fl Flags) (int, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func cmdVersion(_ Flags) (int, error) {
|
func cmdVersion(_ Flags) (int, error) {
|
||||||
fmt.Println(CaddyVersion())
|
_, full := caddy.Version()
|
||||||
|
fmt.Println(full)
|
||||||
return caddy.ExitCodeSuccess, nil
|
return caddy.ExitCodeSuccess, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func cmdBuildInfo(fl Flags) (int, error) {
|
func cmdBuildInfo(_ Flags) (int, error) {
|
||||||
bi, ok := debug.ReadBuildInfo()
|
bi, ok := debug.ReadBuildInfo()
|
||||||
if !ok {
|
if !ok {
|
||||||
return caddy.ExitCodeFailedStartup, fmt.Errorf("no build information")
|
return caddy.ExitCodeFailedStartup, fmt.Errorf("no build information")
|
||||||
}
|
}
|
||||||
|
fmt.Println(bi)
|
||||||
fmt.Printf("go_version: %s\n", runtime.Version())
|
|
||||||
fmt.Printf("go_os: %s\n", runtime.GOOS)
|
|
||||||
fmt.Printf("go_arch: %s\n", runtime.GOARCH)
|
|
||||||
fmt.Printf("path: %s\n", bi.Path)
|
|
||||||
fmt.Printf("main: %s %s %s\n", bi.Main.Path, bi.Main.Version, bi.Main.Sum)
|
|
||||||
fmt.Println("dependencies:")
|
|
||||||
|
|
||||||
for _, goMod := range bi.Deps {
|
|
||||||
fmt.Printf("%s %s %s", goMod.Path, goMod.Version, goMod.Sum)
|
|
||||||
if goMod.Replace != nil {
|
|
||||||
fmt.Printf(" => %s %s %s", goMod.Replace.Path, goMod.Replace.Version, goMod.Replace.Sum)
|
|
||||||
}
|
|
||||||
fmt.Println()
|
|
||||||
}
|
|
||||||
return caddy.ExitCodeSuccess, nil
|
return caddy.ExitCodeSuccess, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
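For reference, a minimal sketch of the two-value Version accessor as used in cmdVersion above, assuming the simple/full split shown in this diff:

package sketch

import (
	"fmt"

	"github.com/caddyserver/caddy/v2"
)

// printVersions shows the difference between the two values returned by
// caddy.Version(): a short version string and a fuller build string.
func printVersions() {
	simple, full := caddy.Version()
	fmt.Println("simple:", simple) // e.g. what the ACME User-Agent uses
	fmt.Println("full:", full)     // what `caddy version` prints
}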
@ -471,7 +457,7 @@ func cmdAdaptConfig(fl Flags) (int, error) {
|
||||||
fmt.Errorf("reading input file: %v", err)
|
fmt.Errorf("reading input file: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
opts := map[string]interface{}{"filename": adaptCmdInputFlag}
|
opts := map[string]any{"filename": adaptCmdInputFlag}
|
||||||
|
|
||||||
adaptedConfig, warnings, err := cfgAdapter.Adapt(input, opts)
|
adaptedConfig, warnings, err := cfgAdapter.Adapt(input, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -593,70 +579,6 @@ func cmdFmt(fl Flags) (int, error) {
|
||||||
return caddy.ExitCodeSuccess, nil
|
return caddy.ExitCodeSuccess, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func cmdHelp(fl Flags) (int, error) {
|
|
||||||
const fullDocs = `Full documentation is available at:
|
|
||||||
https://caddyserver.com/docs/command-line`
|
|
||||||
|
|
||||||
args := fl.Args()
|
|
||||||
if len(args) == 0 {
|
|
||||||
s := `Caddy is an extensible server platform.
|
|
||||||
|
|
||||||
usage:
|
|
||||||
caddy <command> [<args...>]
|
|
||||||
|
|
||||||
commands:
|
|
||||||
`
|
|
||||||
keys := make([]string, 0, len(commands))
|
|
||||||
for k := range commands {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
for _, k := range keys {
|
|
||||||
cmd := commands[k]
|
|
||||||
short := strings.TrimSuffix(cmd.Short, ".")
|
|
||||||
s += fmt.Sprintf(" %-15s %s\n", cmd.Name, short)
|
|
||||||
}
|
|
||||||
|
|
||||||
s += "\nUse 'caddy help <command>' for more information about a command.\n"
|
|
||||||
s += "\n" + fullDocs + "\n"
|
|
||||||
|
|
||||||
fmt.Print(s)
|
|
||||||
|
|
||||||
return caddy.ExitCodeSuccess, nil
|
|
||||||
} else if len(args) > 1 {
|
|
||||||
return caddy.ExitCodeFailedStartup, fmt.Errorf("can only give help with one command")
|
|
||||||
}
|
|
||||||
|
|
||||||
subcommand, ok := commands[args[0]]
|
|
||||||
if !ok {
|
|
||||||
return caddy.ExitCodeFailedStartup, fmt.Errorf("unknown command: %s", args[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
helpText := strings.TrimSpace(subcommand.Long)
|
|
||||||
if helpText == "" {
|
|
||||||
helpText = subcommand.Short
|
|
||||||
if !strings.HasSuffix(helpText, ".") {
|
|
||||||
helpText += "."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
result := fmt.Sprintf("%s\n\nusage:\n caddy %s %s\n",
|
|
||||||
helpText,
|
|
||||||
subcommand.Name,
|
|
||||||
strings.TrimSpace(subcommand.Usage),
|
|
||||||
)
|
|
||||||
|
|
||||||
if help := flagHelp(subcommand.Flags); help != "" {
|
|
||||||
result += fmt.Sprintf("\nflags:\n%s", help)
|
|
||||||
}
|
|
||||||
|
|
||||||
result += "\n" + fullDocs + "\n"
|
|
||||||
|
|
||||||
fmt.Print(result)
|
|
||||||
|
|
||||||
return caddy.ExitCodeSuccess, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AdminAPIRequest makes an API request according to the CLI flags given,
|
// AdminAPIRequest makes an API request according to the CLI flags given,
|
||||||
// with the given HTTP method and request URI. If body is non-nil, it will
|
// with the given HTTP method and request URI. If body is non-nil, it will
|
||||||
// be assumed to be Content-Type application/json. The caller should close
|
// be assumed to be Content-Type application/json. The caller should close
|
||||||
|
@ -732,10 +654,11 @@ func AdminAPIRequest(adminAddr, method, uri string, headers http.Header, body io
|
||||||
|
|
||||||
// DetermineAdminAPIAddress determines which admin API endpoint address should
|
// DetermineAdminAPIAddress determines which admin API endpoint address should
|
||||||
// be used based on the inputs. By priority: if `address` is specified, then
|
// be used based on the inputs. By priority: if `address` is specified, then
|
||||||
// it is returned; if `configFile` (and `configAdapter`) are specified, then that
|
// it is returned; if `config` is specified, then that config will be used for
|
||||||
// config will be loaded to find the admin address; otherwise, the default
|
// finding the admin address; if `configFile` (and `configAdapter`) are specified,
|
||||||
// admin listen address will be returned.
|
// then that config will be loaded to find the admin address; otherwise, the
|
||||||
func DetermineAdminAPIAddress(address, configFile, configAdapter string) (string, error) {
|
// default admin listen address will be returned.
|
||||||
|
func DetermineAdminAPIAddress(address string, config []byte, configFile, configAdapter string) (string, error) {
|
||||||
// Prefer the address if specified and non-empty
|
// Prefer the address if specified and non-empty
|
||||||
if address != "" {
|
if address != "" {
|
||||||
return address, nil
|
return address, nil
|
||||||
|
@ -743,21 +666,29 @@ func DetermineAdminAPIAddress(address, configFile, configAdapter string) (string
|
||||||
|
|
||||||
// Try to load the config from file if specified, with the given adapter name
|
// Try to load the config from file if specified, with the given adapter name
|
||||||
if configFile != "" {
|
if configFile != "" {
|
||||||
// get the config in caddy's native format
|
var loadedConfigFile string
|
||||||
config, loadedConfigFile, err := LoadConfig(configFile, configAdapter)
|
var err error
|
||||||
if err != nil {
|
|
||||||
return "", err
|
// use the provided loaded config if non-empty
|
||||||
}
|
// otherwise, load it from the specified file/adapter
|
||||||
if loadedConfigFile == "" {
|
loadedConfig := config
|
||||||
return "", fmt.Errorf("no config file to load")
|
if len(loadedConfig) == 0 {
|
||||||
|
// get the config in caddy's native format
|
||||||
|
loadedConfig, loadedConfigFile, err = LoadConfig(configFile, configAdapter)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if loadedConfigFile == "" {
|
||||||
|
return "", fmt.Errorf("no config file to load")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// get the address of the admin listener if set
|
// get the address of the admin listener from the config
|
||||||
if len(config) > 0 {
|
if len(loadedConfig) > 0 {
|
||||||
var tmpStruct struct {
|
var tmpStruct struct {
|
||||||
Admin caddy.AdminConfig `json:"admin"`
|
Admin caddy.AdminConfig `json:"admin"`
|
||||||
}
|
}
|
||||||
err = json.Unmarshal(config, &tmpStruct)
|
err := json.Unmarshal(loadedConfig, &tmpStruct)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("unmarshaling admin listener address from config: %v", err)
|
return "", fmt.Errorf("unmarshaling admin listener address from config: %v", err)
|
||||||
}
|
}
|
||||||
|
|
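A hedged sketch of calling the updated DetermineAdminAPIAddress signature from the caller's side; the config bytes variable is hypothetical, and the point is only that an already-loaded config can now be passed directly instead of re-reading a file.

package sketch

import (
	"fmt"

	caddycmd "github.com/caddyserver/caddy/v2/cmd"
)

// adminAddrFor resolves the admin endpoint either from config bytes that are
// already in memory (e.g. the payload about to be POSTed by `caddy reload`)
// or, failing that, from a config file and adapter name.
func adminAddrFor(loaded []byte, file, adapter string) (string, error) {
	addr, err := caddycmd.DetermineAdminAPIAddress("", loaded, file, adapter)
	if err != nil {
		return "", fmt.Errorf("couldn't determine admin API address: %v", err)
	}
	return addr, nil
}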
154
cmd/commands.go
|
@ -16,7 +16,14 @@ package caddycmd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/caddyserver/caddy/v2"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/spf13/cobra/doc"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Command represents a subcommand. Name, Func,
|
// Command represents a subcommand. Name, Func,
|
||||||
|
@ -70,13 +77,6 @@ func Commands() map[string]Command {
|
||||||
var commands = make(map[string]Command)
|
var commands = make(map[string]Command)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
RegisterCommand(Command{
|
|
||||||
Name: "help",
|
|
||||||
Func: cmdHelp,
|
|
||||||
Usage: "<command>",
|
|
||||||
Short: "Shows help for a Caddy subcommand",
|
|
||||||
})
|
|
||||||
|
|
||||||
RegisterCommand(Command{
|
RegisterCommand(Command{
|
||||||
Name: "start",
|
Name: "start",
|
||||||
Func: cmdStart,
|
Func: cmdStart,
|
||||||
|
@ -137,8 +137,8 @@ The --resume flag will override the --config flag if there is a config auto-
|
||||||
save file. It is not an error if --resume is used and no autosave file exists.
|
save file. It is not an error if --resume is used and no autosave file exists.
|
||||||
|
|
||||||
If --watch is specified, the config file will be loaded automatically after
|
If --watch is specified, the config file will be loaded automatically after
|
||||||
changes. ⚠️ This is dangerous in production! Only use this option in a local
|
changes. ⚠️ This can make unintentional config changes easier; only use this
|
||||||
development environment.`,
|
option in a local development environment.`,
|
||||||
Flags: func() *flag.FlagSet {
|
Flags: func() *flag.FlagSet {
|
||||||
fs := flag.NewFlagSet("run", flag.ExitOnError)
|
fs := flag.NewFlagSet("run", flag.ExitOnError)
|
||||||
fs.String("config", "", "Configuration file")
|
fs.String("config", "", "Configuration file")
|
||||||
|
@ -200,6 +200,19 @@ config file; otherwise the default is assumed.`,
|
||||||
Name: "version",
|
Name: "version",
|
||||||
Func: cmdVersion,
|
Func: cmdVersion,
|
||||||
Short: "Prints the version",
|
Short: "Prints the version",
|
||||||
|
Long: `
|
||||||
|
Prints the version of this Caddy binary.
|
||||||
|
|
||||||
|
Version information must be embedded into the binary at compile-time in
|
||||||
|
order for Caddy to display anything useful with this command. If Caddy
|
||||||
|
is built from within a version control repository, the Go command will
|
||||||
|
embed the revision hash if available. However, if Caddy is built in the
|
||||||
|
way specified by our online documentation (or by using xcaddy), more
|
||||||
|
detailed version information is printed as given by Go modules.
|
||||||
|
|
||||||
|
For more details about the full version string, see the Go module
|
||||||
|
documentation: https://go.dev/doc/modules/version-numbers
|
||||||
|
`,
|
||||||
})
|
})
|
||||||
|
|
||||||
RegisterCommand(Command{
|
RegisterCommand(Command{
|
||||||
|
@ -226,6 +239,24 @@ config file; otherwise the default is assumed.`,
|
||||||
Name: "environ",
|
Name: "environ",
|
||||||
Func: cmdEnviron,
|
Func: cmdEnviron,
|
||||||
Short: "Prints the environment",
|
Short: "Prints the environment",
|
||||||
|
Long: `
|
||||||
|
Prints the environment as seen by this Caddy process.
|
||||||
|
|
||||||
|
The environment includes variables set in the system. If your Caddy
|
||||||
|
configuration uses environment variables (e.g. "{env.VARIABLE}") then
|
||||||
|
this command can be useful for verifying that the variables will have
|
||||||
|
the values you expect in your config.
|
||||||
|
|
||||||
|
Note that environments may be different depending on how you run Caddy.
|
||||||
|
Environments for Caddy instances started by service managers such as
|
||||||
|
systemd are often different than the environment inherited from your
|
||||||
|
shell or terminal.
|
||||||
|
|
||||||
|
You can also print the environment the same time you use "caddy run"
|
||||||
|
by adding the "--environ" flag.
|
||||||
|
|
||||||
|
Environments may contain sensitive data.
|
||||||
|
`,
|
||||||
})
|
})
|
||||||
|
|
||||||
RegisterCommand(Command{
|
RegisterCommand(Command{
|
||||||
|
@ -346,16 +377,111 @@ EXPERIMENTAL: May be changed or removed.
|
||||||
}(),
|
}(),
|
||||||
})
|
})
|
||||||
|
|
||||||
|
RegisterCommand(Command{
|
||||||
|
Name: "manpage",
|
||||||
|
Func: func(fl Flags) (int, error) {
|
||||||
|
dir := strings.TrimSpace(fl.String("directory"))
|
||||||
|
if dir == "" {
|
||||||
|
return caddy.ExitCodeFailedQuit, fmt.Errorf("designated output directory and specified section are required")
|
||||||
|
}
|
||||||
|
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||||
|
return caddy.ExitCodeFailedQuit, err
|
||||||
|
}
|
||||||
|
if err := doc.GenManTree(rootCmd, &doc.GenManHeader{
|
||||||
|
Title: "Caddy",
|
||||||
|
Section: "8", // https://en.wikipedia.org/wiki/Man_page#Manual_sections
|
||||||
|
}, dir); err != nil {
|
||||||
|
return caddy.ExitCodeFailedQuit, err
|
||||||
|
}
|
||||||
|
return caddy.ExitCodeSuccess, nil
|
||||||
|
},
|
||||||
|
Usage: "--directory <path>",
|
||||||
|
Short: "Generates the manual pages for Caddy commands",
|
||||||
|
Long: `
|
||||||
|
Generates the manual pages for Caddy commands into the designated directory
|
||||||
|
tagged into section 8 (System Administration).
|
||||||
|
|
||||||
|
The manual page files are generated into the directory specified by the
|
||||||
|
argument of --directory. If the directory does not exist, it will be created.
|
||||||
|
`,
|
||||||
|
Flags: func() *flag.FlagSet {
|
||||||
|
fs := flag.NewFlagSet("manpage", flag.ExitOnError)
|
||||||
|
fs.String("directory", "", "The output directory where the manpages are generated")
|
||||||
|
return fs
|
||||||
|
}(),
|
||||||
|
})
|
||||||
|
|
||||||
|
// source: https://github.com/spf13/cobra/blob/main/shell_completions.md
|
||||||
|
rootCmd.AddCommand(&cobra.Command{
|
||||||
|
Use: "completion [bash|zsh|fish|powershell]",
|
||||||
|
Short: "Generate completion script",
|
||||||
|
Long: fmt.Sprintf(`To load completions:
|
||||||
|
|
||||||
|
Bash:
|
||||||
|
|
||||||
|
$ source <(%[1]s completion bash)
|
||||||
|
|
||||||
|
# To load completions for each session, execute once:
|
||||||
|
# Linux:
|
||||||
|
$ %[1]s completion bash > /etc/bash_completion.d/%[1]s
|
||||||
|
# macOS:
|
||||||
|
$ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
|
||||||
|
|
||||||
|
Zsh:
|
||||||
|
|
||||||
|
# If shell completion is not already enabled in your environment,
|
||||||
|
# you will need to enable it. You can execute the following once:
|
||||||
|
|
||||||
|
$ echo "autoload -U compinit; compinit" >> ~/.zshrc
|
||||||
|
|
||||||
|
# To load completions for each session, execute once:
|
||||||
|
$ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
|
||||||
|
|
||||||
|
# You will need to start a new shell for this setup to take effect.
|
||||||
|
|
||||||
|
fish:
|
||||||
|
|
||||||
|
$ %[1]s completion fish | source
|
||||||
|
|
||||||
|
# To load completions for each session, execute once:
|
||||||
|
$ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
|
||||||
|
|
||||||
|
PowerShell:
|
||||||
|
|
||||||
|
PS> %[1]s completion powershell | Out-String | Invoke-Expression
|
||||||
|
|
||||||
|
# To load completions for every new session, run:
|
||||||
|
PS> %[1]s completion powershell > %[1]s.ps1
|
||||||
|
# and source this file from your PowerShell profile.
|
||||||
|
`, rootCmd.Root().Name()),
|
||||||
|
DisableFlagsInUseLine: true,
|
||||||
|
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
|
||||||
|
Args: cobra.ExactValidArgs(1),
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
switch args[0] {
|
||||||
|
case "bash":
|
||||||
|
return cmd.Root().GenBashCompletion(os.Stdout)
|
||||||
|
case "zsh":
|
||||||
|
return cmd.Root().GenZshCompletion(os.Stdout)
|
||||||
|
case "fish":
|
||||||
|
return cmd.Root().GenFishCompletion(os.Stdout, true)
|
||||||
|
case "powershell":
|
||||||
|
return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unrecognized shell: %s", args[0])
|
||||||
|
}
|
||||||
|
},
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegisterCommand registers the command cmd.
|
// RegisterCommand registers the command cmd.
|
||||||
// cmd.Name must be unique and conform to the
|
// cmd.Name must be unique and conform to the
|
||||||
// following format:
|
// following format:
|
||||||
//
|
//
|
||||||
// - lowercase
|
// - lowercase
|
||||||
// - alphanumeric and hyphen characters only
|
// - alphanumeric and hyphen characters only
|
||||||
// - cannot start or end with a hyphen
|
// - cannot start or end with a hyphen
|
||||||
// - hyphen cannot be adjacent to another hyphen
|
// - hyphen cannot be adjacent to another hyphen
|
||||||
//
|
//
|
||||||
// This function panics if the name is already registered,
|
// This function panics if the name is already registered,
|
||||||
// if the name does not meet the described format, or if
|
// if the name does not meet the described format, or if
|
||||||
|
@ -378,7 +504,7 @@ func RegisterCommand(cmd Command) {
|
||||||
if !commandNameRegex.MatchString(cmd.Name) {
|
if !commandNameRegex.MatchString(cmd.Name) {
|
||||||
panic("invalid command name")
|
panic("invalid command name")
|
||||||
}
|
}
|
||||||
commands[cmd.Name] = cmd
|
rootCmd.AddCommand(caddyCmdToCoral(cmd))
|
||||||
}
|
}
|
||||||
|
|
||||||
var commandNameRegex = regexp.MustCompile(`^[a-z0-9]$|^([a-z0-9]+-?[a-z0-9]*)+[a-z0-9]$`)
|
var commandNameRegex = regexp.MustCompile(`^[a-z0-9]$|^([a-z0-9]+-?[a-z0-9]*)+[a-z0-9]$`)
|
||||||
|
|
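To make the naming rules above concrete, here is a small sketch exercising the same commandNameRegex pattern (copied from this file) against a few names; the sample names are made up for illustration.

package sketch

import (
	"fmt"
	"regexp"
)

var commandNameRegex = regexp.MustCompile(`^[a-z0-9]$|^([a-z0-9]+-?[a-z0-9]*)+[a-z0-9]$`)

func checkNames() {
	for _, name := range []string{
		"run",         // ok: lowercase alphanumeric
		"file-server", // ok: single hyphen between alphanumerics
		"-bad",        // rejected: starts with a hyphen
		"bad--name",   // rejected: adjacent hyphens
		"Bad",         // rejected: uppercase
	} {
		fmt.Printf("%-12s valid=%v\n", name, commandNameRegex.MatchString(name))
	}
}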
104
cmd/main.go
|
@ -33,13 +33,14 @@ import (
|
||||||
"github.com/caddyserver/caddy/v2"
|
"github.com/caddyserver/caddy/v2"
|
||||||
"github.com/caddyserver/caddy/v2/caddyconfig"
|
"github.com/caddyserver/caddy/v2/caddyconfig"
|
||||||
"github.com/caddyserver/certmagic"
|
"github.com/caddyserver/certmagic"
|
||||||
|
"github.com/spf13/pflag"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// set a fitting User-Agent for ACME requests
|
// set a fitting User-Agent for ACME requests
|
||||||
goModule := caddy.GoModule()
|
version, _ := caddy.Version()
|
||||||
cleanModVersion := strings.TrimPrefix(goModule.Version, "v")
|
cleanModVersion := strings.TrimPrefix(version, "v")
|
||||||
certmagic.UserAgent = "Caddy/" + cleanModVersion
|
certmagic.UserAgent = "Caddy/" + cleanModVersion
|
||||||
|
|
||||||
// by using Caddy, user indicates agreement to CA terms
|
// by using Caddy, user indicates agreement to CA terms
|
||||||
|
@ -50,43 +51,14 @@ func init() {
|
||||||
// Main implements the main function of the caddy command.
|
// Main implements the main function of the caddy command.
|
||||||
// Call this if Caddy is to be the main() of your program.
|
// Call this if Caddy is to be the main() of your program.
|
||||||
func Main() {
|
func Main() {
|
||||||
switch len(os.Args) {
|
if len(os.Args) == 0 {
|
||||||
case 0:
|
|
||||||
fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n")
|
fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n")
|
||||||
os.Exit(caddy.ExitCodeFailedStartup)
|
os.Exit(caddy.ExitCodeFailedStartup)
|
||||||
case 1:
|
|
||||||
os.Args = append(os.Args, "help")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
subcommandName := os.Args[1]
|
if err := rootCmd.Execute(); err != nil {
|
||||||
subcommand, ok := commands[subcommandName]
|
os.Exit(1)
|
||||||
if !ok {
|
|
||||||
if strings.HasPrefix(os.Args[1], "-") {
|
|
||||||
// user probably forgot to type the subcommand
|
|
||||||
fmt.Println("[ERROR] first argument must be a subcommand; see 'caddy help'")
|
|
||||||
} else {
|
|
||||||
fmt.Printf("[ERROR] '%s' is not a recognized subcommand; see 'caddy help'\n", os.Args[1])
|
|
||||||
}
|
|
||||||
os.Exit(caddy.ExitCodeFailedStartup)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fs := subcommand.Flags
|
|
||||||
if fs == nil {
|
|
||||||
fs = flag.NewFlagSet(subcommand.Name, flag.ExitOnError)
|
|
||||||
}
|
|
||||||
|
|
||||||
err := fs.Parse(os.Args[2:])
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(caddy.ExitCodeFailedStartup)
|
|
||||||
}
|
|
||||||
|
|
||||||
exitCode, err := subcommand.Func(Flags{fs})
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s: %v\n", subcommand.Name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
os.Exit(exitCode)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
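Since Main now just delegates to cobra's rootCmd.Execute, the entry point that embeds Caddy stays trivially small. A hedged sketch of such an entry point under the assumptions of this change (custom builds would add their own plugin imports next to the standard modules):

package main

import (
	caddycmd "github.com/caddyserver/caddy/v2/cmd"

	// plug in the standard distribution's modules
	_ "github.com/caddyserver/caddy/v2/modules/standard"
)

func main() {
	caddycmd.Main()
}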
// handlePingbackConn reads from conn and ensures it matches
|
// handlePingbackConn reads from conn and ensures it matches
|
||||||
|
@ -173,7 +145,7 @@ func LoadConfig(configFile, adapterName string) ([]byte, string, error) {
|
||||||
|
|
||||||
// adapt config
|
// adapt config
|
||||||
if cfgAdapter != nil {
|
if cfgAdapter != nil {
|
||||||
adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]interface{}{
|
adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]any{
|
||||||
"filename": configFile,
|
"filename": configFile,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -280,7 +252,7 @@ func watchConfigFile(filename, adapterName string) {
|
||||||
// Flags wraps a FlagSet so that typed values
|
// Flags wraps a FlagSet so that typed values
|
||||||
// from flags can be easily retrieved.
|
// from flags can be easily retrieved.
|
||||||
type Flags struct {
|
type Flags struct {
|
||||||
*flag.FlagSet
|
*pflag.FlagSet
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns the string representation of the
|
// String returns the string representation of the
|
||||||
|
@ -326,22 +298,6 @@ func (f Flags) Duration(name string) time.Duration {
|
||||||
return val
|
return val
|
||||||
}
|
}
|
||||||
|
|
||||||
// flagHelp returns the help text for fs.
|
|
||||||
func flagHelp(fs *flag.FlagSet) string {
|
|
||||||
if fs == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// temporarily redirect output
|
|
||||||
out := fs.Output()
|
|
||||||
defer fs.SetOutput(out)
|
|
||||||
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
fs.SetOutput(buf)
|
|
||||||
fs.PrintDefaults()
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadEnvFromFile(envFile string) error {
|
func loadEnvFromFile(envFile string) error {
|
||||||
file, err := os.Open(envFile)
|
file, err := os.Open(envFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -387,11 +343,11 @@ func parseEnvFile(envInput io.Reader) (map[string]string, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
	// split line into key and value
	fields := strings.SplitN(line, "=", 2)
	before, after, isCut := strings.Cut(line, "=")
	if len(fields) != 2 {
	if !isCut {
		return nil, fmt.Errorf("can't parse line %d; line should be in KEY=VALUE format", lineNumber)
	}
	key, val := fields[0], fields[1]
	key, val := before, after

	// sometimes keys are prefixed by "export " so file can be sourced in bash; ignore it here
	key = strings.TrimPrefix(key, "export ")

@ -408,11 +364,8 @@ func parseEnvFile(envInput io.Reader) (map[string]string, error) {
	}

	// remove any trailing comment after value
	if commentStart := strings.Index(val, "#"); commentStart > 0 {
	if commentStart, _, found := strings.Cut(val, "#"); found {
		before := val[commentStart-1]
		val = strings.TrimRight(commentStart, " \t")
		if before == '\t' || before == ' ' {
			val = strings.TrimRight(val[:commentStart], " \t")
		}
	}

	// quoted value: support newlines
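The replacement above relies on the Go 1.18 strings.Cut helper; a tiny sketch of the same KEY=VALUE split in isolation (the sample input is made up):

package sketch

import (
	"fmt"
	"strings"
)

func splitEnvLine(line string) (key, val string, ok bool) {
	// Cut splits on the first "=" only, so values may themselves contain "=".
	key, val, ok = strings.Cut(line, "=")
	if !ok {
		return "", "", false
	}
	key = strings.TrimPrefix(key, "export ") // allow `export KEY=VALUE` lines
	return key, val, true
}

func example() {
	k, v, _ := splitEnvLine("export DB_URL=postgres://localhost:5432/app?sslmode=disable")
	fmt.Println(k, "=", v) // DB_URL = postgres://localhost:5432/app?sslmode=disable
}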
@ -441,11 +394,12 @@ func parseEnvFile(envInput io.Reader) (map[string]string, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func printEnvironment() {
|
func printEnvironment() {
|
||||||
|
_, version := caddy.Version()
|
||||||
fmt.Printf("caddy.HomeDir=%s\n", caddy.HomeDir())
|
fmt.Printf("caddy.HomeDir=%s\n", caddy.HomeDir())
|
||||||
fmt.Printf("caddy.AppDataDir=%s\n", caddy.AppDataDir())
|
fmt.Printf("caddy.AppDataDir=%s\n", caddy.AppDataDir())
|
||||||
fmt.Printf("caddy.AppConfigDir=%s\n", caddy.AppConfigDir())
|
fmt.Printf("caddy.AppConfigDir=%s\n", caddy.AppConfigDir())
|
||||||
fmt.Printf("caddy.ConfigAutosavePath=%s\n", caddy.ConfigAutosavePath)
|
fmt.Printf("caddy.ConfigAutosavePath=%s\n", caddy.ConfigAutosavePath)
|
||||||
fmt.Printf("caddy.Version=%s\n", CaddyVersion())
|
fmt.Printf("caddy.Version=%s\n", version)
|
||||||
fmt.Printf("runtime.GOOS=%s\n", runtime.GOOS)
|
fmt.Printf("runtime.GOOS=%s\n", runtime.GOOS)
|
||||||
fmt.Printf("runtime.GOARCH=%s\n", runtime.GOARCH)
|
fmt.Printf("runtime.GOARCH=%s\n", runtime.GOARCH)
|
||||||
fmt.Printf("runtime.Compiler=%s\n", runtime.Compiler)
|
fmt.Printf("runtime.Compiler=%s\n", runtime.Compiler)
|
||||||
|
@ -462,21 +416,15 @@ func printEnvironment() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// CaddyVersion returns a detailed version string, if available.
|
// StringSlice is a flag.Value that enables repeated use of a string flag.
|
||||||
func CaddyVersion() string {
|
type StringSlice []string
|
||||||
goModule := caddy.GoModule()
|
|
||||||
ver := goModule.Version
|
func (ss StringSlice) String() string { return "[" + strings.Join(ss, ", ") + "]" }
|
||||||
if goModule.Sum != "" {
|
|
||||||
ver += " " + goModule.Sum
|
func (ss *StringSlice) Set(value string) error {
|
||||||
}
|
*ss = append(*ss, value)
|
||||||
if goModule.Replace != nil {
|
return nil
|
||||||
ver += " => " + goModule.Replace.Path
|
|
||||||
if goModule.Replace.Version != "" {
|
|
||||||
ver += "@" + goModule.Replace.Version
|
|
||||||
}
|
|
||||||
if goModule.Replace.Sum != "" {
|
|
||||||
ver += " " + goModule.Replace.Sum
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ver
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Interface guard
|
||||||
|
var _ flag.Value = (*StringSlice)(nil)
|
||||||
|
|
|
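A short sketch of how the new StringSlice flag value can be used with a standard FlagSet so a flag may be repeated; the flag name here is hypothetical.

package sketch

import (
	"flag"
	"fmt"

	caddycmd "github.com/caddyserver/caddy/v2/cmd"
)

func parseRepeatedFlag(args []string) {
	var envFiles caddycmd.StringSlice
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	fs.Var(&envFiles, "envfile", "Environment file (can be repeated)")

	// e.g. args = []string{"--envfile", "a.env", "--envfile", "b.env"}
	_ = fs.Parse(args)
	fmt.Println(envFiles.String()) // [a.env, b.env]
}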
@ -194,7 +194,7 @@ func getModules() (standard, nonstandard, unknown []moduleInfo, err error) {
|
||||||
// can use reflection but we need a non-pointer value (I'm
|
// can use reflection but we need a non-pointer value (I'm
|
||||||
// not sure why), and since New() should return a pointer
|
// not sure why), and since New() should return a pointer
|
||||||
// value, we need to dereference it first
|
// value, we need to dereference it first
|
||||||
iface := interface{}(modInfo.New())
|
iface := any(modInfo.New())
|
||||||
if rv := reflect.ValueOf(iface); rv.Kind() == reflect.Ptr {
|
if rv := reflect.ValueOf(iface); rv.Kind() == reflect.Ptr {
|
||||||
iface = reflect.New(reflect.TypeOf(iface).Elem()).Elem().Interface()
|
iface = reflect.New(reflect.TypeOf(iface).Elem()).Elem().Interface()
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,7 +13,6 @@
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
//go:build !windows
|
//go:build !windows
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package caddycmd
|
package caddycmd
|
||||||
|
|
||||||
|
|
|
@ -31,6 +31,9 @@ import (
|
||||||
func removeCaddyBinary(path string) error {
|
func removeCaddyBinary(path string) error {
|
||||||
var sI syscall.StartupInfo
|
var sI syscall.StartupInfo
|
||||||
var pI syscall.ProcessInformation
|
var pI syscall.ProcessInformation
|
||||||
argv := syscall.StringToUTF16Ptr(filepath.Join(os.Getenv("windir"), "system32", "cmd.exe") + " /C del " + path)
|
argv, err := syscall.UTF16PtrFromString(filepath.Join(os.Getenv("windir"), "system32", "cmd.exe") + " /C del " + path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
return syscall.CreateProcess(nil, argv, nil, nil, true, 0, nil, nil, &sI, &pI)
|
return syscall.CreateProcess(nil, argv, nil, nil, true, 0, nil, nil, &sI, &pI)
|
||||||
}
|
}
|
||||||
|
|
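The switch from StringToUTF16Ptr to UTF16PtrFromString matters because the former panics when the string contains an interior NUL byte, while the latter returns an error the caller can handle. A minimal Windows-only sketch, assuming only the standard syscall package:

//go:build windows

package sketch

import "syscall"

// utf16Arg converts a command line to the *uint16 form CreateProcess expects,
// surfacing bad input (e.g. an embedded NUL) as an error instead of a panic.
func utf16Arg(cmdline string) (*uint16, error) {
	return syscall.UTF16PtrFromString(cmdline)
}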
104
context.go
|
@ -37,9 +37,10 @@ import (
|
||||||
// not actually need to do this).
|
// not actually need to do this).
|
||||||
type Context struct {
|
type Context struct {
|
||||||
context.Context
|
context.Context
|
||||||
moduleInstances map[string][]interface{}
|
moduleInstances map[string][]Module
|
||||||
cfg *Config
|
cfg *Config
|
||||||
cleanupFuncs []func()
|
cleanupFuncs []func()
|
||||||
|
ancestry []Module
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewContext provides a new context derived from the given
|
// NewContext provides a new context derived from the given
|
||||||
|
@ -51,7 +52,7 @@ type Context struct {
|
||||||
// modules which are loaded will be properly unloaded.
|
// modules which are loaded will be properly unloaded.
|
||||||
// See standard library context package's documentation.
|
// See standard library context package's documentation.
|
||||||
func NewContext(ctx Context) (Context, context.CancelFunc) {
|
func NewContext(ctx Context) (Context, context.CancelFunc) {
|
||||||
newCtx := Context{moduleInstances: make(map[string][]interface{}), cfg: ctx.cfg}
|
newCtx := Context{moduleInstances: make(map[string][]Module), cfg: ctx.cfg}
|
||||||
c, cancel := context.WithCancel(ctx.Context)
|
c, cancel := context.WithCancel(ctx.Context)
|
||||||
wrappedCancel := func() {
|
wrappedCancel := func() {
|
||||||
cancel()
|
cancel()
|
||||||
|
@ -90,15 +91,15 @@ func (ctx *Context) OnCancel(f func()) {
|
||||||
// ModuleMap may be used in place of map[string]json.RawMessage. The return value's
|
// ModuleMap may be used in place of map[string]json.RawMessage. The return value's
|
||||||
// underlying type mirrors the input field's type:
|
// underlying type mirrors the input field's type:
|
||||||
//
|
//
|
||||||
// json.RawMessage => interface{}
|
// json.RawMessage => any
|
||||||
// []json.RawMessage => []interface{}
|
// []json.RawMessage => []any
|
||||||
// [][]json.RawMessage => [][]interface{}
|
// [][]json.RawMessage => [][]any
|
||||||
// map[string]json.RawMessage => map[string]interface{}
|
// map[string]json.RawMessage => map[string]any
|
||||||
// []map[string]json.RawMessage => []map[string]interface{}
|
// []map[string]json.RawMessage => []map[string]any
|
||||||
//
|
//
|
||||||
// The field must have a "caddy" struct tag in this format:
|
// The field must have a "caddy" struct tag in this format:
|
||||||
//
|
//
|
||||||
// caddy:"key1=val1 key2=val2"
|
// caddy:"key1=val1 key2=val2"
|
||||||
//
|
//
|
||||||
// To load modules, a "namespace" key is required. For example, to load modules
|
// To load modules, a "namespace" key is required. For example, to load modules
|
||||||
// in the "http.handlers" namespace, you'd put: `namespace=http.handlers` in the
|
// in the "http.handlers" namespace, you'd put: `namespace=http.handlers` in the
|
||||||
|
@ -115,20 +116,20 @@ func (ctx *Context) OnCancel(f func()) {
|
||||||
// meaning the key containing the module's name that is defined inline with the module
|
// meaning the key containing the module's name that is defined inline with the module
|
||||||
// itself. You must specify the inline key in a struct tag, along with the namespace:
|
// itself. You must specify the inline key in a struct tag, along with the namespace:
|
||||||
//
|
//
|
||||||
// caddy:"namespace=http.handlers inline_key=handler"
|
// caddy:"namespace=http.handlers inline_key=handler"
|
||||||
//
|
//
|
||||||
// This will look for a key/value pair like `"handler": "..."` in the json.RawMessage
|
// This will look for a key/value pair like `"handler": "..."` in the json.RawMessage
|
||||||
// in order to know the module name.
|
// in order to know the module name.
|
||||||
//
|
//
|
||||||
// To make use of the loaded module(s) (the return value), you will probably want
|
// To make use of the loaded module(s) (the return value), you will probably want
|
||||||
// to type-assert each interface{} value(s) to the types that are useful to you
|
// to type-assert each 'any' value(s) to the types that are useful to you
|
||||||
// and store them on the same struct. Storing them on the same struct makes for
|
// and store them on the same struct. Storing them on the same struct makes for
|
||||||
// easy garbage collection when your host module is no longer needed.
|
// easy garbage collection when your host module is no longer needed.
|
||||||
//
|
//
|
||||||
// Loaded modules have already been provisioned and validated. Upon returning
|
// Loaded modules have already been provisioned and validated. Upon returning
|
||||||
// successfully, this method clears the json.RawMessage(s) in the field since
|
// successfully, this method clears the json.RawMessage(s) in the field since
|
||||||
// the raw JSON is no longer needed, and this allows the GC to free up memory.
|
// the raw JSON is no longer needed, and this allows the GC to free up memory.
|
||||||
func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (interface{}, error) {
|
func (ctx Context) LoadModule(structPointer any, fieldName string) (any, error) {
|
||||||
val := reflect.ValueOf(structPointer).Elem().FieldByName(fieldName)
|
val := reflect.ValueOf(structPointer).Elem().FieldByName(fieldName)
|
||||||
typ := val.Type()
|
typ := val.Type()
|
||||||
|
|
||||||
|
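The guest-module loading described in the comment above usually looks like this on the host module's side. This is a hedged sketch: the Gizmo type and field names are illustrative rather than from this codebase, but the struct tag and type-assertion pattern follow the documented usage of LoadModule.

package sketch

import (
	"encoding/json"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// Gizmo hosts a single guest handler module, chosen at config time.
type Gizmo struct {
	// The raw JSON is decoded lazily; the "handler" key inside it names the module.
	HandlerRaw json.RawMessage `json:"handler,omitempty" caddy:"namespace=http.handlers inline_key=handler"`

	handler caddyhttp.MiddlewareHandler
}

func (g *Gizmo) Provision(ctx caddy.Context) error {
	mod, err := ctx.LoadModule(g, "HandlerRaw") // returns `any`, already provisioned
	if err != nil {
		return err
	}
	g.handler = mod.(caddyhttp.MiddlewareHandler) // type-assert to the useful interface
	return nil
}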
@ -148,7 +149,7 @@ func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (inte
|
||||||
}
|
}
|
||||||
inlineModuleKey := opts["inline_key"]
|
inlineModuleKey := opts["inline_key"]
|
||||||
|
|
||||||
var result interface{}
|
var result any
|
||||||
|
|
||||||
switch val.Kind() {
|
switch val.Kind() {
|
||||||
case reflect.Slice:
|
case reflect.Slice:
|
||||||
|
@ -170,7 +171,7 @@ func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (inte
|
||||||
if inlineModuleKey == "" {
|
if inlineModuleKey == "" {
|
||||||
panic("unable to determine module name without inline_key because type is not a ModuleMap")
|
panic("unable to determine module name without inline_key because type is not a ModuleMap")
|
||||||
}
|
}
|
||||||
var all []interface{}
|
var all []any
|
||||||
for i := 0; i < val.Len(); i++ {
|
for i := 0; i < val.Len(); i++ {
|
||||||
val, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, val.Index(i).Interface().(json.RawMessage))
|
val, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, val.Index(i).Interface().(json.RawMessage))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -186,10 +187,10 @@ func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (inte
|
||||||
if inlineModuleKey == "" {
|
if inlineModuleKey == "" {
|
||||||
panic("unable to determine module name without inline_key because type is not a ModuleMap")
|
panic("unable to determine module name without inline_key because type is not a ModuleMap")
|
||||||
}
|
}
|
||||||
var all [][]interface{}
|
var all [][]any
|
||||||
for i := 0; i < val.Len(); i++ {
|
for i := 0; i < val.Len(); i++ {
|
||||||
innerVal := val.Index(i)
|
innerVal := val.Index(i)
|
||||||
var allInner []interface{}
|
var allInner []any
|
||||||
for j := 0; j < innerVal.Len(); j++ {
|
for j := 0; j < innerVal.Len(); j++ {
|
||||||
innerInnerVal, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, innerVal.Index(j).Interface().(json.RawMessage))
|
innerInnerVal, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, innerVal.Index(j).Interface().(json.RawMessage))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -204,7 +205,7 @@ func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (inte
|
||||||
} else if isModuleMapType(typ.Elem()) {
|
} else if isModuleMapType(typ.Elem()) {
|
||||||
// val is `[]map[string]json.RawMessage`
|
// val is `[]map[string]json.RawMessage`
|
||||||
|
|
||||||
var all []map[string]interface{}
|
var all []map[string]any
|
||||||
for i := 0; i < val.Len(); i++ {
|
for i := 0; i < val.Len(); i++ {
|
||||||
thisSet, err := ctx.loadModulesFromSomeMap(moduleNamespace, inlineModuleKey, val.Index(i))
|
thisSet, err := ctx.loadModulesFromSomeMap(moduleNamespace, inlineModuleKey, val.Index(i))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -232,10 +233,10 @@ func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (inte
|
||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// loadModulesFromSomeMap loads modules from val, which must be a type of map[string]interface{}.
|
// loadModulesFromSomeMap loads modules from val, which must be a type of map[string]any.
|
||||||
// Depending on inlineModuleKey, it will be interpreted as either a ModuleMap (key is the module
|
// Depending on inlineModuleKey, it will be interpreted as either a ModuleMap (key is the module
|
||||||
// name) or as a regular map (key is not the module name, and module name is defined inline).
|
// name) or as a regular map (key is not the module name, and module name is defined inline).
|
||||||
-func (ctx Context) loadModulesFromSomeMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]interface{}, error) {
+func (ctx Context) loadModulesFromSomeMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]any, error) {
 // if no inline_key is specified, then val must be a ModuleMap,
 // where the key is the module name
 if inlineModuleKey == "" {
@@ -253,8 +254,8 @@ func (ctx Context) loadModulesFromSomeMap(namespace, inlineModuleKey string, val
 // loadModulesFromRegularMap loads modules from val, where val is a map[string]json.RawMessage.
 // Map keys are NOT interpreted as module names, so module names are still expected to appear
 // inline with the objects.
-func (ctx Context) loadModulesFromRegularMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]interface{}, error) {
+func (ctx Context) loadModulesFromRegularMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]any, error) {
-mods := make(map[string]interface{})
+mods := make(map[string]any)
 iter := val.MapRange()
 for iter.Next() {
 k := iter.Key()
@@ -268,10 +269,10 @@ func (ctx Context) loadModulesFromRegularMap(namespace, inlineModuleKey string,
 return mods, nil
 }

-// loadModuleMap loads modules from a ModuleMap, i.e. map[string]interface{}, where the key is the
+// loadModuleMap loads modules from a ModuleMap, i.e. map[string]any, where the key is the
 // module name. With a module map, module names do not need to be defined inline with their values.
-func (ctx Context) loadModuleMap(namespace string, val reflect.Value) (map[string]interface{}, error) {
+func (ctx Context) loadModuleMap(namespace string, val reflect.Value) (map[string]any, error) {
-all := make(map[string]interface{})
+all := make(map[string]any)
 iter := val.MapRange()
 for iter.Next() {
 k := iter.Key().Interface().(string)
@@ -299,19 +300,19 @@ func (ctx Context) loadModuleMap(namespace string, val reflect.Value) (map[strin
 // directly by most modules. However, this method is useful when
 // dynamically loading/unloading modules in their own context,
 // like from embedded scripts, etc.
-func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{}, error) {
+func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (any, error) {
 modulesMu.RLock()
-mod, ok := modules[id]
+modInfo, ok := modules[id]
 modulesMu.RUnlock()
 if !ok {
 return nil, fmt.Errorf("unknown module: %s", id)
 }

-if mod.New == nil {
+if modInfo.New == nil {
-return nil, fmt.Errorf("module '%s' has no constructor", mod.ID)
+return nil, fmt.Errorf("module '%s' has no constructor", modInfo.ID)
 }

-val := mod.New().(interface{})
+val := modInfo.New()

 // value must be a pointer for unmarshaling into concrete type, even if
 // the module's concrete type is a slice or map; New() *should* return
@@ -327,7 +328,7 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
 if len(rawMsg) > 0 {
 err := strictUnmarshalJSON(rawMsg, &val)
 if err != nil {
-return nil, fmt.Errorf("decoding module config: %s: %v", mod, err)
+return nil, fmt.Errorf("decoding module config: %s: %v", modInfo, err)
 }
 }

@@ -340,6 +341,8 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
 return nil, fmt.Errorf("module value cannot be null")
 }

+ctx.ancestry = append(ctx.ancestry, val)
+
 if prov, ok := val.(Provisioner); ok {
 err := prov.Provision(ctx)
 if err != nil {
@@ -351,7 +354,7 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
 err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2)
 }
 }
-return nil, fmt.Errorf("provision %s: %v", mod, err)
+return nil, fmt.Errorf("provision %s: %v", modInfo, err)
 }
 }

@@ -365,7 +368,7 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
 err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2)
 }
 }
-return nil, fmt.Errorf("%s: invalid configuration: %v", mod, err)
+return nil, fmt.Errorf("%s: invalid configuration: %v", modInfo, err)
 }
 }

@@ -375,7 +378,7 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
 }

 // loadModuleInline loads a module from a JSON raw message which decodes to
-// a map[string]interface{}, where one of the object keys is moduleNameKey
+// a map[string]any, where one of the object keys is moduleNameKey
 // and the corresponding value is the module name (as a string) which can
 // be found in the given scope. In other words, the module name is declared
 // in-line with the module itself.
@@ -385,7 +388,7 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
 // multiple instances in the map or it appears in an array (where there are
 // no custom keys). In other words, the key containing the module name is
 // treated special/separate from all the other keys in the object.
-func (ctx Context) loadModuleInline(moduleNameKey, moduleScope string, raw json.RawMessage) (interface{}, error) {
+func (ctx Context) loadModuleInline(moduleNameKey, moduleScope string, raw json.RawMessage) (any, error) {
 moduleName, raw, err := getModuleNameInline(moduleNameKey, raw)
 if err != nil {
 return nil, err
@@ -407,7 +410,7 @@ func (ctx Context) loadModuleInline(moduleNameKey, moduleScope string, raw json.
 // called during the Provision/Validate phase to reference a
 // module's own host app (since the parent app module is still
 // in the process of being provisioned, it is not yet ready).
-func (ctx Context) App(name string) (interface{}, error) {
+func (ctx Context) App(name string) (any, error) {
 if app, ok := ctx.cfg.apps[name]; ok {
 return app, nil
 }
@@ -439,8 +442,10 @@ func (ctx Context) Storage() certmagic.Storage {
 return ctx.cfg.storage
 }

+// TODO: aw man, can I please change this?
 // Logger returns a logger that can be used by mod.
 func (ctx Context) Logger(mod Module) *zap.Logger {
+// TODO: if mod is nil, use ctx.Module() instead...
 if ctx.cfg == nil {
 // often the case in tests; just use a dev logger
 l, err := zap.NewDevelopment()
@@ -451,3 +456,34 @@ func (ctx Context) Logger(mod Module) *zap.Logger {
 }
 return ctx.cfg.Logging.Logger(mod)
 }

+// TODO: use this
+// // Logger returns a logger that can be used by the current module.
+// func (ctx Context) Log() *zap.Logger {
+// if ctx.cfg == nil {
+// // often the case in tests; just use a dev logger
+// l, err := zap.NewDevelopment()
+// if err != nil {
+// panic("config missing, unable to create dev logger: " + err.Error())
+// }
+// return l
+// }
+// return ctx.cfg.Logging.Logger(ctx.Module())
+// }
+
+// Modules returns the lineage of modules that this context provisioned,
+// with the most recent/current module being last in the list.
+func (ctx Context) Modules() []Module {
+mods := make([]Module, len(ctx.ancestry))
+copy(mods, ctx.ancestry)
+return mods
+}
+
+// Module returns the current module, or the most recent one
+// provisioned by the context.
+func (ctx Context) Module() Module {
+if len(ctx.ancestry) == 0 {
+return nil
+}
+return ctx.ancestry[len(ctx.ancestry)-1]
+}
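Aside: the new ctx.ancestry slice, together with the Modules() and Module() accessors added above, lets a guest module ask the context for its own provisioning lineage. Below is a minimal sketch of how a module might use that during Provision; the module name "foo.gizmo" and the Gizmo type are hypothetical and only for illustration, while the Context, Module, and ModuleInfo APIs are the ones shown in this diff.

// Sketch only: a hypothetical module that logs its provisioning lineage.
package gizmo

import (
	"github.com/caddyserver/caddy/v2"
	"go.uber.org/zap"
)

func init() {
	caddy.RegisterModule(Gizmo{})
}

// Gizmo is a hypothetical module used only for illustration.
type Gizmo struct{}

// CaddyModule returns the Caddy module information.
func (Gizmo) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "foo.gizmo",
		New: func() caddy.Module { return new(Gizmo) },
	}
}

// Provision inspects the lineage tracked by ctx.ancestry.
func (g *Gizmo) Provision(ctx caddy.Context) error {
	// ctx.Module() is the most recently provisioned module, i.e. g itself;
	// ctx.Modules() is every module this context has provisioned so far.
	ctx.Logger(g).Debug("provisioning",
		zap.String("current", string(ctx.Module().CaddyModule().ID)),
		zap.Int("ancestors", len(ctx.Modules())))
	return nil
}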
@@ -71,13 +71,13 @@ func ExampleContext_LoadModule_array() {
 },
 }

-// since our input is []json.RawMessage, the output will be []interface{}
+// since our input is []json.RawMessage, the output will be []any
 mods, err := ctx.LoadModule(myStruct, "GuestModulesRaw")
 if err != nil {
 // you'd want to actually handle the error here
 // return fmt.Errorf("loading guest modules: %v", err)
 }
-for _, mod := range mods.([]interface{}) {
+for _, mod := range mods.([]any) {
 myStruct.guestModules = append(myStruct.guestModules, mod.(io.Writer))
 }

@@ -104,13 +104,13 @@ func ExampleContext_LoadModule_map() {
 },
 }

-// since our input is map[string]json.RawMessage, the output will be map[string]interface{}
+// since our input is map[string]json.RawMessage, the output will be map[string]any
 mods, err := ctx.LoadModule(myStruct, "GuestModulesRaw")
 if err != nil {
 // you'd want to actually handle the error here
 // return fmt.Errorf("loading guest modules: %v", err)
 }
-for modName, mod := range mods.(map[string]interface{}) {
+for modName, mod := range mods.(map[string]any) {
 myStruct.guestModules[modName] = mod.(io.Writer)
 }
@@ -13,7 +13,6 @@
 // limitations under the License.

 //go:build gofuzz
-// +build gofuzz

 package caddy
77 go.mod
@@ -1,43 +1,50 @@
 module github.com/caddyserver/caddy/v2

-go 1.17
+go 1.18

 require (
-github.com/BurntSushi/toml v1.1.0
+github.com/BurntSushi/toml v1.2.0
 github.com/Masterminds/sprig/v3 v3.2.2
 github.com/alecthomas/chroma v0.10.0
 github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b
-github.com/caddyserver/certmagic v0.16.1
+github.com/caddyserver/certmagic v0.17.1
 github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac
 github.com/go-chi/chi v4.1.2+incompatible
-github.com/google/cel-go v0.11.4
+github.com/google/cel-go v0.12.4
 github.com/google/uuid v1.3.0
-github.com/klauspost/compress v1.15.6
+github.com/klauspost/compress v1.15.9
-github.com/klauspost/cpuid/v2 v2.0.13
+github.com/klauspost/cpuid/v2 v2.1.0
-github.com/lucas-clemente/quic-go v0.28.0
+github.com/lucas-clemente/quic-go v0.28.2-0.20220813150001-9957668d4301
-github.com/mholt/acmez v1.0.2
+github.com/mholt/acmez v1.0.4
-github.com/prometheus/client_golang v1.12.1
+github.com/prometheus/client_golang v1.12.2
-github.com/smallstep/certificates v0.19.0
+github.com/smallstep/certificates v0.21.0
-github.com/smallstep/cli v0.18.0
+github.com/smallstep/cli v0.21.0
 github.com/smallstep/nosql v0.4.0
-github.com/smallstep/truststore v0.11.0
+github.com/smallstep/truststore v0.12.0
+github.com/spf13/cobra v1.1.3
+github.com/spf13/pflag v1.0.5
 github.com/tailscale/tscert v0.0.0-20220316030059-54bbcb9f74e2
-github.com/yuin/goldmark v1.4.12
+github.com/yuin/goldmark v1.4.13
 github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0
-go.opentelemetry.io/otel v1.4.0
+go.opentelemetry.io/otel v1.9.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.0
 go.opentelemetry.io/otel/sdk v1.4.0
 go.uber.org/zap v1.21.0
-golang.org/x/crypto v0.0.0-20220210151621-f4118a5b28e2
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
-golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
+golang.org/x/net v0.0.0-20220812165438-1d4ff48094d1
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
 google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21
-google.golang.org/protobuf v1.28.0
 gopkg.in/natefinch/lumberjack.v2 v2.0.0
 gopkg.in/yaml.v3 v3.0.1
 )

+require (
+github.com/golang/mock v1.6.0 // indirect
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
+gopkg.in/yaml.v2 v2.4.0 // indirect
+)

 require (
 filippo.io/edwards25519 v1.0.0-rc.1 // indirect
 github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
@@ -48,19 +55,18 @@ require (
 github.com/cenkalti/backoff/v4 v4.1.2 // indirect
 github.com/cespare/xxhash v1.1.0 // indirect
 github.com/cespare/xxhash/v2 v2.1.2 // indirect
-github.com/cheekybits/genny v1.0.0 // indirect
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
-github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
+github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
 github.com/dgraph-io/badger v1.6.2 // indirect
 github.com/dgraph-io/badger/v2 v2.2007.4 // indirect
 github.com/dgraph-io/ristretto v0.0.4-0.20200906165740-41ebdbffecfd // indirect
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
 github.com/dlclark/regexp2 v1.4.0 // indirect
-github.com/felixge/httpsnoop v1.0.2 // indirect
+github.com/felixge/httpsnoop v1.0.3 // indirect
 github.com/fsnotify/fsnotify v1.5.1 // indirect
 github.com/go-kit/kit v0.10.0 // indirect
 github.com/go-logfmt/logfmt v0.5.0 // indirect
-github.com/go-logr/logr v1.2.2 // indirect
+github.com/go-logr/logr v1.2.3 // indirect
 github.com/go-logr/stdr v1.2.2 // indirect
 github.com/go-sql-driver/mysql v1.6.0 // indirect
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
@@ -69,6 +75,7 @@ require (
 github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
 github.com/huandu/xstrings v1.3.2 // indirect
 github.com/imdario/mergo v0.3.12 // indirect
+github.com/inconshreveable/mousetrap v1.0.0 // indirect
 github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 github.com/jackc/pgconn v1.10.1 // indirect
 github.com/jackc/pgio v1.0.0 // indirect
@@ -80,16 +87,14 @@ require (
 github.com/libdns/libdns v0.2.1 // indirect
 github.com/manifoldco/promptui v0.9.0 // indirect
 github.com/marten-seemann/qpack v0.2.1 // indirect
-github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
-github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect
 github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
-github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 // indirect
+github.com/marten-seemann/qtls-go1-19 v0.1.0 // indirect
 github.com/mattn/go-colorable v0.1.8 // indirect
 github.com/mattn/go-isatty v0.0.13 // indirect
 github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
 github.com/micromdm/scep/v2 v2.1.0 // indirect
-github.com/miekg/dns v1.1.46 // indirect
+github.com/miekg/dns v1.1.50 // indirect
 github.com/mitchellh/copystructure v1.2.0 // indirect
 github.com/mitchellh/go-ps v1.0.0 // indirect
 github.com/mitchellh/reflectwalk v1.0.2 // indirect
@@ -100,7 +105,7 @@ require (
 github.com/prometheus/common v0.32.1 // indirect
 github.com/prometheus/procfs v0.7.3 // indirect
 github.com/rs/xid v1.2.1 // indirect
-github.com/russross/blackfriday/v2 v2.0.1 // indirect
+github.com/russross/blackfriday/v2 v2.1.0 // indirect
 github.com/shopspring/decimal v1.2.0 // indirect
 github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
 github.com/sirupsen/logrus v1.8.1 // indirect
@@ -112,21 +117,21 @@ require (
 go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.0 // indirect
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.0 // indirect
-go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect
-go.opentelemetry.io/otel/metric v0.27.0 // indirect
-go.opentelemetry.io/otel/trace v1.4.0 // indirect
+go.opentelemetry.io/otel/metric v0.31.0 // indirect
+go.opentelemetry.io/otel/trace v1.9.0 // indirect
 go.opentelemetry.io/proto/otlp v0.12.0 // indirect
-go.step.sm/cli-utils v0.7.0 // indirect
+go.step.sm/cli-utils v0.7.3 // indirect
-go.step.sm/crypto v0.16.1 // indirect
+go.step.sm/crypto v0.16.2 // indirect
-go.step.sm/linkedca v0.15.0 // indirect
+go.step.sm/linkedca v0.16.1 // indirect
 go.uber.org/atomic v1.9.0 // indirect
 go.uber.org/multierr v1.6.0 // indirect
-golang.org/x/mod v0.4.2 // indirect
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10
 golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b // indirect
-golang.org/x/tools v0.1.7 // indirect
+golang.org/x/tools v0.1.10 // indirect
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 google.golang.org/grpc v1.46.0 // indirect
+google.golang.org/protobuf v1.28.0 // indirect
 gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 howett.net/plist v1.0.0 // indirect
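Aside: raising the go directive from 1.17 to 1.18 is what permits the interface{}-to-any rewrite that runs through this change, since any is simply an alias introduced in Go 1.18. A tiny illustrative snippet (not part of the patch) showing the two spellings are the same type:

// Illustration only: any is an alias for interface{} in Go 1.18+,
// so these declarations are interchangeable.
package main

import "fmt"

func main() {
	var a interface{} = 42
	var b any = 42
	fmt.Println(a == b) // true: same dynamic type and value
}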
172 listen.go Normal file
@@ -0,0 +1,172 @@
+//go:build !linux
+
+package caddy
+
+import (
+"fmt"
+"net"
+"sync"
+"sync/atomic"
+"time"
+
+"go.uber.org/zap"
+)
+
+func ListenTimeout(network, addr string, keepAlivePeriod time.Duration) (net.Listener, error) {
+// check to see if plugin provides listener
+if ln, err := getListenerFromPlugin(network, addr); err != nil || ln != nil {
+return ln, err
+}
+
+lnKey := listenerKey(network, addr)
+
+sharedLn, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
+ln, err := net.Listen(network, addr)
+if err != nil {
+// https://github.com/caddyserver/caddy/pull/4534
+if isUnixNetwork(network) && isListenBindAddressAlreadyInUseError(err) {
+return nil, fmt.Errorf("%w: this can happen if Caddy was forcefully killed", err)
+}
+return nil, err
+}
+return &sharedListener{Listener: ln, key: lnKey}, nil
+})
+if err != nil {
+return nil, err
+}
+
+return &fakeCloseListener{sharedListener: sharedLn.(*sharedListener), keepAlivePeriod: keepAlivePeriod}, nil
+}
+
+// fakeCloseListener is a private wrapper over a listener that
+// is shared. The state of fakeCloseListener is not shared.
+// This allows one user of a socket to "close" the listener
+// while in reality the socket stays open for other users of
+// the listener. In this way, servers become hot-swappable
+// while the listener remains running. Listeners should be
+// re-wrapped in a new fakeCloseListener each time the listener
+// is reused. This type is atomic and values must not be copied.
+type fakeCloseListener struct {
+closed int32 // accessed atomically; belongs to this struct only
+*sharedListener // embedded, so we also become a net.Listener
+keepAlivePeriod time.Duration
+}
+
+type canSetKeepAlive interface {
+SetKeepAlivePeriod(d time.Duration) error
+SetKeepAlive(bool) error
+}
+
+func (fcl *fakeCloseListener) Accept() (net.Conn, error) {
+// if the listener is already "closed", return error
+if atomic.LoadInt32(&fcl.closed) == 1 {
+return nil, fakeClosedErr(fcl)
+}
+
+// call underlying accept
+conn, err := fcl.sharedListener.Accept()
+if err == nil {
+// if 0, do nothing, Go's default is already set
+// and if the connection allows setting KeepAlive, set it
+if tconn, ok := conn.(canSetKeepAlive); ok && fcl.keepAlivePeriod != 0 {
+if fcl.keepAlivePeriod > 0 {
+err = tconn.SetKeepAlivePeriod(fcl.keepAlivePeriod)
+} else { // negative
+err = tconn.SetKeepAlive(false)
+}
+if err != nil {
+Log().With(zap.String("server", fcl.sharedListener.key)).Warn("unable to set keepalive for new connection:", zap.Error(err))
+}
+}
+return conn, nil
+}
+
+// since Accept() returned an error, it may be because our reference to
+// the listener (this fakeCloseListener) may have been closed, i.e. the
+// server is shutting down; in that case, we need to clear the deadline
+// that we set when Close() was called, and return a non-temporary and
+// non-timeout error value to the caller, masking the "true" error, so
+// that server loops / goroutines won't retry, linger, and leak
+if atomic.LoadInt32(&fcl.closed) == 1 {
+// we dereference the sharedListener explicitly even though it's embedded
+// so that it's clear in the code that side-effects are shared with other
+// users of this listener, not just our own reference to it; we also don't
+// do anything with the error because all we could do is log it, but we
+// expliclty assign it to nothing so we don't forget it's there if needed
+_ = fcl.sharedListener.clearDeadline()
+
+if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+return nil, fakeClosedErr(fcl)
+}
+}
+
+return nil, err
+}
+
+// Close stops accepting new connections without closing the
+// underlying listener. The underlying listener is only closed
+// if the caller is the last known user of the socket.
+func (fcl *fakeCloseListener) Close() error {
+if atomic.CompareAndSwapInt32(&fcl.closed, 0, 1) {
+// There are two ways I know of to get an Accept()
+// function to return to the server loop that called
+// it: close the listener, or set a deadline in the
+// past. Obviously, we can't close the socket yet
+// since others may be using it (hence this whole
+// file). But we can set the deadline in the past,
+// and this is kind of cheating, but it works, and
+// it apparently even works on Windows.
+_ = fcl.sharedListener.setDeadline()
+_, _ = listenerPool.Delete(fcl.sharedListener.key)
+}
+return nil
+}
+
+// sharedListener is a wrapper over an underlying listener. The listener
+// and the other fields on the struct are shared state that is synchronized,
+// so sharedListener structs must never be copied (always use a pointer).
+type sharedListener struct {
+net.Listener
+key string // uniquely identifies this listener
+deadline bool // whether a deadline is currently set
+deadlineMu sync.Mutex
+}
+
+func (sl *sharedListener) clearDeadline() error {
+var err error
+sl.deadlineMu.Lock()
+if sl.deadline {
+switch ln := sl.Listener.(type) {
+case *net.TCPListener:
+err = ln.SetDeadline(time.Time{})
+case *net.UnixListener:
+err = ln.SetDeadline(time.Time{})
+}
+sl.deadline = false
+}
+sl.deadlineMu.Unlock()
+return err
+}
+
+func (sl *sharedListener) setDeadline() error {
+timeInPast := time.Now().Add(-1 * time.Minute)
+var err error
+sl.deadlineMu.Lock()
+if !sl.deadline {
+switch ln := sl.Listener.(type) {
+case *net.TCPListener:
+err = ln.SetDeadline(timeInPast)
+case *net.UnixListener:
+err = ln.SetDeadline(timeInPast)
+}
+sl.deadline = true
+}
+sl.deadlineMu.Unlock()
+return err
+}
+
+// Destruct is called by the UsagePool when the listener is
+// finally not being used anymore. It closes the socket.
+func (sl *sharedListener) Destruct() error {
+return sl.Listener.Close()
+}
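Aside: the non-Linux ListenTimeout above applies the keep-alive period per accepted connection: zero leaves Go's default in place, a positive duration sets it, and a negative duration turns keep-alives off. A hedged usage sketch follows; the addresses and durations are made up, and it assumes the net, time, and caddy packages are imported by the caller.

// Sketch: choosing keep-alive behavior with ListenTimeout.
func listenExamples() (net.Listener, net.Listener, error) {
	// 0 means "leave Go's default TCP keep-alive alone"
	lnDefault, err := caddy.ListenTimeout("tcp", "127.0.0.1:8080", 0)
	if err != nil {
		return nil, nil, err
	}
	// a negative period disables keep-alives on accepted connections
	lnNoKeepAlive, err := caddy.ListenTimeout("tcp", "127.0.0.1:8081", -1*time.Second)
	if err != nil {
		lnDefault.Close()
		return nil, nil, err
	}
	return lnDefault, lnNoKeepAlive, nil
}

Note that Close() on these listeners is the "fake" close described above: the shared socket stays open as long as another config still references it.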
34 listen_linux.go Normal file
@@ -0,0 +1,34 @@
+package caddy
+
+import (
+"context"
+"net"
+"syscall"
+"time"
+
+"go.uber.org/zap"
+"golang.org/x/sys/unix"
+)
+
+// ListenTimeout is the same as Listen, but with a configurable keep-alive timeout duration.
+func ListenTimeout(network, addr string, keepalivePeriod time.Duration) (net.Listener, error) {
+// check to see if plugin provides listener
+if ln, err := getListenerFromPlugin(network, addr); err != nil || ln != nil {
+return ln, err
+}
+
+config := &net.ListenConfig{Control: reusePort, KeepAlive: keepalivePeriod}
+return config.Listen(context.Background(), network, addr)
+}
+
+func reusePort(network, address string, conn syscall.RawConn) error {
+return conn.Control(func(descriptor uintptr) {
+if err := unix.SetsockoptInt(int(descriptor), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil {
+Log().Error("setting SO_REUSEPORT",
+zap.String("network", network),
+zap.String("address", address),
+zap.Uintptr("descriptor", descriptor),
+zap.Error(err))
+}
+})
+}
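Aside: the Linux build above relies on net.ListenConfig.Control, which runs just before bind(2), the only window in which SO_REUSEPORT can be set. Here is a sketch of the same technique in plain Go outside Caddy; it differs from the patch in that it returns the setsockopt error to the caller instead of logging it, and the function name is made up.

// Sketch of SO_REUSEPORT via ListenConfig.Control (Linux/x/sys only).
package main

import (
	"context"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

func listenReusable(network, addr string) (net.Listener, error) {
	lc := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			var sockErr error
			if err := c.Control(func(fd uintptr) {
				// allow multiple sockets to bind the same address/port
				sockErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
			}); err != nil {
				return err
			}
			return sockErr
		},
	}
	return lc.Listen(context.Background(), network, addr)
}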
293 listeners.go
@@ -20,16 +20,16 @@ import (
 "errors"
 "fmt"
 "net"
+"net/netip"
 "os"
 "strconv"
 "strings"
-"sync"
 "sync/atomic"
 "syscall"
-"time"

 "github.com/lucas-clemente/quic-go"
 "github.com/lucas-clemente/quic-go/http3"
+"go.uber.org/zap"
 )

 // Listen is like net.Listen, except Caddy's listeners can overlap
@@ -41,31 +41,30 @@ import (
 // the socket have been finished. Always be sure to close listeners
 // when you are done with them, just like normal listeners.
 func Listen(network, addr string) (net.Listener, error) {
-lnKey := network + "/" + addr
-
-sharedLn, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
-ln, err := net.Listen(network, addr)
-if err != nil {
-// https://github.com/caddyserver/caddy/pull/4534
-if isUnixNetwork(network) && isListenBindAddressAlreadyInUseError(err) {
-return nil, fmt.Errorf("%w: this can happen if Caddy was forcefully killed", err)
-}
-return nil, err
-}
-return &sharedListener{Listener: ln, key: lnKey}, nil
-})
-if err != nil {
-return nil, err
-}
-
-return &fakeCloseListener{sharedListener: sharedLn.(*sharedListener)}, nil
-}
+// a 0 timeout means Go uses its default
+return ListenTimeout(network, addr, 0)
+}
+
+// getListenerFromPlugin returns a listener on the given network and address
+// if a plugin has registered the network name. It may return (nil, nil) if
+// no plugin can provide a listener.
+func getListenerFromPlugin(network, addr string) (net.Listener, error) {
+network = strings.TrimSpace(strings.ToLower(network))
+
+// get listener from plugin if network type is registered
+if getListener, ok := networkTypes[network]; ok {
+Log().Debug("getting listener from plugin", zap.String("network", network))
+return getListener(network, addr)
+}
+
+return nil, nil
+}

 // ListenPacket returns a net.PacketConn suitable for use in a Caddy module.
 // It is like Listen except for PacketConns.
 // Always be sure to close the PacketConn when you are done.
 func ListenPacket(network, addr string) (net.PacketConn, error) {
-lnKey := network + "/" + addr
+lnKey := listenerKey(network, addr)

 sharedPc, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
 pc, err := net.ListenPacket(network, addr)
@@ -88,88 +87,45 @@ func ListenPacket(network, addr string) (net.PacketConn, error) {
 // ListenQUIC returns a quic.EarlyListener suitable for use in a Caddy module.
 // Note that the context passed to Accept is currently ignored, so using
 // a context other than context.Background is meaningless.
-func ListenQUIC(addr string, tlsConf *tls.Config) (quic.EarlyListener, error) {
-lnKey := "quic/" + addr
+// This API is EXPERIMENTAL and may change.
+func ListenQUIC(addr string, tlsConf *tls.Config, activeRequests *int64) (quic.EarlyListener, error) {
+lnKey := listenerKey("udp", addr)

 sharedEl, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
-el, err := quic.ListenAddrEarly(addr, http3.ConfigureTLSConfig(tlsConf), &quic.Config{})
+el, err := quic.ListenAddrEarly(addr, http3.ConfigureTLSConfig(tlsConf), &quic.Config{
+RequireAddressValidation: func(clientAddr net.Addr) bool {
+var highLoad bool
+if activeRequests != nil {
+highLoad = atomic.LoadInt64(activeRequests) > 1000 // TODO: make tunable?
+}
+return highLoad
+},
+})
 if err != nil {
 return nil, err
 }
 return &sharedQuicListener{EarlyListener: el, key: lnKey}, nil
 })
+if err != nil {
+return nil, err
+}

 ctx, cancel := context.WithCancel(context.Background())
 return &fakeCloseQuicListener{
 sharedQuicListener: sharedEl.(*sharedQuicListener),
-context: ctx, contextCancel: cancel,
-}, err
+context: ctx,
+contextCancel: cancel,
+}, nil
 }

-// fakeCloseListener is a private wrapper over a listener that
-// is shared. The state of fakeCloseListener is not shared.
-// This allows one user of a socket to "close" the listener
-// while in reality the socket stays open for other users of
-// the listener. In this way, servers become hot-swappable
-// while the listener remains running. Listeners should be
-// re-wrapped in a new fakeCloseListener each time the listener
-// is reused. This type is atomic and values must not be copied.
-type fakeCloseListener struct {
-closed int32 // accessed atomically; belongs to this struct only
-*sharedListener // embedded, so we also become a net.Listener
-}
-
-func (fcl *fakeCloseListener) Accept() (net.Conn, error) {
-// if the listener is already "closed", return error
-if atomic.LoadInt32(&fcl.closed) == 1 {
-return nil, fakeClosedErr(fcl)
-}
-
-// call underlying accept
-conn, err := fcl.sharedListener.Accept()
-if err == nil {
-return conn, nil
-}
-
-// since Accept() returned an error, it may be because our reference to
-// the listener (this fakeCloseListener) may have been closed, i.e. the
-// server is shutting down; in that case, we need to clear the deadline
-// that we set when Close() was called, and return a non-temporary and
-// non-timeout error value to the caller, masking the "true" error, so
-// that server loops / goroutines won't retry, linger, and leak
-if atomic.LoadInt32(&fcl.closed) == 1 {
-// we dereference the sharedListener explicitly even though it's embedded
-// so that it's clear in the code that side-effects are shared with other
-// users of this listener, not just our own reference to it; we also don't
-// do anything with the error because all we could do is log it, but we
-// expliclty assign it to nothing so we don't forget it's there if needed
-_ = fcl.sharedListener.clearDeadline()
-
-if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
-return nil, fakeClosedErr(fcl)
-}
-}
-
-return nil, err
-}
-
-// Close stops accepting new connections without closing the
-// underlying listener. The underlying listener is only closed
-// if the caller is the last known user of the socket.
-func (fcl *fakeCloseListener) Close() error {
-if atomic.CompareAndSwapInt32(&fcl.closed, 0, 1) {
-// There are two ways I know of to get an Accept()
-// function to return to the server loop that called
-// it: close the listener, or set a deadline in the
-// past. Obviously, we can't close the socket yet
-// since others may be using it (hence this whole
-// file). But we can set the deadline in the past,
-// and this is kind of cheating, but it works, and
-// it apparently even works on Windows.
-_ = fcl.sharedListener.setDeadline()
-_, _ = listenerPool.Delete(fcl.sharedListener.key)
-}
-return nil
-}
+// ListenerUsage returns the current usage count of the given listener address.
+func ListenerUsage(network, addr string) int {
+count, _ := listenerPool.References(listenerKey(network, addr))
+return count
+}
+
+func listenerKey(network, addr string) string {
+return network + "/" + addr
+}

 type fakeCloseQuicListener struct {
@@ -255,55 +211,6 @@ func (fcpc fakeClosePacketConn) SyscallConn() (syscall.RawConn, error) {
 return nil, fmt.Errorf("SyscallConn() not implemented for %T", fcpc.PacketConn)
 }

-// sharedListener is a wrapper over an underlying listener. The listener
-// and the other fields on the struct are shared state that is synchronized,
-// so sharedListener structs must never be copied (always use a pointer).
-type sharedListener struct {
-net.Listener
-key string // uniquely identifies this listener
-deadline bool // whether a deadline is currently set
-deadlineMu sync.Mutex
-}
-
-func (sl *sharedListener) clearDeadline() error {
-var err error
-sl.deadlineMu.Lock()
-if sl.deadline {
-switch ln := sl.Listener.(type) {
-case *net.TCPListener:
-err = ln.SetDeadline(time.Time{})
-case *net.UnixListener:
-err = ln.SetDeadline(time.Time{})
-}
-sl.deadline = false
-}
-sl.deadlineMu.Unlock()
-return err
-}
-
-func (sl *sharedListener) setDeadline() error {
-timeInPast := time.Now().Add(-1 * time.Minute)
-var err error
-sl.deadlineMu.Lock()
-if !sl.deadline {
-switch ln := sl.Listener.(type) {
-case *net.TCPListener:
-err = ln.SetDeadline(timeInPast)
-case *net.UnixListener:
-err = ln.SetDeadline(timeInPast)
-}
-sl.deadline = true
-}
-sl.deadlineMu.Unlock()
-return err
-}
-
-// Destruct is called by the UsagePool when the listener is
-// finally not being used anymore. It closes the socket.
-func (sl *sharedListener) Destruct() error {
-return sl.Listener.Close()
-}
-
 // sharedQuicListener is like sharedListener, but for quic.EarlyListeners.
 type sharedQuicListener struct {
 quic.EarlyListener
@@ -353,11 +260,25 @@ func (na NetworkAddress) JoinHostPort(offset uint) string {
 return net.JoinHostPort(na.Host, strconv.Itoa(int(na.StartPort+offset)))
 }

+func (na NetworkAddress) Expand() []NetworkAddress {
+size := na.PortRangeSize()
+addrs := make([]NetworkAddress, size)
+for portOffset := uint(0); portOffset < size; portOffset++ {
+na2 := na
+na2.StartPort, na2.EndPort = na.StartPort+portOffset, na.StartPort+portOffset
+addrs[portOffset] = na2
+}
+return addrs
+}
+
 // PortRangeSize returns how many ports are in
 // pa's port range. Port ranges are inclusive,
 // so the size is the difference of start and
 // end ports plus one.
 func (na NetworkAddress) PortRangeSize() uint {
+if na.EndPort < na.StartPort {
+return 0
+}
 return (na.EndPort - na.StartPort) + 1
 }

@@ -368,7 +289,7 @@ func (na NetworkAddress) isLoopback() bool {
 if na.Host == "localhost" {
 return true
 }
-if ip := net.ParseIP(na.Host); ip != nil {
+if ip, err := netip.ParseAddr(na.Host); err == nil {
 return ip.IsLoopback()
 }
 return false
@@ -378,7 +299,7 @@ func (na NetworkAddress) isWildcardInterface() bool {
 if na.Host == "" {
 return true
 }
-if ip := net.ParseIP(na.Host); ip != nil {
+if ip, err := netip.ParseAddr(na.Host); err == nil {
 return ip.IsUnspecified()
 }
 return false
@@ -391,10 +312,13 @@ func (na NetworkAddress) port() string {
 return fmt.Sprintf("%d-%d", na.StartPort, na.EndPort)
 }

-// String reconstructs the address string to the form expected
-// by ParseNetworkAddress(). If the address is a unix socket,
-// any non-zero port will be dropped.
+// String reconstructs the address string for human display.
+// The output can be parsed by ParseNetworkAddress(). If the
+// address is a unix socket, any non-zero port will be dropped.
 func (na NetworkAddress) String() string {
+if na.Network == "tcp" && (na.Host != "" || na.port() != "") {
+na.Network = "" // omit default network value for brevity
+}
 return JoinNetworkAddress(na.Network, na.Host, na.port())
 }

@@ -427,36 +351,38 @@ func isListenBindAddressAlreadyInUseError(err error) bool {
 func ParseNetworkAddress(addr string) (NetworkAddress, error) {
 var host, port string
 network, host, port, err := SplitNetworkAddress(addr)
-if network == "" {
-network = "tcp"
-}
 if err != nil {
 return NetworkAddress{}, err
 }
+if network == "" {
+network = "tcp"
+}
 if isUnixNetwork(network) {
 return NetworkAddress{
 Network: network,
 Host: host,
 }, nil
 }
-ports := strings.SplitN(port, "-", 2)
-if len(ports) == 1 {
-ports = append(ports, ports[0])
-}
 var start, end uint64
-start, err = strconv.ParseUint(ports[0], 10, 16)
-if err != nil {
-return NetworkAddress{}, fmt.Errorf("invalid start port: %v", err)
-}
-end, err = strconv.ParseUint(ports[1], 10, 16)
-if err != nil {
-return NetworkAddress{}, fmt.Errorf("invalid end port: %v", err)
-}
-if end < start {
-return NetworkAddress{}, fmt.Errorf("end port must not be less than start port")
-}
-if (end - start) > maxPortSpan {
-return NetworkAddress{}, fmt.Errorf("port range exceeds %d ports", maxPortSpan)
+if port != "" {
+before, after, found := strings.Cut(port, "-")
+if !found {
+after = before
+}
+start, err = strconv.ParseUint(before, 10, 16)
+if err != nil {
+return NetworkAddress{}, fmt.Errorf("invalid start port: %v", err)
+}
+end, err = strconv.ParseUint(after, 10, 16)
+if err != nil {
+return NetworkAddress{}, fmt.Errorf("invalid end port: %v", err)
+}
+if end < start {
+return NetworkAddress{}, fmt.Errorf("end port must not be less than start port")
+}
+if (end - start) > maxPortSpan {
+return NetworkAddress{}, fmt.Errorf("port range exceeds %d ports", maxPortSpan)
+}
 }
 return NetworkAddress{
 Network: network,
@@ -469,15 +395,29 @@ func ParseNetworkAddress(addr string) (NetworkAddress, error) {
 // SplitNetworkAddress splits a into its network, host, and port components.
 // Note that port may be a port range (:X-Y), or omitted for unix sockets.
 func SplitNetworkAddress(a string) (network, host, port string, err error) {
-if idx := strings.Index(a, "/"); idx >= 0 {
-network = strings.ToLower(strings.TrimSpace(a[:idx]))
-a = a[idx+1:]
+beforeSlash, afterSlash, slashFound := strings.Cut(a, "/")
+if slashFound {
+network = strings.ToLower(strings.TrimSpace(beforeSlash))
+a = afterSlash
 }
 if isUnixNetwork(network) {
 host = a
 return
 }
 host, port, err = net.SplitHostPort(a)
+if err == nil || a == "" {
+return
+}
+// in general, if there was an error, it was likely "missing port",
+// so try adding a bogus port to take advantage of standard library's
+// robust parser, then strip the artificial port before returning
+// (don't overwrite original error though; might still be relevant)
+var err2 error
+host, port, err2 = net.SplitHostPort(a + ":0")
+if err2 == nil {
+err = nil
+port = ""
+}
 return
 }

@@ -499,6 +439,35 @@ func JoinNetworkAddress(network, host, port string) string {
 return a
 }

+// RegisterNetwork registers a network type with Caddy so that if a listener is
+// created for that network type, getListener will be invoked to get the listener.
+// This should be called during init() and will panic if the network type is standard
+// or reserved, or if it is already registered. EXPERIMENTAL and subject to change.
+func RegisterNetwork(network string, getListener ListenerFunc) {
+network = strings.TrimSpace(strings.ToLower(network))
+
+if network == "tcp" || network == "tcp4" || network == "tcp6" ||
+network == "udp" || network == "udp4" || network == "udp6" ||
+network == "unix" || network == "unixpacket" || network == "unixgram" ||
+strings.HasPrefix("ip:", network) || strings.HasPrefix("ip4:", network) || strings.HasPrefix("ip6:", network) {
+panic("network type " + network + " is reserved")
+}
+
+if _, ok := networkTypes[strings.ToLower(network)]; ok {
+panic("network type " + network + " is already registered")
+}
+
+networkTypes[network] = getListener
+}
+
+// ListenerFunc is a function that can return a listener given a network and address.
+// The listeners must be capable of overlapping: with Caddy, new configs are loaded
+// before old ones are unloaded, so listeners may overlap briefly if the configs
+// both need the same listener. EXPERIMENTAL and subject to change.
+type ListenerFunc func(network, addr string) (net.Listener, error)
+
+var networkTypes = map[string]ListenerFunc{}
+
 // ListenerWrapper is a type that wraps a listener
 // so it can modify the input listener's methods.
 // Modules that implement this interface are found
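Aside: the new RegisterNetwork/ListenerFunc API above is how a plugin teaches Caddy a non-standard network type, after which getListenerFromPlugin routes Listen calls for that network to the plugin. A minimal sketch of a plugin's init(); the "example" network name is made up, and a real plugin would return its own listener rather than delegating to plain TCP.

// Sketch: registering a custom network type during init().
func init() {
	caddy.RegisterNetwork("example", func(network, addr string) (net.Listener, error) {
		// illustration only: delegate to plain TCP
		return net.Listen("tcp", addr)
	})
}

With that in place, caddy.Listen("example", "localhost:9000") would be served by the registered ListenerFunc instead of the built-in listener pool.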
@@ -13,7 +13,6 @@
 // limitations under the License.

 //go:build gofuzz
-// +build gofuzz

 package caddy
@@ -32,9 +32,24 @@ func TestSplitNetworkAddress(t *testing.T) {
 expectErr: true,
 },
 {
 input: "foo",
+expectHost: "foo",
+},
+{
+input: ":", // empty host & empty port
+},
+{
+input: "::",
 expectErr: true,
 },
+{
+input: "[::]",
+expectHost: "::",
+},
+{
+input: ":1234",
+expectPort: "1234",
+},
 {
 input: "foo:1234",
 expectHost: "foo",
@@ -80,10 +95,10 @@ func TestSplitNetworkAddress(t *testing.T) {
 } {
 actualNetwork, actualHost, actualPort, err := SplitNetworkAddress(tc.input)
 if tc.expectErr && err == nil {
-t.Errorf("Test %d: Expected error but got: %v", i, err)
+t.Errorf("Test %d: Expected error but got %v", i, err)
 }
 if !tc.expectErr && err != nil {
-t.Errorf("Test %d: Expected no error but got: %v", i, err)
+t.Errorf("Test %d: Expected no error but got %v", i, err)
 }
 if actualNetwork != tc.expectNetwork {
 t.Errorf("Test %d: Expected network '%s' but got '%s'", i, tc.expectNetwork, actualNetwork)
@@ -169,8 +184,17 @@ func TestParseNetworkAddress(t *testing.T) {
 expectErr: true,
 },
 {
 input: ":",
-expectErr: true,
+expectAddr: NetworkAddress{
+Network: "tcp",
+},
+},
+{
+input: "[::]",
+expectAddr: NetworkAddress{
+Network: "tcp",
+Host: "::",
+},
 },
 {
 input: ":1234",
@@ -307,3 +331,85 @@ func TestJoinHostPort(t *testing.T) {
 }
 }
 }

+func TestExpand(t *testing.T) {
+for i, tc := range []struct {
+input NetworkAddress
+expect []NetworkAddress
+}{
+{
+input: NetworkAddress{
+Network: "tcp",
+Host: "localhost",
+StartPort: 2000,
+EndPort: 2000,
+},
+expect: []NetworkAddress{
+{
+Network: "tcp",
+Host: "localhost",
+StartPort: 2000,
+EndPort: 2000,
+},
+},
+},
+{
+input: NetworkAddress{
+Network: "tcp",
+Host: "localhost",
+StartPort: 2000,
+EndPort: 2002,
+},
+expect: []NetworkAddress{
+{
+Network: "tcp",
+Host: "localhost",
+StartPort: 2000,
+EndPort: 2000,
+},
+{
+Network: "tcp",
+Host: "localhost",
+StartPort: 2001,
+EndPort: 2001,
+},
+{
+Network: "tcp",
+Host: "localhost",
+StartPort: 2002,
+EndPort: 2002,
+},
+},
+},
+{
+input: NetworkAddress{
+Network: "tcp",
+Host: "localhost",
+StartPort: 2000,
+EndPort: 1999,
+},
+expect: []NetworkAddress{},
+},
+{
+input: NetworkAddress{
+Network: "unix",
+Host: "/foo/bar",
+StartPort: 0,
+EndPort: 0,
+},
+expect: []NetworkAddress{
+{
+Network: "unix",
+Host: "/foo/bar",
+StartPort: 0,
+EndPort: 0,
+},
+},
+},
+} {
+actual := tc.input.Expand()
+if !reflect.DeepEqual(actual, tc.expect) {
+t.Errorf("Test %d: Expected %+v but got %+v", i, tc.expect, actual)
+}
+}
+}
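Aside: the new test cases above describe the looser parsing and the new Expand method in practice. A short hedged sketch of the same behavior from calling code (fmt and the caddy package are assumed imports; the expected outputs in the comments follow the test expectations):

// Sketch: what ParseNetworkAddress and Expand return after this change.
func exampleParse() {
	// "udp/localhost:2000-2002" → network udp, host localhost, ports 2000–2002
	addr, err := caddy.ParseNetworkAddress("udp/localhost:2000-2002")
	if err != nil {
		panic(err)
	}
	fmt.Println(addr.PortRangeSize()) // 3
	for _, single := range addr.Expand() {
		fmt.Println(single.String()) // udp/localhost:2000, then :2001, then :2002
	}

	// ":" no longer returns an error: empty host and port, default tcp network
	colon, _ := caddy.ParseNetworkAddress(":")
	fmt.Println(colon.Network) // "tcp"
}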
modules.go (16 changed lines)

@@ -44,7 +44,7 @@ import (
 // Provisioner, the Provision() method is called. 4) If the
 // module is a Validator, the Validate() method is called.
 // 5) The module will probably be type-asserted from
-// interface{} to some other, more useful interface expected
+// 'any' to some other, more useful interface expected
 // by the host module. For example, HTTP handler modules are
 // type-asserted as caddyhttp.MiddlewareHandler values.
 // 6) When a module's containing Context is canceled, if it is
@@ -172,7 +172,7 @@ func GetModule(name string) (ModuleInfo, error) {
 // GetModuleName returns a module's name (the last label of its ID)
 // from an instance of its value. If the value is not a module, an
 // empty string will be returned.
-func GetModuleName(instance interface{}) string {
+func GetModuleName(instance any) string {
 	var name string
 	if mod, ok := instance.(Module); ok {
 		name = mod.CaddyModule().ID.Name()
@@ -182,7 +182,7 @@ func GetModuleName(instance interface{}) string {
 
 // GetModuleID returns a module's ID from an instance of its value.
 // If the value is not a module, an empty string will be returned.
-func GetModuleID(instance interface{}) string {
+func GetModuleID(instance any) string {
 	var id string
 	if mod, ok := instance.(Module); ok {
 		id = string(mod.CaddyModule().ID)
@@ -259,7 +259,7 @@ func Modules() []string {
 // where raw must be a JSON encoding of a map. It returns that value,
 // along with the result of removing that key from raw.
 func getModuleNameInline(moduleNameKey string, raw json.RawMessage) (string, json.RawMessage, error) {
-	var tmp map[string]interface{}
+	var tmp map[string]any
 	err := json.Unmarshal(raw, &tmp)
 	if err != nil {
 		return "", nil, err
@@ -324,11 +324,11 @@ func ParseStructTag(tag string) (map[string]string, error) {
 		if pair == "" {
 			continue
 		}
-		parts := strings.SplitN(pair, "=", 2)
-		if len(parts) != 2 {
+		before, after, isCut := strings.Cut(pair, "=")
+		if !isCut {
 			return nil, fmt.Errorf("missing key in '%s' (pair %d)", pair, i)
 		}
-		results[parts[0]] = parts[1]
+		results[before] = after
 	}
 	return results, nil
 }
@@ -337,7 +337,7 @@ func ParseStructTag(tag string) (map[string]string, error) {
 // if any of the fields are unrecognized. Useful when decoding
 // module configurations, where you want to be more sure they're
 // correct.
-func strictUnmarshalJSON(data []byte, v interface{}) error {
+func strictUnmarshalJSON(data []byte, v any) error {
 	dec := json.NewDecoder(bytes.NewReader(data))
 	dec.DisallowUnknownFields()
 	return dec.Decode(v)
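The switch from strings.SplitN to strings.Cut (added to the standard library in Go 1.18) makes the missing-separator case explicit via the boolean return instead of a length check. A small self-contained sketch of the same key=value parsing pattern, with an illustrative function name:

package main

import (
	"fmt"
	"strings"
)

// parsePairs splits space-separated key=value pairs, mirroring the
// ParseStructTag pattern above; it is a sketch, not code from this diff.
func parsePairs(tag string) (map[string]string, error) {
	results := make(map[string]string)
	for i, pair := range strings.Split(tag, " ") {
		if pair == "" {
			continue
		}
		before, after, isCut := strings.Cut(pair, "=")
		if !isCut {
			return nil, fmt.Errorf("missing key in '%s' (pair %d)", pair, i)
		}
		results[before] = after
	}
	return results, nil
}

func main() {
	m, err := parsePairs("namespace=events.handlers inline_key=handler")
	fmt.Println(m, err) // map[inline_key:handler namespace:events.handlers] <nil>
}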
373
modules/caddyevents/app.go
Normal file
373
modules/caddyevents/app.go
Normal file
|
@ -0,0 +1,373 @@
|
||||||
|
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package caddyevents
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/caddyserver/caddy/v2"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
caddy.RegisterModule(App{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// App implements a global eventing system within Caddy.
|
||||||
|
// Modules can emit and subscribe to events, providing
|
||||||
|
// hooks into deep parts of the code base that aren't
|
||||||
|
// otherwise accessible. Events provide information about
|
||||||
|
// what and when things are happening, and this facility
|
||||||
|
// allows handlers to take action when events occur,
|
||||||
|
// add information to the event's metadata, and even
|
||||||
|
// control program flow in some cases.
|
||||||
|
//
|
||||||
|
// Events are propagated in a DOM-like fashion. An event
|
||||||
|
// emitted from module `a.b.c` (the "origin") will first
|
||||||
|
// invoke handlers listening to `a.b.c`, then `a.b`,
|
||||||
|
// then `a`, then those listening regardless of origin.
|
||||||
|
// If a handler returns the special error Aborted, then
|
||||||
|
// propagation immediately stops and the event is marked
|
||||||
|
// as aborted. Emitters may optionally choose to adjust
|
||||||
|
// program flow based on an abort.
|
||||||
|
//
|
||||||
|
// Modules can subscribe to events by origin and/or name.
|
||||||
|
// A handler is invoked only if it is subscribed to the
|
||||||
|
// event by name and origin. Subscriptions should be
|
||||||
|
// registered during the provisioning phase, before apps
|
||||||
|
// are started.
|
||||||
|
//
|
||||||
|
// Event handlers are fired synchronously as part of the
|
||||||
|
// regular flow of the program. This allows event handlers
|
||||||
|
// to control the flow of the program if the origin permits
|
||||||
|
// it and also allows handlers to convey new information
|
||||||
|
// back into the origin module before it continues.
|
||||||
|
// In essence, event handlers are similar to HTTP
|
||||||
|
// middleware handlers.
|
||||||
|
//
|
||||||
|
// Event bindings/subscribers are unordered; i.e.
|
||||||
|
// event handlers are invoked in an arbitrary order.
|
||||||
|
// Event handlers should not rely on the logic of other
|
||||||
|
// handlers to succeed.
|
||||||
|
//
|
||||||
|
// The entirety of this app module is EXPERIMENTAL and
|
||||||
|
// subject to change. Pay attention to release notes.
|
||||||
|
type App struct {
|
||||||
|
// Subscriptions bind handlers to one or more events
|
||||||
|
// either globally or scoped to specific modules or module
|
||||||
|
// namespaces.
|
||||||
|
Subscriptions []*Subscription `json:"subscriptions,omitempty"`
|
||||||
|
|
||||||
|
// Map of event name to map of module ID/namespace to handlers
|
||||||
|
subscriptions map[string]map[caddy.ModuleID][]Handler
|
||||||
|
|
||||||
|
logger *zap.Logger
|
||||||
|
started bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscription represents binding of one or more handlers to
|
||||||
|
// one or more events.
|
||||||
|
type Subscription struct {
|
||||||
|
// The name(s) of the event(s) to bind to. Default: all events.
|
||||||
|
Events []string `json:"events,omitempty"`
|
||||||
|
|
||||||
|
// The ID or namespace of the module(s) from which events
|
||||||
|
// originate to listen to for events. Default: all modules.
|
||||||
|
//
|
||||||
|
// Events propagate up, so events emitted by module "a.b.c"
|
||||||
|
// will also trigger the event for "a.b" and "a". Thus, to
|
||||||
|
// receive all events from "a.b.c" and "a.b.d", for example,
|
||||||
|
// one can subscribe to either "a.b" or all of "a" entirely.
|
||||||
|
Modules []caddy.ModuleID `json:"modules,omitempty"`
|
||||||
|
|
||||||
|
// The event handler modules. These implement the actual
|
||||||
|
// behavior to invoke when an event occurs. At least one
|
||||||
|
// handler is required.
|
||||||
|
HandlersRaw []json.RawMessage `json:"handlers,omitempty" caddy:"namespace=events.handlers inline_key=handler"`
|
||||||
|
|
||||||
|
// The decoded handlers; Go code that is subscribing to
|
||||||
|
// an event should set this field directly; HandlersRaw
|
||||||
|
// is meant for JSON configuration to fill out this field.
|
||||||
|
Handlers []Handler `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// CaddyModule returns the Caddy module information.
|
||||||
|
func (App) CaddyModule() caddy.ModuleInfo {
|
||||||
|
return caddy.ModuleInfo{
|
||||||
|
ID: "events",
|
||||||
|
New: func() caddy.Module { return new(App) },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Provision sets up the app.
|
||||||
|
func (app *App) Provision(ctx caddy.Context) error {
|
||||||
|
app.logger = ctx.Logger(app)
|
||||||
|
app.subscriptions = make(map[string]map[caddy.ModuleID][]Handler)
|
||||||
|
|
||||||
|
for _, sub := range app.Subscriptions {
|
||||||
|
if sub.HandlersRaw != nil {
|
||||||
|
handlersIface, err := ctx.LoadModule(sub, "HandlersRaw")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("loading event subscriber modules: %v", err)
|
||||||
|
}
|
||||||
|
for _, h := range handlersIface.([]any) {
|
||||||
|
sub.Handlers = append(sub.Handlers, h.(Handler))
|
||||||
|
}
|
||||||
|
if len(sub.Handlers) == 0 {
|
||||||
|
// pointless to bind without any handlers
|
||||||
|
return fmt.Errorf("no handlers defined")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start runs the app.
|
||||||
|
func (app *App) Start() error {
|
||||||
|
for _, sub := range app.Subscriptions {
|
||||||
|
if err := app.Subscribe(sub); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
app.started = true
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop gracefully shuts down the app.
|
||||||
|
func (app *App) Stop() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe binds one or more event handlers to one or more events
|
||||||
|
// according to the subscription s. For now, subscriptions can only
|
||||||
|
// be created during the provision phase; new bindings cannot be
|
||||||
|
// created after the events app has started.
|
||||||
|
func (app *App) Subscribe(s *Subscription) error {
|
||||||
|
if app.started {
|
||||||
|
return fmt.Errorf("events already started; new subscriptions closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
// handle special case of catch-alls (omission of event name or module space implies all)
|
||||||
|
if len(s.Events) == 0 {
|
||||||
|
s.Events = []string{""}
|
||||||
|
}
|
||||||
|
if len(s.Modules) == 0 {
|
||||||
|
s.Modules = []caddy.ModuleID{""}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, eventName := range s.Events {
|
||||||
|
if app.subscriptions[eventName] == nil {
|
||||||
|
app.subscriptions[eventName] = make(map[caddy.ModuleID][]Handler)
|
||||||
|
}
|
||||||
|
for _, originModule := range s.Modules {
|
||||||
|
app.subscriptions[eventName][originModule] = append(app.subscriptions[eventName][originModule], s.Handlers...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// On is syntactic sugar for Subscribe() that binds a single handler
|
||||||
|
// to a single event from any module. If the eventName is empty string,
|
||||||
|
// it counts for all events.
|
||||||
|
func (app *App) On(eventName string, handler Handler) error {
|
||||||
|
return app.Subscribe(&Subscription{
|
||||||
|
Events: []string{eventName},
|
||||||
|
Handlers: []Handler{handler},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Emit creates and dispatches an event named eventName to all relevant handlers with
|
||||||
|
// the metadata data. Events are emitted and propagated synchronously. The returned Event
|
||||||
|
// value will have any additional information from the invoked handlers.
|
||||||
|
func (app *App) Emit(ctx caddy.Context, eventName string, data map[string]any) Event {
|
||||||
|
logger := app.logger.With(zap.String("name", eventName))
|
||||||
|
|
||||||
|
id, err := uuid.NewRandom()
|
||||||
|
if err != nil {
|
||||||
|
logger.Error("failed generating new event ID", zap.Error(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
eventName = strings.ToLower(eventName)
|
||||||
|
|
||||||
|
e := Event{
|
||||||
|
id: id,
|
||||||
|
ts: time.Now(),
|
||||||
|
name: eventName,
|
||||||
|
origin: ctx.Module(),
|
||||||
|
data: data,
|
||||||
|
}
|
||||||
|
|
||||||
|
logger = logger.With(
|
||||||
|
zap.String("id", e.id.String()),
|
||||||
|
zap.String("origin", e.origin.CaddyModule().String()))
|
||||||
|
|
||||||
|
// add event info to replacer, make sure it's in the context
|
||||||
|
repl, ok := ctx.Context.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
||||||
|
if !ok {
|
||||||
|
repl = caddy.NewReplacer()
|
||||||
|
ctx.Context = context.WithValue(ctx.Context, caddy.ReplacerCtxKey, repl)
|
||||||
|
}
|
||||||
|
repl.Map(func(key string) (any, bool) {
|
||||||
|
switch key {
|
||||||
|
case "event":
|
||||||
|
return e, true
|
||||||
|
case "event.id":
|
||||||
|
return e.id, true
|
||||||
|
case "event.name":
|
||||||
|
return e.name, true
|
||||||
|
case "event.time":
|
||||||
|
return e.ts, true
|
||||||
|
case "event.time_unix":
|
||||||
|
return e.ts.UnixMilli(), true
|
||||||
|
case "event.module":
|
||||||
|
return e.origin.CaddyModule().ID, true
|
||||||
|
case "event.data":
|
||||||
|
return e.data, true
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.HasPrefix(key, "event.data.") {
|
||||||
|
key = strings.TrimPrefix(key, "event.data.")
|
||||||
|
if val, ok := data[key]; ok {
|
||||||
|
return val, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, false
|
||||||
|
})
|
||||||
|
|
||||||
|
logger.Debug("event", zap.Any("data", e.data))
|
||||||
|
|
||||||
|
// invoke handlers bound to the event by name and also all events; this for loop
|
||||||
|
// iterates twice at most: once for the event name, once for "" (all events)
|
||||||
|
for {
|
||||||
|
moduleID := e.origin.CaddyModule().ID
|
||||||
|
|
||||||
|
// implement propagation up the module tree (i.e. start with "a.b.c" then "a.b" then "a" then "")
|
||||||
|
for {
|
||||||
|
if app.subscriptions[eventName] == nil {
|
||||||
|
break // shortcut if event not bound at all
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, handler := range app.subscriptions[eventName][moduleID] {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
logger.Error("context canceled; event handling stopped")
|
||||||
|
return e
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := handler.Handle(ctx, e); err != nil {
|
||||||
|
aborted := errors.Is(err, ErrAborted)
|
||||||
|
|
||||||
|
logger.Error("handler error",
|
||||||
|
zap.Error(err),
|
||||||
|
zap.Bool("aborted", aborted))
|
||||||
|
|
||||||
|
if aborted {
|
||||||
|
e.Aborted = err
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if moduleID == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
lastDot := strings.LastIndex(string(moduleID), ".")
|
||||||
|
if lastDot < 0 {
|
||||||
|
moduleID = "" // include handlers bound to events regardless of module
|
||||||
|
} else {
|
||||||
|
moduleID = moduleID[:lastDot]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// include handlers listening to all events
|
||||||
|
if eventName == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
eventName = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Event represents something that has happened or is happening.
|
||||||
|
type Event struct {
|
||||||
|
id uuid.UUID
|
||||||
|
ts time.Time
|
||||||
|
name string
|
||||||
|
origin caddy.Module
|
||||||
|
data map[string]any
|
||||||
|
|
||||||
|
// If non-nil, the event has been aborted, meaning
|
||||||
|
// propagation has stopped to other handlers and
|
||||||
|
// the code should stop what it was doing. Emitters
|
||||||
|
// may choose to use this as a signal to adjust their
|
||||||
|
// code path appropriately.
|
||||||
|
Aborted error
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloudEvent exports event e as a structure that, when
|
||||||
|
// serialized as JSON, is compatible with the
|
||||||
|
// CloudEvents spec.
|
||||||
|
func (e Event) CloudEvent() CloudEvent {
|
||||||
|
dataJSON, _ := json.Marshal(e.data)
|
||||||
|
return CloudEvent{
|
||||||
|
ID: e.id.String(),
|
||||||
|
Source: e.origin.CaddyModule().String(),
|
||||||
|
SpecVersion: "1.0",
|
||||||
|
Type: e.name,
|
||||||
|
Time: e.ts,
|
||||||
|
DataContentType: "application/json",
|
||||||
|
Data: dataJSON,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloudEvent is a JSON-serializable structure that
|
||||||
|
// is compatible with the CloudEvents specification.
|
||||||
|
// See https://cloudevents.io.
|
||||||
|
type CloudEvent struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Source string `json:"source"`
|
||||||
|
SpecVersion string `json:"specversion"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Time time.Time `json:"time"`
|
||||||
|
DataContentType string `json:"datacontenttype,omitempty"`
|
||||||
|
Data json.RawMessage `json:"data,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrAborted cancels an event.
|
||||||
|
var ErrAborted = errors.New("event aborted")
|
||||||
|
|
||||||
|
// Handler is a type that can handle events.
|
||||||
|
type Handler interface {
|
||||||
|
Handle(context.Context, Event) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interface guards
|
||||||
|
var (
|
||||||
|
_ caddy.App = (*App)(nil)
|
||||||
|
_ caddy.Provisioner = (*App)(nil)
|
||||||
|
)
|
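Since the whole events app is marked experimental, its intended use from Go code is worth sketching. Based only on the API shown above (the Handler interface, App.On, and ctx.App("events")), a plugin could bind a handler during its own provisioning roughly as follows; the package name, handler type, and event name are illustrative assumptions, not part of this diff.

package myplugin

import (
	"context"
	"fmt"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyevents"
)

// logHandler is a hypothetical event handler; the Handle signature
// comes from the caddyevents.Handler interface above.
type logHandler struct{}

func (logHandler) Handle(ctx context.Context, e caddyevents.Event) error {
	// Returning caddyevents.ErrAborted here would stop propagation
	// and mark the event as aborted for the emitter.
	fmt.Println("event received:", e.CloudEvent().Type)
	return nil
}

// subscribe shows how Go code might bind a handler during its own
// Provision step, before apps are started (per the docs above).
// The event name "tls.cert_obtained" is only an example.
func subscribe(ctx caddy.Context) error {
	eventsAppIface, err := ctx.App("events")
	if err != nil {
		return err
	}
	eventsApp := eventsAppIface.(*caddyevents.App)
	return eventsApp.On("tls.cert_obtained", logHandler{})
}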
modules/caddyevents/eventsconfig/caddyfile.go (new file, 88 lines)

@@ -0,0 +1,88 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package eventsconfig is for configuring caddyevents.App with the
+// Caddyfile. This code can't be in the caddyevents package because
+// the httpcaddyfile package imports caddyhttp, which imports
+// caddyevents: hence, it creates an import cycle.
+package eventsconfig
+
+import (
+	"encoding/json"
+
+	"github.com/caddyserver/caddy/v2/caddyconfig"
+	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+	"github.com/caddyserver/caddy/v2/modules/caddyevents"
+)
+
+func init() {
+	httpcaddyfile.RegisterGlobalOption("events", parseApp)
+}
+
+// parseApp configures the "events" global option from Caddyfile to set up the events app.
+// Syntax:
+//
+//     events {
+//         on <event> <handler_module...>
+//     }
+//
+// If <event> is *, then it will bind to all events.
+func parseApp(d *caddyfile.Dispenser, _ any) (any, error) {
+	app := new(caddyevents.App)
+
+	// consume the option name
+	if !d.Next() {
+		return nil, d.ArgErr()
+	}
+
+	// handle the block
+	for d.NextBlock(0) {
+		switch d.Val() {
+		case "on":
+			if !d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			eventName := d.Val()
+			if eventName == "*" {
+				eventName = ""
+			}
+
+			if !d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			handlerName := d.Val()
+			modID := "events.handlers." + handlerName
+			unm, err := caddyfile.UnmarshalModule(d, modID)
+			if err != nil {
+				return nil, err
+			}
+
+			app.Subscriptions = append(app.Subscriptions, &caddyevents.Subscription{
+				Events: []string{eventName},
+				HandlersRaw: []json.RawMessage{
+					caddyconfig.JSONModuleObject(unm, "handler", handlerName, nil),
+				},
+			})
+
+		default:
+			return nil, d.ArgErr()
+		}
+	}
+
+	return httpcaddyfile.App{
+		Name:  "events",
+		Value: caddyconfig.JSON(app, nil),
+	}, nil
+}
@@ -20,11 +20,12 @@ import (
 	"fmt"
 	"net/http"
 	"strconv"
+	"sync"
 	"time"
 
 	"github.com/caddyserver/caddy/v2"
+	"github.com/caddyserver/caddy/v2/modules/caddyevents"
 	"github.com/caddyserver/caddy/v2/modules/caddytls"
-	"github.com/lucas-clemente/quic-go/http3"
 	"go.uber.org/zap"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/h2c"
@@ -95,6 +96,8 @@ func init() {
 // `{http.request.uri}` | The full request URI
 // `{http.response.header.*}` | Specific response header field
 // `{http.vars.*}` | Custom variables in the HTTP handler chain
+// `{http.shutting_down}` | True if the HTTP app is shutting down
+// `{http.time_until_shutdown}` | Time until HTTP server shutdown, if scheduled
 type App struct {
 	// HTTPPort specifies the port to use for HTTP (as opposed to HTTPS),
 	// which is used when setting up HTTP->HTTPS redirects or ACME HTTP
@@ -107,18 +110,31 @@ type App struct {
 	HTTPSPort int `json:"https_port,omitempty"`
 
 	// GracePeriod is how long to wait for active connections when shutting
-	// down the server. Once the grace period is over, connections will
-	// be forcefully closed.
+	// down the servers. During the grace period, no new connections are
+	// accepted, idle connections are closed, and active connections will
+	// be given the full length of time to become idle and close.
+	// Once the grace period is over, connections will be forcefully closed.
+	// If zero, the grace period is eternal. Default: 0.
 	GracePeriod caddy.Duration `json:"grace_period,omitempty"`
 
+	// ShutdownDelay is how long to wait before initiating the grace
+	// period. When this app is stopping (e.g. during a config reload or
+	// process exit), all servers will be shut down. Normally this immediately
+	// initiates the grace period. However, if this delay is configured, servers
+	// will not be shut down until the delay is over. During this time, servers
+	// continue to function normally and allow new connections. At the end, the
+	// grace period will begin. This can be useful to allow downstream load
+	// balancers time to move this instance out of the rotation without hiccups.
+	//
+	// When shutdown has been scheduled, placeholders {http.shutting_down} (bool)
+	// and {http.time_until_shutdown} (duration) may be useful for health checks.
+	ShutdownDelay caddy.Duration `json:"shutdown_delay,omitempty"`
+
 	// Servers is the list of servers, keyed by arbitrary names chosen
 	// at your discretion for your own convenience; the keys do not
 	// affect functionality.
 	Servers map[string]*Server `json:"servers,omitempty"`
 
-	servers   []*http.Server
-	h3servers []*http3.Server
-
 	ctx    caddy.Context
 	logger *zap.Logger
 	tlsApp *caddytls.TLS
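The interplay of ShutdownDelay and GracePeriod described above can be seen in a simplified, standalone sketch using only net/http: wait out the delay while still serving, then start the grace period and force-close when it expires. This is an illustration of the semantics, not the app's actual Stop logic, which also coordinates per-listener usage counts and HTTP/3 servers.

package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

// stopWithDelay sketches the documented sequence: delay first (server
// keeps accepting), then graceful shutdown bounded by the grace period.
func stopWithDelay(srv *http.Server, shutdownDelay, gracePeriod time.Duration) error {
	if shutdownDelay > 0 {
		// during this window the server still accepts new connections
		time.Sleep(shutdownDelay)
	}

	ctx := context.Background()
	if gracePeriod > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, gracePeriod)
		defer cancel()
	}
	// Shutdown stops accepting, closes idle connections, and waits for
	// active ones until ctx is done; with no deadline it waits forever
	// (the "eternal grace period").
	return srv.Shutdown(ctx)
}

func main() {
	srv := &http.Server{Addr: ":8080"}
	go srv.ListenAndServe()
	time.Sleep(100 * time.Millisecond)
	if err := stopWithDelay(srv, 0, 5*time.Second); err != nil {
		log.Println("shutdown:", err)
	}
}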
@ -146,6 +162,11 @@ func (app *App) Provision(ctx caddy.Context) error {
|
||||||
app.ctx = ctx
|
app.ctx = ctx
|
||||||
app.logger = ctx.Logger(app)
|
app.logger = ctx.Logger(app)
|
||||||
|
|
||||||
|
eventsAppIface, err := ctx.App("events")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("getting events app: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
repl := caddy.NewReplacer()
|
repl := caddy.NewReplacer()
|
||||||
|
|
||||||
// this provisions the matchers for each route,
|
// this provisions the matchers for each route,
|
||||||
|
@ -160,14 +181,28 @@ func (app *App) Provision(ctx caddy.Context) error {
|
||||||
for srvName, srv := range app.Servers {
|
for srvName, srv := range app.Servers {
|
||||||
srv.name = srvName
|
srv.name = srvName
|
||||||
srv.tlsApp = app.tlsApp
|
srv.tlsApp = app.tlsApp
|
||||||
|
srv.events = eventsAppIface.(*caddyevents.App)
|
||||||
|
srv.ctx = ctx
|
||||||
srv.logger = app.logger.Named("log")
|
srv.logger = app.logger.Named("log")
|
||||||
srv.errorLogger = app.logger.Named("log.error")
|
srv.errorLogger = app.logger.Named("log.error")
|
||||||
|
srv.shutdownAtMu = new(sync.RWMutex)
|
||||||
|
|
||||||
// only enable access logs if configured
|
// only enable access logs if configured
|
||||||
if srv.Logs != nil {
|
if srv.Logs != nil {
|
||||||
srv.accessLogger = app.logger.Named("log.access")
|
srv.accessLogger = app.logger.Named("log.access")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// the Go standard library does not let us serve only HTTP/2 using
|
||||||
|
// http.Server; we would probably need to write our own server
|
||||||
|
if !srv.protocol("h1") && (srv.protocol("h2") || srv.protocol("h2c")) {
|
||||||
|
return fmt.Errorf("server %s: cannot enable HTTP/2 or H2C without enabling HTTP/1.1; add h1 to protocols or remove h2/h2c", srvName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// if no protocols configured explicitly, enable all except h2c
|
||||||
|
if len(srv.Protocols) == 0 {
|
||||||
|
srv.Protocols = []string{"h1", "h2", "h3"}
|
||||||
|
}
|
||||||
|
|
||||||
// if not explicitly configured by the user, disallow TLS
|
// if not explicitly configured by the user, disallow TLS
|
||||||
// client auth bypass (domain fronting) which could
|
// client auth bypass (domain fronting) which could
|
||||||
// otherwise be exploited by sending an unprotected SNI
|
// otherwise be exploited by sending an unprotected SNI
|
||||||
|
@ -179,8 +214,7 @@ func (app *App) Provision(ctx caddy.Context) error {
|
||||||
// based on hostname
|
// based on hostname
|
||||||
if srv.StrictSNIHost == nil && srv.hasTLSClientAuth() {
|
if srv.StrictSNIHost == nil && srv.hasTLSClientAuth() {
|
||||||
app.logger.Warn("enabling strict SNI-Host enforcement because TLS client auth is configured",
|
app.logger.Warn("enabling strict SNI-Host enforcement because TLS client auth is configured",
|
||||||
zap.String("server_id", srvName),
|
zap.String("server_id", srvName))
|
||||||
)
|
|
||||||
trueBool := true
|
trueBool := true
|
||||||
srv.StrictSNIHost = &trueBool
|
srv.StrictSNIHost = &trueBool
|
||||||
}
|
}
|
||||||
|
@ -189,8 +223,7 @@ func (app *App) Provision(ctx caddy.Context) error {
|
||||||
for i := range srv.Listen {
|
for i := range srv.Listen {
|
||||||
lnOut, err := repl.ReplaceOrErr(srv.Listen[i], true, true)
|
lnOut, err := repl.ReplaceOrErr(srv.Listen[i], true, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("server %s, listener %d: %v",
|
return fmt.Errorf("server %s, listener %d: %v", srvName, i, err)
|
||||||
srvName, i, err)
|
|
||||||
}
|
}
|
||||||
srv.Listen[i] = lnOut
|
srv.Listen[i] = lnOut
|
||||||
}
|
}
|
||||||
|
@ -202,7 +235,7 @@ func (app *App) Provision(ctx caddy.Context) error {
|
||||||
return fmt.Errorf("loading listener wrapper modules: %v", err)
|
return fmt.Errorf("loading listener wrapper modules: %v", err)
|
||||||
}
|
}
|
||||||
var hasTLSPlaceholder bool
|
var hasTLSPlaceholder bool
|
||||||
for i, val := range vals.([]interface{}) {
|
for i, val := range vals.([]any) {
|
||||||
if _, ok := val.(*tlsPlaceholderWrapper); ok {
|
if _, ok := val.(*tlsPlaceholderWrapper); ok {
|
||||||
if i == 0 {
|
if i == 0 {
|
||||||
// putting the tls placeholder wrapper first is nonsensical because
|
// putting the tls placeholder wrapper first is nonsensical because
|
||||||
|
@ -298,7 +331,7 @@ func (app *App) Start() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
for srvName, srv := range app.Servers {
|
for srvName, srv := range app.Servers {
|
||||||
s := &http.Server{
|
srv.server = &http.Server{
|
||||||
ReadTimeout: time.Duration(srv.ReadTimeout),
|
ReadTimeout: time.Duration(srv.ReadTimeout),
|
||||||
ReadHeaderTimeout: time.Duration(srv.ReadHeaderTimeout),
|
ReadHeaderTimeout: time.Duration(srv.ReadHeaderTimeout),
|
||||||
WriteTimeout: time.Duration(srv.WriteTimeout),
|
WriteTimeout: time.Duration(srv.WriteTimeout),
|
||||||
|
@ -308,12 +341,37 @@ func (app *App) Start() error {
|
||||||
ErrorLog: serverLogger,
|
ErrorLog: serverLogger,
|
||||||
}
|
}
|
||||||
|
|
||||||
// enable h2c if configured
|
// disable HTTP/2, which we enabled by default during provisioning
|
||||||
if srv.AllowH2C {
|
if !srv.protocol("h2") {
|
||||||
|
srv.server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
|
||||||
|
for _, cp := range srv.TLSConnPolicies {
|
||||||
|
// the TLSConfig was already provisioned, so... manually remove it
|
||||||
|
for i, np := range cp.TLSConfig.NextProtos {
|
||||||
|
if np == "h2" {
|
||||||
|
cp.TLSConfig.NextProtos = append(cp.TLSConfig.NextProtos[:i], cp.TLSConfig.NextProtos[i+1:]...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// remove it from the parent connection policy too, just to keep things tidy
|
||||||
|
for i, alpn := range cp.ALPN {
|
||||||
|
if alpn == "h2" {
|
||||||
|
cp.ALPN = append(cp.ALPN[:i], cp.ALPN[i+1:]...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// this TLS config is used by the std lib to choose the actual TLS config for connections
|
||||||
|
// by looking through the connection policies to find the first one that matches
|
||||||
|
tlsCfg := srv.TLSConnPolicies.TLSConfig(app.ctx)
|
||||||
|
|
||||||
|
// enable H2C if configured
|
||||||
|
if srv.protocol("h2c") {
|
||||||
h2server := &http2.Server{
|
h2server := &http2.Server{
|
||||||
IdleTimeout: time.Duration(srv.IdleTimeout),
|
IdleTimeout: time.Duration(srv.IdleTimeout),
|
||||||
}
|
}
|
||||||
s.Handler = h2c.NewHandler(srv, h2server)
|
srv.server.Handler = h2c.NewHandler(srv, h2server)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, lnAddr := range srv.Listen {
|
for _, lnAddr := range srv.Listen {
|
||||||
|
@ -321,10 +379,12 @@ func (app *App) Start() error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("%s: parsing listen address '%s': %v", srvName, lnAddr, err)
|
return fmt.Errorf("%s: parsing listen address '%s': %v", srvName, lnAddr, err)
|
||||||
}
|
}
|
||||||
|
srv.addresses = append(srv.addresses, listenAddr)
|
||||||
|
|
||||||
for portOffset := uint(0); portOffset < listenAddr.PortRangeSize(); portOffset++ {
|
for portOffset := uint(0); portOffset < listenAddr.PortRangeSize(); portOffset++ {
|
||||||
// create the listener for this socket
|
// create the listener for this socket
|
||||||
hostport := listenAddr.JoinHostPort(portOffset)
|
hostport := listenAddr.JoinHostPort(portOffset)
|
||||||
ln, err := caddy.Listen(listenAddr.Network, hostport)
|
ln, err := caddy.ListenTimeout(listenAddr.Network, hostport, time.Duration(srv.KeepAliveInterval))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("%s: listening on %s: %v", listenAddr.Network, hostport, err)
|
return fmt.Errorf("%s: listening on %s: %v", listenAddr.Network, hostport, err)
|
||||||
}
|
}
|
||||||
|
@ -342,32 +402,16 @@ func (app *App) Start() error {
|
||||||
// enable TLS if there is a policy and if this is not the HTTP port
|
// enable TLS if there is a policy and if this is not the HTTP port
|
||||||
useTLS := len(srv.TLSConnPolicies) > 0 && int(listenAddr.StartPort+portOffset) != app.httpPort()
|
useTLS := len(srv.TLSConnPolicies) > 0 && int(listenAddr.StartPort+portOffset) != app.httpPort()
|
||||||
if useTLS {
|
if useTLS {
|
||||||
// create TLS listener
|
// create TLS listener - this enables and terminates TLS
|
||||||
tlsCfg := srv.TLSConnPolicies.TLSConfig(app.ctx)
|
|
||||||
ln = tls.NewListener(ln, tlsCfg)
|
ln = tls.NewListener(ln, tlsCfg)
|
||||||
|
|
||||||
/////////
|
// enable HTTP/3 if configured
|
||||||
// TODO: HTTP/3 support is experimental for now
|
if srv.protocol("h3") {
|
||||||
if srv.ExperimentalHTTP3 {
|
app.logger.Info("enabling HTTP/3 listener", zap.String("addr", hostport))
|
||||||
app.logger.Info("enabling experimental HTTP/3 listener",
|
if err := srv.serveHTTP3(hostport, tlsCfg); err != nil {
|
||||||
zap.String("addr", hostport),
|
return err
|
||||||
)
|
|
||||||
h3ln, err := caddy.ListenQUIC(hostport, tlsCfg)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("getting HTTP/3 QUIC listener: %v", err)
|
|
||||||
}
|
}
|
||||||
h3srv := &http3.Server{
|
|
||||||
Addr: hostport,
|
|
||||||
Handler: srv,
|
|
||||||
TLSConfig: tlsCfg,
|
|
||||||
MaxHeaderBytes: srv.MaxHeaderBytes,
|
|
||||||
}
|
|
||||||
//nolint:errcheck
|
|
||||||
go h3srv.ServeListener(h3ln)
|
|
||||||
app.h3servers = append(app.h3servers, h3srv)
|
|
||||||
srv.h3server = h3srv
|
|
||||||
}
|
}
|
||||||
/////////
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// finish wrapping listener where we left off before TLS
|
// finish wrapping listener where we left off before TLS
|
||||||
|
@ -386,15 +430,22 @@ func (app *App) Start() error {
|
||||||
|
|
||||||
app.logger.Debug("starting server loop",
|
app.logger.Debug("starting server loop",
|
||||||
zap.String("address", ln.Addr().String()),
|
zap.String("address", ln.Addr().String()),
|
||||||
zap.Bool("http3", srv.ExperimentalHTTP3),
|
|
||||||
zap.Bool("tls", useTLS),
|
zap.Bool("tls", useTLS),
|
||||||
)
|
zap.Bool("http3", srv.h3server != nil))
|
||||||
|
|
||||||
//nolint:errcheck
|
srv.listeners = append(srv.listeners, ln)
|
||||||
go s.Serve(ln)
|
|
||||||
app.servers = append(app.servers, s)
|
// enable HTTP/1 if configured
|
||||||
|
if srv.protocol("h1") {
|
||||||
|
//nolint:errcheck
|
||||||
|
go srv.server.Serve(ln)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
srv.logger.Info("server running",
|
||||||
|
zap.String("name", srvName),
|
||||||
|
zap.Strings("protocols", srv.Protocols))
|
||||||
}
|
}
|
||||||
|
|
||||||
// finish automatic HTTPS by finally beginning
|
// finish automatic HTTPS by finally beginning
|
||||||
|
@ -410,26 +461,65 @@ func (app *App) Start() error {
|
||||||
// Stop gracefully shuts down the HTTP server.
|
// Stop gracefully shuts down the HTTP server.
|
||||||
func (app *App) Stop() error {
|
func (app *App) Stop() error {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// see if any listeners in our config will be closing or if they are continuing
|
||||||
|
// hrough a reload; because if any are closing, we will enforce shutdown delay
|
||||||
|
var delay bool
|
||||||
|
scheduledTime := time.Now().Add(time.Duration(app.ShutdownDelay))
|
||||||
|
if app.ShutdownDelay > 0 {
|
||||||
|
for _, server := range app.Servers {
|
||||||
|
for _, na := range server.addresses {
|
||||||
|
for _, addr := range na.Expand() {
|
||||||
|
if caddy.ListenerUsage(addr.Network, addr.JoinHostPort(0)) < 2 {
|
||||||
|
app.logger.Debug("listener closing and shutdown delay is configured", zap.String("address", addr.String()))
|
||||||
|
server.shutdownAtMu.Lock()
|
||||||
|
server.shutdownAt = scheduledTime
|
||||||
|
server.shutdownAtMu.Unlock()
|
||||||
|
delay = true
|
||||||
|
} else {
|
||||||
|
app.logger.Debug("shutdown delay configured but listener will remain open", zap.String("address", addr.String()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// honor scheduled/delayed shutdown time
|
||||||
|
if delay {
|
||||||
|
app.logger.Debug("shutdown scheduled",
|
||||||
|
zap.Duration("delay_duration", time.Duration(app.ShutdownDelay)),
|
||||||
|
zap.Time("time", scheduledTime))
|
||||||
|
time.Sleep(time.Duration(app.ShutdownDelay))
|
||||||
|
}
|
||||||
|
|
||||||
|
// enforce grace period if configured
|
||||||
if app.GracePeriod > 0 {
|
if app.GracePeriod > 0 {
|
||||||
var cancel context.CancelFunc
|
var cancel context.CancelFunc
|
||||||
ctx, cancel = context.WithTimeout(ctx, time.Duration(app.GracePeriod))
|
ctx, cancel = context.WithTimeout(ctx, time.Duration(app.GracePeriod))
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
app.logger.Debug("servers shutting down; grace period initiated", zap.Duration("duration", time.Duration(app.GracePeriod)))
|
||||||
|
} else {
|
||||||
|
app.logger.Debug("servers shutting down with eternal grace period")
|
||||||
}
|
}
|
||||||
for _, s := range app.servers {
|
|
||||||
err := s.Shutdown(ctx)
|
// shut down servers
|
||||||
if err != nil {
|
for _, server := range app.Servers {
|
||||||
return err
|
if err := server.server.Shutdown(ctx); err != nil {
|
||||||
|
app.logger.Error("server shutdown",
|
||||||
|
zap.Error(err),
|
||||||
|
zap.Strings("addresses", server.Listen))
|
||||||
|
}
|
||||||
|
|
||||||
|
if server.h3server != nil {
|
||||||
|
// TODO: CloseGracefully, once implemented upstream (see https://github.com/lucas-clemente/quic-go/issues/2103)
|
||||||
|
if err := server.h3server.Close(); err != nil {
|
||||||
|
app.logger.Error("HTTP/3 server shutdown",
|
||||||
|
zap.Error(err),
|
||||||
|
zap.Strings("addresses", server.Listen))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, s := range app.h3servers {
|
|
||||||
// TODO: CloseGracefully, once implemented upstream
|
|
||||||
// (see https://github.com/lucas-clemente/quic-go/issues/2103)
|
|
||||||
err := s.Close()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -93,6 +93,9 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
 	// https://github.com/caddyserver/caddy/issues/3443)
 	redirDomains := make(map[string][]caddy.NetworkAddress)
 
+	// the log configuration for an HTTPS enabled server
+	var logCfg *ServerLogConfig
+
 	for srvName, srv := range app.Servers {
 		// as a prerequisite, provision route matchers; this is
 		// required for all routes on all servers, and must be
@@ -172,6 +175,13 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
 			continue
 		}
 
+		// clone the logger so we can apply it to the HTTP server
+		// (not sure if necessary to clone it; but probably safer)
+		// (we choose one log cfg arbitrarily; not sure which is best)
+		if srv.Logs != nil {
+			logCfg = srv.Logs.clone()
+		}
+
 		// for all the hostnames we found, filter them so we have
 		// a deduplicated list of names for which to obtain certs
 		// (only if cert management not disabled for this server)
@@ -400,6 +410,7 @@ redirServersLoop:
 		app.Servers["remaining_auto_https_redirects"] = &Server{
 			Listen: redirServerAddrsList,
 			Routes: appendCatchAll(redirRoutes),
+			Logs:   logCfg,
 		}
 	}
 
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	weakrand "math/rand"
 	"net/http"
+	"strings"
 	"sync"
 	"time"
 
@@ -94,10 +95,7 @@ func (hba *HTTPBasicAuth) Provision(ctx caddy.Context) error {
 
 	// if supported, generate a fake password we can compare against if needed
 	if hasher, ok := hba.Hash.(Hasher); ok {
-		hba.fakePassword, err = hasher.Hash([]byte("antitiming"), []byte("fakesalt"))
-		if err != nil {
-			return fmt.Errorf("generating anti-timing password hash: %v", err)
-		}
+		hba.fakePassword = hasher.FakeHash()
 	}
 
 	repl := caddy.NewReplacer()
@@ -117,10 +115,19 @@ func (hba *HTTPBasicAuth) Provision(ctx caddy.Context) error {
 			return fmt.Errorf("account %d: username and password are required", i)
 		}
 
-		acct.password, err = base64.StdEncoding.DecodeString(acct.Password)
-		if err != nil {
-			return fmt.Errorf("base64-decoding password: %v", err)
+		// TODO: Remove support for redundantly-encoded b64-encoded hashes
+		// Passwords starting with '$' are likely in Modular Crypt Format,
+		// so we don't need to base64 decode them. But historically, we
+		// required redundant base64, so we try to decode it otherwise.
+		if strings.HasPrefix(acct.Password, "$") {
+			acct.password = []byte(acct.Password)
+		} else {
+			acct.password, err = base64.StdEncoding.DecodeString(acct.Password)
+			if err != nil {
+				return fmt.Errorf("base64-decoding password: %v", err)
+			}
 		}
 
 		if acct.Salt != "" {
 			acct.salt, err = base64.StdEncoding.DecodeString(acct.Salt)
 			if err != nil {
@@ -271,9 +278,11 @@ type Comparer interface {
 // that require a salt). Hashing modules which implement
 // this interface can be used with the hash-password
 // subcommand as well as benefitting from anti-timing
-// features.
+// features. A hasher also returns a fake hash which
+// can be used for timing side-channel mitigation.
 type Hasher interface {
 	Hash(plaintext, salt []byte) ([]byte, error)
+	FakeHash() []byte
 }
 
 // Account contains a username, password, and salt (if applicable).
@@ -62,7 +62,7 @@ func (a *Authentication) Provision(ctx caddy.Context) error {
 	if err != nil {
 		return fmt.Errorf("loading authentication providers: %v", err)
 	}
-	for modName, modIface := range mods.(map[string]interface{}) {
+	for modName, modIface := range mods.(map[string]any) {
 		a.Providers[modName] = modIface.(Authenticator)
 	}
 	return nil
@@ -42,11 +42,13 @@ hash is written to stdout as a base64 string.
 Caddy is attached to a controlling tty, the plaintext will
 not be echoed.
 
---algorithm may be bcrypt or scrypt. If script, the default
+--algorithm may be bcrypt or scrypt. If scrypt, the default
 parameters are used.
 
 Use the --salt flag for algorithms which require a salt to
 be provided (scrypt).
+
+Note that scrypt is deprecated. Please use 'bcrypt' instead.
 `,
 	Flags: func() *flag.FlagSet {
 		fs := flag.NewFlagSet("hash-password", flag.ExitOnError)
@@ -112,13 +114,16 @@ func cmdHashPassword(fs caddycmd.Flags) (int, error) {
 	}
 
 	var hash []byte
+	var hashString string
 	switch algorithm {
 	case "bcrypt":
 		hash, err = BcryptHash{}.Hash(plaintext, nil)
+		hashString = string(hash)
 	case "scrypt":
 		def := ScryptHash{}
 		def.SetDefaults()
 		hash, err = def.Hash(plaintext, salt)
+		hashString = base64.StdEncoding.EncodeToString(hash)
 	default:
 		return caddy.ExitCodeFailedStartup, fmt.Errorf("unrecognized hash algorithm: %s", algorithm)
 	}
@@ -126,9 +131,7 @@ func cmdHashPassword(fs caddycmd.Flags) (int, error) {
 		return caddy.ExitCodeFailedStartup, err
 	}
 
-	hashBase64 := base64.StdEncoding.EncodeToString(hash)
-
-	fmt.Println(hashBase64)
+	fmt.Println(hashString)
 
 	return 0, nil
 }
@@ -16,6 +16,7 @@ package caddyauth
 
 import (
 	"crypto/subtle"
+	"encoding/base64"
 
 	"github.com/caddyserver/caddy/v2"
 	"golang.org/x/crypto/bcrypt"
@@ -55,7 +56,16 @@ func (BcryptHash) Hash(plaintext, _ []byte) ([]byte, error) {
 	return bcrypt.GenerateFromPassword(plaintext, 14)
 }
 
+// FakeHash returns a fake hash.
+func (BcryptHash) FakeHash() []byte {
+	// hashed with the following command:
+	// caddy hash-password --plaintext "antitiming" --algorithm "bcrypt"
+	return []byte("$2a$14$X3ulqf/iGxnf1k6oMZ.RZeJUoqI9PX2PM4rS5lkIKJXduLGXGPrt6")
+}
+
 // ScryptHash implements the scrypt KDF as a hash.
+//
+// DEPRECATED, please use 'bcrypt' instead.
 type ScryptHash struct {
 	// scrypt's N parameter. If unset or 0, a safe default is used.
 	N int `json:"N,omitempty"`
@@ -80,8 +90,9 @@ func (ScryptHash) CaddyModule() caddy.ModuleInfo {
 }
 
 // Provision sets up s.
-func (s *ScryptHash) Provision(_ caddy.Context) error {
+func (s *ScryptHash) Provision(ctx caddy.Context) error {
 	s.SetDefaults()
+	ctx.Logger(s).Warn("use of 'scrypt' is deprecated, please use 'bcrypt' instead")
 	return nil
 }
 
@@ -123,6 +134,14 @@ func (s ScryptHash) Hash(plaintext, salt []byte) ([]byte, error) {
 	return scrypt.Key(plaintext, salt, s.N, s.R, s.P, s.KeyLength)
 }
 
+// FakeHash returns a fake hash.
+func (ScryptHash) FakeHash() []byte {
+	// hashed with the following command:
+	// caddy hash-password --plaintext "antitiming" --salt "fakesalt" --algorithm "scrypt"
+	bytes, _ := base64.StdEncoding.DecodeString("kFbjiVemlwK/ZS0tS6/UQqEDeaNMigyCs48KEsGUse8=")
+	return bytes
+}
+
 func hashesMatch(pwdHash1, pwdHash2 []byte) bool {
 	return subtle.ConstantTimeCompare(pwdHash1, pwdHash2) == 1
 }
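The precomputed FakeHash values above exist so that authentication against a nonexistent account still performs a full hash comparison, which keeps response times from revealing whether a username exists. A standalone sketch of that anti-timing idea, assuming a plain map of accounts (the helper names here are illustrative, not Caddy's API):

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

// fakeHash is the same kind of precomputed bcrypt digest as
// BcryptHash.FakeHash above; any valid digest would do.
var fakeHash = []byte("$2a$14$X3ulqf/iGxnf1k6oMZ.RZeJUoqI9PX2PM4rS5lkIKJXduLGXGPrt6")

// authenticate runs a full bcrypt comparison even when the username is
// unknown, so a missing account costs about as much time as a wrong password.
func authenticate(accounts map[string][]byte, user, pass string) bool {
	hash, ok := accounts[user]
	if !ok {
		hash = fakeHash
	}
	err := bcrypt.CompareHashAndPassword(hash, []byte(pass))
	return ok && err == nil
}

func main() {
	accounts := map[string][]byte{} // no users at all
	fmt.Println(authenticate(accounts, "nobody", "guess")) // false, but not instantly
}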
@@ -20,6 +20,7 @@ import (
 	"io"
 	"net"
 	"net/http"
+	"path"
 	"path/filepath"
 	"strconv"
 	"strings"
@@ -244,6 +245,40 @@ func SanitizedPathJoin(root, reqPath string) string {
 	return path
 }
 
+// CleanPath cleans path p according to path.Clean(), but only
+// merges repeated slashes if collapseSlashes is true, and always
+// preserves trailing slashes.
+func CleanPath(p string, collapseSlashes bool) string {
+	if collapseSlashes {
+		return cleanPath(p)
+	}
+
+	// insert an invalid/impossible URI character into each two consecutive
+	// slashes to expand empty path segments; then clean the path as usual,
+	// and then remove the remaining temporary characters.
+	const tmpCh = 0xff
+	var sb strings.Builder
+	for i, ch := range p {
+		if ch == '/' && i > 0 && p[i-1] == '/' {
+			sb.WriteByte(tmpCh)
+		}
+		sb.WriteRune(ch)
+	}
+	halfCleaned := cleanPath(sb.String())
+	halfCleaned = strings.ReplaceAll(halfCleaned, string([]byte{tmpCh}), "")
+
+	return halfCleaned
+}
+
+// cleanPath does path.Clean(p) but preserves any trailing slash.
+func cleanPath(p string) string {
+	cleaned := path.Clean(p)
+	if cleaned != "/" && strings.HasSuffix(p, "/") {
+		cleaned = cleaned + "/"
+	}
+	return cleaned
+}
+
 // tlsPlaceholderWrapper is a no-op listener wrapper that marks
 // where the TLS listener should be in a chain of listener wrappers.
 // It should only be used if another listener wrapper must be placed
@@ -92,3 +92,60 @@ func TestSanitizedPathJoin(t *testing.T) {
 		}
 	}
 }
+
+func TestCleanPath(t *testing.T) {
+	for i, tc := range []struct {
+		input        string
+		mergeSlashes bool
+		expect       string
+	}{
+		{
+			input:  "/foo",
+			expect: "/foo",
+		},
+		{
+			input:  "/foo/",
+			expect: "/foo/",
+		},
+		{
+			input:  "//foo",
+			expect: "//foo",
+		},
+		{
+			input:        "//foo",
+			mergeSlashes: true,
+			expect:       "/foo",
+		},
+		{
+			input:        "/foo//bar/",
+			mergeSlashes: true,
+			expect:       "/foo/bar/",
+		},
+		{
+			input:  "/foo/./.././bar",
+			expect: "/bar",
+		},
+		{
+			input:  "/foo//./..//./bar",
+			expect: "/foo//bar",
+		},
+		{
+			input:  "/foo///./..//./bar",
+			expect: "/foo///bar",
+		},
+		{
+			input:  "/foo///./..//.",
+			expect: "/foo//",
+		},
+		{
+			input:  "/foo//./bar",
+			expect: "/foo//bar",
+		},
+	} {
+		actual := CleanPath(tc.input, tc.mergeSlashes)
+		if actual != tc.expect {
+			t.Errorf("Test %d [input='%s' mergeSlashes=%t]: Got '%s', expected '%s'",
+				i, tc.input, tc.mergeSlashes, actual, tc.expect)
+		}
+	}
+}
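To see why CleanPath inserts a throwaway byte, here is the transformation traced on one of the test inputs above; this is a standalone illustration of the trick, not code from the diff.

package main

import (
	"fmt"
	"path"
	"strings"
)

// Marking each repeated slash keeps path.Clean from collapsing empty
// path segments; the marker is stripped again afterwards.
func main() {
	const tmpCh = 0xff
	p := "/foo//./bar"

	var sb strings.Builder
	for i, ch := range p {
		if ch == '/' && i > 0 && p[i-1] == '/' {
			sb.WriteByte(tmpCh)
		}
		sb.WriteRune(ch)
	}
	marked := sb.String()                                           // "/foo/\xff/./bar"
	cleaned := path.Clean(marked)                                   // "/foo/\xff/bar"
	final := strings.ReplaceAll(cleaned, string([]byte{tmpCh}), "") // "/foo//bar"
	fmt.Println(final)
}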
@@ -28,7 +28,6 @@ import (
    "github.com/caddyserver/caddy/v2"
    "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
    "github.com/google/cel-go/cel"
-   "github.com/google/cel-go/checker/decls"
    "github.com/google/cel-go/common"
    "github.com/google/cel-go/common/operators"
    "github.com/google/cel-go/common/types"

@@ -40,7 +39,6 @@ import (
    "github.com/google/cel-go/parser"
    "go.uber.org/zap"
    exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-   "google.golang.org/protobuf/proto"
 )
 
 func init() {

@@ -126,13 +124,12 @@ func (m *MatchExpression) Provision(ctx caddy.Context) error {
 
    // create the CEL environment
    env, err := cel.NewEnv(
-       cel.Declarations(
-           decls.NewVar("request", httpRequestObjectType),
-           decls.NewFunction(placeholderFuncName,
-               decls.NewOverload(placeholderFuncName+"_httpRequest_string",
-                   []*exprpb.Type{httpRequestObjectType, decls.String},
-                   decls.Any)),
-       ),
+       cel.Function(placeholderFuncName, cel.SingletonBinaryImpl(m.caddyPlaceholderFunc), cel.Overload(
+           placeholderFuncName+"_httpRequest_string",
+           []*cel.Type{httpRequestObjectType, cel.StringType},
+           cel.AnyType,
+       )),
+       cel.Variable("request", httpRequestObjectType),
        cel.CustomTypeAdapter(m.ta),
        ext.Strings(),
        matcherLib,

@@ -149,20 +146,12 @@ func (m *MatchExpression) Provision(ctx caddy.Context) error {
 
    // request matching is a boolean operation, so we don't really know
    // what to do if the expression returns a non-boolean type
-   if !proto.Equal(checked.ResultType(), decls.Bool) {
-       return fmt.Errorf("CEL request matcher expects return type of bool, not %s", checked.ResultType())
+   if checked.OutputType() != cel.BoolType {
+       return fmt.Errorf("CEL request matcher expects return type of bool, not %s", checked.OutputType())
    }
 
    // compile the "program"
-   m.prg, err = env.Program(checked,
-       cel.EvalOptions(cel.OptOptimize),
-       cel.Functions(
-           &functions.Overload{
-               Operator: placeholderFuncName,
-               Binary:   m.caddyPlaceholderFunc,
-           },
-       ),
-   )
+   m.prg, err = env.Program(checked, cel.EvalOptions(cel.OptOptimize))
    if err != nil {
        return fmt.Errorf("compiling CEL program: %s", err)
    }
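The two Provision hunks above move the expression matcher off cel-go's decls/cel.Declarations/cel.Functions style onto the newer cel.Variable, cel.Function, and cel.Overload environment options, binding the Go implementation at declaration time and comparing the checked output type directly against cel.BoolType. A condensed sketch of the registration shape used here (fn stands for any func(ref.Val, ref.Val) ref.Val implementation; it is an assumption, not a name from this change):

    env, err := cel.NewEnv(
        cel.Variable("request", cel.ObjectType("http.Request")),
        cel.Function("caddyPlaceholder",
            cel.Overload("caddyPlaceholder_httpRequest_string",
                []*cel.Type{cel.ObjectType("http.Request"), cel.StringType},
                cel.AnyType),
            cel.SingletonBinaryImpl(fn), // implementation bound at declaration time
        ),
    )
    if err != nil {
        return err
    }
    _ = env // compile and evaluate programs against env as before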
@@ -229,7 +218,7 @@ var httpRequestCELType = types.NewTypeValue("http.Request", traits.ReceiverType)
 // drops allocation costs for CEL expression evaluations by roughly half.
 type celHTTPRequest struct{ *http.Request }
 
-func (cr celHTTPRequest) ResolveName(name string) (interface{}, bool) {
+func (cr celHTTPRequest) ResolveName(name string) (any, bool) {
    if name == "request" {
        return cr, true
    }

@@ -240,7 +229,7 @@ func (cr celHTTPRequest) Parent() interpreter.Activation {
    return nil
 }
 
-func (cr celHTTPRequest) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (cr celHTTPRequest) ConvertToNative(typeDesc reflect.Type) (any, error) {
    return cr.Request, nil
 }
 func (celHTTPRequest) ConvertToType(typeVal ref.Type) ref.Val {

@@ -252,8 +241,8 @@ func (cr celHTTPRequest) Equal(other ref.Val) ref.Val {
    }
    return types.ValOrErr(other, "%v is not comparable type", other)
 }
 func (celHTTPRequest) Type() ref.Type { return httpRequestCELType }
-func (cr celHTTPRequest) Value() interface{} { return cr }
+func (cr celHTTPRequest) Value() any { return cr }
 
 var pkixNameCELType = types.NewTypeValue("pkix.Name", traits.ReceiverType)
 

@@ -261,7 +250,7 @@ var pkixNameCELType = types.NewTypeValue("pkix.Name", traits.ReceiverType)
 // methods to satisfy the ref.Val interface.
 type celPkixName struct{ *pkix.Name }
 
-func (pn celPkixName) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (pn celPkixName) ConvertToNative(typeDesc reflect.Type) (any, error) {
    return pn.Name, nil
 }
 func (celPkixName) ConvertToType(typeVal ref.Type) ref.Val {

@@ -273,13 +262,13 @@ func (pn celPkixName) Equal(other ref.Val) ref.Val {
    }
    return types.ValOrErr(other, "%v is not comparable type", other)
 }
 func (celPkixName) Type() ref.Type { return pkixNameCELType }
-func (pn celPkixName) Value() interface{} { return pn }
+func (pn celPkixName) Value() any { return pn }
 
 // celTypeAdapter can adapt our custom types to a CEL value.
 type celTypeAdapter struct{}
 
-func (celTypeAdapter) NativeToValue(value interface{}) ref.Val {
+func (celTypeAdapter) NativeToValue(value any) ref.Val {
    switch v := value.(type) {
    case celHTTPRequest:
        return v

@@ -321,62 +310,45 @@ type CELLibraryProducer interface {
 // limited set of function signatures. For strong type validation you may need
 // to provide a custom macro which does a more detailed analysis of the CEL
 // literal provided to the macro as an argument.
-func CELMatcherImpl(macroName, funcName string, matcherDataTypes []*exprpb.Type, fac CELMatcherFactory) (cel.Library, error) {
-   requestType := decls.NewObjectType("http.Request")
+func CELMatcherImpl(macroName, funcName string, matcherDataTypes []*cel.Type, fac CELMatcherFactory) (cel.Library, error) {
+   requestType := cel.ObjectType("http.Request")
    var macro parser.Macro
    switch len(matcherDataTypes) {
    case 1:
        matcherDataType := matcherDataTypes[0]
-       if isCELStringListType(matcherDataType) {
+       switch matcherDataType.String() {
+       case "list(string)":
            macro = parser.NewGlobalVarArgMacro(macroName, celMatcherStringListMacroExpander(funcName))
-       } else if isCELStringType(matcherDataType) {
+       case cel.StringType.String():
            macro = parser.NewGlobalMacro(macroName, 1, celMatcherStringMacroExpander(funcName))
-       } else if isCELJSONType(matcherDataType) {
+       case CELTypeJSON.String():
            macro = parser.NewGlobalMacro(macroName, 1, celMatcherJSONMacroExpander(funcName))
-       } else {
-           return nil, fmt.Errorf("unsupported matcher data type: %s", cel.FormatType(matcherDataType))
+       default:
+           return nil, fmt.Errorf("unsupported matcher data type: %s", matcherDataType)
        }
    case 2:
-       if isCELStringType(matcherDataTypes[0]) && isCELStringType(matcherDataTypes[1]) {
+       if matcherDataTypes[0] == cel.StringType && matcherDataTypes[1] == cel.StringType {
            macro = parser.NewGlobalMacro(macroName, 2, celMatcherStringListMacroExpander(funcName))
-           matcherDataTypes = []*exprpb.Type{CelTypeListString}
+           matcherDataTypes = []*cel.Type{cel.ListType(cel.StringType)}
        } else {
-           return nil, fmt.Errorf(
-               "unsupported matcher data type: %s, %s",
-               cel.FormatType(matcherDataTypes[0]), cel.FormatType(matcherDataTypes[1]),
-           )
+           return nil, fmt.Errorf("unsupported matcher data type: %s, %s", matcherDataTypes[0], matcherDataTypes[1])
        }
    case 3:
-       if isCELStringType(matcherDataTypes[0]) && isCELStringType(matcherDataTypes[1]) && isCELStringType(matcherDataTypes[2]) {
+       if matcherDataTypes[0] == cel.StringType && matcherDataTypes[1] == cel.StringType && matcherDataTypes[2] == cel.StringType {
            macro = parser.NewGlobalMacro(macroName, 3, celMatcherStringListMacroExpander(funcName))
-           matcherDataTypes = []*exprpb.Type{CelTypeListString}
+           matcherDataTypes = []*cel.Type{cel.ListType(cel.StringType)}
        } else {
-           return nil, fmt.Errorf(
-               "unsupported matcher data type: %s, %s, %s",
-               cel.FormatType(matcherDataTypes[0]), cel.FormatType(matcherDataTypes[1]), cel.FormatType(matcherDataTypes[2]),
-           )
+           return nil, fmt.Errorf("unsupported matcher data type: %s, %s, %s", matcherDataTypes[0], matcherDataTypes[1], matcherDataTypes[2])
        }
    }
    envOptions := []cel.EnvOption{
        cel.Macros(macro),
-       cel.Declarations(
-           decls.NewFunction(funcName,
-               decls.NewOverload(
-                   funcName,
-                   append([]*exprpb.Type{requestType}, matcherDataTypes...),
-                   decls.Bool,
-               ),
-           ),
-       ),
+       cel.Function(funcName,
+           cel.Overload(funcName, append([]*cel.Type{requestType}, matcherDataTypes...), cel.BoolType),
+           cel.SingletonBinaryImpl(CELMatcherRuntimeFunction(funcName, fac))),
    }
    programOptions := []cel.ProgramOption{
        cel.CustomDecorator(CELMatcherDecorator(funcName, fac)),
-       cel.Functions(
-           &functions.Overload{
-               Operator: funcName,
-               Binary:   CELMatcherRuntimeFunction(funcName, fac),
-           },
-       ),
    }
    return NewMatcherCELLibrary(envOptions, programOptions), nil
 }

@@ -573,17 +545,17 @@ func celMatcherJSONMacroExpander(funcName string) parser.MacroExpander {
 // CELValueToMapStrList converts a CEL value to a map[string][]string
 //
 // Earlier validation stages should guarantee that the value has this type
-// at compile time, and that the runtime value type is map[string]interface{}.
+// at compile time, and that the runtime value type is map[string]any.
 // The reason for the slight difference in value type is that CEL allows for
 // map literals containing heterogeneous values, in this case string and list
 // of string.
 func CELValueToMapStrList(data ref.Val) (map[string][]string, error) {
-   mapStrType := reflect.TypeOf(map[string]interface{}{})
+   mapStrType := reflect.TypeOf(map[string]any{})
    mapStrRaw, err := data.ConvertToNative(mapStrType)
    if err != nil {
        return nil, err
    }
-   mapStrIface := mapStrRaw.(map[string]interface{})
+   mapStrIface := mapStrRaw.(map[string]any)
    mapStrListStr := make(map[string][]string, len(mapStrIface))
    for k, v := range mapStrIface {
        switch val := v.(type) {

@@ -610,25 +582,6 @@ func CELValueToMapStrList(data ref.Val) (map[string][]string, error) {
    return mapStrListStr, nil
 }
 
-// isCELJSONType returns whether the type corresponds to JSON input.
-func isCELJSONType(t *exprpb.Type) bool {
-   switch t.GetTypeKind().(type) {
-   case *exprpb.Type_MapType_:
-       mapType := t.GetMapType()
-       return isCELStringType(mapType.GetKeyType()) && mapType.GetValueType().GetDyn() != nil
-   }
-   return false
-}
-
-// isCELStringType returns whether the type corresponds to a string.
-func isCELStringType(t *exprpb.Type) bool {
-   switch t.GetTypeKind().(type) {
-   case *exprpb.Type_Primitive:
-       return t.GetPrimitive() == exprpb.Type_STRING
-   }
-   return false
-}
-
 // isCELStringExpr indicates whether the expression is a supported string expression
 func isCELStringExpr(e *exprpb.Expr) bool {
    return isCELStringLiteral(e) || isCELCaddyPlaceholderCall(e) || isCELConcatCall(e)

@@ -681,15 +634,6 @@ func isCELConcatCall(e *exprpb.Expr) bool {
    return false
 }
 
-// isCELStringListType returns whether the type corresponds to a list of strings.
-func isCELStringListType(t *exprpb.Type) bool {
-   switch t.GetTypeKind().(type) {
-   case *exprpb.Type_ListType_:
-       return isCELStringType(t.GetListType().GetElemType())
-   }
-   return false
-}
-
 // isCELStringListLiteral returns whether the expression resolves to a list literal
 // containing only string constants or a placeholder call.
 func isCELStringListLiteral(e *exprpb.Expr) bool {

@@ -713,11 +657,10 @@ var (
    placeholderRegexp    = regexp.MustCompile(`{([a-zA-Z][\w.-]+)}`)
    placeholderExpansion = `caddyPlaceholder(request, "${1}")`
 
-   CelTypeListString = decls.NewListType(decls.String)
-   CelTypeJson       = decls.NewMapType(decls.String, decls.Dyn)
+   CELTypeJSON = cel.MapType(cel.StringType, cel.DynType)
 )
 
-var httpRequestObjectType = decls.NewObjectType("http.Request")
+var httpRequestObjectType = cel.ObjectType("http.Request")
 
 // The name of the CEL function which accesses Replacer values.
 const placeholderFuncName = "caddyPlaceholder"
@@ -71,7 +71,7 @@ func (enc *Encode) Provision(ctx caddy.Context) error {
    if err != nil {
        return fmt.Errorf("loading encoder modules: %v", err)
    }
-   for modName, modIface := range mods.(map[string]interface{}) {
+   for modName, modIface := range mods.(map[string]any) {
        err = enc.addEncoding(modIface.(Encoding))
        if err != nil {
            return fmt.Errorf("adding encoding %s: %v", modName, err)

@@ -142,7 +142,7 @@ func (enc *Encode) addEncoding(e Encoding) error {
        enc.writerPools = make(map[string]*sync.Pool)
    }
    enc.writerPools[ae] = &sync.Pool{
-       New: func() interface{} {
+       New: func() any {
            return e.NewEncoder()
        },
    }

@@ -418,7 +418,7 @@ type Precompressed interface {
 }
 
 var bufPool = sync.Pool{
-   New: func() interface{} {
+   New: func() any {
        return new(bytes.Buffer)
    },
 }

@@ -45,7 +45,7 @@ func (z *Zstd) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 // used in the Accept-Encoding request headers.
 func (Zstd) AcceptEncoding() string { return "zstd" }
 
-// NewEncoder returns a new gzip writer.
+// NewEncoder returns a new Zstandard writer.
 func (z Zstd) NewEncoder() encode.Encoder {
    // The default of 8MB for the window is
    // too large for many clients, so we limit
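The interface{} → any rewrites in these hunks are mechanical: any has been a predeclared alias for interface{} since Go 1.18, so pool constructors and the type assertions on their values behave exactly as before. For example:

    var pool = sync.Pool{New: func() any { return new(bytes.Buffer) }}
    buf := pool.Get().(*bytes.Buffer) // same assertion as with interface{}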
@@ -19,6 +19,8 @@ import (
    _ "embed"
    "encoding/json"
    "fmt"
+   "io"
+   "io/fs"
    "net/http"
    "os"
    "path"

@@ -67,9 +69,7 @@ func (fsrv *FileServer) serveBrowse(root, dirPath string, w http.ResponseWriter,
    if r.URL.Path == "" || path.Base(origReq.URL.Path) == path.Base(r.URL.Path) {
        if !strings.HasSuffix(origReq.URL.Path, "/") {
            fsrv.logger.Debug("redirecting to trailing slash to preserve hrefs", zap.String("request_path", r.URL.Path))
-           origReq.URL.Path += "/"
-           http.Redirect(w, r, origReq.URL.String(), http.StatusMovedPermanently)
-           return nil
+           return redirect(w, r, origReq.URL.Path+"/")
        }
    }
 

@@ -82,7 +82,7 @@ func (fsrv *FileServer) serveBrowse(root, dirPath string, w http.ResponseWriter,
    repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
 
    // calling path.Clean here prevents weird breadcrumbs when URL paths are sketchy like /%2e%2e%2f
-   listing, err := fsrv.loadDirectoryContents(dir, root, path.Clean(r.URL.Path), repl)
+   listing, err := fsrv.loadDirectoryContents(dir.(fs.ReadDirFile), root, path.Clean(r.URL.Path), repl)
    switch {
    case os.IsPermission(err):
        return caddyhttp.Error(http.StatusForbidden, err)

@@ -95,6 +95,7 @@ func (fsrv *FileServer) serveBrowse(root, dirPath string, w http.ResponseWriter,
    fsrv.browseApplyQueryParams(w, r, &listing)
 
    buf := bufPool.Get().(*bytes.Buffer)
+   buf.Reset()
    defer bufPool.Put(buf)
 
    acceptHeader := strings.ToLower(strings.Join(r.Header["Accept"], ","))

@@ -135,9 +136,9 @@ func (fsrv *FileServer) serveBrowse(root, dirPath string, w http.ResponseWriter,
    return nil
 }
 
-func (fsrv *FileServer) loadDirectoryContents(dir *os.File, root, urlPath string, repl *caddy.Replacer) (browseTemplateContext, error) {
-   files, err := dir.Readdir(-1)
-   if err != nil {
+func (fsrv *FileServer) loadDirectoryContents(dir fs.ReadDirFile, root, urlPath string, repl *caddy.Replacer) (browseTemplateContext, error) {
+   files, err := dir.ReadDir(10000) // TODO: this limit should probably be configurable
+   if err != nil && err != io.EOF {
        return browseTemplateContext{}, err
    }
 
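Directory listings are now read through the fs.ReadDirFile interface instead of *os.File, using the standard library's batched ReadDir(n) semantics: with n > 0 at most n entries are returned and io.EOF simply signals that the directory is exhausted. A minimal, self-contained sketch of that pattern (the helper name and limit are illustrative, not part of this change; imports of fmt, io, and io/fs are assumed):

    func readUpTo(fsys fs.FS, name string, limit int) ([]fs.DirEntry, error) {
        f, err := fsys.Open(name)
        if err != nil {
            return nil, err
        }
        defer f.Close()
        dir, ok := f.(fs.ReadDirFile)
        if !ok {
            return nil, fmt.Errorf("%s is not a readable directory", name)
        }
        entries, err := dir.ReadDir(limit)
        if err != nil && err != io.EOF { // io.EOF here just means "no more entries"
            return nil, err
        }
        return entries, nil
    }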
@@ -203,25 +204,25 @@ func (fsrv *FileServer) makeBrowseTemplate(tplCtx *templateContext) (*template.T
    return tpl, nil
 }
 
-// isSymlink return true if f is a symbolic link
-func isSymlink(f os.FileInfo) bool {
-   return f.Mode()&os.ModeSymlink != 0
-}
-
 // isSymlinkTargetDir returns true if f's symbolic link target
 // is a directory.
-func isSymlinkTargetDir(f os.FileInfo, root, urlPath string) bool {
+func (fsrv *FileServer) isSymlinkTargetDir(f fs.FileInfo, root, urlPath string) bool {
    if !isSymlink(f) {
        return false
    }
    target := caddyhttp.SanitizedPathJoin(root, path.Join(urlPath, f.Name()))
-   targetInfo, err := os.Stat(target)
+   targetInfo, err := fs.Stat(fsrv.fileSystem, target)
    if err != nil {
        return false
    }
    return targetInfo.IsDir()
 }
 
+// isSymlink return true if f is a symbolic link.
+func isSymlink(f fs.FileInfo) bool {
+   return f.Mode()&os.ModeSymlink != 0
+}
+
 // templateContext powers the context used when evaluating the browse template.
 // It combines browse-specific features with the standard templates handler
 // features.

@@ -232,7 +233,7 @@ type templateContext struct {
 
 // bufPool is used to increase the efficiency of file listings.
 var bufPool = sync.Pool{
-   New: func() interface{} {
+   New: func() any {
        return new(bytes.Buffer)
    },
 }

@@ -15,6 +15,7 @@
 package fileserver
 
 import (
+   "io/fs"
    "net/url"
    "os"
    "path"

@@ -26,22 +27,31 @@ import (
    "github.com/caddyserver/caddy/v2"
    "github.com/caddyserver/caddy/v2/modules/caddyhttp"
    "github.com/dustin/go-humanize"
+   "go.uber.org/zap"
 )
 
-func (fsrv *FileServer) directoryListing(files []os.FileInfo, canGoUp bool, root, urlPath string, repl *caddy.Replacer) browseTemplateContext {
+func (fsrv *FileServer) directoryListing(entries []fs.DirEntry, canGoUp bool, root, urlPath string, repl *caddy.Replacer) browseTemplateContext {
    filesToHide := fsrv.transformHidePaths(repl)
 
    var dirCount, fileCount int
    fileInfos := []fileInfo{}
 
-   for _, f := range files {
-       name := f.Name()
+   for _, entry := range entries {
+       name := entry.Name()
 
        if fileHidden(name, filesToHide) {
            continue
        }
 
-       isDir := f.IsDir() || isSymlinkTargetDir(f, root, urlPath)
+       info, err := entry.Info()
+       if err != nil {
+           fsrv.logger.Error("could not get info about directory entry",
+               zap.String("name", entry.Name()),
+               zap.String("root", root))
+           continue
+       }
+
+       isDir := entry.IsDir() || fsrv.isSymlinkTargetDir(info, root, urlPath)
 
        // add the slash after the escape of path to avoid escaping the slash as well
        if isDir {

@@ -51,11 +61,11 @@ func (fsrv *FileServer) directoryListing(files []os.FileInfo, canGoUp bool, root
            fileCount++
        }
 
-       size := f.Size()
-       fileIsSymlink := isSymlink(f)
+       size := info.Size()
+       fileIsSymlink := isSymlink(info)
        if fileIsSymlink {
-           path := caddyhttp.SanitizedPathJoin(root, path.Join(urlPath, f.Name()))
-           fileInfo, err := os.Stat(path)
+           path := caddyhttp.SanitizedPathJoin(root, path.Join(urlPath, info.Name()))
+           fileInfo, err := fs.Stat(fsrv.fileSystem, path)
            if err == nil {
                size = fileInfo.Size()
            }

@@ -73,8 +83,8 @@ func (fsrv *FileServer) directoryListing(files []os.FileInfo, canGoUp bool, root
            Name:    name,
            Size:    size,
            URL:     u.String(),
-           ModTime: f.ModTime().UTC(),
-           Mode:    f.Mode(),
+           ModTime: info.ModTime().UTC(),
+           Mode:    info.Mode(),
        })
    }
    name, _ := url.PathUnescape(urlPath)
@@ -15,11 +15,13 @@
 package fileserver
 
 import (
+   "io/fs"
    "path/filepath"
    "strings"
 
    "github.com/caddyserver/caddy/v2"
    "github.com/caddyserver/caddy/v2/caddyconfig"
+   "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
    "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
    "github.com/caddyserver/caddy/v2/modules/caddyhttp"
    "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"

@@ -34,16 +36,16 @@ func init() {
 // parseCaddyfile parses the file_server directive. It enables the static file
 // server and configures it with this syntax:
 //
 //  file_server [<matcher>] [browse] {
-//      root <path>
-//      hide <files...>
-//      index <files...>
-//      browse [<template_file>]
-//      precompressed <formats...>
-//      status <status>
-//      disable_canonical_uris
-//  }
-//
+//      fs <backend...>
+//      root <path>
+//      hide <files...>
+//      index <files...>
+//      browse [<template_file>]
+//      precompressed <formats...>
+//      status <status>
+//      disable_canonical_uris
+//  }
 func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
    var fsrv FileServer
 

@@ -62,6 +64,25 @@ func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error)
 
    for h.NextBlock(0) {
        switch h.Val() {
+       case "fs":
+           if !h.NextArg() {
+               return nil, h.ArgErr()
+           }
+           if fsrv.FileSystemRaw != nil {
+               return nil, h.Err("file system module already specified")
+           }
+           name := h.Val()
+           modID := "caddy.fs." + name
+           unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
+           if err != nil {
+               return nil, err
+           }
+           fsys, ok := unm.(fs.FS)
+           if !ok {
+               return nil, h.Errf("module %s (%T) is not a supported file system implementation (requires fs.FS)", modID, unm)
+           }
+           fsrv.FileSystemRaw = caddyconfig.JSONModuleObject(fsys, "backend", name, nil)
+
        case "hide":
            fsrv.Hide = h.RemainingArgs()
            if len(fsrv.Hide) == 0 {
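The new fs subdirective loads any module registered in the caddy.fs namespace and requires it to implement fs.FS. A hypothetical skeleton of such a module (the module ID, fields, and the os.DirFS backing are illustrative assumptions, not part of this change):

    // ExampleFS is a sketch of a caddy.fs backend.
    type ExampleFS struct {
        Root string `json:"root,omitempty"`
        fsys fs.FS
    }

    func (ExampleFS) CaddyModule() caddy.ModuleInfo {
        return caddy.ModuleInfo{
            ID:  "caddy.fs.example", // hypothetical module ID
            New: func() caddy.Module { return new(ExampleFS) },
        }
    }

    func (e *ExampleFS) Provision(ctx caddy.Context) error {
        e.fsys = os.DirFS(e.Root)
        return nil
    }

    // Open satisfies fs.FS, which is what the fs subdirective checks for.
    func (e *ExampleFS) Open(name string) (fs.File, error) { return e.fsys.Open(name) }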
@@ -155,22 +176,23 @@
 // with a rewrite directive, so this is not a standard handler directive.
 // A try_files directive has this syntax (notice no matcher tokens accepted):
 //
-//  try_files <files...>
+//  try_files <files...> {
+//      policy first_exist|smallest_size|largest_size|most_recently_modified
+//  }
 //
 // and is basically shorthand for:
 //
-//  @try_files {
-//      file {
-//          try_files <files...>
+//  @try_files file {
+//      try_files <files...>
+//      policy first_exist|smallest_size|largest_size|most_recently_modified
 //  }
-//  }
-//  rewrite @try_files {http.matchers.file.relative}
+//  rewrite @try_files {http.matchers.file.relative}
 //
 // This directive rewrites request paths only, preserving any other part
 // of the URI, unless the part is explicitly given in the file list. For
 // example, if any of the files in the list have a query string:
 //
 //  try_files {path} index.php?{query}&p={path}
 //
 // then the query string will not be treated as part of the file name; and
 // if that file matches, the given query string will replace any query string

@@ -185,6 +207,27 @@ func parseTryFiles(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error)
        return nil, h.ArgErr()
    }
 
+   // parse out the optional try policy
+   var tryPolicy string
+   for nesting := h.Nesting(); h.NextBlock(nesting); {
+       switch h.Val() {
+       case "policy":
+           if tryPolicy != "" {
+               return nil, h.Err("try policy already configured")
+           }
+           if !h.NextArg() {
+               return nil, h.ArgErr()
+           }
+           tryPolicy = h.Val()
+
+           switch tryPolicy {
+           case tryPolicyFirstExist, tryPolicyLargestSize, tryPolicySmallestSize, tryPolicyMostRecentlyMod:
+           default:
+               return nil, h.Errf("unrecognized try policy: %s", tryPolicy)
+           }
+       }
+   }
+
    // makeRoute returns a route that tries the files listed in try
    // and then rewrites to the matched file; userQueryString is
    // appended to the rewrite rule.

@@ -193,7 +236,7 @@ func parseTryFiles(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error)
        URI: "{http.matchers.file.relative}" + userQueryString,
    }
    matcherSet := caddy.ModuleMap{
-       "file": h.JSON(MatchFile{TryFiles: try}),
+       "file": h.JSON(MatchFile{TryFiles: try, TryPolicy: tryPolicy}),
    }
    return h.NewRoute(matcherSet, handler)
 }

@@ -117,8 +117,14 @@ func cmdFileServer(fs caddycmd.Flags) (int, error) {
        Servers: map[string]*caddyhttp.Server{"static": server},
    }
 
+   var false bool
    cfg := &caddy.Config{
-       Admin: &caddy.AdminConfig{Disabled: true},
+       Admin: &caddy.AdminConfig{
+           Disabled: true,
+           Config: &caddy.ConfigSettings{
+               Persist: &false,
+           },
+       },
        AppsRaw: caddy.ModuleMap{
            "http": caddyconfig.JSON(httpApp, nil),
        },
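The var false bool declaration above shadows the predeclared identifier purely to obtain an addressable false value for the Persist field, which takes a *bool. An equivalent, less surprising spelling of the same configuration would be:

    noPersist := false
    cfg.Admin = &caddy.AdminConfig{
        Disabled: true,
        Config:   &caddy.ConfigSettings{Persist: &noPersist},
    }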
@@ -15,24 +15,26 @@
 package fileserver
 
 import (
+   "encoding/json"
    "fmt"
+   "io/fs"
    "net/http"
    "os"
    "path"
+   "path/filepath"
+   "runtime"
    "strconv"
    "strings"
-   "time"
 
    "github.com/caddyserver/caddy/v2"
    "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
    "github.com/caddyserver/caddy/v2/modules/caddyhttp"
    "github.com/google/cel-go/cel"
-   "github.com/google/cel-go/checker/decls"
    "github.com/google/cel-go/common"
    "github.com/google/cel-go/common/operators"
    "github.com/google/cel-go/common/types/ref"
-   "github.com/google/cel-go/interpreter/functions"
    "github.com/google/cel-go/parser"
+   "go.uber.org/zap"
    exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 )
 

@@ -55,7 +57,15 @@ func init() {
 // the matched file is a directory, "file" otherwise.
 // - `{http.matchers.file.remainder}` Set to the remainder
 // of the path if the path was split by `split_path`.
+//
+// Even though file matching may depend on the OS path
+// separator, the placeholder values always use /.
 type MatchFile struct {
+   // The file system implementation to use. By default, the
+   // local disk file system will be used.
+   FileSystemRaw json.RawMessage `json:"file_system,omitempty" caddy:"namespace=caddy.fs inline_key=backend"`
+   fileSystem    fs.FS
+
    // The root directory, used for creating absolute
    // file paths, and required when working with
    // relative paths; if not specified, `{http.vars.root}`

@@ -96,6 +106,8 @@ type MatchFile struct {
    // Each delimiter must appear at the end of a URI path
    // component in order to be used as a split delimiter.
    SplitPath []string `json:"split_path,omitempty"`
+
+   logger *zap.Logger
 }
 
 // CaddyModule returns the Caddy module information.

@@ -108,12 +120,11 @@ func (MatchFile) CaddyModule() caddy.ModuleInfo {
 
 // UnmarshalCaddyfile sets up the matcher from Caddyfile tokens. Syntax:
 //
 //  file <files...> {
 //      root <path>
 //      try_files <files...>
 //      try_policy first_exist|smallest_size|largest_size|most_recently_modified
 //  }
-//
 func (m *MatchFile) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
    for d.Next() {
        m.TryFiles = append(m.TryFiles, d.RemainingArgs()...)

@@ -151,20 +162,10 @@ func (m *MatchFile) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 // expression matchers.
 //
 // Example:
-//    expression file({'root': '/srv', 'try_files': [{http.request.uri.path}, '/index.php'], 'try_policy': 'first_exist', 'split_path': ['.php']})
+//
+//  expression file({'root': '/srv', 'try_files': [{http.request.uri.path}, '/index.php'], 'try_policy': 'first_exist', 'split_path': ['.php']})
 func (MatchFile) CELLibrary(ctx caddy.Context) (cel.Library, error) {
-   requestType := decls.NewObjectType("http.Request")
-   envOptions := []cel.EnvOption{
-       cel.Macros(parser.NewGlobalVarArgMacro("file", celFileMatcherMacroExpander())),
-       cel.Declarations(
-           decls.NewFunction("file",
-               decls.NewOverload("file_request_map",
-                   []*exprpb.Type{requestType, caddyhttp.CelTypeJson},
-                   decls.Bool,
-               ),
-           ),
-       ),
-   }
-
+   requestType := cel.ObjectType("http.Request")
    matcherFactory := func(data ref.Val) (caddyhttp.RequestMatcher, error) {
        values, err := caddyhttp.CELValueToMapStrList(data)

@@ -193,14 +194,16 @@ func (MatchFile) CELLibrary(ctx caddy.Context) (cel.Library, error) {
        return m, err
    }
 
+   envOptions := []cel.EnvOption{
+       cel.Macros(parser.NewGlobalVarArgMacro("file", celFileMatcherMacroExpander())),
+       cel.Function("file", cel.Overload("file_request_map", []*cel.Type{requestType, caddyhttp.CELTypeJSON}, cel.BoolType)),
+       cel.Function("file_request_map",
+           cel.Overload("file_request_map", []*cel.Type{requestType, caddyhttp.CELTypeJSON}, cel.BoolType),
+           cel.SingletonBinaryImpl(caddyhttp.CELMatcherRuntimeFunction("file_request_map", matcherFactory))),
+   }
+
    programOptions := []cel.ProgramOption{
        cel.CustomDecorator(caddyhttp.CELMatcherDecorator("file_request_map", matcherFactory)),
-       cel.Functions(
-           &functions.Overload{
-               Operator: "file_request_map",
-               Binary:   caddyhttp.CELMatcherRuntimeFunction("file_request_map", matcherFactory),
-           },
-       ),
    }
 
    return caddyhttp.NewMatcherCELLibrary(envOptions, programOptions), nil

@@ -252,10 +255,25 @@ func celFileMatcherMacroExpander() parser.MacroExpander {
 }
 
 // Provision sets up m's defaults.
-func (m *MatchFile) Provision(_ caddy.Context) error {
+func (m *MatchFile) Provision(ctx caddy.Context) error {
+   m.logger = ctx.Logger(m)
+
+   // establish the file system to use
+   if len(m.FileSystemRaw) > 0 {
+       mod, err := ctx.LoadModule(m, "FileSystemRaw")
+       if err != nil {
+           return fmt.Errorf("loading file system module: %v", err)
+       }
+       m.fileSystem = mod.(fs.FS)
+   }
+   if m.fileSystem == nil {
+       m.fileSystem = osFS{}
+   }
+
    if m.Root == "" {
        m.Root = "{http.vars.root}"
    }
 
    // if list of files to try was omitted entirely, assume URL path
    // (use placeholder instead of r.URL.Path; see issue #4146)
    if m.TryFiles == nil {
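Provision falls back to osFS{} when no file_system module is configured. That type is defined elsewhere in the package rather than in this diff; a plausible minimal shape for such an os-backed adapter, satisfying fs.FS plus the fs.StatFS, fs.ReadDirFS, and fs.GlobFS helpers the matcher relies on, is:

    type osFS struct{}

    func (osFS) Open(name string) (fs.File, error)          { return os.Open(name) }
    func (osFS) Stat(name string) (fs.FileInfo, error)      { return os.Stat(name) }
    func (osFS) ReadDir(name string) ([]fs.DirEntry, error) { return os.ReadDir(name) }
    func (osFS) Glob(pattern string) ([]string, error)      { return filepath.Glob(pattern) }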
@@ -281,10 +299,10 @@ func (m MatchFile) Validate() error {
 // Match returns true if r matches m. Returns true
 // if a file was matched. If so, four placeholders
 // will be available:
-// - http.matchers.file.relative
-// - http.matchers.file.absolute
-// - http.matchers.file.type
-// - http.matchers.file.remainder
+// - http.matchers.file.relative: Path to file relative to site root
+// - http.matchers.file.absolute: Path to file including site root
+// - http.matchers.file.type: file or directory
+// - http.matchers.file.remainder: Portion remaining after splitting file path (if configured)
 func (m MatchFile) Match(r *http.Request) bool {
    return m.selectFile(r)
 }

@@ -294,23 +312,80 @@ func (m MatchFile) Match(r *http.Request) bool {
 func (m MatchFile) selectFile(r *http.Request) (matched bool) {
    repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
 
-   root := repl.ReplaceAll(m.Root, ".")
+   root := filepath.Clean(repl.ReplaceAll(m.Root, "."))
 
-   // common preparation of the file into parts
-   prepareFilePath := func(file string) (suffix, fullpath, remainder string) {
-       suffix, remainder = m.firstSplit(path.Clean(repl.ReplaceAll(file, "")))
-       if strings.HasSuffix(file, "/") {
-           suffix += "/"
-       }
-       fullpath = caddyhttp.SanitizedPathJoin(root, suffix)
-       return
+   type matchCandidate struct {
+       fullpath, relative, splitRemainder string
    }
 
-   // sets up the placeholders for the matched file
-   setPlaceholders := func(info os.FileInfo, rel string, abs string, remainder string) {
-       repl.Set("http.matchers.file.relative", rel)
-       repl.Set("http.matchers.file.absolute", abs)
-       repl.Set("http.matchers.file.remainder", remainder)
+   // makeCandidates evaluates placeholders in file and expands any glob expressions
+   // to build a list of file candidates. Special glob characters are escaped in
+   // placeholder replacements so globs cannot be expanded from placeholders, and
+   // globs are not evaluated on Windows because of its path separator character:
+   // escaping is not supported so we can't safely glob on Windows, or we can't
+   // support placeholders on Windows (pick one). (Actually, evaluating untrusted
+   // globs is not the end of the world since the file server will still hide any
+   // hidden files, it just might lead to unexpected behavior.)
+   makeCandidates := func(file string) []matchCandidate {
+       // first, evaluate placeholders in the file pattern
+       expandedFile, err := repl.ReplaceFunc(file, func(variable string, val any) (any, error) {
+           if runtime.GOOS == "windows" {
+               return val, nil
+           }
+           switch v := val.(type) {
+           case string:
+               return globSafeRepl.Replace(v), nil
+           case fmt.Stringer:
+               return globSafeRepl.Replace(v.String()), nil
+           }
+           return val, nil
+       })
+       if err != nil {
+           m.logger.Error("evaluating placeholders", zap.Error(err))
+           expandedFile = file // "oh well," I guess?
+       }
+
+       // clean the path and split, if configured -- we must split before
+       // globbing so that the file system doesn't include the remainder
+       // ("afterSplit") in the filename; be sure to restore trailing slash
+       beforeSplit, afterSplit := m.firstSplit(path.Clean(expandedFile))
+       if strings.HasSuffix(file, "/") {
+           beforeSplit += "/"
+       }
+
+       // create the full path to the file by prepending the site root
+       fullPattern := caddyhttp.SanitizedPathJoin(root, beforeSplit)
+
+       // expand glob expressions, but not on Windows because Glob() doesn't
+       // support escaping on Windows due to path separator)
+       var globResults []string
+       if runtime.GOOS == "windows" {
+           globResults = []string{fullPattern} // precious Windows
+       } else {
+           globResults, err = fs.Glob(m.fileSystem, fullPattern)
+           if err != nil {
+               m.logger.Error("expanding glob", zap.Error(err))
+           }
+       }
+
+       // for each glob result, combine all the forms of the path
+       var candidates []matchCandidate
+       for _, result := range globResults {
+           candidates = append(candidates, matchCandidate{
+               fullpath:       result,
+               relative:       strings.TrimPrefix(result, root),
+               splitRemainder: afterSplit,
+           })
+       }
+
+       return candidates
+   }
+
+   // setPlaceholders creates the placeholders for the matched file
+   setPlaceholders := func(candidate matchCandidate, info fs.FileInfo) {
+       repl.Set("http.matchers.file.relative", filepath.ToSlash(candidate.relative))
+       repl.Set("http.matchers.file.absolute", filepath.ToSlash(candidate.fullpath))
+       repl.Set("http.matchers.file.remainder", filepath.ToSlash(candidate.splitRemainder))
 
        fileType := "file"
        if info.IsDir() {

@@ -319,76 +394,83 @@ func (m MatchFile) selectFile(r *http.Request) (matched bool) {
        repl.Set("http.matchers.file.type", fileType)
    }
 
+   // match file according to the configured policy
    switch m.TryPolicy {
    case "", tryPolicyFirstExist:
-       for _, f := range m.TryFiles {
-           if err := parseErrorCode(f); err != nil {
+       for _, pattern := range m.TryFiles {
+           if err := parseErrorCode(pattern); err != nil {
                caddyhttp.SetVar(r.Context(), caddyhttp.MatcherErrorVarKey, err)
                return
            }
-           suffix, fullpath, remainder := prepareFilePath(f)
-           if info, exists := strictFileExists(fullpath); exists {
-               setPlaceholders(info, suffix, fullpath, remainder)
-               return true
+           candidates := makeCandidates(pattern)
+           for _, c := range candidates {
+               if info, exists := m.strictFileExists(c.fullpath); exists {
+                   setPlaceholders(c, info)
+                   return true
+               }
            }
        }
 
    case tryPolicyLargestSize:
        var largestSize int64
-       var largestFilename string
-       var largestSuffix string
-       var remainder string
-       var info os.FileInfo
-       for _, f := range m.TryFiles {
-           suffix, fullpath, splitRemainder := prepareFilePath(f)
-           info, err := os.Stat(fullpath)
-           if err == nil && info.Size() > largestSize {
-               largestSize = info.Size()
-               largestFilename = fullpath
-               largestSuffix = suffix
-               remainder = splitRemainder
+       var largest matchCandidate
+       var largestInfo os.FileInfo
+       for _, pattern := range m.TryFiles {
+           candidates := makeCandidates(pattern)
+           for _, c := range candidates {
+               info, err := fs.Stat(m.fileSystem, c.fullpath)
+               if err == nil && info.Size() > largestSize {
+                   largestSize = info.Size()
+                   largest = c
+                   largestInfo = info
+               }
            }
        }
-       setPlaceholders(info, largestSuffix, largestFilename, remainder)
+       if largestInfo == nil {
+           return false
+       }
+       setPlaceholders(largest, largestInfo)
        return true
 
    case tryPolicySmallestSize:
        var smallestSize int64
-       var smallestFilename string
-       var smallestSuffix string
-       var remainder string
-       var info os.FileInfo
-       for _, f := range m.TryFiles {
-           suffix, fullpath, splitRemainder := prepareFilePath(f)
-           info, err := os.Stat(fullpath)
-           if err == nil && (smallestSize == 0 || info.Size() < smallestSize) {
-               smallestSize = info.Size()
-               smallestFilename = fullpath
-               smallestSuffix = suffix
-               remainder = splitRemainder
+       var smallest matchCandidate
+       var smallestInfo os.FileInfo
+       for _, pattern := range m.TryFiles {
+           candidates := makeCandidates(pattern)
+           for _, c := range candidates {
+               info, err := fs.Stat(m.fileSystem, c.fullpath)
+               if err == nil && (smallestSize == 0 || info.Size() < smallestSize) {
+                   smallestSize = info.Size()
+                   smallest = c
+                   smallestInfo = info
+               }
           }
        }
-       setPlaceholders(info, smallestSuffix, smallestFilename, remainder)
+       if smallestInfo == nil {
+           return false
+       }
+       setPlaceholders(smallest, smallestInfo)
        return true
 
    case tryPolicyMostRecentlyMod:
-       var recentDate time.Time
-       var recentFilename string
-       var recentSuffix string
-       var remainder string
-       var info os.FileInfo
-       for _, f := range m.TryFiles {
-           suffix, fullpath, splitRemainder := prepareFilePath(f)
-           info, err := os.Stat(fullpath)
-           if err == nil &&
-               (recentDate.IsZero() || info.ModTime().After(recentDate)) {
-               recentDate = info.ModTime()
-               recentFilename = fullpath
-               recentSuffix = suffix
-               remainder = splitRemainder
+       var recent matchCandidate
+       var recentInfo os.FileInfo
+       for _, pattern := range m.TryFiles {
+           candidates := makeCandidates(pattern)
+           for _, c := range candidates {
+               info, err := fs.Stat(m.fileSystem, c.fullpath)
+               if err == nil &&
+                   (recentInfo == nil || info.ModTime().After(recentInfo.ModTime())) {
+                   recent = c
+                   recentInfo = info
+               }
            }
        }
-       setPlaceholders(info, recentSuffix, recentFilename, remainder)
+       if recentInfo == nil {
+           return false
+       }
+       setPlaceholders(recent, recentInfo)
        return true
    }
 

@@ -415,8 +497,8 @@ func parseErrorCode(input string) error {
 // the file must also be a directory; if it does
 // NOT end in a forward slash, the file must NOT
 // be a directory.
-func strictFileExists(file string) (os.FileInfo, bool) {
-   stat, err := os.Stat(file)
+func (m MatchFile) strictFileExists(file string) (os.FileInfo, bool) {
+   info, err := fs.Stat(m.fileSystem, file)
    if err != nil {
        // in reality, this can be any error
        // such as permission or even obscure

@@ -431,11 +513,11 @@ func strictFileExists(file string) (os.FileInfo, bool) {
    if strings.HasSuffix(file, separator) {
        // by convention, file paths ending
        // in a path separator must be a directory
-       return stat, stat.IsDir()
+       return info, info.IsDir()
    }
    // by convention, file paths NOT ending
    // in a path separator must NOT be a directory
-   return stat, !stat.IsDir()
+   return info, !info.IsDir()
 }
 
 // firstSplit returns the first result where the path

@@ -572,6 +654,15 @@ func isCELStringListLiteral(e *exprpb.Expr) bool {
    return false
 }
 
+// globSafeRepl replaces special glob characters with escaped
+// equivalents. Note that the filepath godoc states that
+// escaping is not done on Windows because of the separator.
+var globSafeRepl = strings.NewReplacer(
+   "*", "\\*",
+   "[", "\\[",
+   "?", "\\?",
+)
+
 const (
    tryPolicyFirstExist  = "first_exist"
    tryPolicyLargestSize = "largest_size"
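globSafeRepl is what keeps user-controlled placeholder values from smuggling wildcards into the glob pass of selectFile: the matcher escapes *, [ and ? in replaced values before the pattern reaches fs.Glob, so only wildcards written by the site author remain active (and, per the filepath documentation, no escaping is attempted on Windows). A small hypothetical helper showing the same idea in isolation (not part of this change):

    func globLiterally(fsys fs.FS, root, untrusted string) ([]string, error) {
        escaped := globSafeRepl.Replace(untrusted) // "*.txt" becomes "\*.txt"
        return fs.Glob(fsys, path.Join(root, escaped))
    }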
@ -28,7 +28,6 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFileMatcher(t *testing.T) {
|
func TestFileMatcher(t *testing.T) {
|
||||||
|
|
||||||
// Windows doesn't like colons in files names
|
// Windows doesn't like colons in files names
|
||||||
isWindows := runtime.GOOS == "windows"
|
isWindows := runtime.GOOS == "windows"
|
||||||
if !isWindows {
|
if !isWindows {
|
||||||
|
@@ -87,37 +86,38 @@ func TestFileMatcher(t *testing.T) {
 		},
 		{
 			path:         "ملف.txt", // the path file name is not escaped
-			expectedPath: "ملف.txt",
+			expectedPath: "/ملف.txt",
 			expectedType: "file",
 			matched:      true,
 		},
 		{
 			path:         url.PathEscape("ملف.txt"), // singly-escaped path
-			expectedPath: "ملف.txt",
+			expectedPath: "/ملف.txt",
 			expectedType: "file",
 			matched:      true,
 		},
 		{
 			path:         url.PathEscape(url.PathEscape("ملف.txt")), // doubly-escaped path
-			expectedPath: "%D9%85%D9%84%D9%81.txt",
+			expectedPath: "/%D9%85%D9%84%D9%81.txt",
 			expectedType: "file",
 			matched:      true,
 		},
 		{
 			path:         "./with:in-name.txt", // browsers send the request with the path as such
-			expectedPath: "with:in-name.txt",
+			expectedPath: "/with:in-name.txt",
 			expectedType: "file",
 			matched:      !isWindows,
 		},
 	} {
 		m := &MatchFile{
-			Root:     "./testdata",
-			TryFiles: []string{"{http.request.uri.path}", "{http.request.uri.path}/"},
+			fileSystem: osFS{},
+			Root:       "./testdata",
+			TryFiles:   []string{"{http.request.uri.path}", "{http.request.uri.path}/"},
 		}

 		u, err := url.Parse(tc.path)
 		if err != nil {
-			t.Fatalf("Test %d: parsing path: %v", i, err)
+			t.Errorf("Test %d: parsing path: %v", i, err)
 		}

 		req := &http.Request{URL: u}
@@ -125,24 +125,24 @@ func TestFileMatcher(t *testing.T) {

 		result := m.Match(req)
 		if result != tc.matched {
-			t.Fatalf("Test %d: expected match=%t, got %t", i, tc.matched, result)
+			t.Errorf("Test %d: expected match=%t, got %t", i, tc.matched, result)
 		}

 		rel, ok := repl.Get("http.matchers.file.relative")
 		if !ok && result {
-			t.Fatalf("Test %d: expected replacer value", i)
+			t.Errorf("Test %d: expected replacer value", i)
 		}
 		if !result {
 			continue
 		}

 		if rel != tc.expectedPath {
-			t.Fatalf("Test %d: actual path: %v, expected: %v", i, rel, tc.expectedPath)
+			t.Errorf("Test %d: actual path: %v, expected: %v", i, rel, tc.expectedPath)
 		}

 		fileType, _ := repl.Get("http.matchers.file.type")
 		if fileType != tc.expectedType {
-			t.Fatalf("Test %d: actual file type: %v, expected: %v", i, fileType, tc.expectedType)
+			t.Errorf("Test %d: actual file type: %v, expected: %v", i, fileType, tc.expectedType)
 		}
 	}
 }
@@ -213,14 +213,15 @@ func TestPHPFileMatcher(t *testing.T) {
 		},
 	} {
 		m := &MatchFile{
-			Root:      "./testdata",
-			TryFiles:  []string{"{http.request.uri.path}", "{http.request.uri.path}/index.php"},
-			SplitPath: []string{".php"},
+			fileSystem: osFS{},
+			Root:       "./testdata",
+			TryFiles:   []string{"{http.request.uri.path}", "{http.request.uri.path}/index.php"},
+			SplitPath:  []string{".php"},
 		}

 		u, err := url.Parse(tc.path)
 		if err != nil {
-			t.Fatalf("Test %d: parsing path: %v", i, err)
+			t.Errorf("Test %d: parsing path: %v", i, err)
 		}

 		req := &http.Request{URL: u}
@@ -228,24 +229,24 @@ func TestPHPFileMatcher(t *testing.T) {

 		result := m.Match(req)
 		if result != tc.matched {
-			t.Fatalf("Test %d: expected match=%t, got %t", i, tc.matched, result)
+			t.Errorf("Test %d: expected match=%t, got %t", i, tc.matched, result)
 		}

 		rel, ok := repl.Get("http.matchers.file.relative")
 		if !ok && result {
-			t.Fatalf("Test %d: expected replacer value", i)
+			t.Errorf("Test %d: expected replacer value", i)
 		}
 		if !result {
 			continue
 		}

 		if rel != tc.expectedPath {
-			t.Fatalf("Test %d: actual path: %v, expected: %v", i, rel, tc.expectedPath)
+			t.Errorf("Test %d: actual path: %v, expected: %v", i, rel, tc.expectedPath)
 		}

 		fileType, _ := repl.Get("http.matchers.file.type")
 		if fileType != tc.expectedType {
-			t.Fatalf("Test %d: actual file type: %v, expected: %v", i, fileType, tc.expectedType)
+			t.Errorf("Test %d: actual file type: %v, expected: %v", i, fileType, tc.expectedType)
 		}
 	}
 }
@@ -15,11 +15,14 @@
 package fileserver

 import (
+	"encoding/json"
+	"errors"
 	"fmt"
+	"io"
+	"io/fs"
 	weakrand "math/rand"
 	"mime"
 	"net/http"
-	"net/url"
 	"os"
 	"path"
 	"path/filepath"
@@ -39,10 +42,63 @@ func init() {
 	caddy.RegisterModule(FileServer{})
 }

-// FileServer implements a static file server responder for Caddy.
+// FileServer implements a handler that serves static files.
+//
+// The path of the file to serve is constructed by joining the site root
+// and the sanitized request path. Any and all files within the root and
+// links with targets outside the site root may therefore be accessed.
+// For example, with a site root of `/www`, requests to `/foo/bar.txt`
+// will serve the file at `/www/foo/bar.txt`.
+//
+// The request path is sanitized using the Go standard library's
+// path.Clean() function (https://pkg.go.dev/path#Clean) before being
+// joined to the root. Request paths must be valid and well-formed.
+//
+// For requests that access directories instead of regular files,
+// Caddy will attempt to serve an index file if present. For example,
+// a request to `/dir/` will attempt to serve `/dir/index.html` if
+// it exists. The index file names to try are configurable. If a
+// requested directory does not have an index file, Caddy writes a
+// 404 response. Alternatively, file browsing can be enabled with
+// the "browse" parameter which shows a list of files when directories
+// are requested if no index file is present.
+//
+// By default, this handler will canonicalize URIs so that requests to
+// directories end with a slash, but requests to regular files do not.
+// This is enforced with HTTP redirects automatically and can be disabled.
+// Canonicalization redirects are not issued, however, if a URI rewrite
+// modified the last component of the path (the filename).
+//
+// This handler sets the Etag and Last-Modified headers for static files.
+// It does not perform MIME sniffing to determine Content-Type based on
+// contents, but does use the extension (if known); see the Go docs for
+// details: https://pkg.go.dev/mime#TypeByExtension
+//
+// The file server properly handles requests with If-Match,
+// If-Unmodified-Since, If-Modified-Since, If-None-Match, Range, and
+// If-Range headers. It includes the file's modification time in the
+// Last-Modified header of the response.
 type FileServer struct {
+	// The file system implementation to use. By default, Caddy uses the local
+	// disk file system.
+	//
+	// File system modules used here must adhere to the following requirements:
+	// - Implement fs.FS interface.
+	// - Support seeking on opened files; i.e. returned fs.File values must
+	//   implement the io.Seeker interface. This is required for determining
+	//   Content-Length and satisfying Range requests.
+	// - fs.File values that represent directories must implement the
+	//   fs.ReadDirFile interface so that directory listings can be procured.
+	FileSystemRaw json.RawMessage `json:"file_system,omitempty" caddy:"namespace=caddy.fs inline_key=backend"`
+	fileSystem    fs.FS
+
 	// The path to the root of the site. Default is `{http.vars.root}` if set,
-	// or current working directory otherwise.
+	// or current working directory otherwise. This should be a trusted value.
+	//
+	// Note that a site root is not a sandbox. Although the file server does
+	// sanitize the request URI to prevent directory traversal, files (including
+	// links) within the site root may be directly accessed based on the request
+	// path. Files and folders within the root should be secure and trustworthy.
 	Root string `json:"root,omitempty"`

 	// A list of files or folders to hide; the file server will pretend as if
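As a sketch of what the requirements above mean for a plugin author: a hypothetical in-memory file system module (the module ID and the use of testing/fstest are illustrative assumptions, not part of this change) could be as small as the following, and would then be selected through a `file_system` block whose `backend` names the module:

package memfs

import (
    "io/fs"
    "testing/fstest"

    "github.com/caddyserver/caddy/v2"
)

func init() {
    caddy.RegisterModule(MemFS{})
}

// MemFS is a hypothetical in-memory file system module. fstest.MapFS already
// returns seekable files and directories that implement fs.ReadDirFile,
// which covers the requirements listed in the FileServer docs above.
type MemFS struct {
    fstest.MapFS
}

// CaddyModule returns the Caddy module information.
func (MemFS) CaddyModule() caddy.ModuleInfo {
    return caddy.ModuleInfo{
        ID:  "caddy.fs.memory", // hypothetical member of the caddy.fs namespace
        New: func() caddy.Module { return new(MemFS) },
    }
}

// interface guard
var _ fs.FS = (*MemFS)(nil)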
@@ -63,6 +119,7 @@ type FileServer struct {
 	Hide []string `json:"hide,omitempty"`

 	// The names of files to try as index files if a folder is requested.
+	// Default: index.html, index.txt.
 	IndexNames []string `json:"index_names,omitempty"`

 	// Enables file listings if a directory was requested and no index
@@ -95,8 +152,7 @@ type FileServer struct {
 	// If no order specified here, the first encoding from the Accept-Encoding header
 	// that both client and server support is used
 	PrecompressedOrder []string `json:"precompressed_order,omitempty"`
-
-	precompressors map[string]encode.Precompressed
+	precompressors     map[string]encode.Precompressed

 	logger *zap.Logger
 }
@@ -113,6 +169,18 @@ func (FileServer) CaddyModule() caddy.ModuleInfo {
 func (fsrv *FileServer) Provision(ctx caddy.Context) error {
 	fsrv.logger = ctx.Logger(fsrv)

+	// establish which file system (possibly a virtual one) we'll be using
+	if len(fsrv.FileSystemRaw) > 0 {
+		mod, err := ctx.LoadModule(fsrv, "FileSystemRaw")
+		if err != nil {
+			return fmt.Errorf("loading file system module: %v", err)
+		}
+		fsrv.fileSystem = mod.(fs.FS)
+	}
+	if fsrv.fileSystem == nil {
+		fsrv.fileSystem = osFS{}
+	}
+
 	if fsrv.Root == "" {
 		fsrv.Root = "{http.vars.root}"
 	}
@@ -131,11 +199,12 @@ func (fsrv *FileServer) Provision(ctx caddy.Context) error {
 		}
 	}

+	// support precompressed sidecar files
 	mods, err := ctx.LoadModule(fsrv, "PrecompressedRaw")
 	if err != nil {
 		return fmt.Errorf("loading encoder modules: %v", err)
 	}
-	for modName, modIface := range mods.(map[string]interface{}) {
+	for modName, modIface := range mods.(map[string]any) {
 		p, ok := modIface.(encode.Precompressed)
 		if !ok {
 			return fmt.Errorf("module %s is not precompressor", modName)
@@ -166,16 +235,7 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
 	filesToHide := fsrv.transformHidePaths(repl)

 	root := repl.ReplaceAll(fsrv.Root, ".")
-	// PathUnescape returns an error if the escapes aren't well-formed,
-	// meaning the count % matches the RFC. Return early if the escape is
-	// improper.
-	if _, err := url.PathUnescape(r.URL.Path); err != nil {
-		fsrv.logger.Debug("improper path escape",
-			zap.String("site_root", root),
-			zap.String("request_path", r.URL.Path),
-			zap.Error(err))
-		return err
-	}
+
 	filename := caddyhttp.SanitizedPathJoin(root, r.URL.Path)

 	fsrv.logger.Debug("sanitized path join",
@@ -184,12 +244,12 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
 		zap.String("result", filename))

 	// get information about the file
-	info, err := os.Stat(filename)
+	info, err := fs.Stat(fsrv.fileSystem, filename)
 	if err != nil {
-		err = mapDirOpenError(err, filename)
-		if os.IsNotExist(err) {
+		err = fsrv.mapDirOpenError(err, filename)
+		if errors.Is(err, fs.ErrNotExist) {
 			return fsrv.notFound(w, r, next)
-		} else if os.IsPermission(err) {
+		} else if errors.Is(err, fs.ErrPermission) {
 			return caddyhttp.Error(http.StatusForbidden, err)
 		}
 		return caddyhttp.Error(http.StatusInternalServerError, err)
@@ -210,7 +270,7 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
 			continue
 		}

-		indexInfo, err := os.Stat(indexPath)
+		indexInfo, err := fs.Stat(fsrv.fileSystem, indexPath)
 		if err != nil {
 			continue
 		}
@@ -280,7 +340,8 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
 		}
 	}

-	var file *os.File
+	var file fs.File
+	var etag string

 	// check for precompressed files
 	for _, ae := range encode.AcceptedEncodings(r, fsrv.PrecompressedOrder) {
@@ -289,7 +350,7 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
 			continue
 		}
 		compressedFilename := filename + precompress.Suffix()
-		compressedInfo, err := os.Stat(compressedFilename)
+		compressedInfo, err := fs.Stat(fsrv.fileSystem, compressedFilename)
 		if err != nil || compressedInfo.IsDir() {
 			fsrv.logger.Debug("precompressed file not accessible", zap.String("filename", compressedFilename), zap.Error(err))
 			continue
@@ -301,12 +362,19 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
 			if caddyErr, ok := err.(caddyhttp.HandlerError); ok && caddyErr.StatusCode == http.StatusServiceUnavailable {
 				return err
 			}
+			file = nil
 			continue
 		}
 		defer file.Close()
 		w.Header().Set("Content-Encoding", ae)
 		w.Header().Del("Accept-Ranges")
 		w.Header().Add("Vary", "Accept-Encoding")

+		// don't assign info = compressedInfo because sidecars are kind
+		// of transparent; however we do need to set the Etag:
+		// https://caddy.community/t/gzipped-sidecar-file-wrong-same-etag/16793
+		etag = calculateEtag(compressedInfo)
+
 		break
 	}

@@ -324,18 +392,18 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
 			return err // error is already structured
 		}
 		defer file.Close()
+
+		etag = calculateEtag(info)
 	}

-	// set the ETag - note that a conditional If-None-Match request is handled
-	// by http.ServeContent below, which checks against this ETag value
-	w.Header().Set("ETag", calculateEtag(info))
+	// set the Etag - note that a conditional If-None-Match request is handled
+	// by http.ServeContent below, which checks against this Etag value
+	w.Header().Set("Etag", etag)

 	if w.Header().Get("Content-Type") == "" {
 		mtyp := mime.TypeByExtension(filepath.Ext(filename))
 		if mtyp == "" {
-			// do not allow Go to sniff the content-type; see
-			// https://www.youtube.com/watch?v=8t8JYpt0egE
-			// TODO: If we want a Content-Type, consider writing a default of application/octet-stream - this is secure but violates spec
+			// do not allow Go to sniff the content-type; see https://www.youtube.com/watch?v=8t8JYpt0egE
 			w.Header()["Content-Type"] = nil
 		} else {
 			w.Header().Set("Content-Type", mtyp)
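calculateEtag itself is outside this hunk; as a rough illustration of a metadata-only validator like the one being stored in etag here, one common scheme (not claimed to be Caddy's exact encoding) combines modification time and size:

import (
    "io/fs"
    "strconv"
)

// makeEtagSketch derives an ETag-style validator from file metadata only;
// illustrative, and not necessarily the format calculateEtag produces.
func makeEtagSketch(info fs.FileInfo) string {
    return `"` + strconv.FormatInt(info.ModTime().Unix(), 36) +
        strconv.FormatInt(info.Size(), 36) + `"`
}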
@@ -375,7 +443,7 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
 	// that errors generated by ServeContent are written immediately
 	// to the response, so we cannot handle them (but errors there
 	// are rare)
-	http.ServeContent(w, r, info.Name(), info.ModTime(), file)
+	http.ServeContent(w, r, info.Name(), info.ModTime(), file.(io.ReadSeeker))

 	return nil
 }
@@ -384,10 +452,10 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
 // the response is configured to inform the client how to best handle it
 // and a well-described handler error is returned (do not wrap the
 // returned error value).
-func (fsrv *FileServer) openFile(filename string, w http.ResponseWriter) (*os.File, error) {
-	file, err := os.Open(filename)
+func (fsrv *FileServer) openFile(filename string, w http.ResponseWriter) (fs.File, error) {
+	file, err := fsrv.fileSystem.Open(filename)
 	if err != nil {
-		err = mapDirOpenError(err, filename)
+		err = fsrv.mapDirOpenError(err, filename)
 		if os.IsNotExist(err) {
 			fsrv.logger.Debug("file not found", zap.String("filename", filename), zap.Error(err))
 			return nil, caddyhttp.Error(http.StatusNotFound, err)
@@ -412,8 +480,8 @@ func (fsrv *FileServer) openFile(filename string, w http.ResponseWriter) (*os.Fi
 // Adapted from the Go standard library; originally written by Nathaniel Caza.
 // https://go-review.googlesource.com/c/go/+/36635/
 // https://go-review.googlesource.com/c/go/+/36804/
-func mapDirOpenError(originalErr error, name string) error {
-	if os.IsNotExist(originalErr) || os.IsPermission(originalErr) {
+func (fsrv *FileServer) mapDirOpenError(originalErr error, name string) error {
+	if errors.Is(originalErr, fs.ErrNotExist) || errors.Is(originalErr, fs.ErrPermission) {
 		return originalErr
 	}

@@ -422,12 +490,12 @@ func mapDirOpenError(originalErr error, name string) error {
 		if parts[i] == "" {
 			continue
 		}
-		fi, err := os.Stat(strings.Join(parts[:i+1], separator))
+		fi, err := fs.Stat(fsrv.fileSystem, strings.Join(parts[:i+1], separator))
 		if err != nil {
 			return originalErr
 		}
 		if !fi.IsDir() {
-			return os.ErrNotExist
+			return fs.ErrNotExist
 		}
 	}

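The point of the helper is that callers can test the result uniformly with errors.Is against fs.ErrNotExist, regardless of the backing file system. A standalone sketch of the situation it normalizes, treating a regular file as a directory component; the path is only an example:

package main

import (
    "errors"
    "fmt"
    "io/fs"
    "os"
)

func main() {
    // "go.mod/inner" asks the OS to treat a regular file as a directory;
    // depending on the platform this yields ENOTDIR rather than "not exist",
    // which is exactly the case the helper above maps to fs.ErrNotExist.
    _, err := os.Stat("go.mod/inner")
    fmt.Println(err, errors.Is(err, fs.ErrNotExist))
}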
@@ -545,6 +613,21 @@ func (wr statusOverrideResponseWriter) WriteHeader(int) {
 	wr.ResponseWriter.WriteHeader(wr.code)
 }

+// osFS is a simple fs.FS implementation that uses the local
+// file system. (We do not use os.DirFS because we do our own
+// rooting or path prefixing without being constrained to a single
+// root folder. The standard os.DirFS implementation is problematic
+// since roots can be dynamic in our application.)
+//
+// osFS also implements fs.StatFS, fs.GlobFS, fs.ReadDirFS, and fs.ReadFileFS.
+type osFS struct{}
+
+func (osFS) Open(name string) (fs.File, error)          { return os.Open(name) }
+func (osFS) Stat(name string) (fs.FileInfo, error)      { return os.Stat(name) }
+func (osFS) Glob(pattern string) ([]string, error)      { return filepath.Glob(pattern) }
+func (osFS) ReadDir(name string) ([]fs.DirEntry, error) { return os.ReadDir(name) }
+func (osFS) ReadFile(name string) ([]byte, error)       { return os.ReadFile(name) }
+
 var defaultIndexNames = []string{"index.html", "index.txt"}

 const (
|
||||||
var (
|
var (
|
||||||
_ caddy.Provisioner = (*FileServer)(nil)
|
_ caddy.Provisioner = (*FileServer)(nil)
|
||||||
_ caddyhttp.MiddlewareHandler = (*FileServer)(nil)
|
_ caddyhttp.MiddlewareHandler = (*FileServer)(nil)
|
||||||
|
|
||||||
|
_ fs.StatFS = (*osFS)(nil)
|
||||||
|
_ fs.GlobFS = (*osFS)(nil)
|
||||||
|
_ fs.ReadDirFS = (*osFS)(nil)
|
||||||
|
_ fs.ReadFileFS = (*osFS)(nil)
|
||||||
)
|
)
|
||||||
|
|
1
modules/caddyhttp/fileserver/testdata/foodir/bar.txt
vendored
Normal file
@@ -0,0 +1 @@
+foodir/bar.txt
@@ -194,27 +194,27 @@ type RespHeaderOps struct {
 func (ops HeaderOps) ApplyTo(hdr http.Header, repl *caddy.Replacer) {
 	// add
 	for fieldName, vals := range ops.Add {
-		fieldName = repl.ReplaceAll(fieldName, "")
+		fieldName = repl.ReplaceKnown(fieldName, "")
 		for _, v := range vals {
-			hdr.Add(fieldName, repl.ReplaceAll(v, ""))
+			hdr.Add(fieldName, repl.ReplaceKnown(v, ""))
 		}
 	}

 	// set
 	for fieldName, vals := range ops.Set {
-		fieldName = repl.ReplaceAll(fieldName, "")
+		fieldName = repl.ReplaceKnown(fieldName, "")
 		var newVals []string
 		for i := range vals {
 			// append to new slice so we don't overwrite
 			// the original values in ops.Set
-			newVals = append(newVals, repl.ReplaceAll(vals[i], ""))
+			newVals = append(newVals, repl.ReplaceKnown(vals[i], ""))
 		}
 		hdr.Set(fieldName, strings.Join(newVals, ","))
 	}

 	// delete
 	for _, fieldName := range ops.Delete {
-		fieldName = strings.ToLower(repl.ReplaceAll(fieldName, ""))
+		fieldName = strings.ToLower(repl.ReplaceKnown(fieldName, ""))
 		switch {
 		case strings.HasPrefix(fieldName, "*") && strings.HasSuffix(fieldName, "*"):
 			for existingField := range hdr {
@@ -241,13 +241,13 @@ func (ops HeaderOps) ApplyTo(hdr http.Header, repl *caddy.Replacer) {

 	// replace
 	for fieldName, replacements := range ops.Replace {
-		fieldName = http.CanonicalHeaderKey(repl.ReplaceAll(fieldName, ""))
+		fieldName = http.CanonicalHeaderKey(repl.ReplaceKnown(fieldName, ""))

 		// all fields...
 		if fieldName == "*" {
 			for _, r := range replacements {
-				search := repl.ReplaceAll(r.Search, "")
-				replace := repl.ReplaceAll(r.Replace, "")
+				search := repl.ReplaceKnown(r.Search, "")
+				replace := repl.ReplaceKnown(r.Replace, "")
 				for fieldName, vals := range hdr {
 					for i := range vals {
 						if r.re != nil {
@@ -263,8 +263,8 @@ func (ops HeaderOps) ApplyTo(hdr http.Header, repl *caddy.Replacer) {

 		// ...or only with the named field
 		for _, r := range replacements {
-			search := repl.ReplaceAll(r.Search, "")
-			replace := repl.ReplaceAll(r.Replace, "")
+			search := repl.ReplaceKnown(r.Search, "")
+			replace := repl.ReplaceKnown(r.Replace, "")
 			for hdrFieldName, vals := range hdr {
 				// see issue #4330 for why we don't simply use hdr[fieldName]
 				if http.CanonicalHeaderKey(hdrFieldName) != fieldName {
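The practical difference between ReplaceAll and ReplaceKnown shows up when a header value merely looks like a placeholder: ReplaceKnown leaves unrecognized placeholders intact instead of emptying them out. A standalone sketch (the placeholder names are made up):

package main

import (
    "fmt"

    "github.com/caddyserver/caddy/v2"
)

func main() {
    repl := caddy.NewReplacer()
    repl.Set("http.request.host", "example.com")

    in := "{http.request.host} / {not.a.placeholder}"
    fmt.Println(repl.ReplaceAll(in, ""))   // "example.com / " - unknown placeholder emptied
    fmt.Println(repl.ReplaceKnown(in, "")) // "example.com / {not.a.placeholder}" - left as-is
}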
@@ -27,10 +27,10 @@ func init() {

 // parseCaddyfile sets up the map handler from Caddyfile tokens. Syntax:
 //
 //	map [<matcher>] <source> <destinations...> {
 //	    [~]<input> <outputs...>
 //	    default <defaults...>
 //	}
 //
 // If the input value is prefixed with a tilde (~), then the input will be parsed as a
 // regular expression.
@@ -76,9 +76,9 @@ func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error)
 			continue
 		}

-		// every other line maps one input to one or more outputs
+		// every line maps an input value to one or more outputs
 		in := h.Val()
-		var outs []interface{}
+		var outs []any
 		for h.NextArg() {
 			val := h.ScalarVal()
 			if val == "-" {
@@ -62,6 +62,9 @@ func (Handler) CaddyModule() caddy.ModuleInfo {
 // Provision sets up h.
 func (h *Handler) Provision(_ caddy.Context) error {
 	for j, dest := range h.Destinations {
+		if strings.Count(dest, "{") != 1 || !strings.HasPrefix(dest, "{") {
+			return fmt.Errorf("destination must be a placeholder and only a placeholder")
+		}
 		h.Destinations[j] = strings.Trim(dest, "{}")
 	}

@@ -106,6 +109,16 @@ func (h *Handler) Validate() error {
 		}
 		seen[input] = i

+		// prevent infinite recursion
+		for _, out := range m.Outputs {
+			for _, dest := range h.Destinations {
+				if strings.Contains(caddy.ToString(out), dest) ||
+					strings.Contains(m.Input, dest) {
+					return fmt.Errorf("mapping %d requires value of {%s} to define value of {%s}: infinite recursion", i, dest, dest)
+				}
+			}
+		}
+
 		// ensure mappings have 1:1 output-to-destination correspondence
 		nOut := len(m.Outputs)
 		if nOut != nDest {
@@ -119,7 +132,7 @@ func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhtt
 	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

 	// defer work until a variable is actually evaluated by using replacer's Map callback
-	repl.Map(func(key string) (interface{}, bool) {
+	repl.Map(func(key string) (any, bool) {
 		// return early if the variable is not even a configured destination
 		destIdx := h.destinationIndex(key)
 		if destIdx < 0 {
@@ -135,21 +148,22 @@ func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhtt
 			if output == nil {
 				continue
 			}
+			outputStr := caddy.ToString(output)
+
+			// evaluate regular expression if configured
 			if m.re != nil {
 				var result []byte
 				matches := m.re.FindStringSubmatchIndex(input)
 				if matches == nil {
 					continue
 				}
-				result = m.re.ExpandString(result, output.(string), input, matches)
+				result = m.re.ExpandString(result, outputStr, input, matches)
 				return string(result), true
 			}

+			// otherwise simple string comparison
 			if input == m.Input {
-				if outputStr, ok := output.(string); ok {
-					// NOTE: if the output has a placeholder that has the same key as the input, this is infinite recursion
-					return repl.ReplaceAll(outputStr, ""), true
-				}
-				return output, true
+				return repl.ReplaceAll(outputStr, ""), true
 			}
 		}

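Both branches now flow through outputStr, and because the work happens inside the replacer's Map callback, nothing is computed until a destination placeholder is actually evaluated. The same deferral pattern, sketched outside the handler (the key and helper are made up for illustration):

package main

import (
    "fmt"
    "time"

    "github.com/caddyserver/caddy/v2"
)

// computeExpensiveValue stands in for work we only want to do on demand.
func computeExpensiveValue() string { return time.Now().Format(time.RFC3339) }

func main() {
    repl := caddy.NewReplacer()
    repl.Map(func(key string) (any, bool) {
        if key == "my.expensive.value" {
            return computeExpensiveValue(), true // runs only if the placeholder is evaluated
        }
        return nil, false
    })
    fmt.Println(repl.ReplaceAll("{my.expensive.value}", ""))
}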
@@ -187,7 +201,7 @@ type Mapping struct {
 	// Upon a match with the input, each output is positionally correlated
 	// with each destination of the parent handler. An output that is null
 	// (nil) will be treated as if it was not mapped at all.
-	Outputs []interface{} `json:"outputs,omitempty"`
+	Outputs []any `json:"outputs,omitempty"`

 	re *regexp.Regexp
 }
@@ -15,7 +15,7 @@ func TestHandler(t *testing.T) {
 	for i, tc := range []struct {
 		handler Handler
 		reqURI  string
-		expect  map[string]interface{}
+		expect  map[string]any
 	}{
 		{
 			reqURI: "/foo",

@@ -25,11 +25,11 @@ func TestHandler(t *testing.T) {
 				Mappings: []Mapping{
 					{
 						Input:   "/foo",
-						Outputs: []interface{}{"FOO"},
+						Outputs: []any{"FOO"},
 					},
 				},
 			},
-			expect: map[string]interface{}{
+			expect: map[string]any{
 				"output": "FOO",
 			},
 		},

@@ -41,11 +41,11 @@ func TestHandler(t *testing.T) {
 				Mappings: []Mapping{
 					{
 						InputRegexp: "(/abc)",
-						Outputs:     []interface{}{"ABC"},
+						Outputs:     []any{"ABC"},
 					},
 				},
 			},
-			expect: map[string]interface{}{
+			expect: map[string]any{
 				"output": "ABC",
 			},
 		},

@@ -57,11 +57,11 @@ func TestHandler(t *testing.T) {
 				Mappings: []Mapping{
 					{
 						InputRegexp: "(xyz)",
-						Outputs:     []interface{}{"...${1}..."},
+						Outputs:     []any{"...${1}..."},
 					},
 				},
 			},
-			expect: map[string]interface{}{
+			expect: map[string]any{
 				"output": "...xyz...",
 			},
 		},

@@ -74,11 +74,11 @@ func TestHandler(t *testing.T) {
 				Mappings: []Mapping{
 					{
 						InputRegexp: "(?i)(\\^|`|<|>|%|\\\\|\\{|\\}|\\|)",
-						Outputs:     []interface{}{"3"},
+						Outputs:     []any{"3"},
 					},
 				},
 			},
-			expect: map[string]interface{}{
+			expect: map[string]any{
 				"output": "3",
 			},
 		},

@@ -90,11 +90,11 @@ func TestHandler(t *testing.T) {
 				Mappings: []Mapping{
 					{
 						Input:   "/foo",
-						Outputs: []interface{}{"{testvar}"},
+						Outputs: []any{"{testvar}"},
 					},
 				},
 			},
-			expect: map[string]interface{}{
+			expect: map[string]any{
 				"output": "testing",
 			},
 		},
@@ -20,10 +20,10 @@ import (
 	"fmt"
 	"net"
 	"net/http"
+	"net/netip"
 	"net/textproto"
 	"net/url"
 	"path"
-	"path/filepath"
 	"reflect"
 	"regexp"
 	"sort"
@@ -33,11 +33,9 @@ import (
 	"github.com/caddyserver/caddy/v2"
 	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
 	"github.com/google/cel-go/cel"
-	"github.com/google/cel-go/checker/decls"
 	"github.com/google/cel-go/common/types"
 	"github.com/google/cel-go/common/types/ref"
 	"go.uber.org/zap"
-	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 )

 type (
@@ -65,20 +63,51 @@ type (
 	// Duplicate entries will return an error.
 	MatchHost []string

-	// MatchPath matches requests by the URI's path (case-insensitive). Path
-	// matches are exact, but wildcards may be used:
+	// MatchPath case-insensitively matches requests by the URI's path. Path
+	// matching is exact, not prefix-based, giving you more control and clarity
+	// over matching. Wildcards (`*`) may be used:
 	//
-	// - At the end, for a prefix match (`/prefix/*`)
-	// - At the beginning, for a suffix match (`*.suffix`)
-	// - On both sides, for a substring match (`*/contains/*`)
+	// - At the end only, for a prefix match (`/prefix/*`)
+	// - At the beginning only, for a suffix match (`*.suffix`)
+	// - On both sides only, for a substring match (`*/contains/*`)
 	// - In the middle, for a globular match (`/accounts/*/info`)
 	//
+	// Slashes are significant; i.e. `/foo*` matches `/foo`, `/foo/`, `/foo/bar`,
+	// and `/foobar`; but `/foo/*` does not match `/foo` or `/foobar`. Valid
+	// paths start with a slash `/`.
+	//
+	// Because there are, in general, multiple possible escaped forms of any
+	// path, path matchers operate in unescaped space; that is, path matchers
+	// should be written in their unescaped form to prevent ambiguities and
+	// possible security issues, as all request paths will be normalized to
+	// their unescaped forms before matcher evaluation.
+	//
+	// However, escape sequences in a match pattern are supported; they are
+	// compared with the request's raw/escaped path for those bytes only.
+	// In other words, a matcher of `/foo%2Fbar` will match a request path
+	// of precisely `/foo%2Fbar`, but not `/foo/bar`. It follows that matching
+	// the literal percent sign (%) in normalized space can be done using the
+	// escaped form, `%25`.
+	//
+	// Even though wildcards (`*`) operate in the normalized space, the special
+	// escaped wildcard (`%*`), which is not a valid escape sequence, may be
+	// used in place of a span that should NOT be decoded; that is, `/bands/%*`
+	// will match `/bands/AC%2fDC` whereas `/bands/*` will not.
+	//
+	// Even though path matching is done in normalized space, the special
+	// wildcard `%*` may be used in place of a span that should NOT be decoded;
+	// that is, `/bands/%*/` will match `/bands/AC%2fDC/` whereas `/bands/*/`
+	// will not.
+	//
 	// This matcher is fast, so it does not support regular expressions or
 	// capture groups. For slower but more powerful matching, use the
-	// path_regexp matcher.
+	// path_regexp matcher. (Note that due to the special treatment of
+	// escape sequences in matcher patterns, they may perform slightly slower
+	// in high-traffic environments.)
 	MatchPath []string

 	// MatchPathRE matches requests by a regular expression on the URI's path.
+	// Path matching is performed in the unescaped (decoded) form of the path.
 	//
 	// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
 	// where `name` is the regular expression's name, and `capture_group` is either
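Read alongside the doc comment above, a test-style sketch of how the wildcard and escape-sequence rules behave, written as if in the same package (it additionally assumes the context, fmt, and net/http/httptest imports); the expectations in the comment follow the examples given in the doc text, and the snippet is illustrative rather than part of the commit:

// sketchPathMatcherSemantics exercises the MatchPath semantics described above.
func sketchPathMatcherSemantics() {
    m := MatchPath{"/foo/*", "*.php", "/bands/%*"}
    _ = m.Provision(caddy.Context{}) // lower-cases the patterns

    for _, target := range []string{"/foo/bar", "/index.PHP", "/bands/AC%2fDC", "/foo"} {
        req := httptest.NewRequest("GET", target, nil)
        ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, caddy.NewReplacer())
        req = req.WithContext(ctx)
        fmt.Println(target, m.Match(req)) // expected: true, true, true, false
    }
}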
@@ -103,6 +132,9 @@ type (
 	//     "query": ["*"]
 	// }
 	// ```
+	//
+	// Invalid query strings, including those with bad escapings or illegal characters
+	// like semicolons, will fail to parse and thus fail to match.
 	MatchQuery url.Values

 	// MatchHeader matches requests by header fields. The key is the field
@@ -140,7 +172,7 @@ type (

 	// cidrs and zones vars should aligned always in the same
 	// length and indexes for matching later
-	cidrs []*net.IPNet
+	cidrs []*netip.Prefix
 	zones []string
 	logger *zap.Logger
 }
@@ -302,12 +334,13 @@ outer:
 // expression matchers.
 //
 // Example:
-// expression host('localhost')
+//
+//	expression host('localhost')
 func (MatchHost) CELLibrary(ctx caddy.Context) (cel.Library, error) {
 	return CELMatcherImpl(
 		"host",
 		"host_match_request_list",
-		[]*exprpb.Type{CelTypeListString},
+		[]*cel.Type{cel.ListType(cel.StringType)},
 		func(data ref.Val) (RequestMatcher, error) {
 			refStringList := reflect.TypeOf([]string{})
 			strList, err := data.ConvertToNative(refStringList)
@@ -341,6 +374,11 @@ func (MatchPath) CaddyModule() caddy.ModuleInfo {
 // Provision lower-cases the paths in m to ensure case-insensitive matching.
 func (m MatchPath) Provision(_ caddy.Context) error {
 	for i := range m {
+		if m[i] == "*" && i > 0 {
+			// will always match, so just put it first
+			m[0] = m[i]
+			break
+		}
 		m[i] = strings.ToLower(m[i])
 	}
 	return nil
@@ -348,77 +386,108 @@ func (m MatchPath) Provision(_ caddy.Context) error {

 // Match returns true if r matches m.
 func (m MatchPath) Match(r *http.Request) bool {
-	// PathUnescape returns an error if the escapes aren't
-	// well-formed, meaning the count % matches the RFC.
-	// Return early if the escape is improper.
-	unescapedPath, err := url.PathUnescape(r.URL.Path)
-	if err != nil {
-		return false
-	}
-
-	lowerPath := strings.ToLower(unescapedPath)
-
-	// Clean the path, merges doubled slashes, etc.
-	// This ensures maliciously crafted requests can't bypass
-	// the path matcher. See #4407
-	lowerPath = path.Clean(lowerPath)
+	// Even though RFC 9110 says that path matching is case-sensitive
+	// (https://www.rfc-editor.org/rfc/rfc9110.html#section-4.2.3),
+	// we do case-insensitive matching to mitigate security issues
+	// related to differences between operating systems, applications,
+	// etc; if case-sensitive matching is needed, the regex matcher
+	// can be used instead.
+	reqPath := strings.ToLower(r.URL.Path)

-	// see #2917; Windows ignores trailing dots and spaces
+	// See #2917; Windows ignores trailing dots and spaces
 	// when accessing files (sigh), potentially causing a
 	// security risk (cry) if PHP files end up being served
 	// as static files, exposing the source code, instead of
-	// being matched by *.php to be treated as PHP scripts
-	lowerPath = strings.TrimRight(lowerPath, ". ")
-
-	// Cleaning may remove the trailing slash, but we want to keep it
-	if lowerPath != "/" && strings.HasSuffix(r.URL.Path, "/") {
-		lowerPath = lowerPath + "/"
-	}
+	// being matched by *.php to be treated as PHP scripts.
+	reqPath = strings.TrimRight(reqPath, ". ")

 	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

-	for _, matchPath := range m {
-		matchPath = repl.ReplaceAll(matchPath, "")
+	for _, matchPattern := range m {
+		matchPattern = repl.ReplaceAll(matchPattern, "")

 		// special case: whole path is wildcard; this is unnecessary
 		// as it matches all requests, which is the same as no matcher
-		if matchPath == "*" {
+		if matchPattern == "*" {
 			return true
 		}

+		// Clean the path, merge doubled slashes, etc.
+		// This ensures maliciously crafted requests can't bypass
+		// the path matcher. See #4407. Good security posture
+		// requires that we should do all we can to reduce any
+		// funny-looking paths into "normalized" forms such that
+		// weird variants can't sneak by.
+		//
+		// How we clean the path depends on the kind of pattern:
+		// we either merge slashes or we don't. If the pattern
+		// has double slashes, we preserve them in the path.
+		//
+		// TODO: Despite the fact that the *vast* majority of path
+		// matchers have only 1 pattern, a possible optimization is
+		// to remember the cleaned form of the path for future
+		// iterations; it's just that the way we clean depends on
+		// the kind of pattern.
+
+		mergeSlashes := !strings.Contains(matchPattern, "//")
+
+		// if '%' appears in the match pattern, we interpret that to mean
+		// the intent is to compare that part of the path in raw/escaped
+		// space; i.e. "%40"=="%40", not "@", and "%2F"=="%2F", not "/"
+		if strings.Contains(matchPattern, "%") {
+			reqPathForPattern := CleanPath(r.URL.EscapedPath(), mergeSlashes)
+			if m.matchPatternWithEscapeSequence(reqPathForPattern, matchPattern) {
+				return true
+			}
+
+			// doing prefix/suffix/substring matches doesn't make sense
+			continue
+		}
+
+		reqPathForPattern := CleanPath(reqPath, mergeSlashes)
+
+		// for substring, prefix, and suffix matching, only perform those
+		// special, fast matches if they are the only wildcards in the pattern;
+		// otherwise we assume a globular match if any * appears in the middle
+
 		// special case: first and last characters are wildcard,
 		// treat it as a fast substring match
-		if len(matchPath) > 1 &&
-			strings.HasPrefix(matchPath, "*") &&
-			strings.HasSuffix(matchPath, "*") {
-			if strings.Contains(lowerPath, matchPath[1:len(matchPath)-1]) {
+		if strings.Count(matchPattern, "*") == 2 &&
+			strings.HasPrefix(matchPattern, "*") &&
+			strings.HasSuffix(matchPattern, "*") &&
+			strings.Count(matchPattern, "*") == 2 {
+			if strings.Contains(reqPathForPattern, matchPattern[1:len(matchPattern)-1]) {
 				return true
 			}
 			continue
 		}

-		// special case: first character is a wildcard,
-		// treat it as a fast suffix match
-		if strings.HasPrefix(matchPath, "*") {
-			if strings.HasSuffix(lowerPath, matchPath[1:]) {
-				return true
+		// only perform prefix/suffix match if it is the only wildcard...
+		// I think that is more correct most of the time
+		if strings.Count(matchPattern, "*") == 1 {
+			// special case: first character is a wildcard,
+			// treat it as a fast suffix match
+			if strings.HasPrefix(matchPattern, "*") {
+				if strings.HasSuffix(reqPathForPattern, matchPattern[1:]) {
+					return true
+				}
+				continue
+			}
+
+			// special case: last character is a wildcard,
+			// treat it as a fast prefix match
+			if strings.HasSuffix(matchPattern, "*") {
+				if strings.HasPrefix(reqPathForPattern, matchPattern[:len(matchPattern)-1]) {
+					return true
+				}
+				continue
 			}
-			continue
 		}

-		// special case: last character is a wildcard,
-		// treat it as a fast prefix match
-		if strings.HasSuffix(matchPath, "*") {
-			if strings.HasPrefix(lowerPath, matchPath[:len(matchPath)-1]) {
-				return true
-			}
-			continue
-		}
-
-		// for everything else, try globular matching, which also
-		// is exact matching if there are no glob/wildcard chars;
-		// can ignore error here because we can't handle it anyway
-		matches, _ := filepath.Match(matchPath, lowerPath)
+		// at last, use globular matching, which also is exact matching
+		// if there are no glob/wildcard chars; we ignore the error here
+		// because we can't handle it anyway
+		matches, _ := path.Match(matchPattern, reqPathForPattern)
 		if matches {
 			return true
 		}
@@ -426,11 +495,118 @@ func (m MatchPath) Match(r *http.Request) bool {
 	return false
 }

+func (MatchPath) matchPatternWithEscapeSequence(escapedPath, matchPath string) bool {
+	// We would just compare the pattern against r.URL.Path,
+	// but the pattern contains %, indicating that we should
+	// compare at least some part of the path in raw/escaped
+	// space, not normalized space; so we build the string we
+	// will compare against by adding the normalized parts
+	// of the path, then switching to the escaped parts where
+	// the pattern hints to us wherever % is present.
+	var sb strings.Builder
+
+	// iterate the pattern and escaped path in lock-step;
+	// increment iPattern every time we consume a char from the pattern,
+	// increment iPath every time we consume a char from the path;
+	// iPattern and iPath are our cursors/iterator positions for each string
+	var iPattern, iPath int
+	for {
+		if iPattern >= len(matchPath) || iPath >= len(escapedPath) {
+			break
+		}
+
+		// get the next character from the request path
+
+		pathCh := string(escapedPath[iPath])
+		var escapedPathCh string
+
+		// normalize (decode) escape sequences
+		if pathCh == "%" && len(escapedPath) >= iPath+3 {
+			// hold onto this in case we find out the intent is to match in escaped space here;
+			// we lowercase it even though technically the spec says: "For consistency, URI
+			// producers and normalizers should use uppercase hexadecimal digits for all percent-
+			// encodings" (RFC 3986 section 2.1) - we lowercased the matcher pattern earlier in
+			// provisioning so we do the same here to gain case-insensitivity in equivalence;
+			// besides, this string is never shown visibly
+			escapedPathCh = strings.ToLower(escapedPath[iPath : iPath+3])
+
+			var err error
+			pathCh, err = url.PathUnescape(escapedPathCh)
+			if err != nil {
+				// should be impossible unless EscapedPath() is giving us an invalid sequence!
+				return false
+			}
+			iPath += 2 // escape sequence is 2 bytes longer than normal char
+		}
+
+		// now get the next character from the pattern
+
+		normalize := true
+		switch matchPath[iPattern] {
+		case '%':
+			// escape sequence
+
+			// if not a wildcard ("%*"), compare literally; consume next two bytes of pattern
+			if len(matchPath) >= iPattern+3 && matchPath[iPattern+1] != '*' {
+				sb.WriteString(escapedPathCh)
+				iPath++
+				iPattern += 2
+				break
+			}
+
+			// escaped wildcard sequence; consume next byte only ('*')
+			iPattern++
+			normalize = false
+
+			fallthrough
+		case '*':
+			// wildcard, so consume until next matching character
+			remaining := escapedPath[iPath:]
+			until := len(escapedPath) - iPath // go until end of string...
+			if iPattern < len(matchPath)-1 {  // ...unless the * is not at the end
+				nextCh := matchPath[iPattern+1]
+				until = strings.IndexByte(remaining, nextCh)
+				if until == -1 {
+					// terminating char of wildcard span not found, so definitely no match
+					return false
+				}
+			}
+			if until == 0 {
+				// empty span; nothing to add on this iteration
+				break
+			}
+			next := remaining[:until]
+			if normalize {
+				var err error
+				next, err = url.PathUnescape(next)
+				if err != nil {
+					return false // should be impossible anyway
+				}
+			}
+			sb.WriteString(next)
+			iPath += until
+		default:
+			sb.WriteString(pathCh)
+			iPath++
+		}
+
+		iPattern++
+	}
+
+	// we can now treat rawpath globs (%*) as regular globs (*)
+	matchPath = strings.ReplaceAll(matchPath, "%*", "*")
+
+	// ignore error here because we can't handle it anyway
+	matches, _ := path.Match(matchPath, sb.String())
+	return matches
+}
+
 // CELLibrary produces options that expose this matcher for use in CEL
 // expression matchers.
 //
 // Example:
-// expression path('*substring*', '*suffix')
+//
+//	expression path('*substring*', '*suffix')
 func (MatchPath) CELLibrary(ctx caddy.Context) (cel.Library, error) {
 	return CELMatcherImpl(
 		// name of the macro, this is the function name that users see when writing expressions.
@@ -438,7 +614,7 @@ func (MatchPath) CELLibrary(ctx caddy.Context) (cel.Library, error) {
 		// name of the function that the macro will be rewritten to call.
 		"path_match_request_list",
 		// internal data type of the MatchPath value.
-		[]*exprpb.Type{CelTypeListString},
+		[]*cel.Type{cel.ListType(cel.StringType)},
 		// function to convert a constant list of strings to a MatchPath instance.
 		func(data ref.Val) (RequestMatcher, error) {
 			refStringList := reflect.TypeOf([]string{})
@ -476,23 +652,10 @@ func (MatchPathRE) CaddyModule() caddy.ModuleInfo {
|
||||||
func (m MatchPathRE) Match(r *http.Request) bool {
|
func (m MatchPathRE) Match(r *http.Request) bool {
|
||||||
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
||||||
|
|
||||||
// PathUnescape returns an error if the escapes aren't
|
|
||||||
// well-formed, meaning the count % matches the RFC.
|
|
||||||
// Return early if the escape is improper.
|
|
||||||
unescapedPath, err := url.PathUnescape(r.URL.Path)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean the path, merges doubled slashes, etc.
|
// Clean the path, merges doubled slashes, etc.
|
||||||
// This ensures maliciously crafted requests can't bypass
|
// This ensures maliciously crafted requests can't bypass
|
||||||
// the path matcher. See #4407
|
// the path matcher. See #4407
|
||||||
cleanedPath := path.Clean(unescapedPath)
|
cleanedPath := cleanPath(r.URL.Path)
|
||||||
|
|
||||||
// Cleaning may remove the trailing slash, but we want to keep it
|
|
||||||
if cleanedPath != "/" && strings.HasSuffix(r.URL.Path, "/") {
|
|
||||||
cleanedPath = cleanedPath + "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
return m.MatchRegexp.Match(cleanedPath, repl)
|
return m.MatchRegexp.Match(cleanedPath, repl)
|
||||||
}
|
}
|
||||||
|
@ -501,12 +664,13 @@ func (m MatchPathRE) Match(r *http.Request) bool {
|
||||||
// expression matchers.
|
// expression matchers.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
// expression path_regexp('^/bar')
|
//
|
||||||
|
// expression path_regexp('^/bar')
|
||||||
func (MatchPathRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
|
func (MatchPathRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
|
||||||
unnamedPattern, err := CELMatcherImpl(
|
unnamedPattern, err := CELMatcherImpl(
|
||||||
"path_regexp",
|
"path_regexp",
|
||||||
"path_regexp_request_string",
|
"path_regexp_request_string",
|
||||||
[]*exprpb.Type{decls.String},
|
[]*cel.Type{cel.StringType},
|
||||||
func(data ref.Val) (RequestMatcher, error) {
|
func(data ref.Val) (RequestMatcher, error) {
|
||||||
pattern := data.(types.String)
|
pattern := data.(types.String)
|
||||||
matcher := MatchPathRE{MatchRegexp{Pattern: string(pattern)}}
|
matcher := MatchPathRE{MatchRegexp{Pattern: string(pattern)}}
|
||||||
|
@ -520,7 +684,7 @@ func (MatchPathRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
|
||||||
namedPattern, err := CELMatcherImpl(
|
namedPattern, err := CELMatcherImpl(
|
||||||
"path_regexp",
|
"path_regexp",
|
||||||
"path_regexp_request_string_string",
|
"path_regexp_request_string_string",
|
||||||
[]*exprpb.Type{decls.String, decls.String},
|
[]*cel.Type{cel.StringType, cel.StringType},
|
||||||
func(data ref.Val) (RequestMatcher, error) {
|
func(data ref.Val) (RequestMatcher, error) {
|
||||||
refStringList := reflect.TypeOf([]string{})
|
refStringList := reflect.TypeOf([]string{})
|
||||||
params, err := data.ConvertToNative(refStringList)
|
params, err := data.ConvertToNative(refStringList)
|
||||||
|
@ -574,12 +738,13 @@ func (m MatchMethod) Match(r *http.Request) bool {
|
||||||
// expression matchers.
|
// expression matchers.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
// expression method('PUT', 'POST')
|
//
|
||||||
|
// expression method('PUT', 'POST')
|
||||||
func (MatchMethod) CELLibrary(_ caddy.Context) (cel.Library, error) {
|
func (MatchMethod) CELLibrary(_ caddy.Context) (cel.Library, error) {
|
||||||
return CELMatcherImpl(
|
return CELMatcherImpl(
|
||||||
"method",
|
"method",
|
||||||
"method_request_list",
|
"method_request_list",
|
||||||
[]*exprpb.Type{CelTypeListString},
|
[]*cel.Type{cel.ListType(cel.StringType)},
|
||||||
func(data ref.Val) (RequestMatcher, error) {
|
func(data ref.Val) (RequestMatcher, error) {
|
||||||
refStringList := reflect.TypeOf([]string{})
|
refStringList := reflect.TypeOf([]string{})
|
||||||
strList, err := data.ConvertToNative(refStringList)
|
strList, err := data.ConvertToNative(refStringList)
|
||||||
|
@ -609,11 +774,11 @@ func (m *MatchQuery) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||||
if query == "" {
|
if query == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
parts := strings.SplitN(query, "=", 2)
|
before, after, found := strings.Cut(query, "=")
|
||||||
if len(parts) != 2 {
|
if !found {
|
||||||
return d.Errf("malformed query matcher token: %s; must be in param=val format", d.Val())
|
return d.Errf("malformed query matcher token: %s; must be in param=val format", d.Val())
|
||||||
}
|
}
|
||||||
url.Values(*m).Add(parts[0], parts[1])
|
url.Values(*m).Add(before, after)
|
||||||
}
|
}
|
||||||
if d.NextBlock(0) {
|
if d.NextBlock(0) {
|
||||||
return d.Err("malformed query matcher: blocks are not supported")
|
return d.Err("malformed query matcher: blocks are not supported")
|
||||||
|
@ -625,9 +790,25 @@ func (m *MatchQuery) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||||
// Match returns true if r matches m. An empty m matches an empty query string.
|
// Match returns true if r matches m. An empty m matches an empty query string.
|
||||||
func (m MatchQuery) Match(r *http.Request) bool {
|
func (m MatchQuery) Match(r *http.Request) bool {
|
||||||
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
||||||
|
|
||||||
|
// parse query string just once, for efficiency
|
||||||
|
parsed, err := url.ParseQuery(r.URL.RawQuery)
|
||||||
|
if err != nil {
|
||||||
|
// Illegal query string. Likely bad escape sequence or unescaped literals.
|
||||||
|
// Note that semicolons in query string have a controversial history. Summaries:
|
||||||
|
// - https://github.com/golang/go/issues/50034
|
||||||
|
// - https://github.com/golang/go/issues/25192
|
||||||
|
// Despite the URL WHATWG spec mandating the use of & separators for query strings,
|
||||||
|
// every URL parser implementation is different, and Filippo Valsorda rightly wrote:
|
||||||
|
// "Relying on parser alignment for security is doomed." Overall conclusion is that
|
||||||
|
// splitting on & and rejecting ; in key=value pairs is safer than accepting raw ;.
|
||||||
|
// We regard the Go team's decision as sound and thus reject malformed query strings.
|
||||||
|
return false
|
||||||
|
}
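
An assumed illustration (not from the patch) of the behavior the comment above relies on: url.ParseQuery parses the query string once into url.Values and, since Go 1.17, returns an error for semicolon-separated pairs, so the matcher fails closed on such input.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Well-formed query: parsed once into url.Values.
	vals, err := url.ParseQuery("sort=asc&page=2")
	fmt.Println(vals.Get("sort"), err) // asc <nil>

	// Semicolon separators are rejected by Go's parser (since Go 1.17),
	// so a matcher relying on ParseQuery would simply return false here.
	_, err = url.ParseQuery("sort=asc;page=2")
	fmt.Println(err != nil) // true
}
```
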
|
||||||
|
|
||||||
for param, vals := range m {
|
for param, vals := range m {
|
||||||
param = repl.ReplaceAll(param, "")
|
param = repl.ReplaceAll(param, "")
|
||||||
paramVal, found := r.URL.Query()[param]
|
paramVal, found := parsed[param]
|
||||||
if found {
|
if found {
|
||||||
for _, v := range vals {
|
for _, v := range vals {
|
||||||
v = repl.ReplaceAll(v, "")
|
v = repl.ReplaceAll(v, "")
|
||||||
|
@ -644,12 +825,13 @@ func (m MatchQuery) Match(r *http.Request) bool {
|
||||||
// expression matchers.
|
// expression matchers.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
// expression query({'sort': 'asc'}) || query({'foo': ['*bar*', 'baz']})
|
//
|
||||||
|
// expression query({'sort': 'asc'}) || query({'foo': ['*bar*', 'baz']})
|
||||||
func (MatchQuery) CELLibrary(_ caddy.Context) (cel.Library, error) {
|
func (MatchQuery) CELLibrary(_ caddy.Context) (cel.Library, error) {
|
||||||
return CELMatcherImpl(
|
return CELMatcherImpl(
|
||||||
"query",
|
"query",
|
||||||
"query_matcher_request_map",
|
"query_matcher_request_map",
|
||||||
[]*exprpb.Type{CelTypeJson},
|
[]*cel.Type{CELTypeJSON},
|
||||||
func(data ref.Val) (RequestMatcher, error) {
|
func(data ref.Val) (RequestMatcher, error) {
|
||||||
mapStrListStr, err := CELValueToMapStrList(data)
|
mapStrListStr, err := CELValueToMapStrList(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -719,13 +901,14 @@ func (m MatchHeader) Match(r *http.Request) bool {
|
||||||
// expression matchers.
|
// expression matchers.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
// expression header({'content-type': 'image/png'})
|
//
|
||||||
// expression header({'foo': ['bar', 'baz']}) // match bar or baz
|
// expression header({'content-type': 'image/png'})
|
||||||
|
// expression header({'foo': ['bar', 'baz']}) // match bar or baz
|
||||||
func (MatchHeader) CELLibrary(_ caddy.Context) (cel.Library, error) {
|
func (MatchHeader) CELLibrary(_ caddy.Context) (cel.Library, error) {
|
||||||
return CELMatcherImpl(
|
return CELMatcherImpl(
|
||||||
"header",
|
"header",
|
||||||
"header_matcher_request_map",
|
"header_matcher_request_map",
|
||||||
[]*exprpb.Type{CelTypeJson},
|
[]*cel.Type{CELTypeJSON},
|
||||||
func(data ref.Val) (RequestMatcher, error) {
|
func(data ref.Val) (RequestMatcher, error) {
|
||||||
mapStrListStr, err := CELValueToMapStrList(data)
|
mapStrListStr, err := CELValueToMapStrList(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -822,6 +1005,12 @@ func (m *MatchHeaderRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||||
val = second
|
val = second
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If there's already a pattern for this field
|
||||||
|
// then we would end up overwriting the old one
|
||||||
|
if (*m)[field] != nil {
|
||||||
|
return d.Errf("header_regexp matcher can only be used once per named matcher, per header field: %s", field)
|
||||||
|
}
|
||||||
|
|
||||||
(*m)[field] = &MatchRegexp{Pattern: val, Name: name}
|
(*m)[field] = &MatchRegexp{Pattern: val, Name: name}
|
||||||
|
|
||||||
if d.NextBlock(0) {
|
if d.NextBlock(0) {
|
||||||
|
@ -877,12 +1066,13 @@ func (m MatchHeaderRE) Validate() error {
|
||||||
// expression matchers.
|
// expression matchers.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
// expression header_regexp('foo', 'Field', 'fo+')
|
//
|
||||||
|
// expression header_regexp('foo', 'Field', 'fo+')
|
||||||
func (MatchHeaderRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
|
func (MatchHeaderRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
|
||||||
unnamedPattern, err := CELMatcherImpl(
|
unnamedPattern, err := CELMatcherImpl(
|
||||||
"header_regexp",
|
"header_regexp",
|
||||||
"header_regexp_request_string_string",
|
"header_regexp_request_string_string",
|
||||||
[]*exprpb.Type{decls.String, decls.String},
|
[]*cel.Type{cel.StringType, cel.StringType},
|
||||||
func(data ref.Val) (RequestMatcher, error) {
|
func(data ref.Val) (RequestMatcher, error) {
|
||||||
refStringList := reflect.TypeOf([]string{})
|
refStringList := reflect.TypeOf([]string{})
|
||||||
params, err := data.ConvertToNative(refStringList)
|
params, err := data.ConvertToNative(refStringList)
|
||||||
|
@ -902,7 +1092,7 @@ func (MatchHeaderRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
|
||||||
namedPattern, err := CELMatcherImpl(
|
namedPattern, err := CELMatcherImpl(
|
||||||
"header_regexp",
|
"header_regexp",
|
||||||
"header_regexp_request_string_string_string",
|
"header_regexp_request_string_string_string",
|
||||||
[]*exprpb.Type{decls.String, decls.String, decls.String},
|
[]*cel.Type{cel.StringType, cel.StringType, cel.StringType},
|
||||||
func(data ref.Val) (RequestMatcher, error) {
|
func(data ref.Val) (RequestMatcher, error) {
|
||||||
refStringList := reflect.TypeOf([]string{})
|
refStringList := reflect.TypeOf([]string{})
|
||||||
params, err := data.ConvertToNative(refStringList)
|
params, err := data.ConvertToNative(refStringList)
|
||||||
|
@ -941,6 +1131,22 @@ func (m MatchProtocol) Match(r *http.Request) bool {
|
||||||
return r.TLS != nil
|
return r.TLS != nil
|
||||||
case "http":
|
case "http":
|
||||||
return r.TLS == nil
|
return r.TLS == nil
|
||||||
|
case "http/1.0":
|
||||||
|
return r.ProtoMajor == 1 && r.ProtoMinor == 0
|
||||||
|
case "http/1.0+":
|
||||||
|
return r.ProtoAtLeast(1, 0)
|
||||||
|
case "http/1.1":
|
||||||
|
return r.ProtoMajor == 1 && r.ProtoMinor == 1
|
||||||
|
case "http/1.1+":
|
||||||
|
return r.ProtoAtLeast(1, 1)
|
||||||
|
case "http/2":
|
||||||
|
return r.ProtoMajor == 2
|
||||||
|
case "http/2+":
|
||||||
|
return r.ProtoAtLeast(2, 0)
|
||||||
|
case "http/3":
|
||||||
|
return r.ProtoMajor == 3
|
||||||
|
case "http/3+":
|
||||||
|
return r.ProtoAtLeast(3, 0)
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
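
A hedged sketch of how the exact-version and "+" variants added above differ, using only the standard net/http request fields; the literal request construction is for illustration only.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// An HTTP/2 request, as the standard library would populate it.
	req := &http.Request{Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0}

	// Exact-version checks compare major/minor directly...
	fmt.Println(req.ProtoMajor == 1 && req.ProtoMinor == 1) // false: not http/1.1

	// ...while the "+" variants use ProtoAtLeast for "this version or newer".
	fmt.Println(req.ProtoAtLeast(1, 1)) // true: http/1.1+
	fmt.Println(req.ProtoAtLeast(2, 0)) // true: http/2+
	fmt.Println(req.ProtoAtLeast(3, 0)) // false: not http/3+
}
```
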
|
||||||
|
@ -961,12 +1167,13 @@ func (m *MatchProtocol) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||||
// expression matchers.
|
// expression matchers.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
// expression protocol('https')
|
//
|
||||||
|
// expression protocol('https')
|
||||||
func (MatchProtocol) CELLibrary(_ caddy.Context) (cel.Library, error) {
|
func (MatchProtocol) CELLibrary(_ caddy.Context) (cel.Library, error) {
|
||||||
return CELMatcherImpl(
|
return CELMatcherImpl(
|
||||||
"protocol",
|
"protocol",
|
||||||
"protocol_request_string",
|
"protocol_request_string",
|
||||||
[]*exprpb.Type{decls.String},
|
[]*cel.Type{cel.StringType},
|
||||||
func(data ref.Val) (RequestMatcher, error) {
|
func(data ref.Val) (RequestMatcher, error) {
|
||||||
protocolStr, ok := data.(types.String)
|
protocolStr, ok := data.(types.String)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -987,57 +1194,12 @@ func (MatchNot) CaddyModule() caddy.ModuleInfo {
|
||||||
|
|
||||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||||
func (m *MatchNot) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
func (m *MatchNot) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||||
// first, unmarshal each matcher in the set from its tokens
|
|
||||||
type matcherPair struct {
|
|
||||||
raw caddy.ModuleMap
|
|
||||||
decoded MatcherSet
|
|
||||||
}
|
|
||||||
for d.Next() {
|
for d.Next() {
|
||||||
var mp matcherPair
|
matcherSet, err := ParseCaddyfileNestedMatcherSet(d)
|
||||||
matcherMap := make(map[string]RequestMatcher)
|
if err != nil {
|
||||||
|
return err
|
||||||
// in case there are multiple instances of the same matcher, concatenate
|
|
||||||
// their tokens (we expect that UnmarshalCaddyfile should be able to
|
|
||||||
// handle more than one segment); otherwise, we'd overwrite other
|
|
||||||
// instances of the matcher in this set
|
|
||||||
tokensByMatcherName := make(map[string][]caddyfile.Token)
|
|
||||||
for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
|
|
||||||
matcherName := d.Val()
|
|
||||||
tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
|
|
||||||
}
|
}
|
||||||
for matcherName, tokens := range tokensByMatcherName {
|
m.MatcherSetsRaw = append(m.MatcherSetsRaw, matcherSet)
|
||||||
mod, err := caddy.GetModule("http.matchers." + matcherName)
|
|
||||||
if err != nil {
|
|
||||||
return d.Errf("getting matcher module '%s': %v", matcherName, err)
|
|
||||||
}
|
|
||||||
unm, ok := mod.New().(caddyfile.Unmarshaler)
|
|
||||||
if !ok {
|
|
||||||
return d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
|
|
||||||
}
|
|
||||||
err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rm, ok := unm.(RequestMatcher)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
|
|
||||||
}
|
|
||||||
matcherMap[matcherName] = rm
|
|
||||||
mp.decoded = append(mp.decoded, rm)
|
|
||||||
}
|
|
||||||
|
|
||||||
// we should now have a functional 'not' matcher, but we also
|
|
||||||
// need to be able to marshal as JSON, otherwise config
|
|
||||||
// adaptation will be missing the matchers!
|
|
||||||
mp.raw = make(caddy.ModuleMap)
|
|
||||||
for name, matcher := range matcherMap {
|
|
||||||
jsonBytes, err := json.Marshal(matcher)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("marshaling %T matcher: %v", matcher, err)
|
|
||||||
}
|
|
||||||
mp.raw[name] = jsonBytes
|
|
||||||
}
|
|
||||||
m.MatcherSetsRaw = append(m.MatcherSetsRaw, mp.raw)
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -1060,7 +1222,7 @@ func (m *MatchNot) Provision(ctx caddy.Context) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("loading matcher sets: %v", err)
|
return fmt.Errorf("loading matcher sets: %v", err)
|
||||||
}
|
}
|
||||||
for _, modMap := range matcherSets.([]map[string]interface{}) {
|
for _, modMap := range matcherSets.([]map[string]any) {
|
||||||
var ms MatcherSet
|
var ms MatcherSet
|
||||||
for _, modIface := range modMap {
|
for _, modIface := range modMap {
|
||||||
ms = append(ms, modIface.(RequestMatcher))
|
ms = append(ms, modIface.(RequestMatcher))
|
||||||
|
@ -1125,7 +1287,8 @@ func (m *MatchRemoteIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||||
// expression matchers.
|
// expression matchers.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
// expression remote_ip('forwarded', '192.168.0.0/16', '172.16.0.0/12', '10.0.0.0/8')
|
//
|
||||||
|
// expression remote_ip('forwarded', '192.168.0.0/16', '172.16.0.0/12', '10.0.0.0/8')
|
||||||
func (MatchRemoteIP) CELLibrary(ctx caddy.Context) (cel.Library, error) {
|
func (MatchRemoteIP) CELLibrary(ctx caddy.Context) (cel.Library, error) {
|
||||||
return CELMatcherImpl(
|
return CELMatcherImpl(
|
||||||
// name of the macro, this is the function name that users see when writing expressions.
|
// name of the macro, this is the function name that users see when writing expressions.
|
||||||
|
@ -1133,7 +1296,7 @@ func (MatchRemoteIP) CELLibrary(ctx caddy.Context) (cel.Library, error) {
|
||||||
// name of the function that the macro will be rewritten to call.
|
// name of the function that the macro will be rewritten to call.
|
||||||
"remote_ip_match_request_list",
|
"remote_ip_match_request_list",
|
||||||
// internal data type of the MatchPath value.
|
// internal data type of the MatchPath value.
|
||||||
[]*exprpb.Type{CelTypeListString},
|
[]*cel.Type{cel.ListType(cel.StringType)},
|
||||||
// function to convert a constant list of strings to a MatchPath instance.
|
// function to convert a constant list of strings to a MatchPath instance.
|
||||||
func(data ref.Val) (RequestMatcher, error) {
|
func(data ref.Val) (RequestMatcher, error) {
|
||||||
refStringList := reflect.TypeOf([]string{})
|
refStringList := reflect.TypeOf([]string{})
|
||||||
|
@ -1175,27 +1338,24 @@ func (m *MatchRemoteIP) Provision(ctx caddy.Context) error {
|
||||||
m.zones = append(m.zones, "")
|
m.zones = append(m.zones, "")
|
||||||
}
|
}
|
||||||
if strings.Contains(str, "/") {
|
if strings.Contains(str, "/") {
|
||||||
_, ipNet, err := net.ParseCIDR(str)
|
ipNet, err := netip.ParsePrefix(str)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("parsing CIDR expression '%s': %v", str, err)
|
return fmt.Errorf("parsing CIDR expression '%s': %v", str, err)
|
||||||
}
|
}
|
||||||
m.cidrs = append(m.cidrs, ipNet)
|
m.cidrs = append(m.cidrs, &ipNet)
|
||||||
} else {
|
} else {
|
||||||
ip := net.ParseIP(str)
|
ipAddr, err := netip.ParseAddr(str)
|
||||||
if ip == nil {
|
if err != nil {
|
||||||
return fmt.Errorf("invalid IP address: %s", str)
|
return fmt.Errorf("invalid IP address: '%s': %v", str, err)
|
||||||
}
|
}
|
||||||
mask := len(ip) * 8
|
ipNew := netip.PrefixFrom(ipAddr, ipAddr.BitLen())
|
||||||
m.cidrs = append(m.cidrs, &net.IPNet{
|
m.cidrs = append(m.cidrs, &ipNew)
|
||||||
IP: ip,
|
|
||||||
Mask: net.CIDRMask(mask, mask),
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
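
A small, assumed example of the net/netip calls the rewritten Provision relies on: ParsePrefix for CIDR expressions and PrefixFrom(addr, addr.BitLen()) to turn a single address into a host-length prefix.

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// CIDR expressions parse directly into a Prefix.
	cidr, _ := netip.ParsePrefix("192.168.0.0/16")

	// A bare address becomes a /32 (or /128 for IPv6) prefix.
	addr, _ := netip.ParseAddr("10.0.0.1")
	single := netip.PrefixFrom(addr, addr.BitLen())

	client, _ := netip.ParseAddr("192.168.5.9")
	fmt.Println(cidr.Contains(client), single.Contains(client)) // true false
}
```
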
|
||||||
|
|
||||||
func (m MatchRemoteIP) getClientIP(r *http.Request) (net.IP, string, error) {
|
func (m MatchRemoteIP) getClientIP(r *http.Request) (netip.Addr, string, error) {
|
||||||
remote := r.RemoteAddr
|
remote := r.RemoteAddr
|
||||||
zoneID := ""
|
zoneID := ""
|
||||||
if m.Forwarded {
|
if m.Forwarded {
|
||||||
|
@ -1214,11 +1374,11 @@ func (m MatchRemoteIP) getClientIP(r *http.Request) (net.IP, string, error) {
|
||||||
ipStr = split[0]
|
ipStr = split[0]
|
||||||
zoneID = split[1]
|
zoneID = split[1]
|
||||||
}
|
}
|
||||||
ip := net.ParseIP(ipStr)
|
ipAddr, err := netip.ParseAddr(ipStr)
|
||||||
if ip == nil {
|
if err != nil {
|
||||||
return nil, zoneID, fmt.Errorf("invalid client IP address: %s", ipStr)
|
return netip.IPv4Unspecified(), "", err
|
||||||
}
|
}
|
||||||
return ip, zoneID, nil
|
return ipAddr, zoneID, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Match returns true if r matches m.
|
// Match returns true if r matches m.
|
||||||
|
@ -1319,6 +1479,13 @@ func (mre *MatchRegexp) Match(input string, repl *caddy.Replacer) bool {
|
||||||
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
|
||||||
func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||||
for d.Next() {
|
for d.Next() {
|
||||||
|
// If this is the second iteration of the loop
|
||||||
|
// then there's more than one path_regexp matcher
|
||||||
|
// and we would end up overwriting the old one
|
||||||
|
if mre.Pattern != "" {
|
||||||
|
return d.Err("regular expression can only be used once per named matcher")
|
||||||
|
}
|
||||||
|
|
||||||
args := d.RemainingArgs()
|
args := d.RemainingArgs()
|
||||||
switch len(args) {
|
switch len(args) {
|
||||||
case 1:
|
case 1:
|
||||||
|
@ -1336,6 +1503,56 @@ func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ParseCaddyfileNestedMatcherSet parses the Caddyfile tokens for a nested
|
||||||
|
// matcher set, and returns its raw module map value.
|
||||||
|
func ParseCaddyfileNestedMatcherSet(d *caddyfile.Dispenser) (caddy.ModuleMap, error) {
|
||||||
|
matcherMap := make(map[string]RequestMatcher)
|
||||||
|
|
||||||
|
// in case there are multiple instances of the same matcher, concatenate
|
||||||
|
// their tokens (we expect that UnmarshalCaddyfile should be able to
|
||||||
|
// handle more than one segment); otherwise, we'd overwrite other
|
||||||
|
// instances of the matcher in this set
|
||||||
|
tokensByMatcherName := make(map[string][]caddyfile.Token)
|
||||||
|
for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
|
||||||
|
matcherName := d.Val()
|
||||||
|
tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
|
||||||
|
}
|
||||||
|
|
||||||
|
for matcherName, tokens := range tokensByMatcherName {
|
||||||
|
mod, err := caddy.GetModule("http.matchers." + matcherName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, d.Errf("getting matcher module '%s': %v", matcherName, err)
|
||||||
|
}
|
||||||
|
unm, ok := mod.New().(caddyfile.Unmarshaler)
|
||||||
|
if !ok {
|
||||||
|
return nil, d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
|
||||||
|
}
|
||||||
|
err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
rm, ok := unm.(RequestMatcher)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
|
||||||
|
}
|
||||||
|
matcherMap[matcherName] = rm
|
||||||
|
}
|
||||||
|
|
||||||
|
// we should now have a functional matcher, but we also
|
||||||
|
// need to be able to marshal as JSON, otherwise config
|
||||||
|
// adaptation will be missing the matchers!
|
||||||
|
matcherSet := make(caddy.ModuleMap)
|
||||||
|
for name, matcher := range matcherMap {
|
||||||
|
jsonBytes, err := json.Marshal(matcher)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("marshaling %T matcher: %v", matcher, err)
|
||||||
|
}
|
||||||
|
matcherSet[name] = jsonBytes
|
||||||
|
}
|
||||||
|
|
||||||
|
return matcherSet, nil
|
||||||
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
wordRE = regexp.MustCompile(`\w+`)
|
wordRE = regexp.MustCompile(`\w+`)
|
||||||
)
|
)
|
||||||
|
|
|
@ -158,9 +158,10 @@ func TestHostMatcher(t *testing.T) {
|
||||||
|
|
||||||
func TestPathMatcher(t *testing.T) {
|
func TestPathMatcher(t *testing.T) {
|
||||||
for i, tc := range []struct {
|
for i, tc := range []struct {
|
||||||
match MatchPath
|
match MatchPath // not URI-encoded because not parsing from a URI
|
||||||
input string
|
input string // should be valid URI encoding (escaped) since it will become part of a request
|
||||||
expect bool
|
expect bool
|
||||||
|
provisionErr bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
match: MatchPath{},
|
match: MatchPath{},
|
||||||
|
@ -252,6 +253,11 @@ func TestPathMatcher(t *testing.T) {
|
||||||
input: "/FOOOO",
|
input: "/FOOOO",
|
||||||
expect: true,
|
expect: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"*.php"},
|
||||||
|
input: "/foo/index.php. .",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
match: MatchPath{"/foo/bar.txt"},
|
match: MatchPath{"/foo/bar.txt"},
|
||||||
input: "/foo/BAR.txt",
|
input: "/foo/BAR.txt",
|
||||||
|
@ -263,10 +269,60 @@ func TestPathMatcher(t *testing.T) {
|
||||||
expect: true,
|
expect: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
match: MatchPath{"/foo*"},
|
match: MatchPath{"/foo"},
|
||||||
input: "//foo",
|
input: "//foo",
|
||||||
expect: true,
|
expect: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"//foo"},
|
||||||
|
input: "/foo",
|
||||||
|
expect: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"//foo"},
|
||||||
|
input: "//foo",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/foo//*"},
|
||||||
|
input: "/foo//bar",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/foo//*"},
|
||||||
|
input: "/foo/%2Fbar",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/foo/%2F*"},
|
||||||
|
input: "/foo/%2Fbar",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/foo/%2F*"},
|
||||||
|
input: "/foo//bar",
|
||||||
|
expect: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/foo//bar"},
|
||||||
|
input: "/foo//bar",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/foo/*//bar"},
|
||||||
|
input: "/foo///bar",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/foo/%*//bar"},
|
||||||
|
input: "/foo///bar",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/foo/%*//bar"},
|
||||||
|
input: "/foo//%2Fbar",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
match: MatchPath{"/foo*"},
|
match: MatchPath{"/foo*"},
|
||||||
input: "/%2F/foo",
|
input: "/%2F/foo",
|
||||||
|
@ -292,8 +348,79 @@ func TestPathMatcher(t *testing.T) {
|
||||||
input: "/foo/bar",
|
input: "/foo/bar",
|
||||||
expect: true,
|
expect: true,
|
||||||
},
|
},
|
||||||
|
// notice these next three test cases are the same normalized path but are written differently
|
||||||
|
{
|
||||||
|
match: MatchPath{"/%25@.txt"},
|
||||||
|
input: "/%25@.txt",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/%25@.txt"},
|
||||||
|
input: "/%25%40.txt",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/%25%40.txt"},
|
||||||
|
input: "/%25%40.txt",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/bands/*/*"},
|
||||||
|
input: "/bands/AC%2FDC/T.N.T",
|
||||||
|
expect: false, // because * operates in normalized space
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/bands/%*/%*"},
|
||||||
|
input: "/bands/AC%2FDC/T.N.T",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/bands/%*/%*"},
|
||||||
|
input: "/bands/AC/DC/T.N.T",
|
||||||
|
expect: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/bands/%*"},
|
||||||
|
input: "/bands/AC/DC",
|
||||||
|
expect: false, // not a suffix match
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/bands/%*"},
|
||||||
|
input: "/bands/AC%2FDC",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/foo%2fbar/baz"},
|
||||||
|
input: "/foo%2Fbar/baz",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/foo%2fbar/baz"},
|
||||||
|
input: "/foo/bar/baz",
|
||||||
|
expect: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPath{"/foo/bar/baz"},
|
||||||
|
input: "/foo%2fbar/baz",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
} {
|
} {
|
||||||
req := &http.Request{URL: &url.URL{Path: tc.input}}
|
err := tc.match.Provision(caddy.Context{})
|
||||||
|
if err == nil && tc.provisionErr {
|
||||||
|
t.Errorf("Test %d %v: Expected error provisioning, but there was no error", i, tc.match)
|
||||||
|
}
|
||||||
|
if err != nil && !tc.provisionErr {
|
||||||
|
t.Errorf("Test %d %v: Expected no error provisioning, but there was an error: %v", i, tc.match, err)
|
||||||
|
}
|
||||||
|
if tc.provisionErr {
|
||||||
|
continue // if it's not supposed to provision properly, pointless to test it
|
||||||
|
}
|
||||||
|
|
||||||
|
u, err := url.ParseRequestURI(tc.input)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test %d (%v): Invalid request URI (should be rejected by Go's HTTP server): %v", i, tc.input, err)
|
||||||
|
}
|
||||||
|
req := &http.Request{URL: u}
|
||||||
repl := caddy.NewReplacer()
|
repl := caddy.NewReplacer()
|
||||||
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
|
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
|
||||||
req = req.WithContext(ctx)
|
req = req.WithContext(ctx)
|
||||||
|
@ -387,6 +514,16 @@ func TestPathREMatcher(t *testing.T) {
|
||||||
expect: true,
|
expect: true,
|
||||||
expectRepl: map[string]string{"name.myparam": "bar"},
|
expectRepl: map[string]string{"name.myparam": "bar"},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
match: MatchPathRE{MatchRegexp{Pattern: "^/%@.txt"}},
|
||||||
|
input: "/%25@.txt",
|
||||||
|
expect: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
match: MatchPathRE{MatchRegexp{Pattern: "^/%25@.txt"}},
|
||||||
|
input: "/%25@.txt",
|
||||||
|
expect: false,
|
||||||
|
},
|
||||||
} {
|
} {
|
||||||
// compile the regexp and validate its name
|
// compile the regexp and validate its name
|
||||||
err := tc.match.Provision(caddy.Context{})
|
err := tc.match.Provision(caddy.Context{})
|
||||||
|
@ -401,7 +538,11 @@ func TestPathREMatcher(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// set up the fake request and its Replacer
|
// set up the fake request and its Replacer
|
||||||
req := &http.Request{URL: &url.URL{Path: tc.input}}
|
u, err := url.ParseRequestURI(tc.input)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test %d: Bad input URI: %v", i, err)
|
||||||
|
}
|
||||||
|
req := &http.Request{URL: u}
|
||||||
repl := caddy.NewReplacer()
|
repl := caddy.NewReplacer()
|
||||||
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
|
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
|
||||||
req = req.WithContext(ctx)
|
req = req.WithContext(ctx)
|
||||||
|
@ -807,7 +948,7 @@ func TestVarREMatcher(t *testing.T) {
|
||||||
req := &http.Request{URL: new(url.URL), Method: http.MethodGet}
|
req := &http.Request{URL: new(url.URL), Method: http.MethodGet}
|
||||||
repl := caddy.NewReplacer()
|
repl := caddy.NewReplacer()
|
||||||
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
|
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
|
||||||
ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]interface{}))
|
ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]any))
|
||||||
req = req.WithContext(ctx)
|
req = req.WithContext(ctx)
|
||||||
|
|
||||||
addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
|
addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
|
||||||
|
|
|
@ -29,10 +29,24 @@ func init() {
|
||||||
caddy.RegisterModule(Handler{})
|
caddy.RegisterModule(Handler{})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handler is a middleware for manipulating the request body.
|
// Handler is a middleware for HTTP/2 server push. Note that
|
||||||
|
// HTTP/2 server push has been deprecated by some clients and
|
||||||
|
// its use is discouraged unless you can accurately predict
|
||||||
|
// which resources actually need to be pushed to the client;
|
||||||
|
// it can be difficult to know what the client already has
|
||||||
|
// cached. Pushing unnecessary resources results in worse
|
||||||
|
// performance. Consider using HTTP 103 Early Hints instead.
|
||||||
|
//
|
||||||
|
// This handler supports pushing from Link headers; in other
|
||||||
|
// words, if the eventual response has Link headers, this
|
||||||
|
// handler will push the resources indicated by those headers,
|
||||||
|
// even without specifying any resources in its config.
|
||||||
type Handler struct {
|
type Handler struct {
|
||||||
Resources []Resource `json:"resources,omitempty"`
|
// The resources to push.
|
||||||
Headers *HeaderConfig `json:"headers,omitempty"`
|
Resources []Resource `json:"resources,omitempty"`
|
||||||
|
|
||||||
|
// Headers to modify for the push requests.
|
||||||
|
Headers *HeaderConfig `json:"headers,omitempty"`
|
||||||
|
|
||||||
logger *zap.Logger
|
logger *zap.Logger
|
||||||
}
|
}
|
||||||
|
|
|
@ -52,17 +52,16 @@ func parseLinkHeader(header string) []linkResource {
|
||||||
l.uri = strings.TrimSpace(link[li+1 : ri])
|
l.uri = strings.TrimSpace(link[li+1 : ri])
|
||||||
|
|
||||||
for _, param := range strings.Split(strings.TrimSpace(link[ri+1:]), semicolon) {
|
for _, param := range strings.Split(strings.TrimSpace(link[ri+1:]), semicolon) {
|
||||||
parts := strings.SplitN(strings.TrimSpace(param), equal, 2)
|
before, after, isCut := strings.Cut(strings.TrimSpace(param), equal)
|
||||||
key := strings.TrimSpace(parts[0])
|
key := strings.TrimSpace(before)
|
||||||
if key == "" {
|
if key == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if len(parts) == 1 {
|
if isCut {
|
||||||
|
l.params[key] = strings.TrimSpace(after)
|
||||||
|
} else {
|
||||||
l.params[key] = key
|
l.params[key] = key
|
||||||
}
|
}
|
||||||
if len(parts) == 2 {
|
|
||||||
l.params[key] = strings.TrimSpace(parts[1])
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resources = append(resources, l)
|
resources = append(resources, l)
|
||||||
|
|
|
@ -57,7 +57,7 @@ func addHTTPVarsToReplacer(repl *caddy.Replacer, req *http.Request, w http.Respo
|
||||||
SetVar(req.Context(), "start_time", time.Now())
|
SetVar(req.Context(), "start_time", time.Now())
|
||||||
SetVar(req.Context(), "uuid", new(requestID))
|
SetVar(req.Context(), "uuid", new(requestID))
|
||||||
|
|
||||||
httpVars := func(key string) (interface{}, bool) {
|
httpVars := func(key string) (any, bool) {
|
||||||
if req != nil {
|
if req != nil {
|
||||||
// query string parameters
|
// query string parameters
|
||||||
if strings.HasPrefix(key, reqURIQueryReplPrefix) {
|
if strings.HasPrefix(key, reqURIQueryReplPrefix) {
|
||||||
|
@ -143,6 +143,10 @@ func addHTTPVarsToReplacer(repl *caddy.Replacer, req *http.Request, w http.Respo
|
||||||
case "http.request.uri.path.dir":
|
case "http.request.uri.path.dir":
|
||||||
dir, _ := path.Split(req.URL.Path)
|
dir, _ := path.Split(req.URL.Path)
|
||||||
return dir, true
|
return dir, true
|
||||||
|
case "http.request.uri.path.file.base":
|
||||||
|
return strings.TrimSuffix(path.Base(req.URL.Path), path.Ext(req.URL.Path)), true
|
||||||
|
case "http.request.uri.path.file.ext":
|
||||||
|
return path.Ext(req.URL.Path), true
|
||||||
case "http.request.uri.query":
|
case "http.request.uri.query":
|
||||||
return req.URL.RawQuery, true
|
return req.URL.RawQuery, true
|
||||||
case "http.request.duration":
|
case "http.request.duration":
|
||||||
|
@ -169,7 +173,7 @@ func addHTTPVarsToReplacer(repl *caddy.Replacer, req *http.Request, w http.Respo
|
||||||
req.Body = io.NopCloser(buf) // replace real body with buffered data
|
req.Body = io.NopCloser(buf) // replace real body with buffered data
|
||||||
return buf.String(), true
|
return buf.String(), true
|
||||||
|
|
||||||
// original request, before any internal changes
|
// original request, before any internal changes
|
||||||
case "http.request.orig_method":
|
case "http.request.orig_method":
|
||||||
or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
|
or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
|
||||||
return or.Method, true
|
return or.Method, true
|
||||||
|
@ -233,7 +237,7 @@ func addHTTPVarsToReplacer(repl *caddy.Replacer, req *http.Request, w http.Respo
|
||||||
// middleware variables
|
// middleware variables
|
||||||
if strings.HasPrefix(key, varsReplPrefix) {
|
if strings.HasPrefix(key, varsReplPrefix) {
|
||||||
varName := key[len(varsReplPrefix):]
|
varName := key[len(varsReplPrefix):]
|
||||||
tbl := req.Context().Value(VarsCtxKey).(map[string]interface{})
|
tbl := req.Context().Value(VarsCtxKey).(map[string]any)
|
||||||
raw := tbl[varName]
|
raw := tbl[varName]
|
||||||
// variables can be dynamic, so always return true
|
// variables can be dynamic, so always return true
|
||||||
// even when it may not be set; treat as empty then
|
// even when it may not be set; treat as empty then
|
||||||
|
@ -252,13 +256,29 @@ func addHTTPVarsToReplacer(repl *caddy.Replacer, req *http.Request, w http.Respo
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case key == "http.shutting_down":
|
||||||
|
server := req.Context().Value(ServerCtxKey).(*Server)
|
||||||
|
server.shutdownAtMu.RLock()
|
||||||
|
defer server.shutdownAtMu.RUnlock()
|
||||||
|
return !server.shutdownAt.IsZero(), true
|
||||||
|
case key == "http.time_until_shutdown":
|
||||||
|
server := req.Context().Value(ServerCtxKey).(*Server)
|
||||||
|
server.shutdownAtMu.RLock()
|
||||||
|
defer server.shutdownAtMu.RUnlock()
|
||||||
|
if server.shutdownAt.IsZero() {
|
||||||
|
return nil, true
|
||||||
|
}
|
||||||
|
return time.Until(server.shutdownAt), true
|
||||||
|
}
|
||||||
|
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
|
|
||||||
repl.Map(httpVars)
|
repl.Map(httpVars)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getReqTLSReplacement(req *http.Request, key string) (interface{}, bool) {
|
func getReqTLSReplacement(req *http.Request, key string) (any, bool) {
|
||||||
if req == nil || req.TLS == nil {
|
if req == nil || req.TLS == nil {
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
|
@ -279,7 +299,7 @@ func getReqTLSReplacement(req *http.Request, key string) (interface{}, bool) {
|
||||||
if strings.HasPrefix(field, "client.san.") {
|
if strings.HasPrefix(field, "client.san.") {
|
||||||
field = field[len("client.san."):]
|
field = field[len("client.san."):]
|
||||||
var fieldName string
|
var fieldName string
|
||||||
var fieldValue interface{}
|
var fieldValue any
|
||||||
switch {
|
switch {
|
||||||
case strings.HasPrefix(field, "dns_names"):
|
case strings.HasPrefix(field, "dns_names"):
|
||||||
fieldName = "dns_names"
|
fieldName = "dns_names"
|
||||||
|
@ -383,7 +403,7 @@ func getReqTLSReplacement(req *http.Request, key string) (interface{}, bool) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// marshalPublicKey returns the byte encoding of pubKey.
|
// marshalPublicKey returns the byte encoding of pubKey.
|
||||||
func marshalPublicKey(pubKey interface{}) ([]byte, error) {
|
func marshalPublicKey(pubKey any) ([]byte, error) {
|
||||||
switch key := pubKey.(type) {
|
switch key := pubKey.(type) {
|
||||||
case *rsa.PublicKey:
|
case *rsa.PublicKey:
|
||||||
return asn1.Marshal(key)
|
return asn1.Marshal(key)
|
||||||
|
|
|
@ -27,7 +27,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestHTTPVarReplacement(t *testing.T) {
|
func TestHTTPVarReplacement(t *testing.T) {
|
||||||
req, _ := http.NewRequest("GET", "/", nil)
|
req, _ := http.NewRequest(http.MethodGet, "/foo/bar.tar.gz", nil)
|
||||||
repl := caddy.NewReplacer()
|
repl := caddy.NewReplacer()
|
||||||
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
|
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
|
||||||
req = req.WithContext(ctx)
|
req = req.WithContext(ctx)
|
||||||
|
@ -72,114 +72,134 @@ eqp31wM9il1n+guTNyxJd+FzVAH+hCZE5K+tCgVDdVFUlDEHHbS/wqb2PSIoouLV
|
||||||
addHTTPVarsToReplacer(repl, req, res)
|
addHTTPVarsToReplacer(repl, req, res)
|
||||||
|
|
||||||
for i, tc := range []struct {
|
for i, tc := range []struct {
|
||||||
input string
|
get string
|
||||||
expect string
|
expect string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
input: "{http.request.scheme}",
|
get: "http.request.scheme",
|
||||||
expect: "https",
|
expect: "https",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.host}",
|
get: "http.request.method",
|
||||||
|
expect: http.MethodGet,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
get: "http.request.host",
|
||||||
expect: "example.com",
|
expect: "example.com",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.port}",
|
get: "http.request.port",
|
||||||
expect: "80",
|
expect: "80",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.hostport}",
|
get: "http.request.hostport",
|
||||||
expect: "example.com:80",
|
expect: "example.com:80",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.remote.host}",
|
get: "http.request.remote.host",
|
||||||
expect: "localhost",
|
expect: "localhost",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.remote.port}",
|
get: "http.request.remote.port",
|
||||||
expect: "1234",
|
expect: "1234",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.host.labels.0}",
|
get: "http.request.host.labels.0",
|
||||||
expect: "com",
|
expect: "com",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.host.labels.1}",
|
get: "http.request.host.labels.1",
|
||||||
expect: "example",
|
expect: "example",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.host.labels.2}",
|
get: "http.request.host.labels.2",
|
||||||
expect: "<empty>",
|
expect: "",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.cipher_suite}",
|
get: "http.request.uri.path.file",
|
||||||
|
expect: "bar.tar.gz",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
get: "http.request.uri.path.file.base",
|
||||||
|
expect: "bar.tar",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// not ideal, but also most correct, given that files can have dots (example: index.<SHA>.html); TODO: maybe this isn't right...
|
||||||
|
get: "http.request.uri.path.file.ext",
|
||||||
|
expect: ".gz",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
get: "http.request.tls.cipher_suite",
|
||||||
expect: "TLS_AES_256_GCM_SHA384",
|
expect: "TLS_AES_256_GCM_SHA384",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.proto}",
|
get: "http.request.tls.proto",
|
||||||
expect: "h2",
|
expect: "h2",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.proto_mutual}",
|
get: "http.request.tls.proto_mutual",
|
||||||
expect: "true",
|
expect: "true",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.resumed}",
|
get: "http.request.tls.resumed",
|
||||||
expect: "false",
|
expect: "false",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.server_name}",
|
get: "http.request.tls.server_name",
|
||||||
expect: "foo.com",
|
expect: "foo.com",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.version}",
|
get: "http.request.tls.version",
|
||||||
expect: "tls1.3",
|
expect: "tls1.3",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.client.fingerprint}",
|
get: "http.request.tls.client.fingerprint",
|
||||||
expect: "9f57b7b497cceacc5459b76ac1c3afedbc12b300e728071f55f84168ff0f7702",
|
expect: "9f57b7b497cceacc5459b76ac1c3afedbc12b300e728071f55f84168ff0f7702",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.client.issuer}",
|
get: "http.request.tls.client.issuer",
|
||||||
expect: "CN=Caddy Test CA",
|
expect: "CN=Caddy Test CA",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.client.serial}",
|
get: "http.request.tls.client.serial",
|
||||||
expect: "2",
|
expect: "2",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.client.subject}",
|
get: "http.request.tls.client.subject",
|
||||||
expect: "CN=client.localdomain",
|
expect: "CN=client.localdomain",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.client.san.dns_names}",
|
get: "http.request.tls.client.san.dns_names",
|
||||||
expect: "[localhost]",
|
expect: "[localhost]",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.client.san.dns_names.0}",
|
get: "http.request.tls.client.san.dns_names.0",
|
||||||
expect: "localhost",
|
expect: "localhost",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.client.san.dns_names.1}",
|
get: "http.request.tls.client.san.dns_names.1",
|
||||||
expect: "<empty>",
|
expect: "",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.client.san.ips}",
|
get: "http.request.tls.client.san.ips",
|
||||||
expect: "[127.0.0.1]",
|
expect: "[127.0.0.1]",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.client.san.ips.0}",
|
get: "http.request.tls.client.san.ips.0",
|
||||||
expect: "127.0.0.1",
|
expect: "127.0.0.1",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "{http.request.tls.client.certificate_pem}",
|
get: "http.request.tls.client.certificate_pem",
|
||||||
expect: string(clientCert) + "\n", // returned value comes with a newline appended to it
|
expect: string(clientCert) + "\n", // returned value comes with a newline appended to it
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
actual := repl.ReplaceAll(tc.input, "<empty>")
|
actual, got := repl.GetString(tc.get)
|
||||||
|
if !got {
|
||||||
|
t.Errorf("Test %d: Expected to recognize the placeholder name, but didn't", i)
|
||||||
|
}
|
||||||
if actual != tc.expect {
|
if actual != tc.expect {
|
||||||
t.Errorf("Test %d: Expected placeholder %s to be '%s' but got '%s'",
|
t.Errorf("Test %d: Expected %s to be '%s' but got '%s'",
|
||||||
i, tc.input, tc.expect, actual)
|
i, tc.get, tc.expect, actual)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -62,6 +62,16 @@ func (rww *ResponseWriterWrapper) Push(target string, opts *http.PushOptions) er
|
||||||
return ErrNotImplemented
|
return ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ReadFrom implements io.ReaderFrom. It simply calls the underlying
|
||||||
|
// ResponseWriter's ReadFrom method if there is one, otherwise it defaults
|
||||||
|
// to io.Copy.
|
||||||
|
func (rww *ResponseWriterWrapper) ReadFrom(r io.Reader) (n int64, err error) {
|
||||||
|
if rf, ok := rww.ResponseWriter.(io.ReaderFrom); ok {
|
||||||
|
return rf.ReadFrom(r)
|
||||||
|
}
|
||||||
|
return io.Copy(rww.ResponseWriter, r)
|
||||||
|
}
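
A sketch (assuming hypothetical sink and onlyWriter types) of why forwarding ReadFrom matters: io.Copy checks whether the destination implements io.ReaderFrom and, if so, hands the whole transfer to it, which can enable faster copies (e.g. sendfile) in the underlying writer.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// sink is a hypothetical destination implementing io.ReaderFrom, standing in
// for a ResponseWriter that can stream efficiently.
type sink struct{ usedReadFrom bool }

func (s *sink) Write(p []byte) (int, error) { return len(p), nil }

// ReadFrom records that the optimized path was taken, then falls back to a
// plain copy loop internally.
func (s *sink) ReadFrom(r io.Reader) (int64, error) {
	s.usedReadFrom = true
	return io.Copy(onlyWriter{s}, r)
}

// onlyWriter hides ReadFrom so the inner io.Copy cannot recurse into it.
type onlyWriter struct{ io.Writer }

func main() {
	dst := &sink{}
	// Hide WriteTo on the source so io.Copy must consult dst's ReadFrom
	// (the new tests in this diff use the same trick).
	src := struct{ io.Reader }{strings.NewReader("hello")}
	_, _ = io.Copy(dst, src)
	fmt.Println(dst.usedReadFrom) // true: io.Copy preferred the destination's ReadFrom
}
```
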
|
||||||
|
|
||||||
// HTTPInterfaces mix all the interfaces that middleware ResponseWriters need to support.
|
// HTTPInterfaces mix all the interfaces that middleware ResponseWriters need to support.
|
||||||
type HTTPInterfaces interface {
|
type HTTPInterfaces interface {
|
||||||
http.ResponseWriter
|
http.ResponseWriter
|
||||||
|
@ -111,15 +121,15 @@ type responseRecorder struct {
|
||||||
//
|
//
|
||||||
// Proper usage of a recorder looks like this:
|
// Proper usage of a recorder looks like this:
|
||||||
//
|
//
|
||||||
// rec := caddyhttp.NewResponseRecorder(w, buf, shouldBuffer)
|
// rec := caddyhttp.NewResponseRecorder(w, buf, shouldBuffer)
|
||||||
// err := next.ServeHTTP(rec, req)
|
// err := next.ServeHTTP(rec, req)
|
||||||
// if err != nil {
|
// if err != nil {
|
||||||
// return err
|
// return err
|
||||||
// }
|
// }
|
||||||
// if !rec.Buffered() {
|
// if !rec.Buffered() {
|
||||||
// return nil
|
// return nil
|
||||||
// }
|
// }
|
||||||
// // process the buffered response here
|
// // process the buffered response here
|
||||||
//
|
//
|
||||||
// The header map is not buffered; i.e. the ResponseRecorder's Header()
|
// The header map is not buffered; i.e. the ResponseRecorder's Header()
|
||||||
// method returns the same header map of the underlying ResponseWriter.
|
// method returns the same header map of the underlying ResponseWriter.
|
||||||
|
@ -129,7 +139,7 @@ type responseRecorder struct {
|
||||||
// Once you are ready to write the response, there are two ways you can
|
// Once you are ready to write the response, there are two ways you can
|
||||||
// do it. The easier way is to have the recorder do it:
|
// do it. The easier way is to have the recorder do it:
|
||||||
//
|
//
|
||||||
// rec.WriteResponse()
|
// rec.WriteResponse()
|
||||||
//
|
//
|
||||||
// This writes the recorded response headers as well as the buffered body.
|
// This writes the recorded response headers as well as the buffered body.
|
||||||
// Or, you may wish to do it yourself, especially if you manipulated the
|
// Or, you may wish to do it yourself, especially if you manipulated the
|
||||||
|
@ -138,9 +148,12 @@ type responseRecorder struct {
|
||||||
// recorder's body buffer, but you might have your own body to write
|
// recorder's body buffer, but you might have your own body to write
|
||||||
// instead):
|
// instead):
|
||||||
//
|
//
|
||||||
// w.WriteHeader(rec.Status())
|
// w.WriteHeader(rec.Status())
|
||||||
// io.Copy(w, rec.Buffer())
|
// io.Copy(w, rec.Buffer())
|
||||||
//
|
//
|
||||||
|
// As a special case, 1xx responses are not buffered nor recorded
|
||||||
|
// because they are not the final response; they are passed through
|
||||||
|
// directly to the underlying ResponseWriter.
|
||||||
func NewResponseRecorder(w http.ResponseWriter, buf *bytes.Buffer, shouldBuffer ShouldBufferFunc) ResponseRecorder {
|
func NewResponseRecorder(w http.ResponseWriter, buf *bytes.Buffer, shouldBuffer ShouldBufferFunc) ResponseRecorder {
|
||||||
return &responseRecorder{
|
return &responseRecorder{
|
||||||
ResponseWriterWrapper: &ResponseWriterWrapper{ResponseWriter: w},
|
ResponseWriterWrapper: &ResponseWriterWrapper{ResponseWriter: w},
|
||||||
|
@ -149,22 +162,29 @@ func NewResponseRecorder(w http.ResponseWriter, buf *bytes.Buffer, shouldBuffer
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WriteHeader writes the headers with statusCode to the wrapped
|
||||||
|
// ResponseWriter unless the response is to be buffered instead.
|
||||||
|
// 1xx responses are never buffered.
|
||||||
func (rr *responseRecorder) WriteHeader(statusCode int) {
|
func (rr *responseRecorder) WriteHeader(statusCode int) {
|
||||||
if rr.wroteHeader {
|
if rr.wroteHeader {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
rr.statusCode = statusCode
|
|
||||||
rr.wroteHeader = true
|
|
||||||
|
|
||||||
// decide whether we should buffer the response
|
// 1xx responses aren't final; just informational
|
||||||
if rr.shouldBuffer == nil {
|
if statusCode < 100 || statusCode > 199 {
|
||||||
rr.stream = true
|
rr.statusCode = statusCode
|
||||||
} else {
|
rr.wroteHeader = true
|
||||||
rr.stream = !rr.shouldBuffer(rr.statusCode, rr.ResponseWriterWrapper.Header())
|
|
||||||
|
// decide whether we should buffer the response
|
||||||
|
if rr.shouldBuffer == nil {
|
||||||
|
rr.stream = true
|
||||||
|
} else {
|
||||||
|
rr.stream = !rr.shouldBuffer(rr.statusCode, rr.ResponseWriterWrapper.Header())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// if not buffered, immediately write header
|
// if informational or not buffered, immediately write header
|
||||||
if rr.stream {
|
if rr.stream || (100 <= statusCode && statusCode <= 199) {
|
||||||
rr.ResponseWriterWrapper.WriteHeader(rr.statusCode)
|
rr.ResponseWriterWrapper.WriteHeader(rr.statusCode)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -178,9 +198,26 @@ func (rr *responseRecorder) Write(data []byte) (int, error) {
|
||||||
} else {
|
} else {
|
||||||
n, err = rr.buf.Write(data)
|
n, err = rr.buf.Write(data)
|
||||||
}
|
}
|
||||||
if err == nil {
|
|
||||||
rr.size += n
|
rr.size += n
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rr *responseRecorder) ReadFrom(r io.Reader) (int64, error) {
|
||||||
|
rr.WriteHeader(http.StatusOK)
|
||||||
|
var n int64
|
||||||
|
var err error
|
||||||
|
if rr.stream {
|
||||||
|
if rf, ok := rr.ResponseWriter.(io.ReaderFrom); ok {
|
||||||
|
n, err = rf.ReadFrom(r)
|
||||||
|
} else {
|
||||||
|
n, err = io.Copy(rr.ResponseWriter, r)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
n, err = rr.buf.ReadFrom(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rr.size += int(n)
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -241,4 +278,10 @@ type ShouldBufferFunc func(status int, header http.Header) bool
|
||||||
var (
|
var (
|
||||||
_ HTTPInterfaces = (*ResponseWriterWrapper)(nil)
|
_ HTTPInterfaces = (*ResponseWriterWrapper)(nil)
|
||||||
_ ResponseRecorder = (*responseRecorder)(nil)
|
_ ResponseRecorder = (*responseRecorder)(nil)
|
||||||
|
|
||||||
|
// Implementing ReaderFrom can be such a significant
|
||||||
|
// optimization that it should probably be required!
|
||||||
|
// see PR #5022 (25%-50% speedup)
|
||||||
|
_ io.ReaderFrom = (*ResponseWriterWrapper)(nil)
|
||||||
|
_ io.ReaderFrom = (*responseRecorder)(nil)
|
||||||
)
|
)
|
||||||
|
|
165
modules/caddyhttp/responsewriter_test.go
Normal file
|
@ -0,0 +1,165 @@
|
||||||
|
package caddyhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
type responseWriterSpy interface {
|
||||||
|
http.ResponseWriter
|
||||||
|
Written() string
|
||||||
|
CalledReadFrom() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ responseWriterSpy = (*baseRespWriter)(nil)
|
||||||
|
_ responseWriterSpy = (*readFromRespWriter)(nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
// a barebones http.ResponseWriter mock
|
||||||
|
type baseRespWriter []byte
|
||||||
|
|
||||||
|
func (brw *baseRespWriter) Write(d []byte) (int, error) {
|
||||||
|
*brw = append(*brw, d...)
|
||||||
|
return len(d), nil
|
||||||
|
}
|
||||||
|
func (brw *baseRespWriter) Header() http.Header { return nil }
|
||||||
|
func (brw *baseRespWriter) WriteHeader(statusCode int) {}
|
||||||
|
func (brw *baseRespWriter) Written() string { return string(*brw) }
|
||||||
|
func (brw *baseRespWriter) CalledReadFrom() bool { return false }
|
||||||
|
|
||||||
|
// an http.ResponseWriter mock that supports ReadFrom
|
||||||
|
type readFromRespWriter struct {
|
||||||
|
baseRespWriter
|
||||||
|
called bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rf *readFromRespWriter) ReadFrom(r io.Reader) (int64, error) {
|
||||||
|
rf.called = true
|
||||||
|
return io.Copy(&rf.baseRespWriter, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rf *readFromRespWriter) CalledReadFrom() bool { return rf.called }
|
||||||
|
|
||||||
|
func TestResponseWriterWrapperReadFrom(t *testing.T) {
|
||||||
|
tests := map[string]struct {
|
||||||
|
responseWriter responseWriterSpy
|
||||||
|
wantReadFrom bool
|
||||||
|
}{
|
||||||
|
"no ReadFrom": {
|
||||||
|
responseWriter: &baseRespWriter{},
|
||||||
|
wantReadFrom: false,
|
||||||
|
},
|
||||||
|
"has ReadFrom": {
|
||||||
|
responseWriter: &readFromRespWriter{},
|
||||||
|
wantReadFrom: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for name, tt := range tests {
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
// what we expect middlewares to do:
|
||||||
|
type myWrapper struct {
|
||||||
|
*ResponseWriterWrapper
|
||||||
|
}
|
||||||
|
|
||||||
|
wrapped := myWrapper{
|
||||||
|
ResponseWriterWrapper: &ResponseWriterWrapper{ResponseWriter: tt.responseWriter},
|
||||||
|
}
|
||||||
|
|
||||||
|
const srcData = "boo!"
|
||||||
|
// hides everything but Read; since strings.Reader implements WriteTo, it would
|
||||||
|
// take precedence over our ReadFrom.
|
||||||
|
src := struct{ io.Reader }{strings.NewReader(srcData)}
|
||||||
|
|
||||||
|
fmt.Println(name)
|
||||||
|
if _, err := io.Copy(wrapped, src); err != nil {
|
||||||
|
t.Errorf("Copy() err = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if got := tt.responseWriter.Written(); got != srcData {
|
||||||
|
t.Errorf("data = %q, want %q", got, srcData)
|
||||||
|
}
|
||||||
|
|
||||||
|
if tt.responseWriter.CalledReadFrom() != tt.wantReadFrom {
|
||||||
|
if tt.wantReadFrom {
|
||||||
|
t.Errorf("ReadFrom() should have been called")
|
||||||
|
} else {
|
||||||
|
t.Errorf("ReadFrom() should not have been called")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestResponseRecorderReadFrom(t *testing.T) {
|
||||||
|
tests := map[string]struct {
|
||||||
|
responseWriter responseWriterSpy
|
||||||
|
shouldBuffer bool
|
||||||
|
wantReadFrom bool
|
||||||
|
}{
|
||||||
|
"buffered plain": {
|
||||||
|
responseWriter: &baseRespWriter{},
|
||||||
|
shouldBuffer: true,
|
||||||
|
wantReadFrom: false,
|
||||||
|
},
|
||||||
|
"streamed plain": {
|
||||||
|
responseWriter: &baseRespWriter{},
|
||||||
|
shouldBuffer: false,
|
||||||
|
wantReadFrom: false,
|
||||||
|
},
|
||||||
|
"buffered ReadFrom": {
|
||||||
|
responseWriter: &readFromRespWriter{},
|
||||||
|
shouldBuffer: true,
|
||||||
|
wantReadFrom: false,
|
||||||
|
},
|
||||||
|
"streamed ReadFrom": {
|
||||||
|
responseWriter: &readFromRespWriter{},
|
||||||
|
shouldBuffer: false,
|
||||||
|
wantReadFrom: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for name, tt := range tests {
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
|
||||||
|
rr := NewResponseRecorder(tt.responseWriter, &buf, func(status int, header http.Header) bool {
|
||||||
|
return tt.shouldBuffer
|
||||||
|
})
|
||||||
|
|
||||||
|
const srcData = "boo!"
|
||||||
|
// hides everything but Read, since strings.Reader implements WriteTo it would
|
||||||
|
// take precedence over our ReadFrom.
|
||||||
|
src := struct{ io.Reader }{strings.NewReader(srcData)}
|
||||||
|
|
||||||
|
if _, err := io.Copy(rr, src); err != nil {
|
||||||
|
t.Errorf("Copy() err = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
wantStreamed := srcData
|
||||||
|
wantBuffered := ""
|
||||||
|
if tt.shouldBuffer {
|
||||||
|
wantStreamed = ""
|
||||||
|
wantBuffered = srcData
|
||||||
|
}
|
||||||
|
|
||||||
|
if got := tt.responseWriter.Written(); got != wantStreamed {
|
||||||
|
t.Errorf("streamed data = %q, want %q", got, wantStreamed)
|
||||||
|
}
|
||||||
|
if got := buf.String(); got != wantBuffered {
|
||||||
|
t.Errorf("buffered data = %q, want %q", got, wantBuffered)
|
||||||
|
}
|
||||||
|
|
||||||
|
if tt.responseWriter.CalledReadFrom() != tt.wantReadFrom {
|
||||||
|
if tt.wantReadFrom {
|
||||||
|
t.Errorf("ReadFrom() should have been called")
|
||||||
|
} else {
|
||||||
|
t.Errorf("ReadFrom() should not have been called")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
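The tests above wrap the source reader in `struct{ io.Reader }` because of io.Copy's dispatch order: a source that implements io.WriterTo wins over a destination that implements io.ReaderFrom. A small self-contained illustration of that behavior (not part of the Caddy tests; the dst type is invented):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

type dst struct{ calledReadFrom bool }

func (d *dst) Write(p []byte) (int, error) { return len(p), nil }

func (d *dst) ReadFrom(r io.Reader) (int64, error) {
	d.calledReadFrom = true
	// hide our own ReadFrom so this inner copy doesn't recurse
	return io.Copy(struct{ io.Writer }{d}, r)
}

func main() {
	d1 := &dst{}
	io.Copy(d1, strings.NewReader("boo!")) // strings.Reader has WriteTo, so it wins
	fmt.Println("ReadFrom used:", d1.calledReadFrom) // false

	d2 := &dst{}
	io.Copy(d2, struct{ io.Reader }{strings.NewReader("boo!")}) // WriteTo hidden
	fmt.Println("ReadFrom used:", d2.calledReadFrom) // true
}
```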
@@ -80,9 +80,9 @@ func parseUpstreamDialAddress(upstreamAddr string) (string, string, error) {
 		scheme, host, port = toURL.Scheme, toURL.Hostname(), toURL.Port()
 	} else {
 		// extract network manually, since caddy.ParseNetworkAddress() will always add one
-		if idx := strings.Index(upstreamAddr, "/"); idx >= 0 {
-			network = strings.ToLower(strings.TrimSpace(upstreamAddr[:idx]))
-			upstreamAddr = upstreamAddr[idx+1:]
+		if beforeSlash, afterSlash, slashFound := strings.Cut(upstreamAddr, "/"); slashFound {
+			network = strings.ToLower(strings.TrimSpace(beforeSlash))
+			upstreamAddr = afterSlash
 		}
 		var err error
 		host, port, err = net.SplitHostPort(upstreamAddr)
@@ -96,6 +96,12 @@ func parseUpstreamDialAddress(upstreamAddr string) (string, string, error) {
 		}
 	}

+	// special case network to support both unix and h2c at the same time
+	if network == "unix+h2c" {
+		network = "unix"
+		scheme = "h2c"
+	}
+
 	// for simplest possible config, we only need to include
 	// the network portion if the user specified one
 	if network != "" {
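The hunk above swaps the Index/slice bookkeeping for strings.Cut (Go 1.18+), which returns both halves and a found flag in one call, and then special-cases the `unix+h2c` network prefix. A simplified sketch of the same parsing idiom; the sample addresses are made up and the scheme handling is omitted:

```go
package main

import (
	"fmt"
	"strings"
)

func splitNetwork(upstreamAddr string) (network, rest string) {
	if beforeSlash, afterSlash, slashFound := strings.Cut(upstreamAddr, "/"); slashFound {
		network = strings.ToLower(strings.TrimSpace(beforeSlash))
		rest = afterSlash
	} else {
		rest = upstreamAddr
	}
	// special case: "unix+h2c" selects the unix network (the real code also sets scheme = "h2c")
	if network == "unix+h2c" {
		network = "unix"
	}
	return network, rest
}

func main() {
	for _, addr := range []string{"localhost:8080", "unix//run/php.sock", "unix+h2c//run/h2c.sock"} {
		n, r := splitNetwork(addr)
		fmt.Printf("network=%q rest=%q\n", n, r)
	}
}
```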
@@ -76,7 +76,7 @@ func (adminUpstreams) handleUpstreams(w http.ResponseWriter, r *http.Request) error {
 	// Iterate over the upstream pool (needs to be fast)
 	var rangeErr error
-	hosts.Range(func(key, val interface{}) bool {
+	hosts.Range(func(key, val any) bool {
 		address, ok := key.(string)
 		if !ok {
 			rangeErr = caddy.APIError{
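For context on that change: `any` is the Go 1.18 alias for `interface{}`, so the callback still matches sync.Map's Range signature. A tiny standalone example of the same iterate-and-assert pattern (the pool contents here are invented):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var hosts sync.Map
	hosts.Store("10.0.0.1:80", 3) // address -> active request count (made up)
	hosts.Store("10.0.0.2:80", 7)

	hosts.Range(func(key, val any) bool {
		address, ok := key.(string)
		if !ok {
			return false // stop iterating on an unexpected key type
		}
		fmt.Println(address, "->", val)
		return true // keep iterating
	})
}
```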
@@ -52,71 +52,73 @@ func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
 // UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
 //
 //     reverse_proxy [<matcher>] [<upstreams...>] {
 //         # backends
 //         to <upstreams...>
 //         dynamic <name> [...]
 //
 //         # load balancing
 //         lb_policy <name> [<options...>]
+//         lb_retries <retries>
 //         lb_try_duration <duration>
 //         lb_try_interval <interval>
+//         lb_retry_match <request-matcher>
 //
 //         # active health checking
 //         health_uri <uri>
 //         health_port <port>
 //         health_interval <interval>
 //         health_timeout <duration>
 //         health_status <status>
 //         health_body <regexp>
 //         health_headers {
 //             <field> [<values...>]
 //         }
 //
 //         # passive health checking
 //         fail_duration <duration>
 //         max_fails <num>
 //         unhealthy_status <status>
 //         unhealthy_latency <duration>
 //         unhealthy_request_count <num>
 //
 //         # streaming
 //         flush_interval <duration>
 //         buffer_requests
 //         buffer_responses
 //         max_buffer_size <size>
 //
 //         # request manipulation
 //         trusted_proxies [private_ranges] <ranges...>
 //         header_up [+|-]<field> [<value|regexp> [<replacement>]]
 //         header_down [+|-]<field> [<value|regexp> [<replacement>]]
 //         method <method>
 //         rewrite <to>
 //
 //         # round trip
 //         transport <name> {
 //             ...
 //         }
 //
 //         # optionally intercept responses from upstream
 //         @name {
 //             status <code...>
 //             header <field> [<value>]
 //         }
 //         replace_status [<matcher>] <status_code>
 //         handle_response [<matcher>] {
 //             <directives...>
 //
 //             # special directives only available in handle_response
 //             copy_response [<matcher>] [<status>] {
 //                 status <status>
 //             }
 //             copy_response_headers [<matcher>] {
 //                 include <fields...>
 //                 exclude <fields...>
 //             }
 //         }
 //     }
 //
 // Proxy upstream addresses should be network dial addresses such
 // as `host:port`, or a URL such as `scheme://host:port`. Scheme
@@ -247,6 +249,19 @@ func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 			}
 			h.LoadBalancing.SelectionPolicyRaw = caddyconfig.JSONModuleObject(sel, "policy", name, nil)

+		case "lb_retries":
+			if !d.NextArg() {
+				return d.ArgErr()
+			}
+			tries, err := strconv.Atoi(d.Val())
+			if err != nil {
+				return d.Errf("bad lb_retries number '%s': %v", d.Val(), err)
+			}
+			if h.LoadBalancing == nil {
+				h.LoadBalancing = new(LoadBalancing)
+			}
+			h.LoadBalancing.Retries = tries
+
 		case "lb_try_duration":
 			if !d.NextArg() {
 				return d.ArgErr()
@@ -273,6 +288,16 @@ func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 			}
 			h.LoadBalancing.TryInterval = caddy.Duration(dur)

+		case "lb_retry_match":
+			matcherSet, err := caddyhttp.ParseCaddyfileNestedMatcherSet(d)
+			if err != nil {
+				return d.Errf("failed to parse lb_retry_match: %v", err)
+			}
+			if h.LoadBalancing == nil {
+				h.LoadBalancing = new(LoadBalancing)
+			}
+			h.LoadBalancing.RetryMatchRaw = append(h.LoadBalancing.RetryMatchRaw, matcherSet)
+
 		case "health_uri":
 			if !d.NextArg() {
 				return d.ArgErr()
@@ -799,31 +824,32 @@ func (h *Handler) FinalizeUnmarshalCaddyfile(helper httpcaddyfile.Helper) error {
 // UnmarshalCaddyfile deserializes Caddyfile tokens into h.
 //
 //     transport http {
 //         read_buffer <size>
 //         write_buffer <size>
 //         max_response_header <size>
 //         dial_timeout <duration>
 //         dial_fallback_delay <duration>
 //         response_header_timeout <duration>
 //         expect_continue_timeout <duration>
 //         resolvers <resolvers...>
 //         tls
 //         tls_client_auth <automate_name> | <cert_file> <key_file>
 //         tls_insecure_skip_verify
 //         tls_timeout <duration>
 //         tls_trusted_ca_certs <cert_files...>
 //         tls_server_name <sni>
+//         tls_renegotiation <level>
+//         tls_except_ports <ports...>
 //         keepalive [off|<duration>]
 //         keepalive_interval <interval>
 //         keepalive_idle_conns <max_count>
 //         keepalive_idle_conns_per_host <count>
 //         versions <versions...>
 //         compression off
 //         max_conns_per_host <count>
 //         max_idle_conns_per_host <count>
 //     }
-//
 func (h *HTTPTransport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 	for d.Next() {
 		for d.NextBlock(0) {
@@ -848,6 +874,26 @@ func (h *HTTPTransport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 			}
 			h.WriteBufferSize = int(size)

+		case "read_timeout":
+			if !d.NextArg() {
+				return d.ArgErr()
+			}
+			timeout, err := caddy.ParseDuration(d.Val())
+			if err != nil {
+				return d.Errf("invalid read timeout duration '%s': %v", d.Val(), err)
+			}
+			h.ReadTimeout = caddy.Duration(timeout)
+
+		case "write_timeout":
+			if !d.NextArg() {
+				return d.ArgErr()
+			}
+			timeout, err := caddy.ParseDuration(d.Val())
+			if err != nil {
+				return d.Errf("invalid write timeout duration '%s': %v", d.Val(), err)
+			}
+			h.WriteTimeout = caddy.Duration(timeout)
+
 		case "max_response_header":
 			if !d.NextArg() {
 				return d.ArgErr()
@@ -907,6 +953,11 @@ func (h *HTTPTransport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 				return d.Errf("must specify at least one resolver address")
 			}

+		case "tls":
+			if h.TLS == nil {
+				h.TLS = new(TLSConfig)
+			}
+
 		case "tls_client_auth":
 			if h.TLS == nil {
 				h.TLS = new(TLSConfig)
|
||||||
return d.ArgErr()
|
return d.ArgErr()
|
||||||
}
|
}
|
||||||
|
|
||||||
case "renegotiation":
|
|
||||||
if h.TLS == nil {
|
|
||||||
h.TLS = new(TLSConfig)
|
|
||||||
}
|
|
||||||
if !d.NextArg() {
|
|
||||||
return d.ArgErr()
|
|
||||||
}
|
|
||||||
switch renegotiation := d.Val(); renegotiation {
|
|
||||||
case "never", "once", "freely":
|
|
||||||
h.TLS.Renegotiation = renegotiation
|
|
||||||
default:
|
|
||||||
return d.ArgErr()
|
|
||||||
}
|
|
||||||
|
|
||||||
case "tls":
|
|
||||||
if h.TLS == nil {
|
|
||||||
h.TLS = new(TLSConfig)
|
|
||||||
}
|
|
||||||
|
|
||||||
case "tls_insecure_skip_verify":
|
case "tls_insecure_skip_verify":
|
||||||
if d.NextArg() {
|
if d.NextArg() {
|
||||||
return d.ArgErr()
|
return d.ArgErr()
|
||||||
|
@@ -982,6 +1014,29 @@ func (h *HTTPTransport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 			}
 			h.TLS.ServerName = d.Val()

+		case "tls_renegotiation":
+			if h.TLS == nil {
+				h.TLS = new(TLSConfig)
+			}
+			if !d.NextArg() {
+				return d.ArgErr()
+			}
+			switch renegotiation := d.Val(); renegotiation {
+			case "never", "once", "freely":
+				h.TLS.Renegotiation = renegotiation
+			default:
+				return d.ArgErr()
+			}
+
+		case "tls_except_ports":
+			if h.TLS == nil {
+				h.TLS = new(TLSConfig)
+			}
+			h.TLS.ExceptPorts = d.RemainingArgs()
+			if len(h.TLS.ExceptPorts) == 0 {
+				return d.ArgErr()
+			}
+
 		case "keepalive":
 			if !d.NextArg() {
 				return d.ArgErr()
|
||||||
}
|
}
|
||||||
h.MaxConnsPerHost = num
|
h.MaxConnsPerHost = num
|
||||||
|
|
||||||
case "except_ports":
|
|
||||||
if h.TLS == nil {
|
|
||||||
h.TLS = new(TLSConfig)
|
|
||||||
}
|
|
||||||
h.TLS.ExceptPorts = d.RemainingArgs()
|
|
||||||
if len(h.TLS.ExceptPorts) == 0 {
|
|
||||||
return d.ArgErr()
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return d.Errf("unrecognized subdirective %s", d.Val())
|
return d.Errf("unrecognized subdirective %s", d.Val())
|
||||||
}
|
}
|
||||||
|
@@ -1091,10 +1137,9 @@ func parseCopyResponseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
 // UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
 //
 //     copy_response [<matcher>] [<status>] {
 //         status <status>
 //     }
-//
 func (h *CopyResponseHandler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 	for d.Next() {
 		args := d.RemainingArgs()
@@ -1131,11 +1176,10 @@ func parseCopyResponseHeadersCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
 // UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
 //
 //     copy_response_headers [<matcher>] {
 //         include <fields...>
 //         exclude <fields...>
 //     }
-//
 func (h *CopyResponseHeadersHandler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 	for d.Next() {
 		args := d.RemainingArgs()
@@ -1161,16 +1205,15 @@ func (h *CopyResponseHeadersHandler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 // UnmarshalCaddyfile deserializes Caddyfile tokens into h.
 //
 //     dynamic srv [<name>] {
 //         service <service>
 //         proto <proto>
 //         name <name>
 //         refresh <interval>
 //         resolvers <resolvers...>
 //         dial_timeout <timeout>
 //         dial_fallback_delay <timeout>
 //     }
-//
 func (u *SRVUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 	for d.Next() {
 		args := d.RemainingArgs()
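For orientation, `dynamic srv` style upstreams boil down to a DNS SRV lookup refreshed on an interval. A bare-bones standalone version using the standard resolver is shown below; the service/proto/name values are placeholders and this is not Caddy's SRVUpstreams code:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func lookupUpstreams(ctx context.Context, service, proto, name string) ([]string, error) {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	// the SRV answer carries target host and port for each backend
	_, records, err := net.DefaultResolver.LookupSRV(ctx, service, proto, name)
	if err != nil {
		return nil, err
	}
	addrs := make([]string, 0, len(records))
	for _, srv := range records {
		addrs = append(addrs, fmt.Sprintf("%s:%d", srv.Target, srv.Port))
	}
	return addrs, nil
}

func main() {
	addrs, err := lookupUpstreams(context.Background(), "http", "tcp", "example.internal")
	fmt.Println(addrs, err)
}
```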
@@ -1260,15 +1303,14 @@ func (u *SRVUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 // UnmarshalCaddyfile deserializes Caddyfile tokens into h.
 //
 //     dynamic a [<name> <port] {
 //         name <name>
 //         port <port>
 //         refresh <interval>
 //         resolvers <resolvers...>
 //         dial_timeout <timeout>
 //         dial_fallback_delay <timeout>
 //     }
-//
 func (u *AUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 	for d.Next() {
 		args := d.RemainingArgs()
@@ -1277,7 +1319,9 @@ func (u *AUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 	}
 	if len(args) > 0 {
 		u.Name = args[0]
-		u.Port = args[1]
+		if len(args) == 2 {
+			u.Port = args[1]
+		}
 	}

 	for d.NextBlock(0) {
@@ -1348,6 +1392,35 @@ func (u *AUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 	return nil
 }

+// UnmarshalCaddyfile deserializes Caddyfile tokens into h.
+//
+//     dynamic multi {
+//         <source> [...]
+//     }
+func (u *MultiUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	for d.Next() {
+		if d.NextArg() {
+			return d.ArgErr()
+		}
+
+		for nesting := d.Nesting(); d.NextBlock(nesting); {
+			dynModule := d.Val()
+			modID := "http.reverse_proxy.upstreams." + dynModule
+			unm, err := caddyfile.UnmarshalModule(d, modID)
+			if err != nil {
+				return err
+			}
+			source, ok := unm.(UpstreamSource)
+			if !ok {
+				return d.Errf("module %s (%T) is not an UpstreamSource", modID, unm)
+			}
+			u.SourcesRaw = append(u.SourcesRaw, caddyconfig.JSONModuleObject(source, "source", dynModule, nil))
+		}
+	}
+
+	return nil
+}
+
 const matcherPrefix = "@"

 // Interface guards
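At request time the multi source simply asks each configured source for upstreams and concatenates the results. A simplified picture of that composition, with an invented UpstreamLister interface standing in for the real UpstreamSource:

```go
package main

import "fmt"

type UpstreamLister interface {
	List() []string
}

type staticSource []string

func (s staticSource) List() []string { return s }

// multiSource mirrors the "dynamic multi { <source> [...] }" idea.
type multiSource []UpstreamLister

func (m multiSource) List() []string {
	var all []string
	for _, src := range m {
		all = append(all, src.List()...)
	}
	return all
}

func main() {
	m := multiSource{
		staticSource{"10.0.0.1:8080"},
		staticSource{"10.0.0.2:8080", "10.0.0.3:8080"},
	}
	fmt.Println(m.List())
}
```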
@@ -1356,4 +1429,5 @@ var (
 	_ caddyfile.Unmarshaler = (*HTTPTransport)(nil)
 	_ caddyfile.Unmarshaler = (*SRVUpstreams)(nil)
 	_ caddyfile.Unmarshaler = (*AUpstreams)(nil)
+	_ caddyfile.Unmarshaler = (*MultiUpstreams)(nil)
 )
@@ -172,8 +172,13 @@ func cmdReverseProxy(fs caddycmd.Flags) (int, error) {
 		appsRaw["tls"] = caddyconfig.JSON(tlsApp, nil)
 	}

+	var false bool
 	cfg := &caddy.Config{
-		Admin:   &caddy.AdminConfig{Disabled: true},
+		Admin: &caddy.AdminConfig{Disabled: true,
+			Config: &caddy.ConfigSettings{
+				Persist: &false,
+			},
+		},
 		AppsRaw: appsRaw,
 	}
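The `var false bool` line above reads oddly: it declares a variable named false (shadowing the predeclared identifier) whose zero value is false, purely so the code can take its address for the *bool Persist field. A standalone illustration of the same trick next to a more conventional helper (the settings type is invented):

```go
package main

import "fmt"

type settings struct {
	Persist *bool
}

func boolPtr(b bool) *bool { return &b }

func main() {
	var false bool // zero value is false; shadows the builtin identifier
	s1 := settings{Persist: &false}

	s2 := settings{Persist: boolPtr(false)} // equivalent, without shadowing

	fmt.Println(*s1.Persist, *s2.Persist)
}
```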
@@ -35,16 +35,16 @@ func init() {
 // UnmarshalCaddyfile deserializes Caddyfile tokens into h.
 //
 //     transport fastcgi {
 //         root <path>
 //         split <at>
 //         env <key> <value>
 //         resolve_root_symlink
 //         dial_timeout <duration>
 //         read_timeout <duration>
 //         write_timeout <duration>
+//         capture_stderr
 //     }
-//
 func (t *Transport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 	for d.Next() {
 		for d.NextBlock(0) {
@@ -107,6 +107,12 @@ func (t *Transport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 			}
 			t.WriteTimeout = caddy.Duration(dur)

+		case "capture_stderr":
+			if d.NextArg() {
+				return d.ArgErr()
+			}
+			t.CaptureStderr = true
+
 		default:
 			return d.Errf("unrecognized subdirective %s", d.Val())
 		}
@@ -120,31 +126,31 @@ func (t *Transport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 // Unmarshaler is invoked by this function) but the resulting proxy is specially
 // configured for most™️ PHP apps over FastCGI. A line such as this:
 //
 //     php_fastcgi localhost:7777
 //
 // is equivalent to a route consisting of:
 //
 //     # Add trailing slash for directory requests
 //     @canonicalPath {
 //         file {path}/index.php
 //         not path */
 //     }
 //     redir @canonicalPath {path}/ 308
 //
 //     # If the requested file does not exist, try index files
 //     @indexFiles file {
 //         try_files {path} {path}/index.php index.php
 //         split_path .php
 //     }
 //     rewrite @indexFiles {http.matchers.file.relative}
 //
 //     # Proxy PHP files to the FastCGI responder
 //     @phpFiles path *.php
 //     reverse_proxy @phpFiles localhost:7777 {
 //         transport fastcgi {
 //             split .php
 //         }
 //     }
 //
 // Thus, this directive produces multiple handlers, each with a different
 // matcher because multiple consecutive handlers are necessary to support
@@ -154,7 +160,7 @@ func (t *Transport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 //
 // If a matcher is specified by the user, for example:
 //
 //     php_fastcgi /subpath localhost:7777
 //
 // then the resulting handlers are wrapped in a subroute that uses the
 // user's matcher as a prerequisite to enter the subroute. In other
@@ -303,6 +309,14 @@ func parsePHPFastCGI(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
 			fcgiTransport.WriteTimeout = caddy.Duration(dur)
 			dispenser.Delete()
 			dispenser.Delete()
+
+		case "capture_stderr":
+			args := dispenser.RemainingArgs()
+			dispenser.Delete()
+			for range args {
+				dispenser.Delete()
+			}
+			fcgiTransport.CaptureStderr = true
 		}
 	}
 }
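capture_stderr routes anything the FastCGI app writes on its stderr stream into the structured log, at WARN normally and at ERROR for 4xx/5xx responses (this mirrors the clientCloser logic later in this diff). A rough standalone sketch of that decision with zap; the field names are illustrative:

```go
package main

import "go.uber.org/zap"

// logStderr logs captured stderr output, escalating to ERROR for failing responses.
func logStderr(logger *zap.Logger, status int, stderr []byte) {
	if len(stderr) == 0 {
		return
	}
	if status >= 400 {
		logger.Error("stderr", zap.ByteString("body", stderr))
	} else {
		logger.Warn("stderr", zap.ByteString("body", stderr))
	}
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	logStderr(logger, 200, []byte("PHP Notice: undefined index"))
	logStderr(logger, 502, []byte("PHP Fatal error: ..."))
}
```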
@ -26,9 +26,6 @@ package fastcgi
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"io"
|
"io"
|
||||||
"mime/multipart"
|
"mime/multipart"
|
||||||
"net"
|
"net"
|
||||||
|
@ -40,8 +37,9 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
// FCGIListenSockFileno describes listen socket file number.
|
// FCGIListenSockFileno describes listen socket file number.
|
||||||
|
@ -120,293 +118,84 @@ const (
|
||||||
maxPad = 255
|
maxPad = 255
|
||||||
)
|
)
|
||||||
|
|
||||||
type header struct {
|
|
||||||
Version uint8
|
|
||||||
Type uint8
|
|
||||||
ID uint16
|
|
||||||
ContentLength uint16
|
|
||||||
PaddingLength uint8
|
|
||||||
Reserved uint8
|
|
||||||
}
|
|
||||||
|
|
||||||
// for padding so we don't have to allocate all the time
|
// for padding so we don't have to allocate all the time
|
||||||
// not synchronized because we don't care what the contents are
|
// not synchronized because we don't care what the contents are
|
||||||
var pad [maxPad]byte
|
var pad [maxPad]byte
|
||||||
|
|
||||||
func (h *header) init(recType uint8, reqID uint16, contentLength int) {
|
// client implements a FastCGI client, which is a standard for
|
||||||
h.Version = 1
|
|
||||||
h.Type = recType
|
|
||||||
h.ID = reqID
|
|
||||||
h.ContentLength = uint16(contentLength)
|
|
||||||
h.PaddingLength = uint8(-contentLength & 7)
|
|
||||||
}
|
|
||||||
|
|
||||||
type record struct {
|
|
||||||
h header
|
|
||||||
rbuf []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rec *record) read(r io.Reader) (buf []byte, err error) {
|
|
||||||
if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if rec.h.Version != 1 {
|
|
||||||
err = errors.New("fcgi: invalid header version")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if rec.h.Type == EndRequest {
|
|
||||||
err = io.EOF
|
|
||||||
return
|
|
||||||
}
|
|
||||||
n := int(rec.h.ContentLength) + int(rec.h.PaddingLength)
|
|
||||||
if len(rec.rbuf) < n {
|
|
||||||
rec.rbuf = make([]byte, n)
|
|
||||||
}
|
|
||||||
if _, err = io.ReadFull(r, rec.rbuf[:n]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
buf = rec.rbuf[:int(rec.h.ContentLength)]
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// FCGIClient implements a FastCGI client, which is a standard for
|
|
||||||
// interfacing external applications with Web servers.
|
// interfacing external applications with Web servers.
|
||||||
type FCGIClient struct {
|
type client struct {
|
||||||
mutex sync.Mutex
|
rwc net.Conn
|
||||||
rwc io.ReadWriteCloser
|
// keepAlive bool // TODO: implement
|
||||||
h header
|
reqID uint16
|
||||||
buf bytes.Buffer
|
stderr bool
|
||||||
stderr bytes.Buffer
|
logger *zap.Logger
|
||||||
keepAlive bool
|
|
||||||
reqID uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
// DialWithDialerContext connects to the fcgi responder at the specified network address, using custom net.Dialer
|
|
||||||
// and a context.
|
|
||||||
// See func net.Dial for a description of the network and address parameters.
|
|
||||||
func DialWithDialerContext(ctx context.Context, network, address string, dialer net.Dialer) (fcgi *FCGIClient, err error) {
|
|
||||||
var conn net.Conn
|
|
||||||
conn, err = dialer.DialContext(ctx, network, address)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fcgi = &FCGIClient{
|
|
||||||
rwc: conn,
|
|
||||||
keepAlive: false,
|
|
||||||
reqID: 1,
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DialContext is like Dial but passes ctx to dialer.Dial.
|
|
||||||
func DialContext(ctx context.Context, network, address string) (fcgi *FCGIClient, err error) {
|
|
||||||
// TODO: why not set timeout here?
|
|
||||||
return DialWithDialerContext(ctx, network, address, net.Dialer{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dial connects to the fcgi responder at the specified network address, using default net.Dialer.
|
|
||||||
// See func net.Dial for a description of the network and address parameters.
|
|
||||||
func Dial(network, address string) (fcgi *FCGIClient, err error) {
|
|
||||||
return DialContext(context.Background(), network, address)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes fcgi connection
|
|
||||||
func (c *FCGIClient) Close() {
|
|
||||||
c.rwc.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *FCGIClient) writeRecord(recType uint8, content []byte) (err error) {
|
|
||||||
c.mutex.Lock()
|
|
||||||
defer c.mutex.Unlock()
|
|
||||||
c.buf.Reset()
|
|
||||||
c.h.init(recType, c.reqID, len(content))
|
|
||||||
if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err := c.buf.Write(content); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = c.rwc.Write(c.buf.Bytes())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *FCGIClient) writeBeginRequest(role uint16, flags uint8) error {
|
|
||||||
b := [8]byte{byte(role >> 8), byte(role), flags}
|
|
||||||
return c.writeRecord(BeginRequest, b[:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *FCGIClient) writePairs(recType uint8, pairs map[string]string) error {
|
|
||||||
w := newWriter(c, recType)
|
|
||||||
b := make([]byte, 8)
|
|
||||||
nn := 0
|
|
||||||
for k, v := range pairs {
|
|
||||||
m := 8 + len(k) + len(v)
|
|
||||||
if m > maxWrite {
|
|
||||||
// param data size exceed 65535 bytes"
|
|
||||||
vl := maxWrite - 8 - len(k)
|
|
||||||
v = v[:vl]
|
|
||||||
}
|
|
||||||
n := encodeSize(b, uint32(len(k)))
|
|
||||||
n += encodeSize(b[n:], uint32(len(v)))
|
|
||||||
m = n + len(k) + len(v)
|
|
||||||
if (nn + m) > maxWrite {
|
|
||||||
w.Flush()
|
|
||||||
nn = 0
|
|
||||||
}
|
|
||||||
nn += m
|
|
||||||
if _, err := w.Write(b[:n]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err := w.WriteString(k); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err := w.WriteString(v); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.Close()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeSize(b []byte, size uint32) int {
|
|
||||||
if size > 127 {
|
|
||||||
size |= 1 << 31
|
|
||||||
binary.BigEndian.PutUint32(b, size)
|
|
||||||
return 4
|
|
||||||
}
|
|
||||||
b[0] = byte(size)
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// bufWriter encapsulates bufio.Writer but also closes the underlying stream when
|
|
||||||
// Closed.
|
|
||||||
type bufWriter struct {
|
|
||||||
closer io.Closer
|
|
||||||
*bufio.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *bufWriter) Close() error {
|
|
||||||
if err := w.Writer.Flush(); err != nil {
|
|
||||||
w.closer.Close()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return w.closer.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func newWriter(c *FCGIClient, recType uint8) *bufWriter {
|
|
||||||
s := &streamWriter{c: c, recType: recType}
|
|
||||||
w := bufio.NewWriterSize(s, maxWrite)
|
|
||||||
return &bufWriter{s, w}
|
|
||||||
}
|
|
||||||
|
|
||||||
// streamWriter abstracts out the separation of a stream into discrete records.
|
|
||||||
// It only writes maxWrite bytes at a time.
|
|
||||||
type streamWriter struct {
|
|
||||||
c *FCGIClient
|
|
||||||
recType uint8
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *streamWriter) Write(p []byte) (int, error) {
|
|
||||||
nn := 0
|
|
||||||
for len(p) > 0 {
|
|
||||||
n := len(p)
|
|
||||||
if n > maxWrite {
|
|
||||||
n = maxWrite
|
|
||||||
}
|
|
||||||
if err := w.c.writeRecord(w.recType, p[:n]); err != nil {
|
|
||||||
return nn, err
|
|
||||||
}
|
|
||||||
nn += n
|
|
||||||
p = p[n:]
|
|
||||||
}
|
|
||||||
return nn, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *streamWriter) Close() error {
|
|
||||||
// send empty record to close the stream
|
|
||||||
return w.c.writeRecord(w.recType, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
type streamReader struct {
|
|
||||||
c *FCGIClient
|
|
||||||
buf []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *streamReader) Read(p []byte) (n int, err error) {
|
|
||||||
|
|
||||||
if len(p) > 0 {
|
|
||||||
if len(w.buf) == 0 {
|
|
||||||
|
|
||||||
// filter outputs for error log
|
|
||||||
for {
|
|
||||||
rec := &record{}
|
|
||||||
var buf []byte
|
|
||||||
buf, err = rec.read(w.c.rwc)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// standard error output
|
|
||||||
if rec.h.Type == Stderr {
|
|
||||||
w.c.stderr.Write(buf)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
w.buf = buf
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
n = len(p)
|
|
||||||
if n > len(w.buf) {
|
|
||||||
n = len(w.buf)
|
|
||||||
}
|
|
||||||
copy(p, w.buf[:n])
|
|
||||||
w.buf = w.buf[n:]
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Do made the request and returns a io.Reader that translates the data read
|
// Do made the request and returns a io.Reader that translates the data read
|
||||||
// from fcgi responder out of fcgi packet before returning it.
|
// from fcgi responder out of fcgi packet before returning it.
|
||||||
func (c *FCGIClient) Do(p map[string]string, req io.Reader) (r io.Reader, err error) {
|
func (c *client) Do(p map[string]string, req io.Reader) (r io.Reader, err error) {
|
||||||
err = c.writeBeginRequest(uint16(Responder), 0)
|
writer := &streamWriter{c: c}
|
||||||
|
writer.buf = bufPool.Get().(*bytes.Buffer)
|
||||||
|
writer.buf.Reset()
|
||||||
|
defer bufPool.Put(writer.buf)
|
||||||
|
|
||||||
|
err = writer.writeBeginRequest(uint16(Responder), 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
err = c.writePairs(Params, p)
|
writer.recType = Params
|
||||||
|
err = writer.writePairs(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
body := newWriter(c, Stdin)
|
writer.recType = Stdin
|
||||||
if req != nil {
|
if req != nil {
|
||||||
_, _ = io.Copy(body, req)
|
_, err = io.Copy(writer, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = writer.FlushStream()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
body.Close()
|
|
||||||
|
|
||||||
r = &streamReader{c: c}
|
r = &streamReader{c: c}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// clientCloser is a io.ReadCloser. It wraps a io.Reader with a Closer
|
// clientCloser is a io.ReadCloser. It wraps a io.Reader with a Closer
|
||||||
// that closes FCGIClient connection.
|
// that closes the client connection.
|
||||||
type clientCloser struct {
|
type clientCloser struct {
|
||||||
*FCGIClient
|
rwc net.Conn
|
||||||
|
r *streamReader
|
||||||
io.Reader
|
io.Reader
|
||||||
|
|
||||||
|
status int
|
||||||
|
logger *zap.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f clientCloser) Close() error { return f.rwc.Close() }
|
func (f clientCloser) Close() error {
|
||||||
|
stderr := f.r.stderr.Bytes()
|
||||||
|
if len(stderr) == 0 {
|
||||||
|
return f.rwc.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.status >= 400 {
|
||||||
|
f.logger.Error("stderr", zap.ByteString("body", stderr))
|
||||||
|
} else {
|
||||||
|
f.logger.Warn("stderr", zap.ByteString("body", stderr))
|
||||||
|
}
|
||||||
|
|
||||||
|
return f.rwc.Close()
|
||||||
|
}
|
||||||
|
|
||||||
// Request returns a HTTP Response with Header and Body
|
// Request returns a HTTP Response with Header and Body
|
||||||
// from fcgi responder
|
// from fcgi responder
|
||||||
func (c *FCGIClient) Request(p map[string]string, req io.Reader) (resp *http.Response, err error) {
|
func (c *client) Request(p map[string]string, req io.Reader) (resp *http.Response, err error) {
|
||||||
r, err := c.Do(p, req)
|
r, err := c.Do(p, req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
|
@ -424,13 +213,13 @@ func (c *FCGIClient) Request(p map[string]string, req io.Reader) (resp *http.Res
|
||||||
resp.Header = http.Header(mimeHeader)
|
resp.Header = http.Header(mimeHeader)
|
||||||
|
|
||||||
if resp.Header.Get("Status") != "" {
|
if resp.Header.Get("Status") != "" {
|
||||||
statusParts := strings.SplitN(resp.Header.Get("Status"), " ", 2)
|
statusNumber, statusInfo, statusIsCut := strings.Cut(resp.Header.Get("Status"), " ")
|
||||||
resp.StatusCode, err = strconv.Atoi(statusParts[0])
|
resp.StatusCode, err = strconv.Atoi(statusNumber)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if len(statusParts) > 1 {
|
if statusIsCut {
|
||||||
resp.Status = statusParts[1]
|
resp.Status = statusInfo
|
||||||
}
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
|
@ -441,16 +230,27 @@ func (c *FCGIClient) Request(p map[string]string, req io.Reader) (resp *http.Res
|
||||||
resp.TransferEncoding = resp.Header["Transfer-Encoding"]
|
resp.TransferEncoding = resp.Header["Transfer-Encoding"]
|
||||||
resp.ContentLength, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
|
resp.ContentLength, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
|
||||||
|
|
||||||
if chunked(resp.TransferEncoding) {
|
// wrap the response body in our closer
|
||||||
resp.Body = clientCloser{c, httputil.NewChunkedReader(rb)}
|
closer := clientCloser{
|
||||||
} else {
|
rwc: c.rwc,
|
||||||
resp.Body = clientCloser{c, io.NopCloser(rb)}
|
r: r.(*streamReader),
|
||||||
|
Reader: rb,
|
||||||
|
status: resp.StatusCode,
|
||||||
|
logger: noopLogger,
|
||||||
}
|
}
|
||||||
|
if chunked(resp.TransferEncoding) {
|
||||||
|
closer.Reader = httputil.NewChunkedReader(rb)
|
||||||
|
}
|
||||||
|
if c.stderr {
|
||||||
|
closer.logger = c.logger
|
||||||
|
}
|
||||||
|
resp.Body = closer
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get issues a GET request to the fcgi responder.
|
// Get issues a GET request to the fcgi responder.
|
||||||
func (c *FCGIClient) Get(p map[string]string, body io.Reader, l int64) (resp *http.Response, err error) {
|
func (c *client) Get(p map[string]string, body io.Reader, l int64) (resp *http.Response, err error) {
|
||||||
|
|
||||||
p["REQUEST_METHOD"] = "GET"
|
p["REQUEST_METHOD"] = "GET"
|
||||||
p["CONTENT_LENGTH"] = strconv.FormatInt(l, 10)
|
p["CONTENT_LENGTH"] = strconv.FormatInt(l, 10)
|
||||||
|
@ -459,7 +259,7 @@ func (c *FCGIClient) Get(p map[string]string, body io.Reader, l int64) (resp *ht
|
||||||
}
|
}
|
||||||
|
|
||||||
// Head issues a HEAD request to the fcgi responder.
|
// Head issues a HEAD request to the fcgi responder.
|
||||||
func (c *FCGIClient) Head(p map[string]string) (resp *http.Response, err error) {
|
func (c *client) Head(p map[string]string) (resp *http.Response, err error) {
|
||||||
|
|
||||||
p["REQUEST_METHOD"] = "HEAD"
|
p["REQUEST_METHOD"] = "HEAD"
|
||||||
p["CONTENT_LENGTH"] = "0"
|
p["CONTENT_LENGTH"] = "0"
|
||||||
|
@ -468,7 +268,7 @@ func (c *FCGIClient) Head(p map[string]string) (resp *http.Response, err error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Options issues an OPTIONS request to the fcgi responder.
|
// Options issues an OPTIONS request to the fcgi responder.
|
||||||
func (c *FCGIClient) Options(p map[string]string) (resp *http.Response, err error) {
|
func (c *client) Options(p map[string]string) (resp *http.Response, err error) {
|
||||||
|
|
||||||
p["REQUEST_METHOD"] = "OPTIONS"
|
p["REQUEST_METHOD"] = "OPTIONS"
|
||||||
p["CONTENT_LENGTH"] = "0"
|
p["CONTENT_LENGTH"] = "0"
|
||||||
|
@ -478,7 +278,7 @@ func (c *FCGIClient) Options(p map[string]string) (resp *http.Response, err erro
|
||||||
|
|
||||||
// Post issues a POST request to the fcgi responder. with request body
|
// Post issues a POST request to the fcgi responder. with request body
|
||||||
// in the format that bodyType specified
|
// in the format that bodyType specified
|
||||||
func (c *FCGIClient) Post(p map[string]string, method string, bodyType string, body io.Reader, l int64) (resp *http.Response, err error) {
|
func (c *client) Post(p map[string]string, method string, bodyType string, body io.Reader, l int64) (resp *http.Response, err error) {
|
||||||
if p == nil {
|
if p == nil {
|
||||||
p = make(map[string]string)
|
p = make(map[string]string)
|
||||||
}
|
}
|
||||||
|
@ -501,7 +301,7 @@ func (c *FCGIClient) Post(p map[string]string, method string, bodyType string, b
|
||||||
|
|
||||||
// PostForm issues a POST to the fcgi responder, with form
|
// PostForm issues a POST to the fcgi responder, with form
|
||||||
// as a string key to a list values (url.Values)
|
// as a string key to a list values (url.Values)
|
||||||
func (c *FCGIClient) PostForm(p map[string]string, data url.Values) (resp *http.Response, err error) {
|
func (c *client) PostForm(p map[string]string, data url.Values) (resp *http.Response, err error) {
|
||||||
body := bytes.NewReader([]byte(data.Encode()))
|
body := bytes.NewReader([]byte(data.Encode()))
|
||||||
return c.Post(p, "POST", "application/x-www-form-urlencoded", body, int64(body.Len()))
|
return c.Post(p, "POST", "application/x-www-form-urlencoded", body, int64(body.Len()))
|
||||||
}
|
}
|
||||||
|
@ -509,7 +309,7 @@ func (c *FCGIClient) PostForm(p map[string]string, data url.Values) (resp *http.
|
||||||
// PostFile issues a POST to the fcgi responder in multipart(RFC 2046) standard,
|
// PostFile issues a POST to the fcgi responder in multipart(RFC 2046) standard,
|
||||||
// with form as a string key to a list values (url.Values),
|
// with form as a string key to a list values (url.Values),
|
||||||
// and/or with file as a string key to a list file path.
|
// and/or with file as a string key to a list file path.
|
||||||
func (c *FCGIClient) PostFile(p map[string]string, data url.Values, file map[string]string) (resp *http.Response, err error) {
|
func (c *client) PostFile(p map[string]string, data url.Values, file map[string]string) (resp *http.Response, err error) {
|
||||||
buf := &bytes.Buffer{}
|
buf := &bytes.Buffer{}
|
||||||
writer := multipart.NewWriter(buf)
|
writer := multipart.NewWriter(buf)
|
||||||
bodyType := writer.FormDataContentType()
|
bodyType := writer.FormDataContentType()
|
||||||
|
@ -550,18 +350,18 @@ func (c *FCGIClient) PostFile(p map[string]string, data url.Values, file map[str
|
||||||
|
|
||||||
// SetReadTimeout sets the read timeout for future calls that read from the
|
// SetReadTimeout sets the read timeout for future calls that read from the
|
||||||
// fcgi responder. A zero value for t means no timeout will be set.
|
// fcgi responder. A zero value for t means no timeout will be set.
|
||||||
func (c *FCGIClient) SetReadTimeout(t time.Duration) error {
|
func (c *client) SetReadTimeout(t time.Duration) error {
|
||||||
if conn, ok := c.rwc.(net.Conn); ok && t != 0 {
|
if t != 0 {
|
||||||
return conn.SetReadDeadline(time.Now().Add(t))
|
return c.rwc.SetReadDeadline(time.Now().Add(t))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetWriteTimeout sets the write timeout for future calls that send data to
|
// SetWriteTimeout sets the write timeout for future calls that send data to
|
||||||
// the fcgi responder. A zero value for t means no timeout will be set.
|
// the fcgi responder. A zero value for t means no timeout will be set.
|
||||||
func (c *FCGIClient) SetWriteTimeout(t time.Duration) error {
|
func (c *client) SetWriteTimeout(t time.Duration) error {
|
||||||
if conn, ok := c.rwc.(net.Conn); ok && t != 0 {
|
if t != 0 {
|
||||||
return conn.SetWriteDeadline(time.Now().Add(t))
|
return c.rwc.SetWriteDeadline(time.Now().Add(t))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -118,12 +118,14 @@ func (s FastCGIServer) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func sendFcgi(reqType int, fcgiParams map[string]string, data []byte, posts map[string]string, files map[string]string) (content []byte) {
|
func sendFcgi(reqType int, fcgiParams map[string]string, data []byte, posts map[string]string, files map[string]string) (content []byte) {
|
||||||
fcgi, err := Dial("tcp", ipPort)
|
conn, err := net.Dial("tcp", ipPort)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Println("err:", err)
|
log.Println("err:", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fcgi := client{rwc: conn, reqID: 1}
|
||||||
|
|
||||||
length := 0
|
length := 0
|
||||||
|
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
|
@ -168,7 +170,7 @@ func sendFcgi(reqType int, fcgiParams map[string]string, data []byte, posts map[
|
||||||
content, _ = io.ReadAll(resp.Body)
|
content, _ = io.ReadAll(resp.Body)
|
||||||
|
|
||||||
log.Println("c: send data length ≈", length, string(content))
|
log.Println("c: send data length ≈", length, string(content))
|
||||||
fcgi.Close()
|
conn.Close()
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
|
|
||||||
if bytes.Contains(content, []byte("FAILED")) {
|
if bytes.Contains(content, []byte("FAILED")) {
|
||||||
|
|
|
@ -15,7 +15,6 @@
|
||||||
package fastcgi
|
package fastcgi
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
|
@ -34,6 +33,8 @@ import (
|
||||||
"github.com/caddyserver/caddy/v2"
|
"github.com/caddyserver/caddy/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var noopLogger = zap.NewNop()
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
caddy.RegisterModule(Transport{})
|
caddy.RegisterModule(Transport{})
|
||||||
}
|
}
|
||||||
|
@ -74,6 +75,11 @@ type Transport struct {
|
||||||
// The duration used to set a deadline when sending to the FastCGI server.
|
// The duration used to set a deadline when sending to the FastCGI server.
|
||||||
WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`
|
WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`
|
||||||
|
|
||||||
|
// Capture and log any messages sent by the upstream on stderr. Logs at WARN
|
||||||
|
// level by default. If the response has a 4xx or 5xx status ERROR level will
|
||||||
|
// be used instead.
|
||||||
|
CaptureStderr bool `json:"capture_stderr,omitempty"`
|
||||||
|
|
||||||
serverSoftware string
|
serverSoftware string
|
||||||
logger *zap.Logger
|
logger *zap.Logger
|
||||||
}
|
}
|
||||||
|
@ -94,10 +100,8 @@ func (t *Transport) Provision(ctx caddy.Context) error {
|
||||||
t.Root = "{http.vars.root}"
|
t.Root = "{http.vars.root}"
|
||||||
}
|
}
|
||||||
|
|
||||||
t.serverSoftware = "Caddy"
|
version, _ := caddy.Version()
|
||||||
if mod := caddy.GoModule(); mod.Version != "" {
|
t.serverSoftware = "Caddy/" + version
|
||||||
t.serverSoftware += "/" + mod.Version
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set a relatively short default dial timeout.
|
// Set a relatively short default dial timeout.
|
||||||
// This is helpful to make load-balancer retries more speedy.
|
// This is helpful to make load-balancer retries more speedy.
|
||||||
|
@ -110,6 +114,8 @@ func (t *Transport) Provision(ctx caddy.Context) error {
|
||||||
|
|
||||||
// RoundTrip implements http.RoundTripper.
|
// RoundTrip implements http.RoundTripper.
|
||||||
func (t Transport) RoundTrip(r *http.Request) (*http.Response, error) {
|
func (t Transport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||||
|
server := r.Context().Value(caddyhttp.ServerCtxKey).(*caddyhttp.Server)
|
||||||
|
|
||||||
// Disallow null bytes in the request path, because
|
// Disallow null bytes in the request path, because
|
||||||
// PHP upstreams may do bad things, like execute a
|
// PHP upstreams may do bad things, like execute a
|
||||||
// non-PHP file as PHP code. See #4574
|
// non-PHP file as PHP code. See #4574
|
||||||
|
@ -122,13 +128,7 @@ func (t Transport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||||
return nil, fmt.Errorf("building environment: %v", err)
|
return nil, fmt.Errorf("building environment: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: doesn't dialer have a Timeout field?
|
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
if t.DialTimeout > 0 {
|
|
||||||
var cancel context.CancelFunc
|
|
||||||
ctx, cancel = context.WithTimeout(ctx, time.Duration(t.DialTimeout))
|
|
||||||
defer cancel()
|
|
||||||
}
|
|
||||||
|
|
||||||
// extract dial information from request (should have been embedded by the reverse proxy)
|
// extract dial information from request (should have been embedded by the reverse proxy)
|
||||||
network, address := "tcp", r.URL.Host
|
network, address := "tcp", r.URL.Host
|
||||||
|
@ -137,24 +137,47 @@ func (t Transport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||||
address = dialInfo.Address
|
address = dialInfo.Address
|
||||||
}
|
}
|
||||||
|
|
||||||
t.logger.Debug("roundtrip",
|
logCreds := server.Logs != nil && server.Logs.ShouldLogCredentials
|
||||||
zap.Object("request", caddyhttp.LoggableHTTPRequest{Request: r}),
|
loggableReq := caddyhttp.LoggableHTTPRequest{
|
||||||
zap.String("dial", address),
|
Request: r,
|
||||||
zap.Object("env", env),
|
ShouldLogCredentials: logCreds,
|
||||||
)
|
}
|
||||||
|
loggableEnv := loggableEnv{vars: env, logCredentials: logCreds}
|
||||||
|
|
||||||
fcgiBackend, err := DialContext(ctx, network, address)
|
logger := t.logger.With(
|
||||||
|
zap.Object("request", loggableReq),
|
||||||
|
zap.Object("env", loggableEnv),
|
||||||
|
)
|
||||||
|
logger.Debug("roundtrip",
|
||||||
|
zap.String("dial", address),
|
||||||
|
zap.Object("env", loggableEnv),
|
||||||
|
zap.Object("request", loggableReq))
|
||||||
|
|
||||||
|
// connect to the backend
|
||||||
|
dialer := net.Dialer{Timeout: time.Duration(t.DialTimeout)}
|
||||||
|
conn, err := dialer.DialContext(ctx, network, address)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// TODO: wrap in a special error type if the dial failed, so retries can happen if enabled
|
|
||||||
return nil, fmt.Errorf("dialing backend: %v", err)
|
return nil, fmt.Errorf("dialing backend: %v", err)
|
||||||
}
|
}
|
||||||
// fcgiBackend gets closed when response body is closed (see clientCloser)
|
defer func() {
|
||||||
|
// conn will be closed with the response body unless there's an error
|
||||||
|
if err != nil {
|
||||||
|
conn.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// create the client that will facilitate the protocol
|
||||||
|
client := client{
|
||||||
|
rwc: conn,
|
||||||
|
reqID: 1,
|
||||||
|
logger: logger,
|
||||||
|
}
|
||||||
|
|
||||||
// read/write timeouts
|
// read/write timeouts
|
||||||
if err := fcgiBackend.SetReadTimeout(time.Duration(t.ReadTimeout)); err != nil {
|
if err = client.SetReadTimeout(time.Duration(t.ReadTimeout)); err != nil {
|
||||||
return nil, fmt.Errorf("setting read timeout: %v", err)
|
return nil, fmt.Errorf("setting read timeout: %v", err)
|
||||||
}
|
}
|
||||||
if err := fcgiBackend.SetWriteTimeout(time.Duration(t.WriteTimeout)); err != nil {
|
if err = client.SetWriteTimeout(time.Duration(t.WriteTimeout)); err != nil {
|
||||||
return nil, fmt.Errorf("setting write timeout: %v", err)
|
return nil, fmt.Errorf("setting write timeout: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -166,16 +189,19 @@ func (t Transport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
switch r.Method {
|
switch r.Method {
|
||||||
case http.MethodHead:
|
case http.MethodHead:
|
||||||
resp, err = fcgiBackend.Head(env)
|
resp, err = client.Head(env)
|
||||||
case http.MethodGet:
|
case http.MethodGet:
|
||||||
resp, err = fcgiBackend.Get(env, r.Body, contentLength)
|
resp, err = client.Get(env, r.Body, contentLength)
|
||||||
case http.MethodOptions:
|
case http.MethodOptions:
|
||||||
resp, err = fcgiBackend.Options(env)
|
resp, err = client.Options(env)
|
||||||
default:
|
default:
|
||||||
resp, err = fcgiBackend.Post(env, r.Method, r.Header.Get("Content-Type"), r.Body, contentLength)
|
resp, err = client.Post(env, r.Method, r.Header.Get("Content-Type"), r.Body, contentLength)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return resp, err
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// buildEnv returns a set of CGI environment variables for the request.
|
// buildEnv returns a set of CGI environment variables for the request.
|
||||||
|
@@ -366,11 +392,22 @@ func (t Transport) splitPos(path string) int {
    return -1
}

-// envVars is a simple type to allow for speeding up zap log encoding.
type envVars map[string]string

-func (env envVars) MarshalLogObject(enc zapcore.ObjectEncoder) error {
-    for k, v := range env {
+// loggableEnv is a simple type to allow for speeding up zap log encoding.
+type loggableEnv struct {
+    vars           envVars
+    logCredentials bool
+}
+
+func (env loggableEnv) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+    for k, v := range env.vars {
+        if !env.logCredentials {
+            switch strings.ToLower(k) {
+            case "http_cookie", "http_set_cookie", "http_authorization", "http_proxy_authorization":
+                v = ""
+            }
+        }
        enc.AddString(k, v)
    }
    return nil

@@ -389,7 +426,7 @@ var headerNameReplacer = strings.NewReplacer(" ", "_", "-", "_")

// Interface guards
var (
-    _ zapcore.ObjectMarshaler = (*envVars)(nil)
+    _ zapcore.ObjectMarshaler = (*loggableEnv)(nil)

    _ caddy.Provisioner = (*Transport)(nil)
    _ http.RoundTripper = (*Transport)(nil)
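loggableEnv above implements zapcore.ObjectMarshaler, so the CGI environment is only encoded when a log entry is actually written, and credential-bearing variables are blanked unless credential logging is enabled. A self-contained sketch of the same technique; the type and variable names here are illustrative, not Caddy's:

package main

import (
    "strings"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

// redactedEnv lazily encodes a map and blanks credential-bearing keys
// unless logging credentials was explicitly enabled.
type redactedEnv struct {
    vars           map[string]string
    logCredentials bool
}

func (e redactedEnv) MarshalLogObject(enc zapcore.ObjectEncoder) error {
    for k, v := range e.vars {
        if !e.logCredentials {
            switch strings.ToLower(k) {
            case "http_cookie", "http_set_cookie", "http_authorization", "http_proxy_authorization":
                v = ""
            }
        }
        enc.AddString(k, v)
    }
    return nil
}

func main() {
    logger, _ := zap.NewDevelopment()
    defer logger.Sync()

    env := redactedEnv{vars: map[string]string{
        "SCRIPT_FILENAME":    "/srv/index.php",
        "HTTP_AUTHORIZATION": "Basic c2VjcmV0",
    }}
    // HTTP_AUTHORIZATION is emitted with an empty value because
    // logCredentials is false.
    logger.Debug("roundtrip", zap.Object("env", env))
}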
32 modules/caddyhttp/reverseproxy/fastcgi/header.go Normal file
@@ -0,0 +1,32 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fastcgi
+
+type header struct {
+    Version       uint8
+    Type          uint8
+    ID            uint16
+    ContentLength uint16
+    PaddingLength uint8
+    Reserved      uint8
+}
+
+func (h *header) init(recType uint8, reqID uint16, contentLength int) {
+    h.Version = 1
+    h.Type = recType
+    h.ID = reqID
+    h.ContentLength = uint16(contentLength)
+    h.PaddingLength = uint8(-contentLength & 7)
+}
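header.init above pads every record body out to an 8-byte boundary: uint8(-contentLength & 7) is the number of padding bytes needed to reach the next multiple of 8. A quick standalone check of that arithmetic:

package main

import "fmt"

func main() {
    // For non-negative n, -n & 7 == (8 - n%8) % 8, i.e. the bytes needed to
    // pad the content length up to the next multiple of 8.
    for _, n := range []int{0, 1, 7, 8, 9, 13, 16} {
        fmt.Printf("content=%2d padding=%d\n", n, uint8(-n&7))
    }
}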
@@ -12,19 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-package notify
+package fastcgi

-// NotifyReadiness notifies process manager of readiness.
-func NotifyReadiness() error {
-    return notifyReadiness()
-}
-
-// NotifyReloading notifies process manager of reloading.
-func NotifyReloading() error {
-    return notifyReloading()
-}
-
-// NotifyStopping notifies process manager of stopping.
-func NotifyStopping() error {
-    return notifyStopping()
-}
+import (
+    "bytes"
+    "sync"
+)
+
+var bufPool = sync.Pool{
+    New: func() any {
+        return new(bytes.Buffer)
+    },
+}
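bufPool above is a standard sync.Pool of *bytes.Buffer values; the usual pattern is to Get a buffer, Reset it before use, and Put it back when finished so allocations are reused across requests. A generic usage sketch, not the client's actual call sites:

package main

import (
    "bytes"
    "fmt"
    "sync"
)

var bufPool = sync.Pool{
    New: func() any {
        return new(bytes.Buffer)
    },
}

func main() {
    buf := bufPool.Get().(*bytes.Buffer)
    buf.Reset()            // reused buffers may still hold old data
    defer bufPool.Put(buf) // return it to the pool once finished

    buf.WriteString("FCGI_PARAMS payload goes here")
    fmt.Println(buf.Len(), "bytes staged")
}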
44 modules/caddyhttp/reverseproxy/fastcgi/reader.go Normal file
@@ -0,0 +1,44 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fastcgi
+
+import (
+    "bytes"
+    "io"
+)
+
+type streamReader struct {
+    c      *client
+    rec    record
+    stderr bytes.Buffer
+}
+
+func (w *streamReader) Read(p []byte) (n int, err error) {
+    for !w.rec.hasMore() {
+        err = w.rec.fill(w.c.rwc)
+        if err != nil {
+            return 0, err
+        }
+
+        // standard error output
+        if w.rec.h.Type == Stderr {
+            if _, err = io.Copy(&w.stderr, &w.rec); err != nil {
+                return 0, err
+            }
+        }
+    }
+
+    return w.rec.Read(p)
+}
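streamReader above serves data from the current record and refills from the connection once the record's io.LimitedReader window is exhausted (hasMore simply checks that N > 0). A minimal, FastCGI-free illustration of reading through such a bounded window; the sample data and sizes are made up:

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    // A record body is just a LimitedReader window over the connection:
    // N is the record's content length.
    conn := strings.NewReader("record-one-payload...rest of stream")
    rec := io.LimitedReader{R: conn, N: 18} // pretend this record carries 18 bytes

    body, err := io.ReadAll(&rec) // stops at the window boundary, not at end of stream
    if err != nil {
        panic(err)
    }
    fmt.Printf("record body: %q, remaining in window: %d\n", body, rec.N)
}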
58 modules/caddyhttp/reverseproxy/fastcgi/record.go Normal file
@@ -0,0 +1,58 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fastcgi
+
+import (
+    "encoding/binary"
+    "errors"
+    "io"
+)
+
+type record struct {
+    h       header
+    lr      io.LimitedReader
+    padding int64
+}
+
+func (rec *record) fill(r io.Reader) (err error) {
+    rec.lr.N = rec.padding
+    rec.lr.R = r
+    if _, err = io.Copy(io.Discard, rec); err != nil {
+        return
+    }
+
+    if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil {
+        return
+    }
+    if rec.h.Version != 1 {
+        err = errors.New("fcgi: invalid header version")
+        return
+    }
+    if rec.h.Type == EndRequest {
+        err = io.EOF
+        return
+    }
+    rec.lr.N = int64(rec.h.ContentLength)
+    rec.padding = int64(rec.h.PaddingLength)
+    return
+}
+
+func (rec *record) Read(p []byte) (n int, err error) {
+    return rec.lr.Read(p)
+}
+
+func (rec *record) hasMore() bool {
+    return rec.lr.N > 0
+}
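record.fill above decodes the fixed 8-byte FastCGI record header straight into the header struct with binary.Read in big-endian order. A standalone sketch of that decoding step; the sample bytes are made up:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// header mirrors the 8-byte FastCGI record header layout shown above.
type header struct {
    Version       uint8
    Type          uint8
    ID            uint16
    ContentLength uint16
    PaddingLength uint8
    Reserved      uint8
}

func main() {
    // version=1, type=6 (FCGI_STDOUT), request ID=1, 5 content bytes, 3 padding bytes
    raw := []byte{1, 6, 0, 1, 0, 5, 3, 0}

    var h header
    if err := binary.Read(bytes.NewReader(raw), binary.BigEndian, &h); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", h)
}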
Some files were not shown because too many files have changed in this diff.