diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 00000000..96948b9d
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,5 @@
+[*]
+end_of_line = lf
+
+[caddytest/integration/caddyfile_adapt/*.caddyfiletest]
+indent_style = tab
\ No newline at end of file
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..a0717e4b
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+*.go text eol=lf
\ No newline at end of file
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 7c51a3e2..7142530e 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -1,7 +1,7 @@
Contributing to Caddy
=====================
-Welcome! Thank you for choosing to be a part of our community. Caddy wouldn't be great without your involvement!
+Welcome! Thank you for choosing to be a part of our community. Caddy wouldn't be nearly as excellent without your involvement!
For starters, we invite you to join [the Caddy forum](https://caddy.community) where you can hang out with other Caddy users and developers.
@@ -23,38 +23,50 @@ Other menu items:
### Contributing code
-You can have a huge impact on the project by helping with its code. To contribute code to Caddy, open a [pull request](https://github.com/caddyserver/caddy/pulls) (PR). If you're new to our community, that's okay: **we gladly welcome pull requests from anyone, regardless of your native language or coding experience.** You can get familiar with Caddy's code base by using [code search at Sourcegraph](https://sourcegraph.com/github.com/caddyserver/caddy/-/search).
+You can have a huge impact on the project by helping with its code. To contribute code to Caddy, first submit or comment in an issue to discuss your contribution, then open a [pull request](https://github.com/caddyserver/caddy/pulls) (PR). If you're new to our community, that's okay: **we gladly welcome pull requests from anyone, regardless of your native language or coding experience.** You can get familiar with Caddy's code base by using [code search at Sourcegraph](https://sourcegraph.com/github.com/caddyserver/caddy).
-We hold contributions to a high standard for quality :bowtie:, so don't be surprised if we ask for revisions—even if it seems small or insignificant. Please don't take it personally. :blue_heart: If your change is on the right track, we can guide you to make it mergable.
+We hold contributions to a high standard for quality :bowtie:, so don't be surprised if we ask for revisions—even if it seems small or insignificant. Please don't take it personally. :blue_heart: If your change is on the right track, we can guide you to make it mergeable.
Here are some of the expectations we have of contributors:
-- **Open an issue to propose your change first.** This way we can avoid confusion, coordinate what everyone is working on, and ensure that any changes are in-line with the project's goals and the best interests of its users. We can also discuss the best possible implementation. If there's already an issue about it, comment on the existing issue to claim it.
+- **Open an issue to propose your change first.** This way we can avoid confusion, coordinate what everyone is working on, and ensure that any changes are in-line with the project's goals and the best interests of its users. We can also discuss the best possible implementation. If there's already an issue about it, comment on the existing issue to claim it. A lot of valuable time can be saved by discussing a proposal first.
- **Keep pull requests small.** Smaller PRs are more likely to be merged because they are easier to review! We might ask you to break up large PRs into smaller ones. [An example of what we want to avoid.](https://twitter.com/iamdevloper/status/397664295875805184)
- **Keep related commits together in a PR.** We do want pull requests to be small, but you should also keep multiple related commits in the same PR if they rely on each other.
-- **Write tests.** Tests are essential! Written properly, they ensure your change works, and that other changes in the future won't break your change. CI checks should pass.
+- **Write tests.** Good, automated tests are very valuable! Written properly, they ensure your change works, and that other changes in the future won't break your change. CI checks should pass.
-- **Benchmarks should be included for optimizations.** Optimizations sometimes make code harder to read or have changes that are less than obvious. They should be proven with benchmarks or profiling.
+- **Benchmarks should be included for optimizations.** Optimizations sometimes make code harder to read or have changes that are less than obvious. They should be proven with benchmarks and profiling.
- **[Squash](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) insignificant commits.** Every commit should be significant. Commits which merely rewrite a comment or fix a typo can be combined into another commit that has more substance. Interactive rebase can do this, or a simpler way is `git reset --soft <diverged-commit>` then `git commit -s`.
-- **Own your contributions.** Caddy is a growing project, and it's much better when individual contributors help maintain their change after it is merged.
+- **Be responsible for and maintain your contributions.** Caddy is a growing project, and it's much better when individual contributors help maintain their change after it is merged.
- **Use comments properly.** We expect good godoc comments for package-level functions, types, and values. Comments are also useful whenever the purpose for a line of code is not obvious.
-We often grant [collaborator status](#collaborator-instructions) to contributors who author one or more significant, high-quality PRs that are merged into the code base!
+- **Pull requests may still get closed.** The longer a PR stays open and idle, the more likely it is to be closed. If we haven't reviewed it in a while, it probably means the change is not a priority. Please don't take this personally, we're trying to balance a lot of tasks! If nobody else has commented or reacted to the PR, it likely means your change is useful only to you. The reality is this happens quite a lot. We don't tend to accept PRs that aren't generally helpful. For these reasons or others, the PR may get closed even after a review. We are not obligated to accept all proposed changes, even if the best justification we can give is something vague like, "It doesn't sit right." Sometimes PRs are just the wrong thing or the wrong time. Because it is open source, you can always build your own modified version of Caddy with a change you need, even if we reject it in the official repo. Plus, because Caddy is extensible, it's possible your feature could make a great plugin instead!
+
+- **You certify that you wrote and comprehend the code you submit.** The Caddy project welcomes original contributions that comply with [our CLA](https://cla-assistant.io/caddyserver/caddy), meaning that authors must be able to certify that they created or have rights to the code they are contributing. In addition, we require that code is not simply copy-pasted from Q/A sites or AI language models without full comprehension and rigorous testing. In other words: contributors are allowed to refer to communities for assistance and use AI tools such as language models for inspiration, but code which originates from or is assisted by these resources MUST be:
+
+ - Licensed for you to freely share
+ - Fully comprehended by you (be able to explain every line of code)
+ - Verified by automated tests when feasible, or thorough manual tests otherwise
+
+ We have found that current language models (LLMs, like ChatGPT) may understand code syntax and even problem spaces to an extent, but often fail in subtle ways to convey true knowledge and produce correct algorithms. Integrated tools such as GitHub Copilot and Sourcegraph Cody may be used for inspiration, but code generated by these tools still needs to meet our criteria for licensing, human comprehension, and testing. These tools may be used to help write code comments and tests as long as you can certify they are accurate and correct. Note that it is often more trouble than it's worth to certify that Copilot (for example) is not giving you code that is possibly plagiarised, unlicensed, or licensed with incompatible terms -- as the Caddy project cannot accept such contributions. If that's too difficult for you (or impossible), then we recommend using these resources only for inspiration and write your own code. Ultimately, you (the contributor) are responsible for the code you're submitting.
+
+ As a courtesy to reviewers, we kindly ask that you disclose when contributing code that was generated by an AI tool or copied from another website so we can be aware of what to look for in code review.
+
+We often grant [collaborator status](#collaborator-instructions) to contributors who author one or more significant, high-quality PRs that are merged into the code base.
#### HOW TO MAKE A PULL REQUEST TO CADDY
-Contributing to Go projects on GitHub is fun and easy. We recommend the following workflow:
+Contributing to Go projects on GitHub is fun and easy. After you have proposed your change in an issue, we recommend the following workflow:
1. [Fork this repo](https://github.com/caddyserver/caddy). This makes a copy of the code you can write to.
-2. If you don't already have this repo (caddyserver/caddy.git) repo on your computer, get it with `go get github.com/caddyserver/caddy/v2`.
+2. If you don't already have this repo (caddyserver/caddy.git) repo on your computer, clone it down: `git clone https://github.com/caddyserver/caddy.git`
3. Tell git that it can push the caddyserver/caddy.git repo to your fork by adding a remote: `git remote add myfork https://github.com/<your-username>/caddy.git`
@@ -85,9 +97,9 @@ Many people on the forums could benefit from your experience and expertise, too.
Like every software, Caddy has its flaws. If you find one, [search the issues](https://github.com/caddyserver/caddy/issues) to see if it has already been reported. If not, [open a new issue](https://github.com/caddyserver/caddy/issues/new) and describe the bug, and somebody will look into it! (This repository is only for Caddy and its standard modules.)
-**You can help stop bugs in their tracks!** Speed up the patch by identifying the bug in the code. This can sometimes be done by adding `fmt.Println()` statements (or similar) in relevant code paths to narrow down where the problem may be. It's a good way to [introduce yourself to the Go language](https://tour.golang.org), too.
+**You can help us fix bugs!** Speed up the patch by identifying the bug in the code. This can sometimes be done by adding `fmt.Println()` statements (or similar) in relevant code paths to narrow down where the problem may be. It's a good way to [introduce yourself to the Go language](https://tour.golang.org), too.
-Please follow the issue template so we have all the needed information. Unredacted—yes, actual values matter. We need to be able to repeat the bug using your instructions. Please simplify the issue as much as possible. The burden is on you to convince us that it is actually a bug in Caddy. This is easiest to do when you write clear, concise instructions so we can reproduce the behavior (even if it seems obvious). The more detailed and specific you are, the faster we will be able to help you!
+We may reply with an issue template. Please follow the template so we have all the needed information. Unredacted—yes, actual values matter. We need to be able to repeat the bug using your instructions. Please simplify the issue as much as possible. If you don't, we might close your report. The burden is on you to make it easily reproducible and to convince us that it is actually a bug in Caddy. This is easiest to do when you write clear, concise instructions so we can reproduce the behavior (even if it seems obvious). The more detailed and specific you are, the faster we will be able to help you!
We suggest reading [How to Report Bugs Effectively](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html).
@@ -98,11 +110,12 @@ Please be kind. :smile: Remember that Caddy comes at no cost to you, and you're
Maintainers---or more generally, developers---need three things to act on bugs:
1. To agree or be convinced that it's a bug (reporter's responsibility).
- - A bug is undesired or surprising behavior which violates documentation or the spec.
+ - A bug is unintentional, undesired, or surprising behavior which violates documentation or relevant spec. It might be either a mistake in the documentation or a bug in the code.
+ - This project usually does not work around bugs in other software, systems, and dependencies; instead, we recommend that those bugs are fixed at their source. This sometimes means we close issues or reject PRs that attempt to fix, workaround, or hide bugs in other projects.
2. To be able to understand what is happening (mostly reporter's responsibility).
- - If the reporter can provide satisfactory instructions such that a developer can reproduce the bug, the developer will likely be able to understand the bug, write a test case, and implement a fix.
- - Otherwise, the burden is on the reporter to test possible solutions. This is discouraged because it loosens the feedback loop, slows down debugging efforts, obscures the true nature of the problem from the developers, and is unlikely to result in new test cases.
+ - If the reporter can provide satisfactory instructions such that a developer can reproduce the bug, the developer will likely be able to understand the bug, write a test case, and implement a fix. This is the least amount of work for everyone and path to the fastest resolution.
+ - Otherwise, the burden is on the reporter to test possible solutions. This is less preferable because it loosens the feedback loop, slows down debugging efforts, obscures the true nature of the problem from the developers, and is unlikely to result in new test cases.
3. A solution, or ideas toward a solution (mostly maintainer's responsibility).
- Sometimes the best solution is a documentation change.
@@ -112,7 +125,7 @@ Maintainers---or more generally, developers---need three things to act on bugs:
Thus, at the very least, the reporter is expected to:
-1. Convince the reader that it's a bug (if it's not obvious).
+1. Convince the reader that it's a bug in Caddy (if it's not obvious).
2. Reduce the problem down to the minimum specific steps required to reproduce it.
The maintainer is usually able to do the rest; but of course the reporter may invest additional effort to speed up the process.
@@ -123,7 +136,7 @@ The maintainer is usually able to do the rest; but of course the reporter may in
First, [search to see if your feature has already been requested](https://github.com/caddyserver/caddy/issues). If it has, you can add a :+1: reaction to vote for it. If your feature idea is new, open an issue to request the feature. Please describe your idea thoroughly so that we know how to implement it! Really vague requests may not be helpful or actionable and, without clarification, will have to be closed.
-While we really do value your requests and implement many of them, not all features are a good fit for Caddy. Most of those [make good modules](#writing-a-caddy-module), which can be made by anyone! But if a feature is not in the best interest of the Caddy project or its users in general, we may politely decline to implement it into Caddy core.
+While we really do value your requests and implement many of them, not all features are a good fit for Caddy. Most of those [make good modules](#writing-a-caddy-module), which can be made by anyone! But if a feature is not in the best interest of the Caddy project or its users in general, we may politely decline to implement it into Caddy core. Additionally, some features are bad ideas altogether (for either obvious or non-obvious reasons) which may be rejected. We'll try to explain why we reject a feature, but sometimes the best we can do is, "It's not a good fit for the project."
### Improving documentation
@@ -132,11 +145,11 @@ Caddy's documentation is available at [https://caddyserver.com/docs](https://cad
Note that third-party module documentation is not hosted by the Caddy website, other than basic usage examples. They are managed by the individual module authors, and you will have to contact them to change their documentation.
-
+Our documentation is scoped to the Caddy project only: it is not for describing how other software or systems work, even if they relate to Caddy or web servers. That kind of content [can be found in our community wiki](https://caddy.community/c/wiki/13), however.
## Collaborator Instructions
-Collaborators have push rights to the repository. We grant this permission after one or more successful, high-quality PRs are merged! We thank them for their help.The expectations we have of collaborators are:
+Collaborators have push rights to the repository. We grant this permission after one or more successful, high-quality PRs are merged! We thank them for their help. The expectations we have of collaborators are:
- **Help review pull requests.** Be meticulous, but also kind. We love our contributors, but we critique the contribution to make it better. Multiple, thorough reviews make for the best contributions! Here are some questions to consider:
- Can the change be made more elegant?
@@ -167,7 +180,7 @@ Collaborators have push rights to the repository. We grant this permission after
-## Values
+## Values (WIP)
- A person is always more important than code. People don't like being handled "efficiently". But we can still process issues and pull requests efficiently while being kind, patient, and considerate.
diff --git a/.github/SECURITY.md b/.github/SECURITY.md
index e9f016c4..44cc5b7c 100644
--- a/.github/SECURITY.md
+++ b/.github/SECURITY.md
@@ -2,26 +2,58 @@
The Caddy project would like to make sure that it stays on top of all practically-exploitable vulnerabilities.
-Some security problems are more the result of interplay between different components of the Web, rather than a vulnerability in the web server itself. Please report only vulnerabilities in the web server itself, as we cannot coerce the rest of the Web to be fixed (for example, we do not consider IP spoofing or BGP hijacks a vulnerability in the Caddy web server).
-
-Please note that we consider publicly-registered domain names to be public information. This necessary in order to maintain the integrity of certificate transparency, public DNS, and other public trust systems.
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
-| 2.x | :white_check_mark: |
-| 1.x | :white_check_mark: (deprecating soon) |
+| 2.x | ✔️ |
+| 1.x | :x: |
| < 1.x | :x: |
+
+## Acceptable Scope
+
+A security report must demonstrate a security bug in the source code from this repository.
+
+Some security problems are the result of interplay between different components of the Web, rather than a vulnerability in the web server itself. Please only report vulnerabilities in the web server itself, as we cannot coerce the rest of the Web to be fixed (for example, we do not consider IP spoofing, BGP hijacks, or missing/misconfigured HTTP headers a vulnerability in the Caddy web server).
+
+Vulnerabilities caused by misconfigurations are out of scope. Yes, it is entirely possible to craft and use a configuration that is unsafe, just like with every other web server; we recommend against doing that.
+
+We do not accept reports if the steps imply or require a compromised system or third-party software, as we cannot control those. We expect that users secure their own systems and keep all their software patched. For example, if untrusted users are able to upload/write/host arbitrary files in the web root directory, it is NOT a security bug in Caddy if those files get served to clients; however, it _would_ be a valid report if a bug in Caddy's source code unintentionally gave unauthorized users the ability to upload unsafe files or delete files without relying on an unpatched system or piece of software.
+
+Client-side exploits are out of scope. In other words, it is not a bug in Caddy if the web browser does something unsafe, even if the downloaded content was served by Caddy. (Those kinds of exploits can generally be mitigated by proper configuration of HTTP headers.) As a general rule, the content served by Caddy is not considered in scope because content is configurable by the site owner or the associated web application.
+
+Security bugs in code dependencies (including Go's standard library) are out of scope. Instead, if a dependency has patched a relevant security bug, please feel free to open a public issue or pull request to update that dependency in our code.
+
+
## Reporting a Vulnerability
-Please email Matt Holt (the author) directly: Matthew dot Holt at Gmail.
+We get a lot of difficult reports that turn out to be invalid. Clear, obvious reports tend to be the most credible (but are also rare).
-We'll need enough information to verify the bug and make a patch. It will speed things up if you suggest a working patch, such as a code diff, and explain why and how it works. Reports that are not actionable, do not contain enough information, are too pushy/demanding, or are not able to convince us that it is a viable and practical attack on the web server itself may be deferred to a later time or possibly ignored, resources permitting. Priority will be given to credible, responsible reports that are constructive, specific, and actionable. Thank you for understanding.
+First please ensure your report falls within the accepted scope of security bugs (above).
+
+We'll need enough information to verify the bug and make a patch. To speed things up, please include:
+
+- Most minimal possible config (without redactions!)
+- Command(s)
+- Precise HTTP requests (`curl -v` and its output please)
+- Full log output (please enable debug mode)
+- Specific minimal steps to reproduce the issue from scratch
+- A working patch
+
+Please DO NOT use containers, VMs, cloud instances or services, or any other complex infrastructure in your steps. Always prefer `curl -v` instead of web browsers.
+
+We consider publicly-registered domain names to be public information. This is necessary in order to maintain the integrity of certificate transparency, public DNS, and other public trust systems. Do not redact domain names from your reports. The actual content of your domain name affects Caddy's behavior, so we need the exact domain name(s) to reproduce with, or your report will be ignored.
+
+It will speed things up if you suggest a working patch, such as a code diff, and explain why and how it works. Reports that are not actionable, do not contain enough information, are too pushy/demanding, or are not able to convince us that it is a viable and practical attack on the web server itself may be deferred to a later time or possibly ignored, depending on available resources. Priority will be given to credible, responsible reports that are constructive, specific, and actionable. (We get a lot of invalid reports.) Thank you for understanding.
+
+When you are ready, please email Matt Holt (the author) directly: matt at dyanim dot com.
+
+Please don't encrypt the email body. It only makes the process more complicated.
Please also understand that due to our nature as an open source project, we do not have a budget to award security bounties. We can only thank you.
-If your report is valid and a patch is released, we will not reveal your identity by default. If you wish to be credited, please give us the name to use.
+If your report is valid and a patch is released, we will not reveal your identity by default. If you wish to be credited, please give us the name to use and/or your GitHub username. If you don't provide this we can't credit you.
Thanks for responsibly helping Caddy—and thousands of websites—be more secure!
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..64284b90
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,7 @@
+---
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "monthly"
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d8d1cf7b..c16af8db 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,14 +1,16 @@
# Used as inspiration: https://github.com/mvdan/github-actions-golang
-name: Cross-Platform
+name: Tests
on:
push:
- branches:
+ branches:
- master
+ - 2.*
pull_request:
- branches:
+ branches:
- master
+ - 2.*
jobs:
test:
@@ -16,35 +18,53 @@ jobs:
# Default is true, cancels jobs for other platforms in the matrix if one fails
fail-fast: false
matrix:
- os: [ ubuntu-latest, macos-latest, windows-latest ]
- go-version: [ 1.14.x ]
+ os:
+ - linux
+ - mac
+ - windows
+ go:
+ - '1.22'
+ - '1.23'
+
+ include:
+ # Set the minimum Go patch version for the given Go minor
+ # Usable via ${{ matrix.GO_SEMVER }}
+ - go: '1.22'
+ GO_SEMVER: '~1.22.3'
+
+ - go: '1.23'
+ GO_SEMVER: '~1.23.0'
# Set some variables per OS, usable via ${{ matrix.VAR }}
+ # OS_LABEL: the VM label from GitHub Actions (see https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories)
# CADDY_BIN_PATH: the path to the compiled Caddy binary, for artifact publishing
# SUCCESS: the typical value for $? per OS (Windows/pwsh returns 'True')
- include:
- - os: ubuntu-latest
+ - os: linux
+ OS_LABEL: ubuntu-latest
CADDY_BIN_PATH: ./cmd/caddy/caddy
SUCCESS: 0
- - os: macos-latest
+ - os: mac
+ OS_LABEL: macos-14
CADDY_BIN_PATH: ./cmd/caddy/caddy
SUCCESS: 0
- - os: windows-latest
+ - os: windows
+ OS_LABEL: windows-latest
CADDY_BIN_PATH: ./cmd/caddy/caddy.exe
SUCCESS: 'True'
- runs-on: ${{ matrix.os }}
+ runs-on: ${{ matrix.OS_LABEL }}
steps:
- - name: Install Go
- uses: actions/setup-go@v1
- with:
- go-version: ${{ matrix.go-version }}
-
- name: Checkout code
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
+
+ - name: Install Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ matrix.GO_SEMVER }}
+ check-latest: true
# These tools would be useful if we later decide to reinvestigate
# publishing test/coverage reports to some tool for easier consumption
@@ -53,10 +73,11 @@ jobs:
# go get github.com/axw/gocov/gocov
# go get github.com/AlekSi/gocov-xml
# go get -u github.com/jstemmer/go-junit-report
- # echo "::add-path::$(go env GOPATH)/bin"
+ # echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
- name: Print Go version and environment
id: vars
+ shell: bash
run: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
@@ -64,17 +85,9 @@ jobs:
go env
printf "\n\nSystem environment:\n\n"
env
+ printf "Git version: $(git version)\n\n"
# Calculate the short SHA1 hash of the git commit
- echo "::set-output name=short_sha::$(git rev-parse --short HEAD)"
- echo "::set-output name=go_cache::$(go env GOCACHE)"
-
- - name: Cache the build cache
- uses: actions/cache@v1
- with:
- path: ${{ steps.vars.outputs.go_cache }}
- key: ${{ runner.os }}-go-ci-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-ci
+ echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
- name: Get dependencies
run: |
@@ -86,13 +99,20 @@ jobs:
env:
CGO_ENABLED: 0
run: |
- go build -trimpath -ldflags="-w -s" -v
+ go build -tags nobadger -trimpath -ldflags="-w -s" -v
+
+ - name: Smoke test Caddy
+ working-directory: ./cmd/caddy
+ run: |
+ ./caddy start
+ ./caddy stop
- name: Publish Build Artifact
- uses: actions/upload-artifact@v1
+ uses: actions/upload-artifact@v4
with:
- name: caddy_v2_${{ runner.os }}_${{ steps.vars.outputs.short_sha }}
+ name: caddy_${{ runner.os }}_go${{ matrix.go }}_${{ steps.vars.outputs.short_sha }}
path: ${{ matrix.CADDY_BIN_PATH }}
+ compression-level: 0
# Commented bits below were useful to allow the job to continue
# even if the tests fail, so we can publish the report separately
@@ -102,8 +122,8 @@ jobs:
# continue-on-error: true
run: |
# (go test -v -coverprofile=cover-profile.out -race ./... 2>&1) > test-results/test-result.out
- go test -v -coverprofile="cover-profile.out" -short -race ./...
- # echo "::set-output name=status::$?"
+ go test -tags nobadger -v -coverprofile="cover-profile.out" -short -race ./...
+ # echo "status=$?" >> $GITHUB_OUTPUT
# Relevant step if we reinvestigate publishing test/coverage reports
# - name: Prepare coverage reports
@@ -115,21 +135,86 @@ jobs:
# To return the correct result even though we set 'continue-on-error: true'
# - name: Coerce correct build result
- # if: matrix.os != 'windows-latest' && steps.step_test.outputs.status != ${{ matrix.SUCCESS }}
+ # if: matrix.os != 'windows' && steps.step_test.outputs.status != ${{ matrix.SUCCESS }}
# run: |
# echo "step_test ${{ steps.step_test.outputs.status }}\n"
# exit 1
- # From https://github.com/reviewdog/action-golangci-lint
- golangci-lint:
- name: runner / golangci-lint
+ s390x-test:
+ name: test (s390x on IBM Z)
runs-on: ubuntu-latest
+ if: github.event.pull_request.head.repo.full_name == 'caddyserver/caddy' && github.actor != 'dependabot[bot]'
+ continue-on-error: true # August 2020: s390x VM is down due to weather and power issues
steps:
- - name: Checkout code into the Go module directory
- uses: actions/checkout@v2
+ - name: Checkout code
+ uses: actions/checkout@v4
+ - name: Run Tests
+ run: |
+ set +e
+ mkdir -p ~/.ssh && echo -e "${SSH_KEY//_/\\n}" > ~/.ssh/id_ecdsa && chmod og-rwx ~/.ssh/id_ecdsa
- - name: Run golangci-lint
- uses: reviewdog/action-golangci-lint@v1
- # uses: docker://reviewdog/action-golangci-lint:v1 # pre-build docker image
+ # short sha is enough?
+ short_sha=$(git rev-parse --short HEAD)
+
+ # To shorten the following lines
+ ssh_opts="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
+ ssh_host="$CI_USER@ci-s390x.caddyserver.com"
+
+ # The environment is fresh, so there's no point in keeping accepting and adding the key.
+ rsync -arz -e "ssh $ssh_opts" --progress --delete --exclude '.git' . "$ssh_host":/var/tmp/"$short_sha"
+          ssh $ssh_opts -t "$ssh_host" bash <<EOF > /dev/null 2>&1
+            cd /var/tmp/$short_sha
+            go version
+            go env
+            printf "\n\n"
+            retries=3
+            exit_code=0
+            while ((retries > 0)); do
+ CGO_ENABLED=0 go test -p 1 -tags nobadger -v ./...
+ exit_code=$?
+ if ((exit_code == 0)); then
+ break
+ fi
+ echo "\n\nTest failed: \$exit_code, retrying..."
+ ((retries--))
+ done
+ echo "Remote exit code: \$exit_code"
+ exit \$exit_code
+ EOF
+ test_result=$?
+
+ # There's no need leaving the files around
+ ssh $ssh_opts "$ssh_host" "rm -rf /var/tmp/'$short_sha'"
+
+ echo "Test exit code: $test_result"
+ exit $test_result
+ env:
+ SSH_KEY: ${{ secrets.S390X_SSH_KEY }}
+ CI_USER: ${{ secrets.CI_USER }}
+
+ goreleaser-check:
+ runs-on: ubuntu-latest
+ if: github.event.pull_request.head.repo.full_name == 'caddyserver/caddy' && github.actor != 'dependabot[bot]'
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - uses: goreleaser/goreleaser-action@v6
with:
- github_token: ${{ secrets.github_token }}
+ version: latest
+ args: check
+ - name: Install Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: "~1.23"
+ check-latest: true
+ - name: Install xcaddy
+ run: |
+ go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
+ xcaddy version
+ - uses: goreleaser/goreleaser-action@v6
+ with:
+ version: latest
+ args: build --single-target --snapshot
+ env:
+ TAG: ${{ github.head_ref || github.ref_name }}
diff --git a/.github/workflows/cross-build.yml b/.github/workflows/cross-build.yml
new file mode 100644
index 00000000..af039460
--- /dev/null
+++ b/.github/workflows/cross-build.yml
@@ -0,0 +1,73 @@
+name: Cross-Build
+
+on:
+ push:
+ branches:
+ - master
+ - 2.*
+ pull_request:
+ branches:
+ - master
+ - 2.*
+
+jobs:
+ build:
+ strategy:
+ fail-fast: false
+ matrix:
+ goos:
+ - 'aix'
+ - 'linux'
+ - 'solaris'
+ - 'illumos'
+ - 'dragonfly'
+ - 'freebsd'
+ - 'openbsd'
+ - 'windows'
+ - 'darwin'
+ - 'netbsd'
+ go:
+ - '1.22'
+ - '1.23'
+
+ include:
+ # Set the minimum Go patch version for the given Go minor
+ # Usable via ${{ matrix.GO_SEMVER }}
+ - go: '1.22'
+ GO_SEMVER: '~1.22.3'
+
+ - go: '1.23'
+ GO_SEMVER: '~1.23.0'
+
+ runs-on: ubuntu-latest
+ continue-on-error: true
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Install Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ matrix.GO_SEMVER }}
+ check-latest: true
+
+ - name: Print Go version and environment
+ id: vars
+ run: |
+ printf "Using go at: $(which go)\n"
+ printf "Go version: $(go version)\n"
+ printf "\n\nGo environment:\n\n"
+ go env
+ printf "\n\nSystem environment:\n\n"
+ env
+
+ - name: Run Build
+ env:
+ CGO_ENABLED: 0
+ GOOS: ${{ matrix.goos }}
+ GOARCH: ${{ matrix.goos == 'aix' && 'ppc64' || 'amd64' }}
+ shell: bash
+ continue-on-error: true
+ working-directory: ./cmd/caddy
+ run: |
+ GOOS=$GOOS GOARCH=$GOARCH go build -tags=nobadger,nomysql,nopgx -trimpath -o caddy-"$GOOS"-$GOARCH 2> /dev/null
diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml
deleted file mode 100644
index c3c9657f..00000000
--- a/.github/workflows/fuzzing.yml
+++ /dev/null
@@ -1,73 +0,0 @@
-name: Fuzzing
-
-on:
- # Daily midnight fuzzing
- schedule:
- - cron: '0 0 * * *'
-
-jobs:
- fuzzing:
- name: Fuzzing
-
- strategy:
- matrix:
- os: [ ubuntu-latest ]
- go-version: [ 1.14.x ]
- runs-on: ${{ matrix.os }}
-
- steps:
- - name: Install Go
- uses: actions/setup-go@v1
- with:
- go-version: ${{ matrix.go-version }}
-
- - name: Checkout code
- uses: actions/checkout@v2
-
- - name: Download go-fuzz tools and the Fuzzit CLI, move Fuzzit CLI to GOBIN
- # If we decide we need to prevent this from running on forks, we can use this line:
- # if: github.repository == 'caddyserver/caddy'
- run: |
-
- go get -v github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
- wget -q -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.77/fuzzit_Linux_x86_64
- chmod a+x fuzzit
- mv fuzzit $(go env GOPATH)/bin
- echo "::add-path::$(go env GOPATH)/bin"
-
- - name: Generate fuzzers & submit them to Fuzzit
- continue-on-error: true
- env:
- FUZZIT_API_KEY: ${{ secrets.FUZZIT_API_KEY }}
- SYSTEM_PULLREQUEST_SOURCEBRANCH: ${{ github.ref }}
- BUILD_SOURCEVERSION: ${{ github.sha }}
- run: |
- # debug
- echo "PR Source Branch: $SYSTEM_PULLREQUEST_SOURCEBRANCH"
- echo "Source version: $BUILD_SOURCEVERSION"
-
- declare -A fuzzers_funcs=(\
- ["./caddyconfig/httpcaddyfile/addresses_fuzz.go"]="FuzzParseAddress" \
- ["./listeners_fuzz.go"]="FuzzParseNetworkAddress" \
- ["./replacer_fuzz.go"]="FuzzReplacer" \
- )
-
- declare -A fuzzers_targets=(\
- ["./caddyconfig/httpcaddyfile/addresses_fuzz.go"]="parse-address" \
- ["./listeners_fuzz.go"]="parse-network-address" \
- ["./replacer_fuzz.go"]="replacer" \
- )
-
- fuzz_type="fuzzing"
-
- for f in $(find . -name \*_fuzz.go); do
- FUZZER_DIRECTORY=$(dirname "$f")
-
- echo "go-fuzz-build func ${fuzzers_funcs[$f]} residing in $f"
-
- go-fuzz-build -func "${fuzzers_funcs[$f]}" -o "$FUZZER_DIRECTORY/${fuzzers_targets[$f]}.zip" "$FUZZER_DIRECTORY"
-
- fuzzit create job --engine go-fuzz caddyserver/"${fuzzers_targets[$f]}" "$FUZZER_DIRECTORY"/"${fuzzers_targets[$f]}.zip" --api-key "${FUZZIT_API_KEY}" --type "${fuzz_type}" --branch "${SYSTEM_PULLREQUEST_SOURCEBRANCH}" --revision "${BUILD_SOURCEVERSION}"
-
- echo "Completed $f"
- done
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 00000000..22e13973
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,67 @@
+name: Lint
+
+on:
+ push:
+ branches:
+ - master
+ - 2.*
+ pull_request:
+ branches:
+ - master
+ - 2.*
+
+permissions:
+ contents: read
+
+jobs:
+ # From https://github.com/golangci/golangci-lint-action
+ golangci:
+ permissions:
+ contents: read # for actions/checkout to fetch code
+ pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
+ name: lint
+ strategy:
+ matrix:
+ os:
+ - linux
+ - mac
+ - windows
+
+ include:
+ - os: linux
+ OS_LABEL: ubuntu-latest
+
+ - os: mac
+ OS_LABEL: macos-14
+
+ - os: windows
+ OS_LABEL: windows-latest
+
+ runs-on: ${{ matrix.OS_LABEL }}
+
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v5
+ with:
+ go-version: '~1.23'
+ check-latest: true
+
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v6
+ with:
+ version: latest
+
+ # Windows times out frequently after about 5m50s if we don't set a longer timeout.
+ args: --timeout 10m
+
+ # Optional: show only new issues if it's a pull request. The default value is `false`.
+ # only-new-issues: true
+
+ govulncheck:
+ runs-on: ubuntu-latest
+ steps:
+ - name: govulncheck
+ uses: golang/govulncheck-action@v1
+ with:
+ go-version-input: '~1.23.0'
+ check-latest: true
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index a58a793e..d788ca36 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -10,22 +10,48 @@ jobs:
name: Release
strategy:
matrix:
- os: [ ubuntu-latest ]
- go-version: [ 1.14.x ]
+ os:
+ - ubuntu-latest
+ go:
+ - '1.23'
+
+ include:
+ # Set the minimum Go patch version for the given Go minor
+ # Usable via ${{ matrix.GO_SEMVER }}
+ - go: '1.23'
+ GO_SEMVER: '~1.23.0'
+
runs-on: ${{ matrix.os }}
+ # https://github.com/sigstore/cosign/issues/1258#issuecomment-1002251233
+ # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings
+ permissions:
+ id-token: write
+ # https://docs.github.com/en/rest/overview/permissions-required-for-github-apps#permission-on-contents
+ # "Releases" is part of `contents`, so it needs the `write`
+ contents: write
steps:
- - name: Install Go
- uses: actions/setup-go@v1
- with:
- go-version: ${{ matrix.go-version }}
-
- name: Checkout code
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
- # So GoReleaser can generate the changelog properly
- - name: Unshallowify the repo clone
- run: git fetch --prune --unshallow
+ - name: Install Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ matrix.GO_SEMVER }}
+ check-latest: true
+
+ # Force fetch upstream tags -- because 65 minutes
+ # tl;dr: actions/checkout@v4 runs this line:
+ # git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +ebc278ec98bb24f2852b61fde2a9bf2e3d83818b:refs/tags/
+ # which makes its own local lightweight tag, losing all the annotations in the process. Our earlier script ran:
+ # git fetch --prune --unshallow
+ # which doesn't overwrite that tag because that would be destructive.
+ # Credit to @francislavoie for the investigation.
+ # https://github.com/actions/checkout/issues/290#issuecomment-680260080
+ - name: Force fetch upstream tags
+ run: git fetch --tags --force
# https://github.community/t5/GitHub-Actions/How-to-get-just-the-tag-name/m-p/32167/highlight/true#M1027
- name: Print Go version and environment
@@ -37,32 +63,116 @@ jobs:
go env
printf "\n\nSystem environment:\n\n"
env
- echo "::set-output name=version_tag::${GITHUB_REF/refs\/tags\//}"
- echo "::set-output name=short_sha::$(git rev-parse --short HEAD)"
- echo "::set-output name=go_cache::$(go env GOCACHE)"
+ echo "version_tag=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
+ echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
- - name: Cache the build cache
- uses: actions/cache@v1
- with:
- path: ${{ steps.vars.outputs.go_cache }}
- key: ${{ runner.os }}-go-release-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-release
+ # Add "pip install" CLI tools to PATH
+ echo ~/.local/bin >> $GITHUB_PATH
+ # Parse semver
+ TAG=${GITHUB_REF/refs\/tags\//}
+ SEMVER_RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z\.-]*\)'
+ TAG_MAJOR=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\1#"`
+ TAG_MINOR=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\2#"`
+ TAG_PATCH=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\3#"`
+ TAG_SPECIAL=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\4#"`
+ echo "tag_major=${TAG_MAJOR}" >> $GITHUB_OUTPUT
+ echo "tag_minor=${TAG_MINOR}" >> $GITHUB_OUTPUT
+ echo "tag_patch=${TAG_PATCH}" >> $GITHUB_OUTPUT
+ echo "tag_special=${TAG_SPECIAL}" >> $GITHUB_OUTPUT
+
+ # Cloudsmith CLI tooling for pushing releases
+ # See https://help.cloudsmith.io/docs/cli
+ - name: Install Cloudsmith CLI
+ run: pip install --upgrade cloudsmith-cli
+
+ - name: Validate commits and tag signatures
+ run: |
+
+ # Import Matt Holt's key
+ curl 'https://github.com/mholt.gpg' | gpg --import
+
+ echo "Verifying the tag: ${{ steps.vars.outputs.version_tag }}"
+ # tags are only accepted if signed by Matt's key
+ git verify-tag "${{ steps.vars.outputs.version_tag }}" || exit 1
+
+ - name: Install Cosign
+ uses: sigstore/cosign-installer@main
+ - name: Cosign version
+ run: cosign version
+ - name: Install Syft
+ uses: anchore/sbom-action/download-syft@main
+ - name: Syft version
+ run: syft version
+ - name: Install xcaddy
+ run: |
+ go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
+ xcaddy version
# GoReleaser will take care of publishing those artifacts into the release
- name: Run GoReleaser
- uses: goreleaser/goreleaser-action@v1
+ uses: goreleaser/goreleaser-action@v6
with:
version: latest
- args: release --rm-dist
+ args: release --clean --timeout 60m
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
TAG: ${{ steps.vars.outputs.version_tag }}
+ COSIGN_EXPERIMENTAL: 1
+ # Only publish on non-special tags (e.g. non-beta)
+ # We will continue to push to Gemfury for the foreseeable future, although
+ # Cloudsmith is probably better, to not break things for existing users of Gemfury.
+ # See https://gemfury.com/caddy/deb:caddy
- name: Publish .deb to Gemfury
+ if: ${{ steps.vars.outputs.tag_special == '' }}
env:
GEMFURY_PUSH_TOKEN: ${{ secrets.GEMFURY_PUSH_TOKEN }}
run: |
for filename in dist/*.deb; do
+ # armv6 and armv7 are both "armhf" so we can skip the duplicate
+ if [[ "$filename" == *"armv6"* ]]; then
+ echo "Skipping $filename"
+ continue
+ fi
+
curl -F package=@"$filename" https://${GEMFURY_PUSH_TOKEN}:@push.fury.io/caddy/
- done
\ No newline at end of file
+ done
+
+ # Publish only special tags (unstable/beta/rc) to the "testing" repo
+ # See https://cloudsmith.io/~caddy/repos/testing/
+ - name: Publish .deb to Cloudsmith (special tags)
+ if: ${{ steps.vars.outputs.tag_special != '' }}
+ env:
+ CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }}
+ run: |
+ for filename in dist/*.deb; do
+ # armv6 and armv7 are both "armhf" so we can skip the duplicate
+ if [[ "$filename" == *"armv6"* ]]; then
+ echo "Skipping $filename"
+ continue
+ fi
+
+ echo "Pushing $filename to 'testing'"
+ cloudsmith push deb caddy/testing/any-distro/any-version $filename
+ done
+
+ # Publish stable tags to Cloudsmith to both repos, "stable" and "testing"
+ # See https://cloudsmith.io/~caddy/repos/stable/
+ - name: Publish .deb to Cloudsmith (stable tags)
+ if: ${{ steps.vars.outputs.tag_special == '' }}
+ env:
+ CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }}
+ run: |
+ for filename in dist/*.deb; do
+ # armv6 and armv7 are both "armhf" so we can skip the duplicate
+ if [[ "$filename" == *"armv6"* ]]; then
+ echo "Skipping $filename"
+ continue
+ fi
+
+ echo "Pushing $filename to 'stable'"
+ cloudsmith push deb caddy/stable/any-distro/any-version $filename
+
+ echo "Pushing $filename to 'testing'"
+ cloudsmith push deb caddy/testing/any-distro/any-version $filename
+ done
diff --git a/.github/workflows/release_published.yml b/.github/workflows/release_published.yml
new file mode 100644
index 00000000..491dae75
--- /dev/null
+++ b/.github/workflows/release_published.yml
@@ -0,0 +1,35 @@
+name: Release Published
+
+# Event payload: https://developer.github.com/webhooks/event-payloads/#release
+on:
+ release:
+ types: [published]
+
+jobs:
+ release:
+ name: Release Published
+ strategy:
+ matrix:
+ os:
+ - ubuntu-latest
+ runs-on: ${{ matrix.os }}
+
+ steps:
+
+ # See https://github.com/peter-evans/repository-dispatch
+ - name: Trigger event on caddyserver/dist
+ uses: peter-evans/repository-dispatch@v3
+ with:
+ token: ${{ secrets.REPO_DISPATCH_TOKEN }}
+ repository: caddyserver/dist
+ event-type: release-tagged
+ client-payload: '{"tag": "${{ github.event.release.tag_name }}"}'
+
+ - name: Trigger event on caddyserver/caddy-docker
+ uses: peter-evans/repository-dispatch@v3
+ with:
+ token: ${{ secrets.REPO_DISPATCH_TOKEN }}
+ repository: caddyserver/caddy-docker
+ event-type: release-tagged
+ client-payload: '{"tag": "${{ github.event.release.tag_name }}"}'
+
diff --git a/.gitignore b/.gitignore
index d8128f54..381bf740 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,15 +1,19 @@
_gitignore/
*.log
Caddyfile
+Caddyfile.*
!caddyfile/
+!caddyfile.go
# artifacts from pprof tooling
*.prof
*.test
-# build artifacts
+# build artifacts and helpers
cmd/caddy/caddy
cmd/caddy/caddy.exe
+cmd/caddy/tmp/*.exe
+cmd/caddy/.env
# mac specific
.DS_Store
@@ -20,4 +24,8 @@ vendor
# goreleaser artifacts
dist
caddy-build
-caddy-dist
\ No newline at end of file
+caddy-dist
+
+# IDE files
+.idea/
+.vscode/
diff --git a/.golangci.yml b/.golangci.yml
index f6d83227..aecff563 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,51 +1,182 @@
linters-settings:
errcheck:
- ignore: fmt:.*,io/ioutil:^Read.*,github.com/caddyserver/caddy/v2/caddyconfig:RegisterAdapter,github.com/caddyserver/caddy/v2:RegisterModule
- ignoretests: true
- misspell:
- locale: US
+ exclude-functions:
+ - fmt.*
+ - (go.uber.org/zap/zapcore.ObjectEncoder).AddObject
+ - (go.uber.org/zap/zapcore.ObjectEncoder).AddArray
+ gci:
+ sections:
+ - standard # Standard section: captures all standard packages.
+ - default # Default section: contains all imports that could not be matched to another section type.
+ - prefix(github.com/caddyserver/caddy/v2/cmd) # ensure that this is always at the top and always has a line break.
+ - prefix(github.com/caddyserver/caddy) # Custom section: groups all imports with the specified Prefix.
+ # Skip generated files.
+ # Default: true
+ skip-generated: true
+ # Enable custom order of sections.
+ # If `true`, make the section order the same as the order of `sections`.
+ # Default: false
+ custom-order: true
+ exhaustive:
+ ignore-enum-types: reflect.Kind|svc.Cmd
linters:
+ disable-all: true
enable:
+ - asasalint
+ - asciicheck
+ - bidichk
- bodyclose
- - prealloc
- - unconvert
+ - decorder
+ - dogsled
+ - dupl
+ - dupword
+ - durationcheck
- errcheck
+ - errname
+ - exhaustive
+ - gci
- gofmt
- goimports
+ - gofumpt
- gosec
+ - gosimple
+ - govet
- ineffassign
+ - importas
- misspell
+ - prealloc
+ - promlinter
+ - sloglint
+ - sqlclosecheck
+ - staticcheck
+ - tenv
+ - testableexamples
+ - testifylint
+ - tparallel
+ - typecheck
+ - unconvert
+ - unused
+ - wastedassign
+ - whitespace
+ - zerologlint
+ # these are implicitly disabled:
+ # - containedctx
+ # - contextcheck
+ # - cyclop
+ # - depguard
+ # - errchkjson
+ # - errorlint
+ # - exhaustruct
+ # - execinquery
+ # - exhaustruct
+ # - forbidigo
+ # - forcetypeassert
+ # - funlen
+ # - ginkgolinter
+ # - gocheckcompilerdirectives
+ # - gochecknoglobals
+ # - gochecknoinits
+ # - gochecksumtype
+ # - gocognit
+ # - goconst
+ # - gocritic
+ # - gocyclo
+ # - godot
+ # - godox
+ # - goerr113
+ # - goheader
+ # - gomnd
+ # - gomoddirectives
+ # - gomodguard
+ # - goprintffuncname
+ # - gosmopolitan
+ # - grouper
+ # - inamedparam
+ # - interfacebloat
+ # - ireturn
+ # - lll
+ # - loggercheck
+ # - maintidx
+ # - makezero
+ # - mirror
+ # - musttag
+ # - nakedret
+ # - nestif
+ # - nilerr
+ # - nilnil
+ # - nlreturn
+ # - noctx
+ # - nolintlint
+ # - nonamedreturns
+ # - nosprintfhostport
+ # - paralleltest
+ # - perfsprint
+ # - predeclared
+ # - protogetter
+ # - reassign
+ # - revive
+ # - rowserrcheck
+ # - stylecheck
+ # - tagalign
+ # - tagliatelle
+ # - testpackage
+ # - thelper
+ # - unparam
+ # - usestdlibvars
+ # - varnamelen
+ # - wrapcheck
+ # - wsl
run:
# default concurrency is a available CPU number.
# concurrency: 4 # explicitly omit this value to fully utilize available resources.
- deadline: 5m
+ timeout: 5m
issues-exit-code: 1
tests: false
# output configuration options
output:
- format: 'colored-line-number'
+ formats:
+ - format: 'colored-line-number'
print-issued-lines: true
print-linter-name: true
issues:
exclude-rules:
+ - text: 'G115' # TODO: Either we should fix the issues or nuke the linter if it's bad
+ linters:
+ - gosec
# we aren't calling unknown URL
- - text: "G107" # G107: Url provided to HTTP request as taint input
+ - text: 'G107' # G107: Url provided to HTTP request as taint input
linters:
- gosec
# as a web server that's expected to handle any template, this is totally in the hands of the user.
- - text: "G203" # G203: Use of unescaped data in HTML templates
+ - text: 'G203' # G203: Use of unescaped data in HTML templates
linters:
- gosec
# we're shelling out to known commands, not relying on user-defined input.
- - text: "G204" # G204: Audit use of command execution
+ - text: 'G204' # G204: Audit use of command execution
linters:
- gosec
# the choice of weakrand is deliberate, hence the named import "weakrand"
- path: modules/caddyhttp/reverseproxy/selectionpolicies.go
- text: "G404" # G404: Insecure random number source (rand)
+ text: 'G404' # G404: Insecure random number source (rand)
linters:
- gosec
+ - path: modules/caddyhttp/reverseproxy/streaming.go
+ text: 'G404' # G404: Insecure random number source (rand)
+ linters:
+ - gosec
+ - path: modules/logging/filters.go
+ linters:
+ - dupl
+ - path: modules/caddyhttp/matchers.go
+ linters:
+ - dupl
+ - path: modules/caddyhttp/vars.go
+ linters:
+ - dupl
+ - path: _test\.go
+ linters:
+ - errcheck
diff --git a/.goreleaser.yml b/.goreleaser.yml
index c1e162f4..005fdbaf 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -1,20 +1,39 @@
+version: 2
+
before:
hooks:
+ # The build is done in this particular way to build Caddy in a designated directory named in .gitignore.
+ # This is so we can run goreleaser on tag without Git complaining of being dirty. The main.go in cmd/caddy directory
+ # cannot be built within that directory due to changes necessary for the build causing Git to be dirty, which
+  # subsequently causes goreleaser to refuse to run.
+ - rm -rf caddy-build caddy-dist vendor
+ # vendor Caddy deps
+ - go mod vendor
- mkdir -p caddy-build
- cp cmd/caddy/main.go caddy-build/main.go
- - cp ./go.mod caddy-build/go.mod
- - sed -i.bkp 's|github.com/caddyserver/caddy/v2|caddy|g' ./caddy-build/go.mod
+ - /bin/sh -c 'cd ./caddy-build && go mod init caddy'
+ # prepare syso files for windows embedding
+ - /bin/sh -c 'for a in amd64 arm arm64; do XCADDY_SKIP_BUILD=1 GOOS=windows GOARCH=$a xcaddy build {{.Env.TAG}}; done'
+ - /bin/sh -c 'mv /tmp/buildenv_*/*.syso caddy-build'
# GoReleaser doesn't seem to offer {{.Tag}} at this stage, so we have to embed it into the env
# so we run: TAG=$(git describe --abbrev=0) goreleaser release --rm-dist --skip-publish --skip-validate
- go mod edit -require=github.com/caddyserver/caddy/v2@{{.Env.TAG}} ./caddy-build/go.mod
+ # as of Go 1.16, `go` commands no longer automatically change go.{mod,sum}. We now have to explicitly
+ # run `go mod tidy`. The `/bin/sh -c '...'` is because goreleaser can't find cd in PATH without shell invocation.
+ - /bin/sh -c 'cd ./caddy-build && go mod tidy'
+ # vendor the deps of the prepared to-build module
+ - /bin/sh -c 'cd ./caddy-build && go mod vendor'
- git clone --depth 1 https://github.com/caddyserver/dist caddy-dist
+ - mkdir -p caddy-dist/man
- go mod download
+ - go run cmd/caddy/main.go manpage --directory ./caddy-dist/man
+ - gzip -r ./caddy-dist/man/
+ - /bin/sh -c 'go run cmd/caddy/main.go completion bash > ./caddy-dist/scripts/bash-completion'
builds:
- env:
- CGO_ENABLED=0
- GO111MODULE=on
- main: main.go
dir: ./caddy-build
binary: caddy
goos:
@@ -26,23 +45,109 @@ builds:
- amd64
- arm
- arm64
+ - s390x
+ - ppc64le
+ - riscv64
goarm:
- - 6
- - 7
+ - "5"
+ - "6"
+ - "7"
ignore:
- goos: darwin
goarch: arm
+ - goos: darwin
+ goarch: ppc64le
+ - goos: darwin
+ goarch: s390x
+ - goos: darwin
+ goarch: riscv64
+ - goos: windows
+ goarch: ppc64le
+ - goos: windows
+ goarch: s390x
+ - goos: windows
+ goarch: riscv64
+ - goos: freebsd
+ goarch: ppc64le
+ - goos: freebsd
+ goarch: s390x
+ - goos: freebsd
+ goarch: riscv64
+ - goos: freebsd
+ goarch: arm
+ goarm: "5"
flags:
- -trimpath
+ - -mod=readonly
ldflags:
- -s -w
+ tags:
+ - nobadger
+ - nomysql
+ - nopgx
+
+signs:
+ - cmd: cosign
+ signature: "${artifact}.sig"
+ certificate: '{{ trimsuffix (trimsuffix .Env.artifact ".zip") ".tar.gz" }}.pem'
+ args: ["sign-blob", "--yes", "--output-signature=${signature}", "--output-certificate", "${certificate}", "${artifact}"]
+ artifacts: all
+
+sboms:
+ - artifacts: binary
+ documents:
+ - >-
+ {{ .ProjectName }}_
+ {{- .Version }}_
+ {{- if eq .Os "darwin" }}mac{{ else }}{{ .Os }}{{ end }}_
+ {{- .Arch }}
+ {{- with .Arm }}v{{ . }}{{ end }}
+ {{- with .Mips }}_{{ . }}{{ end }}
+ {{- if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}.sbom
+ cmd: syft
+ args: ["$artifact", "--file", "${document}", "--output", "cyclonedx-json"]
archives:
- - format_overrides:
+ - id: default
+ format_overrides:
- goos: windows
format: zip
- replacements:
- darwin: mac
+ name_template: >-
+ {{ .ProjectName }}_
+ {{- .Version }}_
+ {{- if eq .Os "darwin" }}mac{{ else }}{{ .Os }}{{ end }}_
+ {{- .Arch }}
+ {{- with .Arm }}v{{ . }}{{ end }}
+ {{- with .Mips }}_{{ . }}{{ end }}
+ {{- if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}
+
+ # package the 'caddy-build' directory into a tarball,
+ # allowing users to build the exact same set of files as ours.
+ - id: source
+ meta: true
+ name_template: "{{ .ProjectName }}_{{ .Version }}_buildable-artifact"
+ files:
+ - src: LICENSE
+ dst: ./LICENSE
+ - src: README.md
+ dst: ./README.md
+ - src: AUTHORS
+ dst: ./AUTHORS
+ - src: ./caddy-build
+ dst: ./
+
+source:
+ enabled: true
+ name_template: '{{ .ProjectName }}_{{ .Version }}_src'
+ format: 'tar.gz'
+
+ # Additional files/template/globs you want to add to the source archive.
+ #
+ # Default: empty.
+ files:
+ - vendor
+
+
checksum:
algorithm: sha512
@@ -50,11 +155,11 @@ nfpms:
- id: default
package_name: caddy
- vendor: Light Code Labs
+ vendor: Dyanim
homepage: https://caddyserver.com
maintainer: Matthew Holt
description: |
- Powerful, enterprise-ready, open source web server with automatic HTTPS written in Go
+ Caddy - Powerful, enterprise-ready, open source web server with automatic HTTPS written in Go
license: Apache 2.0
formats:
@@ -62,18 +167,33 @@ nfpms:
# - rpm
bindir: /usr/bin
- files:
- ./caddy-dist/init/caddy.service: /lib/systemd/system/caddy.service
- ./caddy-dist/init/caddy-api.service: /lib/systemd/system/caddy-api.service
- ./caddy-dist/welcome/index.html: /usr/share/caddy/index.html
- config_files:
- ./caddy-dist/config/Caddyfile: /etc/caddy/Caddyfile
+ contents:
+ - src: ./caddy-dist/init/caddy.service
+ dst: /lib/systemd/system/caddy.service
+
+ - src: ./caddy-dist/init/caddy-api.service
+ dst: /lib/systemd/system/caddy-api.service
+
+ - src: ./caddy-dist/welcome/index.html
+ dst: /usr/share/caddy/index.html
+
+ - src: ./caddy-dist/scripts/bash-completion
+ dst: /etc/bash_completion.d/caddy
+
+ - src: ./caddy-dist/config/Caddyfile
+ dst: /etc/caddy/Caddyfile
+ type: config
+
+ - src: ./caddy-dist/man/*
+ dst: /usr/share/man/man8/
scripts:
postinstall: ./caddy-dist/scripts/postinstall.sh
preremove: ./caddy-dist/scripts/preremove.sh
postremove: ./caddy-dist/scripts/postremove.sh
+ provides:
+ - httpd
release:
github:
@@ -89,5 +209,6 @@ changelog:
- '^chore:'
- '^ci:'
- '^docs?:'
+ - '^readme:'
- '^tests?:'
- '^\w+\s+' # a hack to remove commit messages without colons thus don't correspond to a package
diff --git a/README.md b/README.md
index 8be07eff..abc136f6 100644
--- a/README.md
+++ b/README.md
@@ -1,21 +1,31 @@
-
+
+
+
+
+
+
+
+
+
a project
+
Every site on HTTPS
Caddy is an extensible server platform that uses TLS by default.
-
-
-
+
+
+
+
- Download ·
+ Releases ·
Documentation ·
- Community
+ Get Help
@@ -23,10 +33,11 @@
### Menu
- [Features](#features)
+- [Install](#install)
- [Build from source](#build-from-source)
- [For development](#for-development)
- [With version information and/or plugins](#with-version-information-andor-plugins)
-- [Getting started](#getting-started)
+- [Quick start](#quick-start)
- [Overview](#overview)
- [Full documentation](#full-documentation)
- [Getting help](#getting-help)
@@ -35,53 +46,81 @@
Powered by
-
+
+
+
+
+
+
+
-## Features
+## [Features](https://caddyserver.com/features)
- **Easy configuration** with the [Caddyfile](https://caddyserver.com/docs/caddyfile)
- **Powerful configuration** with its [native JSON config](https://caddyserver.com/docs/json/)
- **Dynamic configuration** with the [JSON API](https://caddyserver.com/docs/api)
- [**Config adapters**](https://caddyserver.com/docs/config-adapters) if you don't like JSON
- **Automatic HTTPS** by default
- - [Let's Encrypt](https://letsencrypt.org) for public sites
+ - [ZeroSSL](https://zerossl.com) and [Let's Encrypt](https://letsencrypt.org) for public names
- Fully-managed local CA for internal names & IPs
- Can coordinate with other Caddy instances in a cluster
+ - Multi-issuer fallback
- **Stays up when other servers go down** due to TLS/OCSP/certificate-related issues
-- **HTTP/1.1, HTTP/2, and experimental HTTP/3** support
+- **Production-ready** after serving trillions of requests and managing millions of TLS certificates
+- **Scales to hundreds of thousands of sites** as proven in production
+- **HTTP/1.1, HTTP/2, and HTTP/3** all supported by default
- **Highly extensible** [modular architecture](https://caddyserver.com/docs/architecture) lets Caddy do anything without bloat
- **Runs anywhere** with **no external dependencies** (not even libc)
- Written in Go, a language with higher **memory safety guarantees** than other servers
- Actually **fun to use**
-- So, so much more to discover
+- So much more to [discover](https://caddyserver.com/features)
+## Install
+The simplest, cross-platform way to get started is to download Caddy from [GitHub Releases](https://github.com/caddyserver/caddy/releases) and place the executable file in your PATH.
+
+See [our online documentation](https://caddyserver.com/docs/install) for other install instructions.
## Build from source
Requirements:
-- [Go 1.14 or newer](https://golang.org/dl/)
-- Do NOT disable [Go modules](https://github.com/golang/go/wiki/Modules) (`export GO111MODULE=on`)
+- [Go 1.22.3 or newer](https://golang.org/dl/)
### For development
-
+
+_**Note:** These steps [will not embed proper version information](https://github.com/golang/go/issues/29228). For that, please follow the instructions in the next section._
+
```bash
$ git clone "https://github.com/caddyserver/caddy.git"
$ cd caddy/cmd/caddy/
$ go build
```
-_**Note:** These steps [will not embed proper version information](https://github.com/golang/go/issues/29228). For that, please follow the instructions below._
+When you run Caddy, it may try to bind to low ports unless otherwise specified in your config. If your OS requires elevated privileges for this, you will need to give your new binary permission to do so. On Linux, this can be done easily with: `sudo setcap cap_net_bind_service=+ep ./caddy`
+
+If you prefer to use `go run` which only creates temporary binaries, you can still do this with the included `setcap.sh` like so:
+
+```bash
+$ go run -exec ./setcap.sh main.go
+```
+
+If you don't want to type your password for `setcap`, use `sudo visudo` to edit your sudoers file and allow your user account to run that command without a password, for example:
+
+```
+username ALL=(ALL:ALL) NOPASSWD: /usr/sbin/setcap
+```
+
+replacing `username` with your actual username. Please be careful and only do this if you know what you are doing! We are only qualified to document how to use Caddy, not Go tooling or your computer, and we are providing these instructions for convenience only; please learn how to use your own computer at your own risk and make any needful adjustments.
### With version information and/or plugins
-Using [our builder tool](https://github.com/caddyserver/xcaddy)...
+Using [our builder tool, `xcaddy`](https://github.com/caddyserver/xcaddy)...
```
-$ xcaddy build
+$ xcaddy build
```
...the following steps are automated:
@@ -90,8 +129,9 @@ $ xcaddy build
2. Change into it: `cd caddy`
3. Copy [Caddy's main.go](https://github.com/caddyserver/caddy/blob/master/cmd/caddy/main.go) into the empty folder. Add imports for any custom plugins you want to add.
4. Initialize a Go module: `go mod init caddy`
-5. Pin Caddy version: `go get github.com/caddyserver/caddy/v2@TAG` replacing `TAG` with a git tag or commit. You can also pin any plugin versions similarly.
-6. Compile: `go build`
+5. (Optional) Pin Caddy version: `go get github.com/caddyserver/caddy/v2@version` replacing `version` with a git tag, commit, or branch name.
+6. (Optional) Add plugins by adding their import: `_ "import/path/here"`
+7. Compile: `go build -tags=nobadger,nomysql,nopgx`
@@ -100,7 +140,7 @@ $ xcaddy build
The [Caddy website](https://caddyserver.com/docs/) has documentation that includes tutorials, quick-start guides, reference, and more.
-**We recommend that all users do our [Getting Started](https://caddyserver.com/docs/getting-started) guide to become familiar with using Caddy.**
+**We recommend that all users -- regardless of experience level -- do our [Getting Started](https://caddyserver.com/docs/getting-started) guide to become familiar with using Caddy.**
If you've only got a minute, [the website has several quick-start tutorials](https://caddyserver.com/docs/quick-starts) to choose from! However, after finishing a quick-start tutorial, please read more documentation to understand how the software works. 🙂
@@ -119,7 +159,7 @@ The primary way to configure Caddy is through [its API](https://caddyserver.com/
Caddy exposes an unprecedented level of control compared to any web server in existence. In Caddy, you are usually setting the actual values of the initialized types in memory that power everything from your HTTP handlers and TLS handshakes to your storage medium. Caddy is also ridiculously extensible, with a powerful plugin system that makes vast improvements over other web servers.
-To wield the power of this design, you need to know how the config document is structured. Please see the [our documentation site](https://caddyserver.com/docs/) for details about [Caddy's config structure](https://caddyserver.com/docs/json/).
+To wield the power of this design, you need to know how the config document is structured. Please see [our documentation site](https://caddyserver.com/docs/) for details about [Caddy's config structure](https://caddyserver.com/docs/json/).
Nearly all of Caddy's configuration is contained in a single config document, rather than being scattered across CLI flags and env variables and a configuration file as with other web servers. This makes managing your server config more straightforward and reduces hidden variables/factors.
@@ -136,7 +176,9 @@ The docs are also open source. You can contribute to them here: https://github.c
## Getting help
-- We **strongly recommend** that all professionals or companies using Caddy get a support contract through [Ardan Labs](https://www.ardanlabs.com/my/contact-us?dd=caddy) before help is needed.
+- We advise companies using Caddy to secure a support contract through [Ardan Labs](https://www.ardanlabs.com/my/contact-us?dd=caddy) before help is needed.
+
+- A [sponsorship](https://github.com/sponsors/mholt) goes a long way! We can offer private help to sponsors. If Caddy is benefitting your company, please consider a sponsorship. This not only helps fund full-time work to ensure the longevity of the project, it provides your company the resources, support, and discounts you need; along with being a great look for your company to your customers and potential customers!
- Individuals can exchange help for free on our community forum at https://caddy.community. Remember that people give help out of their spare time and good will. The best way to get help is to give it first!
@@ -146,7 +188,13 @@ Please use our [issue tracker](https://github.com/caddyserver/caddy/issues) only
## About
-**The name "Caddy" is trademarked.** The name of the software is "Caddy", not "Caddy Server" or "CaddyServer". Please call it "Caddy" or, if you wish to clarify, "the Caddy web server". Caddy is a registered trademark of Light Code Labs, LLC.
+Matthew Holt began developing Caddy in 2014 while studying computer science at Brigham Young University. (The name "Caddy" was chosen because this software helps with the tedious, mundane tasks of serving the Web, and is also a single place for multiple things to be organized together.) It soon became the first web server to use HTTPS automatically and by default, and now has hundreds of contributors and has served trillions of HTTPS requests.
+
+**The name "Caddy" is trademarked.** The name of the software is "Caddy", not "Caddy Server" or "CaddyServer". Please call it "Caddy" or, if you wish to clarify, "the Caddy web server". Caddy is a registered trademark of Stack Holdings GmbH.
- _Project on Twitter: [@caddyserver](https://twitter.com/caddyserver)_
- _Author on Twitter: [@mholt6](https://twitter.com/mholt6)_
+
+Caddy is a project of [ZeroSSL](https://zerossl.com), a Stack Holdings company.
+
+Debian package repository hosting is graciously provided by [Cloudsmith](https://cloudsmith.com). Cloudsmith is the only fully hosted, cloud-native, universal package management solution that enables your organization to create, store and share packages in any format, to any place, with total confidence.
diff --git a/admin.go b/admin.go
index 6831686a..89fce1d2 100644
--- a/admin.go
+++ b/admin.go
@@ -17,9 +17,15 @@ package caddy
import (
"bytes"
"context"
+ "crypto"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
"encoding/json"
+ "errors"
"expvar"
"fmt"
+ "hash"
"io"
"net"
"net/http"
@@ -28,15 +34,29 @@ import (
"os"
"path"
"regexp"
+ "slices"
"strconv"
"strings"
"sync"
"time"
+ "github.com/caddyserver/certmagic"
+ "github.com/cespare/xxhash/v2"
+ "github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
)
-// TODO: is there a way to make the admin endpoint so that it can be plugged into the HTTP app? see issue #2833
+func init() {
+ // The hard-coded default `DefaultAdminListen` can be overridden
+ // by setting the `CADDY_ADMIN` environment variable.
+ // The environment variable may be used by packagers to change
+ // the default admin address to something more appropriate for
+ // that platform. See #5317 for discussion.
+ if env, exists := os.LookupEnv("CADDY_ADMIN"); exists {
+ DefaultAdminListen = env
+ }
+}
// AdminConfig configures Caddy's API endpoint, which is used
// to manage Caddy while it is running.
@@ -49,149 +69,334 @@ type AdminConfig struct {
// The address to which the admin endpoint's listener should
// bind itself. Can be any single network address that can be
- // parsed by Caddy. Default: localhost:2019
+ // parsed by Caddy. Accepts placeholders.
+ // Default: the value of the `CADDY_ADMIN` environment variable,
+ // or `localhost:2019` otherwise.
+ //
+ // Remember: When changing this value through a config reload,
+ // be sure to use the `--address` CLI flag to specify the current
+ // admin address if the currently-running admin endpoint is not
+ // the default address.
Listen string `json:"listen,omitempty"`
// If true, CORS headers will be emitted, and requests to the
// API will be rejected if their `Host` and `Origin` headers
// do not match the expected value(s). Use `origins` to
- // customize which origins/hosts are allowed.If `origins` is
+ // customize which origins/hosts are allowed. If `origins` is
// not set, the listen address is the only value allowed by
- // default.
+ // default. Enforced only on local (plaintext) endpoint.
EnforceOrigin bool `json:"enforce_origin,omitempty"`
// The list of allowed origins/hosts for API requests. Only needed
// if accessing the admin endpoint from a host different from the
// socket's network interface or if `enforce_origin` is true. If not
// set, the listener address will be the default value. If set but
- // empty, no origins will be allowed.
+ // empty, no origins will be allowed. Enforced only on local
+ // (plaintext) endpoint.
Origins []string `json:"origins,omitempty"`
- // Options related to configuration management.
+ // Options pertaining to configuration management.
Config *ConfigSettings `json:"config,omitempty"`
+
+ // Options that establish this server's identity. Identity refers to
+ // credentials which can be used to uniquely identify and authenticate
+ // this server instance. This is required if remote administration is
+ // enabled (but does not require remote administration to be enabled).
+ // Default: no identity management.
+ Identity *IdentityConfig `json:"identity,omitempty"`
+
+ // Options pertaining to remote administration. By default, remote
+ // administration is disabled. If enabled, identity management must
+ // also be configured, as that is how the endpoint is secured.
+ // See the neighboring "identity" object.
+ //
+ // EXPERIMENTAL: This feature is subject to change.
+ Remote *RemoteAdmin `json:"remote,omitempty"`
+
+ // Holds onto the routers so that we can later provision them
+ // if they require provisioning.
+ routers []AdminRouter
}
-// ConfigSettings configures the, uh, configuration... and
-// management thereof.
+// ConfigSettings configures the management of configuration.
type ConfigSettings struct {
// Whether to keep a copy of the active config on disk. Default is true.
+ // Note that "pulled" dynamic configs (using the neighboring "load" module)
+ // are not persisted; only configs that are pushed to Caddy get persisted.
Persist *bool `json:"persist,omitempty"`
+
+ // Loads a new configuration. This is helpful if your configs are
+ // managed elsewhere and you want Caddy to pull its config dynamically
+ // when it starts. The pulled config completely replaces the current
+ // one, just like any other config load. It is an error if a pulled
+ // config is configured to pull another config without a load_delay,
+ // as this creates a tight loop.
+ //
+ // EXPERIMENTAL: Subject to change.
+ LoadRaw json.RawMessage `json:"load,omitempty" caddy:"namespace=caddy.config_loaders inline_key=module"`
+
+ // The duration after which to load config. If set, config will be pulled
+ // from the config loader after this duration. A delay is required if a
+ // dynamically-loaded config is configured to load yet another config. To
+ // load configs on a regular interval, ensure this value is set the same
+ // on all loaded configs; it can also be variable if needed, and to stop
+ // the loop, simply remove dynamic config loading from the next-loaded
+ // config.
+ //
+ // EXPERIMENTAL: Subject to change.
+ LoadDelay Duration `json:"load_delay,omitempty"`
}
-// listenAddr extracts a singular listen address from ac.Listen,
-// returning the network and the address of the listener.
-func (admin AdminConfig) listenAddr() (NetworkAddress, error) {
- input := admin.Listen
- if input == "" {
- input = DefaultAdminListen
- }
- listenAddr, err := ParseNetworkAddress(input)
- if err != nil {
- return NetworkAddress{}, fmt.Errorf("parsing admin listener address: %v", err)
- }
- if listenAddr.PortRangeSize() != 1 {
- return NetworkAddress{}, fmt.Errorf("admin endpoint must have exactly one address; cannot listen on %v", listenAddr)
- }
- return listenAddr, nil
+// IdentityConfig configures management of this server's identity. An identity
+// consists of credentials that uniquely verify this instance; for example,
+// TLS certificates (public + private key pairs).
+type IdentityConfig struct {
+ // List of names or IP addresses which refer to this server.
+ // Certificates will be obtained for these identifiers so
+ // secure TLS connections can be made using them.
+ Identifiers []string `json:"identifiers,omitempty"`
+
+ // Issuers that can provide this admin endpoint its identity
+ // certificate(s). Default: ACME issuers configured for
+ // ZeroSSL and Let's Encrypt. Be sure to change this if you
+ // require credentials for private identifiers.
+ IssuersRaw []json.RawMessage `json:"issuers,omitempty" caddy:"namespace=tls.issuance inline_key=module"`
+
+ issuers []certmagic.Issuer
+}
+
+// RemoteAdmin enables and configures remote administration. If enabled,
+// a secure listener enforcing mutual TLS authentication will be started
+// on a different port from the standard plaintext admin server.
+//
+// This endpoint is secured using identity management, which must be
+// configured separately (because identity management does not depend
+// on remote administration). See the admin/identity config struct.
+//
+// EXPERIMENTAL: Subject to change.
+type RemoteAdmin struct {
+ // The address on which to start the secure listener. Accepts placeholders.
+ // Default: :2021
+ Listen string `json:"listen,omitempty"`
+
+ // List of access controls for this secure admin endpoint.
+ // This configures TLS mutual authentication (i.e. authorized
+ // client certificates), but also application-layer permissions
+ // like which paths and methods each identity is authorized for.
+ AccessControl []*AdminAccess `json:"access_control,omitempty"`
+}
+
+// AdminAccess specifies what permissions an identity or group
+// of identities are granted.
+type AdminAccess struct {
+ // Base64-encoded DER certificates containing public keys to accept.
+ // (The contents of PEM certificate blocks are base64-encoded DER.)
+ // Any of these public keys can appear in any part of a verified chain.
+ PublicKeys []string `json:"public_keys,omitempty"`
+
+ // Limits what the associated identities are allowed to do.
+ // If unspecified, all permissions are granted.
+ Permissions []AdminPermissions `json:"permissions,omitempty"`
+
+ publicKeys []crypto.PublicKey
+}
+
+// AdminPermissions specifies what kinds of requests are allowed
+// to be made to the admin endpoint.
+type AdminPermissions struct {
+ // The API paths allowed. Paths are simple prefix matches.
+ // Any subpath of the specified paths will be allowed.
+ Paths []string `json:"paths,omitempty"`
+
+ // The HTTP methods allowed for the given paths.
+ Methods []string `json:"methods,omitempty"`
}
// newAdminHandler reads admin's config and returns an http.Handler suitable
// for use in an admin endpoint server, which will be listening on listenAddr.
-func (admin AdminConfig) newAdminHandler(addr NetworkAddress) adminHandler {
- muxWrap := adminHandler{
- enforceOrigin: admin.EnforceOrigin,
- enforceHost: !addr.isWildcardInterface(),
- allowedOrigins: admin.allowedOrigins(addr),
- mux: http.NewServeMux(),
+func (admin *AdminConfig) newAdminHandler(addr NetworkAddress, remote bool, _ Context) adminHandler {
+ muxWrap := adminHandler{mux: http.NewServeMux()}
+
+ // secure the local or remote endpoint respectively
+ if remote {
+ muxWrap.remoteControl = admin.Remote
+ } else {
+ muxWrap.enforceHost = !addr.isWildcardInterface()
+ muxWrap.allowedOrigins = admin.allowedOrigins(addr)
+ muxWrap.enforceOrigin = admin.EnforceOrigin
}
+ addRouteWithMetrics := func(pattern string, handlerLabel string, h http.Handler) {
+ labels := prometheus.Labels{"path": pattern, "handler": handlerLabel}
+ h = instrumentHandlerCounter(
+ adminMetrics.requestCount.MustCurryWith(labels),
+ h,
+ )
+ muxWrap.mux.Handle(pattern, h)
+ }
// addRoute just calls muxWrap.mux.Handle after
// wrapping the handler with error handling
- addRoute := func(pattern string, h AdminHandler) {
+ addRoute := func(pattern string, handlerLabel string, h AdminHandler) {
wrapper := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
err := h.ServeHTTP(w, r)
+ if err != nil {
+ labels := prometheus.Labels{
+ "path": pattern,
+ "handler": handlerLabel,
+ "method": strings.ToUpper(r.Method),
+ }
+ adminMetrics.requestErrors.With(labels).Inc()
+ }
muxWrap.handleError(w, r, err)
})
- muxWrap.mux.Handle(pattern, wrapper)
+ addRouteWithMetrics(pattern, handlerLabel, wrapper)
}
+ const handlerLabel = "admin"
+
// register standard config control endpoints
- addRoute("/"+rawConfigKey+"/", AdminHandlerFunc(handleConfig))
- addRoute("/id/", AdminHandlerFunc(handleConfigID))
- addRoute("/stop", AdminHandlerFunc(handleStop))
+ addRoute("/"+rawConfigKey+"/", handlerLabel, AdminHandlerFunc(handleConfig))
+ addRoute("/id/", handlerLabel, AdminHandlerFunc(handleConfigID))
+ addRoute("/stop", handlerLabel, AdminHandlerFunc(handleStop))
// register debugging endpoints
- muxWrap.mux.HandleFunc("/debug/pprof/", pprof.Index)
- muxWrap.mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
- muxWrap.mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
- muxWrap.mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
- muxWrap.mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
- muxWrap.mux.Handle("/debug/vars", expvar.Handler())
+ addRouteWithMetrics("/debug/pprof/", handlerLabel, http.HandlerFunc(pprof.Index))
+ addRouteWithMetrics("/debug/pprof/cmdline", handlerLabel, http.HandlerFunc(pprof.Cmdline))
+ addRouteWithMetrics("/debug/pprof/profile", handlerLabel, http.HandlerFunc(pprof.Profile))
+ addRouteWithMetrics("/debug/pprof/symbol", handlerLabel, http.HandlerFunc(pprof.Symbol))
+ addRouteWithMetrics("/debug/pprof/trace", handlerLabel, http.HandlerFunc(pprof.Trace))
+ addRouteWithMetrics("/debug/vars", handlerLabel, expvar.Handler())
// register third-party module endpoints
for _, m := range GetModules("admin.api") {
router := m.New().(AdminRouter)
for _, route := range router.Routes() {
- addRoute(route.Pattern, route.Handler)
+ addRoute(route.Pattern, handlerLabel, route.Handler)
}
+ admin.routers = append(admin.routers, router)
}
return muxWrap
}
+// provisionAdminRouters provisions all the router modules
+// in the admin.api namespace that need provisioning.
+func (admin *AdminConfig) provisionAdminRouters(ctx Context) error {
+ for _, router := range admin.routers {
+ provisioner, ok := router.(Provisioner)
+ if !ok {
+ continue
+ }
+
+ err := provisioner.Provision(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ // We no longer need the routers once provisioned, allow for GC
+ admin.routers = nil
+
+ return nil
+}
+
// allowedOrigins returns a list of origins that are allowed.
// If admin.Origins is nil (null), the provided listen address
// will be used as the default origin. If admin.Origins is
// empty, no origins will be allowed, effectively bricking the
// endpoint for non-unix-socket endpoints, but whatever.
-func (admin AdminConfig) allowedOrigins(addr NetworkAddress) []string {
+func (admin AdminConfig) allowedOrigins(addr NetworkAddress) []*url.URL {
uniqueOrigins := make(map[string]struct{})
for _, o := range admin.Origins {
uniqueOrigins[o] = struct{}{}
}
if admin.Origins == nil {
if addr.isLoopback() {
- if addr.IsUnixNetwork() {
+ if addr.IsUnixNetwork() || addr.IsFdNetwork() {
// RFC 2616, Section 14.26:
// "A client MUST include a Host header field in all HTTP/1.1 request
// messages. If the requested URI does not include an Internet host
// name for the service being requested, then the Host header field MUST
// be given with an empty value."
+ //
+ // UPDATE July 2023: Go broke this by patching a minor security bug in 1.20.6.
+ // Understandable, but frustrating. See:
+ // https://github.com/golang/go/issues/60374
+ // See also the discussion here:
+ // https://github.com/golang/go/issues/61431
+ //
+ // We can no longer conform to RFC 2616 Section 14.26 from either Go or curl
+ // in purity. (Curl allowed no host between 7.40 and 7.50, but now requires a
+ // bogus host; see https://superuser.com/a/925610.) If we disable Host/Origin
+ // security checks, the infosec community assures me that it is secure to do
+ // so, because:
+ // 1) Browsers do not allow access to unix sockets
+ // 2) DNS is irrelevant to unix sockets
+ //
+ // I am not quite ready to trust either of those external factors, so instead
+ // of disabling Host/Origin checks, we now allow specific Host values when
+ // accessing the admin endpoint over unix sockets. I definitely don't trust
+ // DNS (e.g. I don't trust 'localhost' to always resolve to the local host),
+ // and IP shouldn't even be used, but if it is for some reason, I think we can
+ // at least be reasonably assured that 127.0.0.1 and ::1 route to the local
+ // machine, meaning that a hypothetical browser origin would have to be on the
+ // local machine as well.
uniqueOrigins[""] = struct{}{}
+ uniqueOrigins["127.0.0.1"] = struct{}{}
+ uniqueOrigins["::1"] = struct{}{}
} else {
uniqueOrigins[net.JoinHostPort("localhost", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("::1", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("127.0.0.1", addr.port())] = struct{}{}
}
}
- if !addr.IsUnixNetwork() {
+ if !addr.IsUnixNetwork() && !addr.IsFdNetwork() {
uniqueOrigins[addr.JoinHostPort(0)] = struct{}{}
}
}
- allowed := make([]string, 0, len(uniqueOrigins))
- for origin := range uniqueOrigins {
+ allowed := make([]*url.URL, 0, len(uniqueOrigins))
+ for originStr := range uniqueOrigins {
+ var origin *url.URL
+ if strings.Contains(originStr, "://") {
+ var err error
+ origin, err = url.Parse(originStr)
+ if err != nil {
+ continue
+ }
+ origin.Path = ""
+ origin.RawPath = ""
+ origin.Fragment = ""
+ origin.RawFragment = ""
+ origin.RawQuery = ""
+ } else {
+ origin = &url.URL{Host: originStr}
+ }
allowed = append(allowed, origin)
}
return allowed
}
-// replaceAdmin replaces the running admin server according
-// to the relevant configuration in cfg. If no configuration
-// for the admin endpoint exists in cfg, a default one is
-// used, so that there is always an admin server (unless it
-// is explicitly configured to be disabled).
-func replaceAdmin(cfg *Config) error {
- // always be sure to close down the old admin endpoint
+// replaceLocalAdminServer replaces the running local admin server
+// according to the relevant configuration in cfg. If no configuration
+// for the admin endpoint exists in cfg, a default one is used, so
+// that there is always an admin server (unless it is explicitly
+// configured to be disabled).
+// Critically note that some elements and functionality of the context
+// may not be ready, e.g. storage. Tread carefully.
+func replaceLocalAdminServer(cfg *Config, ctx Context) error {
+ // always* be sure to close down the old admin endpoint
// as gracefully as possible, even if the new one is
// disabled -- careful to use reference to the current
// (old) admin endpoint since it will be different
// when the function returns
- oldAdminServer := adminServer
+ // (* except if the new one fails to start)
+ oldAdminServer := localAdminServer
+ var err error
defer func() {
// do the shutdown asynchronously so that any
// current API request gets a response; this
// goroutine may last a few seconds
- if oldAdminServer != nil {
+ if oldAdminServer != nil && err == nil {
go func(oldAdminServer *http.Server) {
err := stopAdminServer(oldAdminServer)
if err != nil {
@@ -201,54 +406,317 @@ func replaceAdmin(cfg *Config) error {
}
}()
- // always get a valid admin config
- adminConfig := DefaultAdminConfig
- if cfg != nil && cfg.Admin != nil {
- adminConfig = cfg.Admin
+ // set a default if admin wasn't otherwise configured
+ if cfg.Admin == nil {
+ cfg.Admin = &AdminConfig{
+ Listen: DefaultAdminListen,
+ }
}
// if new admin endpoint is to be disabled, we're done
- if adminConfig.Disabled {
+ if cfg.Admin.Disabled {
Log().Named("admin").Warn("admin endpoint disabled")
return nil
}
// extract a singular listener address
- addr, err := adminConfig.listenAddr()
+ addr, err := parseAdminListenAddr(cfg.Admin.Listen, DefaultAdminListen)
if err != nil {
return err
}
- handler := adminConfig.newAdminHandler(addr)
+ handler := cfg.Admin.newAdminHandler(addr, false, ctx)
- ln, err := Listen(addr.Network, addr.JoinHostPort(0))
+ ln, err := addr.Listen(context.TODO(), 0, net.ListenConfig{})
if err != nil {
return err
}
- adminServer = &http.Server{
+ serverMu.Lock()
+ localAdminServer = &http.Server{
+ Addr: addr.String(), // for logging purposes only
Handler: handler,
ReadTimeout: 10 * time.Second,
ReadHeaderTimeout: 5 * time.Second,
IdleTimeout: 60 * time.Second,
MaxHeaderBytes: 1024 * 64,
}
+ serverMu.Unlock()
- go adminServer.Serve(ln)
+ adminLogger := Log().Named("admin")
+ go func() {
+ serverMu.Lock()
+ server := localAdminServer
+ serverMu.Unlock()
+ if err := server.Serve(ln.(net.Listener)); !errors.Is(err, http.ErrServerClosed) {
+ adminLogger.Error("admin server shutdown for unknown reason", zap.Error(err))
+ }
+ }()
- Log().Named("admin").Info("admin endpoint started",
+ adminLogger.Info("admin endpoint started",
zap.String("address", addr.String()),
- zap.Bool("enforce_origin", adminConfig.EnforceOrigin),
- zap.Strings("origins", handler.allowedOrigins))
+ zap.Bool("enforce_origin", cfg.Admin.EnforceOrigin),
+ zap.Array("origins", loggableURLArray(handler.allowedOrigins)))
if !handler.enforceHost {
- Log().Named("admin").Warn("admin endpoint on open interface; host checking disabled",
+ adminLogger.Warn("admin endpoint on open interface; host checking disabled",
zap.String("address", addr.String()))
}
return nil
}
+// manageIdentity sets up automated identity management for this server.
+func manageIdentity(ctx Context, cfg *Config) error {
+ if cfg == nil || cfg.Admin == nil || cfg.Admin.Identity == nil {
+ return nil
+ }
+
+ // set default issuers; this is pretty hacky because we can't
+ // import the caddytls package -- but it works
+ if cfg.Admin.Identity.IssuersRaw == nil {
+ cfg.Admin.Identity.IssuersRaw = []json.RawMessage{
+ json.RawMessage(`{"module": "acme"}`),
+ }
+ }
+
+ // load and provision issuer modules
+ if cfg.Admin.Identity.IssuersRaw != nil {
+ val, err := ctx.LoadModule(cfg.Admin.Identity, "IssuersRaw")
+ if err != nil {
+ return fmt.Errorf("loading identity issuer modules: %s", err)
+ }
+ for _, issVal := range val.([]any) {
+ cfg.Admin.Identity.issuers = append(cfg.Admin.Identity.issuers, issVal.(certmagic.Issuer))
+ }
+ }
+
+ // we'll make a new cache when we make the CertMagic config, so stop any previous cache
+ if identityCertCache != nil {
+ identityCertCache.Stop()
+ }
+
+ logger := Log().Named("admin.identity")
+ cmCfg := cfg.Admin.Identity.certmagicConfig(logger, true)
+
+ // issuers have circular dependencies with the configs because,
+ // as explained in the caddytls package, they need access to the
+ // correct storage and cache to solve ACME challenges
+ for _, issuer := range cfg.Admin.Identity.issuers {
+ // avoid import cycle with caddytls package, so manually duplicate the interface here, yuck
+ if annoying, ok := issuer.(interface{ SetConfig(cfg *certmagic.Config) }); ok {
+ annoying.SetConfig(cmCfg)
+ }
+ }
+
+ // obtain and renew server identity certificate(s)
+ return cmCfg.ManageAsync(ctx, cfg.Admin.Identity.Identifiers)
+}
+
+// replaceRemoteAdminServer replaces the running remote admin server
+// according to the relevant configuration in cfg. It stops any previous
+// remote admin server and only starts a new one if configured.
+func replaceRemoteAdminServer(ctx Context, cfg *Config) error {
+ if cfg == nil {
+ return nil
+ }
+
+ remoteLogger := Log().Named("admin.remote")
+
+ oldAdminServer := remoteAdminServer
+ defer func() {
+ if oldAdminServer != nil {
+ go func(oldAdminServer *http.Server) {
+ err := stopAdminServer(oldAdminServer)
+ if err != nil {
+ Log().Named("admin").Error("stopping current secure admin endpoint", zap.Error(err))
+ }
+ }(oldAdminServer)
+ }
+ }()
+
+ if cfg.Admin == nil || cfg.Admin.Remote == nil {
+ return nil
+ }
+
+ addr, err := parseAdminListenAddr(cfg.Admin.Remote.Listen, DefaultRemoteAdminListen)
+ if err != nil {
+ return err
+ }
+
+ // make the HTTP handler but disable Host/Origin enforcement
+ // because we are using TLS authentication instead
+ handler := cfg.Admin.newAdminHandler(addr, true, ctx)
+
+ // create client certificate pool for TLS mutual auth, and extract public keys
+ // so that we can enforce access controls at the application layer
+ clientCertPool := x509.NewCertPool()
+ for i, accessControl := range cfg.Admin.Remote.AccessControl {
+ for j, certBase64 := range accessControl.PublicKeys {
+ cert, err := decodeBase64DERCert(certBase64)
+ if err != nil {
+ return fmt.Errorf("access control %d public key %d: parsing base64 certificate DER: %v", i, j, err)
+ }
+ accessControl.publicKeys = append(accessControl.publicKeys, cert.PublicKey)
+ clientCertPool.AddCert(cert)
+ }
+ }
+
+ // create TLS config that will enforce mutual authentication
+ if identityCertCache == nil {
+ return fmt.Errorf("cannot enable remote admin without a certificate cache; configure identity management to initialize a certificate cache")
+ }
+ cmCfg := cfg.Admin.Identity.certmagicConfig(remoteLogger, false)
+ tlsConfig := cmCfg.TLSConfig()
+ tlsConfig.NextProtos = nil // this server does not solve ACME challenges
+ tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ tlsConfig.ClientCAs = clientCertPool
+
+ // convert logger to stdlib so it can be used by HTTP server
+ serverLogger, err := zap.NewStdLogAt(remoteLogger, zap.DebugLevel)
+ if err != nil {
+ return err
+ }
+
+ serverMu.Lock()
+ // create secure HTTP server
+ remoteAdminServer = &http.Server{
+ Addr: addr.String(), // for logging purposes only
+ Handler: handler,
+ TLSConfig: tlsConfig,
+ ReadTimeout: 10 * time.Second,
+ ReadHeaderTimeout: 5 * time.Second,
+ IdleTimeout: 60 * time.Second,
+ MaxHeaderBytes: 1024 * 64,
+ ErrorLog: serverLogger,
+ }
+ serverMu.Unlock()
+
+ // start listener
+ lnAny, err := addr.Listen(ctx, 0, net.ListenConfig{})
+ if err != nil {
+ return err
+ }
+ ln := lnAny.(net.Listener)
+ ln = tls.NewListener(ln, tlsConfig)
+
+ go func() {
+ serverMu.Lock()
+ server := remoteAdminServer
+ serverMu.Unlock()
+ if err := server.Serve(ln); !errors.Is(err, http.ErrServerClosed) {
+ remoteLogger.Error("admin remote server shutdown for unknown reason", zap.Error(err))
+ }
+ }()
+
+ remoteLogger.Info("secure admin remote control endpoint started",
+ zap.String("address", addr.String()))
+
+ return nil
+}
+
+func (ident *IdentityConfig) certmagicConfig(logger *zap.Logger, makeCache bool) *certmagic.Config {
+ var cmCfg *certmagic.Config
+ if ident == nil {
+ // user might not have configured identity; that's OK, we can still make a
+ // certmagic config, although it'll be mostly useless for remote management
+ ident = new(IdentityConfig)
+ }
+ template := certmagic.Config{
+ Storage: DefaultStorage, // do not act as part of a cluster (this is for the server's local identity)
+ Logger: logger,
+ Issuers: ident.issuers,
+ }
+ if makeCache {
+ identityCertCache = certmagic.NewCache(certmagic.CacheOptions{
+ GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
+ return cmCfg, nil
+ },
+ Logger: logger.Named("cache"),
+ })
+ }
+ cmCfg = certmagic.New(identityCertCache, template)
+ return cmCfg
+}
+
+// IdentityCredentials returns this instance's configured, managed identity credentials
+// that can be used in TLS client authentication.
+func (ctx Context) IdentityCredentials(logger *zap.Logger) ([]tls.Certificate, error) {
+ if ctx.cfg == nil || ctx.cfg.Admin == nil || ctx.cfg.Admin.Identity == nil {
+ return nil, fmt.Errorf("no server identity configured")
+ }
+ ident := ctx.cfg.Admin.Identity
+ if len(ident.Identifiers) == 0 {
+ return nil, fmt.Errorf("no identifiers configured")
+ }
+ if logger == nil {
+ logger = Log()
+ }
+ magic := ident.certmagicConfig(logger, false)
+ return magic.ClientCredentials(ctx, ident.Identifiers)
+}
+
+// enforceAccessControls enforces application-layer access controls for r based on remote.
+// It expects that the TLS server has already established at least one verified chain of
+// trust, and then looks for a matching, authorized public key that is allowed to access
+// the defined path(s) using the defined method(s).
+func (remote RemoteAdmin) enforceAccessControls(r *http.Request) error {
+ for _, chain := range r.TLS.VerifiedChains {
+ for _, peerCert := range chain {
+ for _, adminAccess := range remote.AccessControl {
+ for _, allowedKey := range adminAccess.publicKeys {
+ // see if we found a matching public key; the TLS server already verified the chain
+ // so we know the client possesses the associated private key; this handy interface
+ // doesn't appear to be defined anywhere in the std lib, but was implemented here:
+ // https://github.com/golang/go/commit/b5f2c0f50297fa5cd14af668ddd7fd923626cf8c
+ comparer, ok := peerCert.PublicKey.(interface{ Equal(crypto.PublicKey) bool })
+ if !ok || !comparer.Equal(allowedKey) {
+ continue
+ }
+
+ // key recognized; make sure its HTTP request is permitted
+ for _, accessPerm := range adminAccess.Permissions {
+ // verify method
+ methodFound := accessPerm.Methods == nil || slices.Contains(accessPerm.Methods, r.Method)
+ if !methodFound {
+ return APIError{
+ HTTPStatus: http.StatusForbidden,
+ Message: "not authorized to use this method",
+ }
+ }
+
+ // verify path
+ pathFound := accessPerm.Paths == nil
+ for _, allowedPath := range accessPerm.Paths {
+ if strings.HasPrefix(r.URL.Path, allowedPath) {
+ pathFound = true
+ break
+ }
+ }
+ if !pathFound {
+ return APIError{
+ HTTPStatus: http.StatusForbidden,
+ Message: "not authorized to access this path",
+ }
+ }
+ }
+
+ // public key authorized, method and path allowed
+ return nil
+ }
+ }
+ }
+ }
+
+ // in theory, this should never happen; with an unverified chain, the TLS server
+ // should not accept the connection in the first place, and the acceptable cert
+ // pool is configured using the same list of public keys we verify against
+ return APIError{
+ HTTPStatus: http.StatusUnauthorized,
+ Message: "client identity not authorized",
+ }
+}
+
func stopAdminServer(srv *http.Server) error {
if srv == nil {
return fmt.Errorf("no admin server")
@@ -259,7 +727,7 @@ func stopAdminServer(srv *http.Server) error {
if err != nil {
return fmt.Errorf("shutting down admin server: %v", err)
}
- Log().Named("admin").Info("stopped previous server")
+ Log().Named("admin").Info("stopped previous server", zap.String("address", srv.Addr))
return nil
}
@@ -275,22 +743,44 @@ type AdminRoute struct {
}
type adminHandler struct {
+ mux *http.ServeMux
+
+ // security for local/plaintext endpoint
enforceOrigin bool
enforceHost bool
- allowedOrigins []string
- mux *http.ServeMux
+ allowedOrigins []*url.URL
+
+ // security for remote/encrypted endpoint
+ remoteControl *RemoteAdmin
}
// ServeHTTP is the external entry point for API requests.
// It will only be called once per request.
func (h adminHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- Log().Named("admin.api").Info("received request",
+ ip, port, err := net.SplitHostPort(r.RemoteAddr)
+ if err != nil {
+ ip = r.RemoteAddr
+ port = ""
+ }
+ log := Log().Named("admin.api").With(
zap.String("method", r.Method),
zap.String("host", r.Host),
zap.String("uri", r.RequestURI),
- zap.String("remote_addr", r.RemoteAddr),
+ zap.String("remote_ip", ip),
+ zap.String("remote_port", port),
zap.Reflect("headers", r.Header),
)
+ if r.TLS != nil {
+ log = log.With(
+ zap.Bool("secure", true),
+ zap.Int("verified_chains", len(r.TLS.VerifiedChains)),
+ )
+ }
+ if r.RequestURI == "/metrics" {
+ log.Debug("received request")
+ } else {
+ log.Info("received request")
+ }
h.serveHTTP(w, r)
}
@@ -298,6 +788,22 @@ func (h adminHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// be called more than once per request, for example if a request
// is rewritten (i.e. internal redirect).
func (h adminHandler) serveHTTP(w http.ResponseWriter, r *http.Request) {
+ if h.remoteControl != nil {
+ // enforce access controls on secure endpoint
+ if err := h.remoteControl.enforceAccessControls(r); err != nil {
+ h.handleError(w, r, err)
+ return
+ }
+ }
+
+ if strings.Contains(r.Header.Get("Upgrade"), "websocket") {
+		// I've never been able to demonstrate a vulnerability myself, but apparently
+ // WebSocket connections originating from browsers aren't subject to CORS
+ // restrictions, so we'll just be on the safe side
+ h.handleError(w, r, fmt.Errorf("websocket connections aren't allowed"))
+ return
+ }
+
if h.enforceHost {
// DNS rebinding mitigation
err := h.checkHost(r)
@@ -323,8 +829,6 @@ func (h adminHandler) serveHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", origin)
}
- // TODO: authentication & authorization, if configured
-
h.mux.ServeHTTP(w, r)
}
@@ -332,7 +836,7 @@ func (h adminHandler) handleError(w http.ResponseWriter, r *http.Request, err er
if err == nil {
return
}
- if err == ErrInternalRedir {
+ if err == errInternalRedir {
h.serveHTTP(w, r)
return
}
@@ -340,12 +844,12 @@ func (h adminHandler) handleError(w http.ResponseWriter, r *http.Request, err er
apiErr, ok := err.(APIError)
if !ok {
apiErr = APIError{
- Code: http.StatusInternalServerError,
- Err: err,
+ HTTPStatus: http.StatusInternalServerError,
+ Err: err,
}
}
- if apiErr.Code == 0 {
- apiErr.Code = http.StatusInternalServerError
+ if apiErr.HTTPStatus == 0 {
+ apiErr.HTTPStatus = http.StatusInternalServerError
}
if apiErr.Message == "" && apiErr.Err != nil {
apiErr.Message = apiErr.Err.Error()
@@ -353,12 +857,15 @@ func (h adminHandler) handleError(w http.ResponseWriter, r *http.Request, err er
Log().Named("admin.api").Error("request error",
zap.Error(err),
- zap.Int("status_code", apiErr.Code),
+ zap.Int("status_code", apiErr.HTTPStatus),
)
w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(apiErr.Code)
- json.NewEncoder(w).Encode(apiErr)
+ w.WriteHeader(apiErr.HTTPStatus)
+ encErr := json.NewEncoder(w).Encode(apiErr)
+ if encErr != nil {
+ Log().Named("admin.api").Error("failed to encode error response", zap.Error(encErr))
+ }
}
// checkHost returns a handler that wraps next such that
@@ -366,17 +873,13 @@ func (h adminHandler) handleError(w http.ResponseWriter, r *http.Request, err er
// a trustworthy/expected value. This helps to mitigate DNS
// rebinding attacks.
func (h adminHandler) checkHost(r *http.Request) error {
- var allowed bool
- for _, allowedHost := range h.allowedOrigins {
- if r.Host == allowedHost {
- allowed = true
- break
- }
- }
+ allowed := slices.ContainsFunc(h.allowedOrigins, func(u *url.URL) bool {
+ return r.Host == u.Host
+ })
if !allowed {
return APIError{
- Code: http.StatusForbidden,
- Err: fmt.Errorf("host not allowed: %s", r.Host),
+ HTTPStatus: http.StatusForbidden,
+ Err: fmt.Errorf("host not allowed: %s", r.Host),
}
}
return nil
@@ -387,57 +890,94 @@ func (h adminHandler) checkHost(r *http.Request) error {
// sites from issuing requests to our listener. It
// returns the origin that was obtained from r.
func (h adminHandler) checkOrigin(r *http.Request) (string, error) {
- origin := h.getOriginHost(r)
- if origin == "" {
- return origin, APIError{
- Code: http.StatusForbidden,
- Err: fmt.Errorf("missing required Origin header"),
+ originStr, origin := h.getOrigin(r)
+ if origin == nil {
+ return "", APIError{
+ HTTPStatus: http.StatusForbidden,
+ Err: fmt.Errorf("required Origin header is missing or invalid"),
}
}
if !h.originAllowed(origin) {
- return origin, APIError{
- Code: http.StatusForbidden,
- Err: fmt.Errorf("client is not allowed to access from origin %s", origin),
+ return "", APIError{
+ HTTPStatus: http.StatusForbidden,
+ Err: fmt.Errorf("client is not allowed to access from origin '%s'", originStr),
}
}
- return origin, nil
+ return origin.String(), nil
}
-func (h adminHandler) getOriginHost(r *http.Request) string {
+func (h adminHandler) getOrigin(r *http.Request) (string, *url.URL) {
origin := r.Header.Get("Origin")
if origin == "" {
origin = r.Header.Get("Referer")
}
originURL, err := url.Parse(origin)
- if err == nil && originURL.Host != "" {
- origin = originURL.Host
+ if err != nil {
+ return origin, nil
}
- return origin
+ originURL.Path = ""
+ originURL.RawPath = ""
+ originURL.Fragment = ""
+ originURL.RawFragment = ""
+ originURL.RawQuery = ""
+ return origin, originURL
}
-func (h adminHandler) originAllowed(origin string) bool {
+func (h adminHandler) originAllowed(origin *url.URL) bool {
for _, allowedOrigin := range h.allowedOrigins {
- originCopy := origin
- if !strings.Contains(allowedOrigin, "://") {
- // no scheme specified, so allow both
- originCopy = strings.TrimPrefix(originCopy, "http://")
- originCopy = strings.TrimPrefix(originCopy, "https://")
+ if allowedOrigin.Scheme != "" && origin.Scheme != allowedOrigin.Scheme {
+ continue
}
- if originCopy == allowedOrigin {
+ if origin.Host == allowedOrigin.Host {
return true
}
}
return false
}
+// etagHasher returns the hasher we used on the config to both
+// produce and verify ETags.
+func etagHasher() hash.Hash { return xxhash.New() }
+
+// makeEtag returns an Etag header value (including quotes) for
+// the given config path and hash of contents at that path.
+func makeEtag(path string, hash hash.Hash) string {
+ return fmt.Sprintf(`"%s %x"`, path, hash.Sum(nil))
+}
+
+// This buffer pool is used to keep buffers for
+// reading the config file during ETag header generation
+var bufferPool = sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+}
+
func handleConfig(w http.ResponseWriter, r *http.Request) error {
switch r.Method {
case http.MethodGet:
w.Header().Set("Content-Type", "application/json")
+ hash := etagHasher()
- err := readConfig(r.URL.Path, w)
+ // Read the config into a buffer instead of writing directly to
+ // the response writer, as we want to set the ETag as the header,
+ // not the trailer.
+ buf := bufferPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufferPool.Put(buf)
+
+ configWriter := io.MultiWriter(buf, hash)
+ err := readConfig(r.URL.Path, configWriter)
if err != nil {
- return APIError{Code: http.StatusBadRequest, Err: err}
+ return APIError{HTTPStatus: http.StatusBadRequest, Err: err}
+ }
+
+ // we could consider setting up a sync.Pool for the summed
+ // hashes to reduce GC pressure.
+ w.Header().Set("Etag", makeEtag(r.URL.Path, hash))
+ _, err = w.Write(buf.Bytes())
+ if err != nil {
+ return APIError{HTTPStatus: http.StatusInternalServerError, Err: err}
}
return nil
@@ -452,8 +992,8 @@ func handleConfig(w http.ResponseWriter, r *http.Request) error {
if r.Method != http.MethodDelete {
if ct := r.Header.Get("Content-Type"); !strings.Contains(ct, "/json") {
return APIError{
- Code: http.StatusBadRequest,
- Err: fmt.Errorf("unacceptable content-type: %v; 'application/json' required", ct),
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("unacceptable content-type: %v; 'application/json' required", ct),
}
}
@@ -464,8 +1004,8 @@ func handleConfig(w http.ResponseWriter, r *http.Request) error {
_, err := io.Copy(buf, r.Body)
if err != nil {
return APIError{
- Code: http.StatusBadRequest,
- Err: fmt.Errorf("reading request body: %v", err),
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("reading request body: %v", err),
}
}
body = buf.Bytes()
@@ -473,15 +1013,15 @@ func handleConfig(w http.ResponseWriter, r *http.Request) error {
forceReload := r.Header.Get("Cache-Control") == "must-revalidate"
- err := changeConfig(r.Method, r.URL.Path, body, forceReload)
- if err != nil {
+ err := changeConfig(r.Method, r.URL.Path, body, r.Header.Get("If-Match"), forceReload)
+ if err != nil && !errors.Is(err, errSameConfig) {
return err
}
default:
return APIError{
- Code: http.StatusMethodNotAllowed,
- Err: fmt.Errorf("method %s not allowed", r.Method),
+ HTTPStatus: http.StatusMethodNotAllowed,
+ Err: fmt.Errorf("method %s not allowed", r.Method),
}
}
@@ -493,69 +1033,46 @@ func handleConfigID(w http.ResponseWriter, r *http.Request) error {
parts := strings.Split(idPath, "/")
if len(parts) < 3 || parts[2] == "" {
- return fmt.Errorf("request path is missing object ID")
+ return APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("request path is missing object ID"),
+ }
}
if parts[0] != "" || parts[1] != "id" {
- return fmt.Errorf("malformed object path")
+ return APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("malformed object path"),
+ }
}
id := parts[2]
// map the ID to the expanded path
- currentCfgMu.RLock()
+ rawCfgMu.RLock()
expanded, ok := rawCfgIndex[id]
- defer currentCfgMu.RUnlock()
+ rawCfgMu.RUnlock()
if !ok {
- return fmt.Errorf("unknown object ID '%s'", id)
+ return APIError{
+ HTTPStatus: http.StatusNotFound,
+ Err: fmt.Errorf("unknown object ID '%s'", id),
+ }
}
// piece the full URL path back together
parts = append([]string{expanded}, parts[3:]...)
r.URL.Path = path.Join(parts...)
- return ErrInternalRedir
+ return errInternalRedir
}
func handleStop(w http.ResponseWriter, r *http.Request) error {
- err := handleUnload(w, r)
- if err != nil {
- Log().Named("admin.api").Error("unload error", zap.Error(err))
- }
- go func() {
- err := stopAdminServer(adminServer)
- var exitCode int
- if err != nil {
- exitCode = ExitCodeFailedQuit
- Log().Named("admin.api").Error("failed to stop admin server gracefully", zap.Error(err))
- }
- Log().Named("admin.api").Info("stopping now, bye!! 👋")
- os.Exit(exitCode)
- }()
- return nil
-}
-
-// handleUnload stops the current configuration that is running.
-// Note that doing this can also be accomplished with DELETE /config/
-// but we leave this function because handleStop uses it.
-func handleUnload(w http.ResponseWriter, r *http.Request) error {
if r.Method != http.MethodPost {
return APIError{
- Code: http.StatusMethodNotAllowed,
- Err: fmt.Errorf("method not allowed"),
+ HTTPStatus: http.StatusMethodNotAllowed,
+ Err: fmt.Errorf("method not allowed"),
}
}
- currentCfgMu.RLock()
- hasCfg := currentCfg != nil
- currentCfgMu.RUnlock()
- if !hasCfg {
- Log().Named("admin.api").Info("nothing to unload")
- return nil
- }
- Log().Named("admin.api").Info("unloading")
- if err := stopAndCleanup(); err != nil {
- Log().Named("admin.api").Error("error unloading", zap.Error(err))
- } else {
- Log().Named("admin.api").Info("unloading completed")
- }
+
+ exitProcess(context.Background(), Log().Named("admin.api"))
return nil
}
@@ -563,11 +1080,11 @@ func handleUnload(w http.ResponseWriter, r *http.Request) error {
// the operation at path according to method, using body and out as
// needed. This is a low-level, unsynchronized function; most callers
// will want to use changeConfig or readConfig instead. This requires a
-// read or write lock on currentCfgMu, depending on method (GET needs
+// read or write lock on currentCtxMu, depending on method (GET needs
// only a read lock; all others need a write lock).
func unsyncedConfigAccess(method, path string, body []byte, out io.Writer) error {
var err error
- var val interface{}
+ var val any
// if there is a request body, decode it into the
// variable that will be set in the config according
@@ -604,16 +1121,16 @@ func unsyncedConfigAccess(method, path string, body []byte, out io.Writer) error
parts = parts[:len(parts)-1]
}
- var ptr interface{} = rawCfg
+ var ptr any = rawCfg
traverseLoop:
for i, part := range parts {
switch v := ptr.(type) {
- case map[string]interface{}:
+ case map[string]any:
// if the next part enters a slice, and the slice is our destination,
// handle it specially (because appending to the slice copies the slice
// header, which does not replace the original one like we want)
- if arr, ok := v[part].([]interface{}); ok && i == len(parts)-2 {
+ if arr, ok := v[part].([]any); ok && i == len(parts)-2 {
var idx int
if method != http.MethodPost {
idxStr := parts[len(parts)-1]
@@ -635,7 +1152,7 @@ traverseLoop:
}
case http.MethodPost:
if ellipses {
- valArray, ok := val.([]interface{})
+ valArray, ok := val.([]any)
if !ok {
return fmt.Errorf("final element is not an array")
}
@@ -670,9 +1187,9 @@ traverseLoop:
case http.MethodPost:
// if the part is an existing list, POST appends to
// it, otherwise it just sets or creates the value
- if arr, ok := v[part].([]interface{}); ok {
+ if arr, ok := v[part].([]any); ok {
if ellipses {
- valArray, ok := val.([]interface{})
+ valArray, ok := val.([]any)
if !ok {
return fmt.Errorf("final element is not an array")
}
@@ -685,15 +1202,27 @@ traverseLoop:
}
case http.MethodPut:
if _, ok := v[part]; ok {
- return fmt.Errorf("[%s] key already exists: %s", path, part)
+ return APIError{
+ HTTPStatus: http.StatusConflict,
+ Err: fmt.Errorf("[%s] key already exists: %s", path, part),
+ }
}
v[part] = val
case http.MethodPatch:
if _, ok := v[part]; !ok {
- return fmt.Errorf("[%s] key does not exist: %s", path, part)
+ return APIError{
+ HTTPStatus: http.StatusNotFound,
+ Err: fmt.Errorf("[%s] key does not exist: %s", path, part),
+ }
}
v[part] = val
case http.MethodDelete:
+ if _, ok := v[part]; !ok {
+ return APIError{
+ HTTPStatus: http.StatusNotFound,
+ Err: fmt.Errorf("[%s] key does not exist: %s", path, part),
+ }
+ }
delete(v, part)
default:
return fmt.Errorf("unrecognized method %s", method)
@@ -703,12 +1232,12 @@ traverseLoop:
// might not exist yet; that's OK but we need to make them as
// we go, while we still have a pointer from the level above
if v[part] == nil && method == http.MethodPut {
- v[part] = make(map[string]interface{})
+ v[part] = make(map[string]any)
}
ptr = v[part]
}
- case []interface{}:
+ case []any:
partInt, err := strconv.Atoi(part)
if err != nil {
return fmt.Errorf("[/%s] invalid array index '%s': %v",
@@ -730,7 +1259,7 @@ traverseLoop:
// RemoveMetaFields removes meta fields like "@id" from a JSON message
// by using a simple regular expression. (An alternate way to do this
-// would be to delete them from the raw, map[string]interface{}
+// would be to delete them from the raw, map[string]any
// representation as they are indexed, then iterate the index we made
// and add them back after encoding as JSON, but this is simpler.)
func RemoveMetaFields(rawJSON []byte) []byte {
@@ -767,9 +1296,9 @@ func (f AdminHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) erro
// and client responses. If Message is unset, then
// Err.Error() will be serialized in its place.
type APIError struct {
- Code int `json:"-"`
- Err error `json:"-"`
- Message string `json:"error"`
+ HTTPStatus int `json:"-"`
+ Err error `json:"-"`
+ Message string `json:"error"`
}
func (e APIError) Error() string {
@@ -779,40 +1308,100 @@ func (e APIError) Error() string {
return e.Message
}
+// parseAdminListenAddr extracts a singular listen address from either addr
+// or defaultAddr, returning the network and the address of the listener.
+func parseAdminListenAddr(addr string, defaultAddr string) (NetworkAddress, error) {
+ input, err := NewReplacer().ReplaceOrErr(addr, true, true)
+ if err != nil {
+ return NetworkAddress{}, fmt.Errorf("replacing listen address: %v", err)
+ }
+ if input == "" {
+ input = defaultAddr
+ }
+ listenAddr, err := ParseNetworkAddress(input)
+ if err != nil {
+ return NetworkAddress{}, fmt.Errorf("parsing listener address: %v", err)
+ }
+ if listenAddr.PortRangeSize() != 1 {
+ return NetworkAddress{}, fmt.Errorf("must be exactly one listener address; cannot listen on: %s", listenAddr)
+ }
+ return listenAddr, nil
+}
+
+// decodeBase64DERCert base64-decodes, then DER-decodes, certStr.
+func decodeBase64DERCert(certStr string) (*x509.Certificate, error) {
+ derBytes, err := base64.StdEncoding.DecodeString(certStr)
+ if err != nil {
+ return nil, err
+ }
+ return x509.ParseCertificate(derBytes)
+}
+
+type loggableURLArray []*url.URL
+
+func (ua loggableURLArray) MarshalLogArray(enc zapcore.ArrayEncoder) error {
+ if ua == nil {
+ return nil
+ }
+ for _, u := range ua {
+ enc.AppendString(u.String())
+ }
+ return nil
+}
+
var (
- // DefaultAdminListen is the address for the admin
+ // DefaultAdminListen is the address for the local admin
// listener, if none is specified at startup.
DefaultAdminListen = "localhost:2019"
- // ErrInternalRedir indicates an internal redirect
- // and is useful when admin API handlers rewrite
- // the request; in that case, authentication and
- // authorization needs to happen again for the
- // rewritten request.
- ErrInternalRedir = fmt.Errorf("internal redirect; re-authorization required")
-
- // DefaultAdminConfig is the default configuration
- // for the administration endpoint.
- DefaultAdminConfig = &AdminConfig{
- Listen: DefaultAdminListen,
- }
+ // DefaultRemoteAdminListen is the address for the remote
+ // (TLS-authenticated) admin listener, if enabled and not
+ // specified otherwise.
+ DefaultRemoteAdminListen = ":2021"
)
+// PIDFile writes a pidfile to the file at filename. It
+// will get deleted before the process gracefully exits.
+func PIDFile(filename string) error {
+ pid := []byte(strconv.Itoa(os.Getpid()) + "\n")
+ err := os.WriteFile(filename, pid, 0o600)
+ if err != nil {
+ return err
+ }
+ pidfile = filename
+ return nil
+}
+
// idRegexp is used to match ID fields and their associated values
// in the config. It also matches adjacent commas so that syntax
// can be preserved no matter where in the object the field appears.
// It supports string and most numeric values.
var idRegexp = regexp.MustCompile(`(?m),?\s*"` + idKey + `"\s*:\s*(-?[0-9]+(\.[0-9]+)?|(?U)".*")\s*,?`)
+// pidfile is the name of the pidfile, if any.
+var pidfile string
+
+// errInternalRedir indicates an internal redirect
+// and is useful when admin API handlers rewrite
+// the request; in that case, authentication and
+// authorization needs to happen again for the
+// rewritten request.
+var errInternalRedir = fmt.Errorf("internal redirect; re-authorization required")
+
const (
rawConfigKey = "config"
idKey = "@id"
)
var bufPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return new(bytes.Buffer)
},
}
-var adminServer *http.Server
+// keep a reference to admin endpoint singletons while they're active
+var (
+ serverMu sync.Mutex
+ localAdminServer, remoteAdminServer *http.Server
+ identityCertCache *certmagic.Cache
+)
diff --git a/admin_test.go b/admin_test.go
index cfb4ab7b..9137a888 100644
--- a/admin_test.go
+++ b/admin_test.go
@@ -16,10 +16,31 @@ package caddy
import (
"encoding/json"
+ "fmt"
+ "net/http"
"reflect"
+ "sync"
"testing"
)
+var testCfg = []byte(`{
+ "apps": {
+ "http": {
+ "servers": {
+ "myserver": {
+ "listen": ["tcp/localhost:8080-8084"],
+ "read_timeout": "30s"
+ },
+ "yourserver": {
+ "listen": ["127.0.0.1:5000"],
+ "read_header_timeout": "15s"
+ }
+ }
+ }
+ }
+ }
+ `)
+
func TestUnsyncedConfigAccess(t *testing.T) {
// each test is performed in sequence, so
// each change builds on the previous ones;
@@ -54,6 +75,12 @@ func TestUnsyncedConfigAccess(t *testing.T) {
path: "/bar/qq",
expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`,
},
+ {
+ method: "DELETE",
+ path: "/bar/qq",
+ expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`,
+ shouldErr: true,
+ },
{
method: "POST",
path: "/list",
@@ -94,7 +121,7 @@ func TestUnsyncedConfigAccess(t *testing.T) {
}
// decode the expected config so we can do a convenient DeepEqual
- var expectedDecoded interface{}
+ var expectedDecoded any
err = json.Unmarshal([]byte(tc.expect), &expectedDecoded)
if err != nil {
t.Fatalf("Test %d: Unmarshaling expected config: %v", i, err)
@@ -108,25 +135,71 @@ func TestUnsyncedConfigAccess(t *testing.T) {
}
}
-func BenchmarkLoad(b *testing.B) {
- for i := 0; i < b.N; i++ {
- cfg := []byte(`{
- "apps": {
- "http": {
- "servers": {
- "myserver": {
- "listen": ["tcp/localhost:8080-8084"],
- "read_timeout": "30s"
- },
- "yourserver": {
- "listen": ["127.0.0.1:5000"],
- "read_header_timeout": "15s"
- }
- }
- }
- }
- }
- `)
- Load(cfg, true)
+// TestLoadConcurrent exercises Load under concurrent conditions
+// and is most useful under test with `-race` enabled.
+func TestLoadConcurrent(t *testing.T) {
+ var wg sync.WaitGroup
+
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func() {
+ _ = Load(testCfg, true)
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+type fooModule struct {
+ IntField int
+ StrField string
+}
+
+func (fooModule) CaddyModule() ModuleInfo {
+ return ModuleInfo{
+ ID: "foo",
+ New: func() Module { return new(fooModule) },
+ }
+}
+func (fooModule) Start() error { return nil }
+func (fooModule) Stop() error { return nil }
+
+func TestETags(t *testing.T) {
+ RegisterModule(fooModule{})
+
+ if err := Load([]byte(`{"admin": {"listen": "localhost:2999"}, "apps": {"foo": {"strField": "abc", "intField": 0}}}`), true); err != nil {
+ t.Fatalf("loading: %s", err)
+ }
+
+ const key = "/" + rawConfigKey + "/apps/foo"
+
+ // try update the config with the wrong etag
+ err := changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 1}}`), fmt.Sprintf(`"/%s not_an_etag"`, rawConfigKey), false)
+ if apiErr, ok := err.(APIError); !ok || apiErr.HTTPStatus != http.StatusPreconditionFailed {
+ t.Fatalf("expected precondition failed; got %v", err)
+ }
+
+ // get the etag
+ hash := etagHasher()
+ if err := readConfig(key, hash); err != nil {
+ t.Fatalf("reading: %s", err)
+ }
+
+ // do the same update with the correct key
+ err = changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 1}`), makeEtag(key, hash), false)
+ if err != nil {
+ t.Fatalf("expected update to work; got %v", err)
+ }
+
+ // now try another update. The hash should no longer match and we should get precondition failed
+ err = changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 2}`), makeEtag(key, hash), false)
+ if apiErr, ok := err.(APIError); !ok || apiErr.HTTPStatus != http.StatusPreconditionFailed {
+ t.Fatalf("expected precondition failed; got %v", err)
+ }
+}
+
+func BenchmarkLoad(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Load(testCfg, true)
}
}
diff --git a/caddy.go b/caddy.go
index 00a56e74..b3e8889f 100644
--- a/caddy.go
+++ b/caddy.go
@@ -17,10 +17,12 @@ package caddy
import (
"bytes"
"context"
+ "encoding/hex"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"log"
"net/http"
"os"
@@ -30,10 +32,15 @@ import (
"strconv"
"strings"
"sync"
+ "sync/atomic"
"time"
"github.com/caddyserver/certmagic"
+ "github.com/google/uuid"
"go.uber.org/zap"
+
+ "github.com/caddyserver/caddy/v2/internal/filesystems"
+ "github.com/caddyserver/caddy/v2/notify"
)
// Config is the top (or beginning) of the Caddy configuration structure.
@@ -78,6 +85,9 @@ type Config struct {
storage certmagic.Storage
cancelFunc context.CancelFunc
+
+ // filesystems is a dict of filesystems that will later be loaded from and added to.
+ filesystems FileSystems
}
// App is a thing that Caddy runs.
@@ -99,16 +109,50 @@ func Run(cfg *Config) error {
// if it is different from the current config or
// forceReload is true.
func Load(cfgJSON []byte, forceReload bool) error {
- return changeConfig(http.MethodPost, "/"+rawConfigKey, cfgJSON, forceReload)
+ if err := notify.Reloading(); err != nil {
+ Log().Error("unable to notify service manager of reloading state", zap.Error(err))
+ }
+
+ // after reload, notify system of success or, if
+ // failure, update with status (error message)
+ var err error
+ defer func() {
+ if err != nil {
+ if notifyErr := notify.Error(err, 0); notifyErr != nil {
+ Log().Error("unable to notify to service manager of reload error",
+ zap.Error(notifyErr),
+ zap.String("reload_err", err.Error()))
+ }
+ return
+ }
+ if err := notify.Ready(); err != nil {
+ Log().Error("unable to notify to service manager of ready state", zap.Error(err))
+ }
+ }()
+
+ err = changeConfig(http.MethodPost, "/"+rawConfigKey, cfgJSON, "", forceReload)
+ if errors.Is(err, errSameConfig) {
+ err = nil // not really an error
+ }
+
+ return err
}
// changeConfig changes the current config (rawCfg) according to the
// method, traversed via the given path, and uses the given input as
// the new value (if applicable; i.e. "DELETE" doesn't have an input).
// If the resulting config is the same as the previous, no reload will
-// occur unless forceReload is true. This function is safe for
+// occur unless forceReload is true. If the config is unchanged and not
+// forcefully reloaded, then errSameConfig is returned. This function is safe for
// concurrent use.
-func changeConfig(method, path string, input []byte, forceReload bool) error {
+// The ifMatchHeader can optionally be given a string of the format:
+//
+// " "
+//
+// where is the absolute path in the config and is the expected hash of
+// the config at that path. If the hash in the ifMatchHeader doesn't match
+// the hash of the config, then an APIError with status 412 will be returned.
+func changeConfig(method, path string, input []byte, ifMatchHeader string, forceReload bool) error {
switch method {
case http.MethodGet,
http.MethodHead,
@@ -118,8 +162,42 @@ func changeConfig(method, path string, input []byte, forceReload bool) error {
return fmt.Errorf("method not allowed")
}
- currentCfgMu.Lock()
- defer currentCfgMu.Unlock()
+ rawCfgMu.Lock()
+ defer rawCfgMu.Unlock()
+
+ if ifMatchHeader != "" {
+ // expect the first and last character to be quotes
+ if len(ifMatchHeader) < 2 || ifMatchHeader[0] != '"' || ifMatchHeader[len(ifMatchHeader)-1] != '"' {
+ return APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("malformed If-Match header; expect quoted string"),
+ }
+ }
+
+ // read out the parts
+ parts := strings.Fields(ifMatchHeader[1 : len(ifMatchHeader)-1])
+ if len(parts) != 2 {
+ return APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("malformed If-Match header; expect format \"<path> <hash>\""),
+ }
+ }
+
+ // get the current hash of the config
+ // at the given path
+ hash := etagHasher()
+ err := unsyncedConfigAccess(http.MethodGet, parts[0], nil, hash)
+ if err != nil {
+ return err
+ }
+
+ if hex.EncodeToString(hash.Sum(nil)) != parts[1] {
+ return APIError{
+ HTTPStatus: http.StatusPreconditionFailed,
+ Err: fmt.Errorf("If-Match header did not match current config hash"),
+ }
+ }
+ }
err := unsyncedConfigAccess(method, path, input, nil)
if err != nil {
@@ -130,15 +208,15 @@ func changeConfig(method, path string, input []byte, forceReload bool) error {
newCfg, err := json.Marshal(rawCfg[rawConfigKey])
if err != nil {
return APIError{
- Code: http.StatusBadRequest,
- Err: fmt.Errorf("encoding new config: %v", err),
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("encoding new config: %v", err),
}
}
// if nothing changed, no need to do a whole reload unless the client forces it
if !forceReload && bytes.Equal(rawCfgJSON, newCfg) {
- Log().Named("admin.api").Info("config is unchanged")
- return nil
+ Log().Info("config is unchanged")
+ return errSameConfig
}
// find any IDs in this config and index them
@@ -146,21 +224,21 @@ func changeConfig(method, path string, input []byte, forceReload bool) error {
err = indexConfigObjects(rawCfg[rawConfigKey], "/"+rawConfigKey, idx)
if err != nil {
return APIError{
- Code: http.StatusInternalServerError,
- Err: fmt.Errorf("indexing config: %v", err),
+ HTTPStatus: http.StatusInternalServerError,
+ Err: fmt.Errorf("indexing config: %v", err),
}
}
// load this new config; if it fails, we need to revert to
// our old representation of caddy's actual config
- err = unsyncedDecodeAndRun(newCfg)
+ err = unsyncedDecodeAndRun(newCfg, true)
if err != nil {
if len(rawCfgJSON) > 0 {
// restore old config state to keep it consistent
// with what caddy is still running; we need to
// unmarshal it again because it's likely that
// pointers deep in our rawCfg map were modified
- var oldCfg interface{}
+ var oldCfg any
err2 := json.Unmarshal(rawCfgJSON, &oldCfg)
if err2 != nil {
err = fmt.Errorf("%v; additionally, restoring old config: %v", err, err2)
@@ -185,18 +263,18 @@ func changeConfig(method, path string, input []byte, forceReload bool) error {
// readConfig traverses the current config to path
// and writes its JSON encoding to out.
func readConfig(path string, out io.Writer) error {
- currentCfgMu.RLock()
- defer currentCfgMu.RUnlock()
+ rawCfgMu.RLock()
+ defer rawCfgMu.RUnlock()
return unsyncedConfigAccess(http.MethodGet, path, nil, out)
}
// indexConfigObjects recursively searches ptr for object fields named
// "@id" and maps that ID value to the full configPath in the index.
// This function is NOT safe for concurrent access; obtain a write lock
-// on currentCfgMu.
-func indexConfigObjects(ptr interface{}, configPath string, index map[string]string) error {
+// on currentCtxMu.
+func indexConfigObjects(ptr any, configPath string, index map[string]string) error {
switch val := ptr.(type) {
- case map[string]interface{}:
+ case map[string]any:
for k, v := range val {
if k == idKey {
switch idVal := v.(type) {
@@ -215,7 +293,7 @@ func indexConfigObjects(ptr interface{}, configPath string, index map[string]str
return err
}
}
- case []interface{}:
+ case []any:
// traverse each element of the array recursively
for i := range val {
err := indexConfigObjects(val[i], path.Join(configPath, strconv.Itoa(i)), index)
@@ -233,47 +311,66 @@ func indexConfigObjects(ptr interface{}, configPath string, index map[string]str
// it as the new config, replacing any other current config.
// It does NOT update the raw config state, as this is a
// lower-level function; most callers will want to use Load
-// instead. A write lock on currentCfgMu is required!
-func unsyncedDecodeAndRun(cfgJSON []byte) error {
+// instead. A write lock on rawCfgMu is required! If
+// allowPersist is false, it will not be persisted to disk,
+// even if it is configured to.
+func unsyncedDecodeAndRun(cfgJSON []byte, allowPersist bool) error {
// remove any @id fields from the JSON, which would cause
// loading to break since the field wouldn't be recognized
strippedCfgJSON := RemoveMetaFields(cfgJSON)
var newCfg *Config
- err := strictUnmarshalJSON(strippedCfgJSON, &newCfg)
+ err := StrictUnmarshalJSON(strippedCfgJSON, &newCfg)
if err != nil {
return err
}
+ // prevent recursive config loads; that is a user error, and
+ // although frequent config loads should be safe, we cannot
+ // guarantee that in the presence of third party plugins, nor
+ // do we want this error to go unnoticed (we assume it was a
+ // pulled config if we're not allowed to persist it)
+ if !allowPersist &&
+ newCfg != nil &&
+ newCfg.Admin != nil &&
+ newCfg.Admin.Config != nil &&
+ newCfg.Admin.Config.LoadRaw != nil &&
+ newCfg.Admin.Config.LoadDelay <= 0 {
+ return fmt.Errorf("recursive config loading detected: pulled configs cannot pull other configs without positive load_delay")
+ }
+
// run the new config and start all its apps
- err = run(newCfg, true)
+ ctx, err := run(newCfg, true)
if err != nil {
return err
}
- // swap old config with the new one
- oldCfg := currentCfg
- currentCfg = newCfg
+ // swap old context (including its config) with the new one
+ currentCtxMu.Lock()
+ oldCtx := currentCtx
+ currentCtx = ctx
+ currentCtxMu.Unlock()
// Stop, Cleanup each old app
- unsyncedStop(oldCfg)
+ unsyncedStop(oldCtx)
// autosave a non-nil config, if not disabled
- if newCfg != nil &&
+ if allowPersist &&
+ newCfg != nil &&
(newCfg.Admin == nil ||
newCfg.Admin.Config == nil ||
newCfg.Admin.Config.Persist == nil ||
*newCfg.Admin.Config.Persist) {
dir := filepath.Dir(ConfigAutosavePath)
- err := os.MkdirAll(dir, 0700)
+ err := os.MkdirAll(dir, 0o700)
if err != nil {
Log().Error("unable to create folder for config autosave",
zap.String("dir", dir),
zap.Error(err))
} else {
- err := ioutil.WriteFile(ConfigAutosavePath, cfgJSON, 0600)
+ err := os.WriteFile(ConfigAutosavePath, cfgJSON, 0o600)
if err == nil {
- Log().Info("autosaved config", zap.String("file", ConfigAutosavePath))
+ Log().Info("autosaved config (load with --resume flag)", zap.String("file", ConfigAutosavePath))
} else {
Log().Error("unable to autosave config",
zap.String("file", ConfigAutosavePath),
@@ -299,7 +396,63 @@ func unsyncedDecodeAndRun(cfgJSON []byte) error {
// This is a low-level function; most callers
// will want to use Run instead, which also
// updates the config's raw state.
-func run(newCfg *Config, start bool) error {
+func run(newCfg *Config, start bool) (Context, error) {
+ ctx, err := provisionContext(newCfg, start)
+ if err != nil {
+ globalMetrics.configSuccess.Set(0)
+ return ctx, err
+ }
+
+ if !start {
+ return ctx, nil
+ }
+
+ // Provision any admin routers which may need to access
+ // some of the other apps at runtime
+ err = ctx.cfg.Admin.provisionAdminRouters(ctx)
+ if err != nil {
+ globalMetrics.configSuccess.Set(0)
+ return ctx, err
+ }
+
+ // Start
+ err = func() error {
+ started := make([]string, 0, len(ctx.cfg.apps))
+ for name, a := range ctx.cfg.apps {
+ err := a.Start()
+ if err != nil {
+ // an app failed to start, so we need to stop
+ // all other apps that were already started
+ for _, otherAppName := range started {
+ err2 := ctx.cfg.apps[otherAppName].Stop()
+ if err2 != nil {
+ err = fmt.Errorf("%v; additionally, aborting app %s: %v",
+ err, otherAppName, err2)
+ }
+ }
+ return fmt.Errorf("%s app module: start: %v", name, err)
+ }
+ started = append(started, name)
+ }
+ return nil
+ }()
+ if err != nil {
+ globalMetrics.configSuccess.Set(0)
+ return ctx, err
+ }
+ globalMetrics.configSuccess.Set(1)
+ globalMetrics.configSuccessTime.SetToCurrentTime()
+ // now that the user's config is running, finish setting up anything else,
+ // such as remote admin endpoint, config loader, etc.
+ return ctx, finishSettingUp(ctx, ctx.cfg)
+}
+
+// provisionContext creates a new context from the given configuration and provisions
+// storage and apps.
+// If `newCfg` is nil, a new empty configuration will be created.
+// If `replaceAdminServer` is true, any currently active admin server will be replaced
+// with a new admin server based on the provided configuration.
+func provisionContext(newCfg *Config, replaceAdminServer bool) (Context, error) {
// because we will need to roll back any state
// modifications if this function errors, we
// keep a single error value and scope all
@@ -309,21 +462,10 @@ func run(newCfg *Config, start bool) error {
// been set by a short assignment
var err error
- // start the admin endpoint (and stop any prior one)
- if start {
- err = replaceAdmin(newCfg)
- if err != nil {
- return fmt.Errorf("starting caddy administration endpoint: %v", err)
- }
- }
-
if newCfg == nil {
- return nil
+ newCfg = new(Config)
}
- // prepare the new config for use
- newCfg.apps = make(map[string]App)
-
// create a context within which to load
// modules - essentially our new config's
// execution environment; be sure that
@@ -333,6 +475,7 @@ func run(newCfg *Config, start bool) error {
ctx, cancel := NewContext(Context{Context: context.Background(), cfg: newCfg})
defer func() {
if err != nil {
+ globalMetrics.configSuccess.Set(0)
// if there were any errors during startup,
// we should cancel the new context we created
// since the associated config won't be used;
@@ -341,8 +484,8 @@ func run(newCfg *Config, start bool) error {
cancel()
// also undo any other state changes we made
- if currentCfg != nil {
- certmagic.Default.Storage = currentCfg.storage
+ if currentCtx.cfg != nil {
+ certmagic.Default.Storage = currentCtx.cfg.storage
}
}
}()
@@ -354,9 +497,23 @@ func run(newCfg *Config, start bool) error {
}
err = newCfg.Logging.openLogs(ctx)
if err != nil {
- return err
+ return ctx, err
}
+ // start the admin endpoint (and stop any prior one)
+ if replaceAdminServer {
+ err = replaceLocalAdminServer(newCfg, ctx)
+ if err != nil {
+ return ctx, fmt.Errorf("starting caddy administration endpoint: %v", err)
+ }
+ }
+
+ // create the new filesystem map
+ newCfg.filesystems = &filesystems.FilesystemMap{}
+
+ // prepare the new config for use
+ newCfg.apps = make(map[string]App)
+
// set up global storage and make it CertMagic's default storage, too
err = func() error {
if newCfg.StorageRaw != nil {
@@ -379,7 +536,7 @@ func run(newCfg *Config, start bool) error {
return nil
}()
if err != nil {
- return err
+ return ctx, err
}
// Load and Provision each app and their submodules
@@ -391,35 +548,111 @@ func run(newCfg *Config, start bool) error {
}
return nil
}()
+ return ctx, err
+}
+
+// ProvisionContext creates a new context from the configuration and provisions storage
+// and app modules.
+// The function is intended for testing and advanced use cases only, typically `Run` should be
+// used to ensure a fully functional caddy instance.
+// EXPERIMENTAL: While this is public, the interface and implementation details of this function may change.
+func ProvisionContext(newCfg *Config) (Context, error) {
+ return provisionContext(newCfg, false)
+}
+
+// finishSettingUp should be run after all apps have successfully started.
+func finishSettingUp(ctx Context, cfg *Config) error {
+ // establish this server's identity (only after apps are loaded
+ // so that cert management of this endpoint doesn't prevent user's
+ // servers from starting which likely also use HTTP/HTTPS ports;
+ // but before remote management which may depend on these creds)
+ err := manageIdentity(ctx, cfg)
if err != nil {
- return err
+ return fmt.Errorf("provisioning remote admin endpoint: %v", err)
}
- if !start {
- return nil
+ // replace any remote admin endpoint
+ err = replaceRemoteAdminServer(ctx, cfg)
+ if err != nil {
+ return fmt.Errorf("provisioning remote admin endpoint: %v", err)
}
- // Start
- return func() error {
- var started []string
- for name, a := range newCfg.apps {
- err := a.Start()
- if err != nil {
- // an app failed to start, so we need to stop
- // all other apps that were already started
- for _, otherAppName := range started {
- err2 := newCfg.apps[otherAppName].Stop()
- if err2 != nil {
- err = fmt.Errorf("%v; additionally, aborting app %s: %v",
- err, otherAppName, err2)
- }
- }
- return fmt.Errorf("%s app module: start: %v", name, err)
- }
- started = append(started, name)
+ // if dynamic config is requested, set that up and run it
+ if cfg != nil && cfg.Admin != nil && cfg.Admin.Config != nil && cfg.Admin.Config.LoadRaw != nil {
+ val, err := ctx.LoadModule(cfg.Admin.Config, "LoadRaw")
+ if err != nil {
+ return fmt.Errorf("loading config loader module: %s", err)
}
- return nil
- }()
+
+ logger := Log().Named("config_loader").With(
+ zap.String("module", val.(Module).CaddyModule().ID.Name()),
+ zap.Int("load_delay", int(cfg.Admin.Config.LoadDelay)))
+
+ runLoadedConfig := func(config []byte) error {
+ logger.Info("applying dynamically-loaded config")
+ err := changeConfig(http.MethodPost, "/"+rawConfigKey, config, "", false)
+ if errors.Is(err, errSameConfig) {
+ return err
+ }
+ if err != nil {
+ logger.Error("failed to run dynamically-loaded config", zap.Error(err))
+ return err
+ }
+ logger.Info("successfully applied dynamically-loaded config")
+ return nil
+ }
+
+ if cfg.Admin.Config.LoadDelay > 0 {
+ go func() {
+ // the loop is here to iterate ONLY if there is an error, a no-op config load,
+ // or an unchanged config; in which case we simply wait the delay and try again
+ for {
+ timer := time.NewTimer(time.Duration(cfg.Admin.Config.LoadDelay))
+ select {
+ case <-timer.C:
+ loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx)
+ if err != nil {
+ logger.Error("failed loading dynamic config; will retry", zap.Error(err))
+ continue
+ }
+ if loadedConfig == nil {
+ logger.Info("dynamically-loaded config was nil; will retry")
+ continue
+ }
+ err = runLoadedConfig(loadedConfig)
+ if errors.Is(err, errSameConfig) {
+ logger.Info("dynamically-loaded config was unchanged; will retry")
+ continue
+ }
+ case <-ctx.Done():
+ if !timer.Stop() {
+ <-timer.C
+ }
+ logger.Info("stopping dynamic config loading")
+ }
+ break
+ }
+ }()
+ } else {
+ // if no LoadDelay is provided, will load config synchronously
+ loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx)
+ if err != nil {
+ return fmt.Errorf("loading dynamic config from %T: %v", val, err)
+ }
+ // do this in a goroutine so current config can finish being loaded; otherwise deadlock
+ go func() { _ = runLoadedConfig(loadedConfig) }()
+ }
+ }
+
+ return nil
+}
+
+// ConfigLoader is a type that can load a Caddy config. If
+// the return value is non-nil, it must be valid Caddy JSON;
+// if nil or with non-nil error, it is considered to be a
+// no-op load and may be retried later.
+type ConfigLoader interface {
+ LoadConfig(Context) ([]byte, error)
}
// Stop stops running the current configuration.
@@ -429,29 +662,42 @@ func run(newCfg *Config, start bool) error {
// stop the others. Stop should only be called
// if not replacing with a new config.
func Stop() error {
- currentCfgMu.Lock()
- defer currentCfgMu.Unlock()
- unsyncedStop(currentCfg)
- currentCfg = nil
+ currentCtxMu.RLock()
+ ctx := currentCtx
+ currentCtxMu.RUnlock()
+
+ rawCfgMu.Lock()
+ unsyncedStop(ctx)
+
+ currentCtxMu.Lock()
+ currentCtx = Context{}
+ currentCtxMu.Unlock()
+
rawCfgJSON = nil
rawCfgIndex = nil
rawCfg[rawConfigKey] = nil
+ rawCfgMu.Unlock()
+
return nil
}
-// unsyncedStop stops cfg from running, but has
-// no locking around cfg. It is a no-op if cfg is
-// nil. If any app returns an error when stopping,
+// unsyncedStop stops ctx from running, but has
+// no locking around ctx. It is a no-op if ctx has a
+// nil cfg. If any app returns an error when stopping,
// it is logged and the function continues stopping
// the next app. This function assumes all apps in
-// cfg were successfully started first.
-func unsyncedStop(cfg *Config) {
- if cfg == nil {
+// ctx were successfully started first.
+//
+// A lock on rawCfgMu is required, even though this
+// function does not access rawCfg, that lock
+// synchronizes the stop/start of apps.
+func unsyncedStop(ctx Context) {
+ if ctx.cfg == nil {
return
}
// stop each app
- for name, a := range cfg.apps {
+ for name, a := range ctx.cfg.apps {
err := a.Stop()
if err != nil {
log.Printf("[ERROR] stop %s: %v", name, err)
@@ -459,34 +705,131 @@ func unsyncedStop(cfg *Config) {
}
// clean up all modules
- cfg.cancelFunc()
-}
-
-// stopAndCleanup calls stop and cleans up anything
-// else that is expedient. This should only be used
-// when stopping and not replacing with a new config.
-func stopAndCleanup() error {
- if err := Stop(); err != nil {
- return err
- }
- certmagic.CleanUpOwnLocks()
- return nil
+ ctx.cfg.cancelFunc()
}
// Validate loads, provisions, and validates
// cfg, but does not start running it.
func Validate(cfg *Config) error {
- err := run(cfg, false)
+ _, err := run(cfg, false)
if err == nil {
cfg.cancelFunc() // call Cleanup on all modules
}
return err
}
+// exitProcess exits the process as gracefully as possible,
+// but it always exits, even if there are errors doing so.
+// It stops all apps, cleans up external locks, removes any
+// PID file, and shuts down admin endpoint(s) in a goroutine.
+// Errors are logged along the way, and an appropriate exit
+// code is emitted.
+func exitProcess(ctx context.Context, logger *zap.Logger) {
+ // let the rest of the program know we're quitting
+ atomic.StoreInt32(exiting, 1)
+
+ // give the OS or service/process manager our 2 weeks' notice: we quit
+ if err := notify.Stopping(); err != nil {
+ Log().Error("unable to notify service manager of stopping state", zap.Error(err))
+ }
+
+ if logger == nil {
+ logger = Log()
+ }
+ logger.Warn("exiting; byeee!! 👋")
+
+ exitCode := ExitCodeSuccess
+ lastContext := ActiveContext()
+
+ // stop all apps
+ if err := Stop(); err != nil {
+ logger.Error("failed to stop apps", zap.Error(err))
+ exitCode = ExitCodeFailedQuit
+ }
+
+ // clean up certmagic locks
+ certmagic.CleanUpOwnLocks(ctx, logger)
+
+ // remove pidfile
+ if pidfile != "" {
+ err := os.Remove(pidfile)
+ if err != nil {
+ logger.Error("cleaning up PID file:",
+ zap.String("pidfile", pidfile),
+ zap.Error(err))
+ exitCode = ExitCodeFailedQuit
+ }
+ }
+
+ // execute any process-exit callbacks
+ for _, exitFunc := range lastContext.exitFuncs {
+ exitFunc(ctx)
+ }
+ exitFuncsMu.Lock()
+ for _, exitFunc := range exitFuncs {
+ exitFunc(ctx)
+ }
+ exitFuncsMu.Unlock()
+
+ // shut down admin endpoint(s) in goroutines so that
+ // if this function was called from an admin handler,
+ // it has a chance to return gracefully
+ // use goroutine so that we can finish responding to API request
+ go func() {
+ defer func() {
+ logger = logger.With(zap.Int("exit_code", exitCode))
+ if exitCode == ExitCodeSuccess {
+ logger.Info("shutdown complete")
+ } else {
+ logger.Error("unclean shutdown")
+ }
+ os.Exit(exitCode)
+ }()
+
+ if remoteAdminServer != nil {
+ err := stopAdminServer(remoteAdminServer)
+ if err != nil {
+ exitCode = ExitCodeFailedQuit
+ logger.Error("failed to stop remote admin server gracefully", zap.Error(err))
+ }
+ }
+ if localAdminServer != nil {
+ err := stopAdminServer(localAdminServer)
+ if err != nil {
+ exitCode = ExitCodeFailedQuit
+ logger.Error("failed to stop local admin server gracefully", zap.Error(err))
+ }
+ }
+ }()
+}
+
+var exiting = new(int32) // accessed atomically
+
+// Exiting returns true if the process is exiting.
+// EXPERIMENTAL API: subject to change or removal.
+func Exiting() bool { return atomic.LoadInt32(exiting) == 1 }
+
+// OnExit registers a callback to invoke during process exit.
+// This registration is PROCESS-GLOBAL, meaning that each
+// function should only be registered once forever, NOT once
+// per config load (etc).
+//
+// EXPERIMENTAL API: subject to change or removal.
+func OnExit(f func(context.Context)) {
+ exitFuncsMu.Lock()
+ exitFuncs = append(exitFuncs, f)
+ exitFuncsMu.Unlock()
+}
+
+var (
+ exitFuncs []func(context.Context)
+ exitFuncsMu sync.Mutex
+)
+
// Duration can be an integer or a string. An integer is
// interpreted as nanoseconds. If a string, it is a Go
// time.Duration value such as `300ms`, `1.5h`, or `2h45m`;
-// valid units are `ns`, `us`/`µs`, `ms`, `s`, `m`, and `h`.
+// valid units are `ns`, `us`/`µs`, `ms`, `s`, `m`, `h`, and `d`.
type Duration time.Duration
// UnmarshalJSON satisfies json.Unmarshaler.
@@ -497,7 +840,7 @@ func (d *Duration) UnmarshalJSON(b []byte) error {
var dur time.Duration
var err error
if b[0] == byte('"') && b[len(b)-1] == byte('"') {
- dur, err = time.ParseDuration(strings.Trim(string(b), `"`))
+ dur, err = ParseDuration(strings.Trim(string(b), `"`))
} else {
err = json.Unmarshal(b, &dur)
}
@@ -505,36 +848,201 @@ func (d *Duration) UnmarshalJSON(b []byte) error {
return err
}
-// GoModule returns the build info of this Caddy
-// build from debug.BuildInfo (requires Go modules).
-// If no version information is available, a non-nil
-// value will still be returned, but with an
-// unknown version.
-func GoModule() *debug.Module {
- var mod debug.Module
- return goModule(&mod)
+// ParseDuration parses a duration string, adding
+// support for the "d" unit meaning number of days,
+// where a day is assumed to be 24h. The maximum
+// input string length is 1024.
+func ParseDuration(s string) (time.Duration, error) {
+ if len(s) > 1024 {
+ return 0, fmt.Errorf("parsing duration: input string too long")
+ }
+ var inNumber bool
+ var numStart int
+ for i := 0; i < len(s); i++ {
+ ch := s[i]
+ if ch == 'd' {
+ daysStr := s[numStart:i]
+ days, err := strconv.ParseFloat(daysStr, 64)
+ if err != nil {
+ return 0, err
+ }
+ hours := days * 24.0
+ hoursStr := strconv.FormatFloat(hours, 'f', -1, 64)
+ s = s[:numStart] + hoursStr + "h" + s[i+1:]
+ i--
+ continue
+ }
+ if !inNumber {
+ numStart = i
+ }
+ inNumber = (ch >= '0' && ch <= '9') || ch == '.' || ch == '-' || ch == '+'
+ }
+ return time.ParseDuration(s)
}
-// goModule holds the actual implementation of GoModule.
-// Allocating debug.Module in GoModule() and passing a
-// reference to goModule enables mid-stack inlining.
-func goModule(mod *debug.Module) *debug.Module {
- mod.Version = "unknown"
+// InstanceID returns the UUID for this instance, and generates one if it
+// does not already exist. The UUID is stored in the local data directory,
+// regardless of storage configuration, since each instance is intended to
+// have its own unique ID.
+func InstanceID() (uuid.UUID, error) {
+ appDataDir := AppDataDir()
+ uuidFilePath := filepath.Join(appDataDir, "instance.uuid")
+ uuidFileBytes, err := os.ReadFile(uuidFilePath)
+ if errors.Is(err, fs.ErrNotExist) {
+ uuid, err := uuid.NewRandom()
+ if err != nil {
+ return uuid, err
+ }
+ err = os.MkdirAll(appDataDir, 0o700)
+ if err != nil {
+ return uuid, err
+ }
+ err = os.WriteFile(uuidFilePath, []byte(uuid.String()), 0o600)
+ return uuid, err
+ } else if err != nil {
+ return [16]byte{}, err
+ }
+ return uuid.ParseBytes(uuidFileBytes)
+}
+
+// CustomVersion is an optional string that overrides Caddy's
+// reported version. It can be helpful when downstream packagers
+// need to manually set Caddy's version. If no other version
+// information is available, the short form version (see
+// Version()) will be set to CustomVersion, and the full version
+// will include CustomVersion at the beginning.
+//
+// Set this variable during `go build` with `-ldflags`:
+//
+// -ldflags '-X github.com/caddyserver/caddy/v2.CustomVersion=v2.6.2'
+//
+// for example.
+var CustomVersion string
+
+// Version returns the Caddy version in a simple/short form, and
+// a full version string. The short form will not have spaces and
+// is intended for User-Agent strings and similar, but may be
+// omitting valuable information. Note that Caddy must be compiled
+// in a special way to properly embed complete version information.
+// First this function tries to get the version from the embedded
+// build info provided by go.mod dependencies; then it tries to
+// get info from embedded VCS information, which requires having
+// built Caddy from a git repository. If no version is available,
+// this function returns "(devel)" because Go uses that, but for
+// the simple form we change it to "unknown". If still no version
+// is available (e.g. no VCS repo), then it will use CustomVersion;
+// CustomVersion is always prepended to the full version string.
+//
+// See relevant Go issues: https://github.com/golang/go/issues/29228
+// and https://github.com/golang/go/issues/50603.
+//
+// This function is experimental and subject to change or removal.
+func Version() (simple, full string) {
+ // the currently-recommended way to build Caddy involves
+ // building it as a dependency so we can extract version
+ // information from go.mod tooling; once the upstream
+ // Go issues are fixed, we should just be able to use
+ // bi.Main... hopefully.
+ var module *debug.Module
bi, ok := debug.ReadBuildInfo()
- if ok {
- mod.Path = bi.Main.Path
- // The recommended way to build Caddy involves
- // creating a separate main module, which
- // TODO: track related Go issue: https://github.com/golang/go/issues/29228
- // once that issue is fixed, we should just be able to use bi.Main... hopefully.
- for _, dep := range bi.Deps {
- if dep.Path == ImportPath {
- return dep
+ if !ok {
+ if CustomVersion != "" {
+ full = CustomVersion
+ simple = CustomVersion
+ return
+ }
+ full = "unknown"
+ simple = "unknown"
+ return
+ }
+ // find the Caddy module in the dependency list
+ for _, dep := range bi.Deps {
+ if dep.Path == ImportPath {
+ module = dep
+ break
+ }
+ }
+ if module != nil {
+ simple, full = module.Version, module.Version
+ if module.Sum != "" {
+ full += " " + module.Sum
+ }
+ if module.Replace != nil {
+ full += " => " + module.Replace.Path
+ if module.Replace.Version != "" {
+ simple = module.Replace.Version + "_custom"
+ full += "@" + module.Replace.Version
+ }
+ if module.Replace.Sum != "" {
+ full += " " + module.Replace.Sum
}
}
- return &bi.Main
}
- return mod
+
+ if full == "" {
+ var vcsRevision string
+ var vcsTime time.Time
+ var vcsModified bool
+ for _, setting := range bi.Settings {
+ switch setting.Key {
+ case "vcs.revision":
+ vcsRevision = setting.Value
+ case "vcs.time":
+ vcsTime, _ = time.Parse(time.RFC3339, setting.Value)
+ case "vcs.modified":
+ vcsModified, _ = strconv.ParseBool(setting.Value)
+ }
+ }
+
+ if vcsRevision != "" {
+ var modified string
+ if vcsModified {
+ modified = "+modified"
+ }
+ full = fmt.Sprintf("%s%s (%s)", vcsRevision, modified, vcsTime.Format(time.RFC822))
+ simple = vcsRevision
+
+ // use short checksum for simple, if hex-only
+ if _, err := hex.DecodeString(simple); err == nil {
+ simple = simple[:8]
+ }
+
+ // append date to simple since it can be convenient
+ // to know the commit date as part of the version
+ if !vcsTime.IsZero() {
+ simple += "-" + vcsTime.Format("20060102")
+ }
+ }
+ }
+
+ if full == "" {
+ if CustomVersion != "" {
+ full = CustomVersion
+ } else {
+ full = "unknown"
+ }
+ } else if CustomVersion != "" {
+ full = CustomVersion + " " + full
+ }
+
+ if simple == "" || simple == "(devel)" {
+ if CustomVersion != "" {
+ simple = CustomVersion
+ } else {
+ simple = "unknown"
+ }
+ }
+
+ return
+}
+
+// ActiveContext returns the currently-active context.
+// This function is experimental and might be changed
+// or removed in the future.
+func ActiveContext() Context {
+ currentCtxMu.RLock()
+ defer currentCtxMu.RUnlock()
+ return currentCtx
}
// CtxKey is a value type for use with context.WithValue.
@@ -542,18 +1050,19 @@ type CtxKey string
// This group of variables pertains to the current configuration.
var (
- // currentCfgMu protects everything in this var block.
- currentCfgMu sync.RWMutex
-
- // currentCfg is the currently-running configuration.
- currentCfg *Config
+ // currentCtx is the root context for the currently-running
+ // configuration, which can be accessed through this value.
+ // If the Config contained in this value is not nil, then
+ // a config is currently active/running.
+ currentCtx Context
+ currentCtxMu sync.RWMutex
// rawCfg is the current, generic-decoded configuration;
// we initialize it as a map with one field ("config")
// to maintain parity with the API endpoint and to avoid
// the special case of having to access/mutate the variable
// directly without traversing into it.
- rawCfg = map[string]interface{}{
+ rawCfg = map[string]any{
rawConfigKey: nil,
}
@@ -564,7 +1073,17 @@ var (
// rawCfgIndex is the map of user-assigned ID to expanded
// path, for converting /id/ paths to /config/ paths.
rawCfgIndex map[string]string
+
+ // rawCfgMu protects all the rawCfg fields and also
+ // essentially synchronizes config changes/reloads.
+ rawCfgMu sync.RWMutex
)
+// errSameConfig is returned if the new config is the same
+// as the old one. This isn't usually an actual, actionable
+// error; it's mostly a sentinel value.
+var errSameConfig = errors.New("config is unchanged")
+
// ImportPath is the package import path for Caddy core.
+// This identifier may be removed in the future.
const ImportPath = "github.com/caddyserver/caddy/v2"
diff --git a/caddy_test.go b/caddy_test.go
new file mode 100644
index 00000000..adf14350
--- /dev/null
+++ b/caddy_test.go
@@ -0,0 +1,74 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddy
+
+import (
+ "testing"
+ "time"
+)
+
+func TestParseDuration(t *testing.T) {
+ const day = 24 * time.Hour
+ for i, tc := range []struct {
+ input string
+ expect time.Duration
+ }{
+ {
+ input: "3h",
+ expect: 3 * time.Hour,
+ },
+ {
+ input: "1d",
+ expect: day,
+ },
+ {
+ input: "1d30m",
+ expect: day + 30*time.Minute,
+ },
+ {
+ input: "1m2d",
+ expect: time.Minute + day*2,
+ },
+ {
+ input: "1m2d30s",
+ expect: time.Minute + day*2 + 30*time.Second,
+ },
+ {
+ input: "1d2d",
+ expect: 3 * day,
+ },
+ {
+ input: "1.5d",
+ expect: time.Duration(1.5 * float64(day)),
+ },
+ {
+ input: "4m1.25d",
+ expect: 4*time.Minute + time.Duration(1.25*float64(day)),
+ },
+ {
+ input: "-1.25d12h",
+ expect: time.Duration(-1.25*float64(day)) - 12*time.Hour,
+ },
+ } {
+ actual, err := ParseDuration(tc.input)
+ if err != nil {
+ t.Errorf("Test %d ('%s'): Got error: %v", i, tc.input, err)
+ continue
+ }
+ if actual != tc.expect {
+ t.Errorf("Test %d ('%s'): Expected=%s Actual=%s", i, tc.input, tc.expect, actual)
+ }
+ }
+}
diff --git a/caddyconfig/caddyfile/adapter.go b/caddyconfig/caddyfile/adapter.go
index 5b4495eb..da4f9833 100644
--- a/caddyconfig/caddyfile/adapter.go
+++ b/caddyconfig/caddyfile/adapter.go
@@ -15,6 +15,7 @@
package caddyfile
import (
+ "bytes"
"encoding/json"
"fmt"
@@ -28,12 +29,12 @@ type Adapter struct {
}
// Adapt converts the Caddyfile config in body to Caddy JSON.
-func (a Adapter) Adapt(body []byte, options map[string]interface{}) ([]byte, []caddyconfig.Warning, error) {
+func (a Adapter) Adapt(body []byte, options map[string]any) ([]byte, []caddyconfig.Warning, error) {
if a.ServerType == nil {
return nil, nil, fmt.Errorf("no server type")
}
if options == nil {
- options = make(map[string]interface{})
+ options = make(map[string]any)
}
filename, _ := options["filename"].(string)
@@ -51,40 +52,93 @@ func (a Adapter) Adapt(body []byte, options map[string]interface{}) ([]byte, []c
return nil, warnings, err
}
- marshalFunc := json.Marshal
- if options["pretty"] == "true" {
- marshalFunc = caddyconfig.JSONIndent
+ // lint check: see if input was properly formatted; sometimes messy files parse
+ // successfully but result in logical errors (the Caddyfile is a bad format, I'm sorry)
+ if warning, different := FormattingDifference(filename, body); different {
+ warnings = append(warnings, warning)
}
- result, err := marshalFunc(cfg)
+
+ result, err := json.Marshal(cfg)
return result, warnings, err
}
-// Unmarshaler is a type that can unmarshal
-// Caddyfile tokens to set itself up for a
-// JSON encoding. The goal of an unmarshaler
-// is not to set itself up for actual use,
-// but to set itself up for being marshaled
-// into JSON. Caddyfile-unmarshaled values
-// will not be used directly; they will be
-// encoded as JSON and then used from that.
-// Implementations must be able to support
-// multiple segments (instances of their
-// directive or batch of tokens); typically
-// this means wrapping all token logic in
-// a loop: `for d.Next() { ... }`.
+// FormattingDifference returns a warning and true if the formatted version
+// is any different from the input; empty warning and false otherwise.
+// TODO: also perform this check on imported files
+func FormattingDifference(filename string, body []byte) (caddyconfig.Warning, bool) {
+ // replace windows-style newlines to normalize comparison
+ normalizedBody := bytes.Replace(body, []byte("\r\n"), []byte("\n"), -1)
+
+ formatted := Format(normalizedBody)
+ if bytes.Equal(formatted, normalizedBody) {
+ return caddyconfig.Warning{}, false
+ }
+
+ // find where the difference is
+ line := 1
+ for i, ch := range normalizedBody {
+ if i >= len(formatted) || ch != formatted[i] {
+ break
+ }
+ if ch == '\n' {
+ line++
+ }
+ }
+ return caddyconfig.Warning{
+ File: filename,
+ Line: line,
+ Message: "Caddyfile input is not formatted; run 'caddy fmt --overwrite' to fix inconsistencies",
+ }, true
+}
+
+// Unmarshaler is a type that can unmarshal Caddyfile tokens to
+// set itself up for a JSON encoding. The goal of an unmarshaler
+// is not to set itself up for actual use, but to set itself up for
+// being marshaled into JSON. Caddyfile-unmarshaled values will not
+// be used directly; they will be encoded as JSON and then used from
+// that. Implementations _may_ be able to support multiple segments
+// (instances of their directive or batch of tokens); typically this
+// means wrapping parsing logic in a loop: `for d.Next() { ... }`.
+// More commonly, only a single segment is supported, so a simple
+// `d.Next()` at the start should be used to consume the module
+// identifier token (directive name, etc).
type Unmarshaler interface {
UnmarshalCaddyfile(d *Dispenser) error
}
// ServerType is a type that can evaluate a Caddyfile and set up a caddy config.
type ServerType interface {
- // Setup takes the server blocks which
- // contain tokens, as well as options
- // (e.g. CLI flags) and creates a Caddy
- // config, along with any warnings or
- // an error.
- Setup([]ServerBlock, map[string]interface{}) (*caddy.Config, []caddyconfig.Warning, error)
+ // Setup takes the server blocks which contain tokens,
+ // as well as options (e.g. CLI flags) and creates a
+ // Caddy config, along with any warnings or an error.
+ Setup([]ServerBlock, map[string]any) (*caddy.Config, []caddyconfig.Warning, error)
+}
+
+// UnmarshalModule instantiates a module with the given ID and invokes
+// UnmarshalCaddyfile on the new value using the immediate next segment
+// of d as input. In other words, d's next token should be the first
+// token of the module's Caddyfile input.
+//
+// This function is used when the next segment of Caddyfile tokens
+// belongs to another Caddy module. The returned value is often
+// type-asserted to the module's associated type for practical use
+// when setting up a config.
+func UnmarshalModule(d *Dispenser, moduleID string) (Unmarshaler, error) {
+ mod, err := caddy.GetModule(moduleID)
+ if err != nil {
+ return nil, d.Errf("getting module named '%s': %v", moduleID, err)
+ }
+ inst := mod.New()
+ unm, ok := inst.(Unmarshaler)
+ if !ok {
+ return nil, d.Errf("module %s is not a Caddyfile unmarshaler; is %T", mod.ID, inst)
+ }
+ err = unm.UnmarshalCaddyfile(d.NewFromNextSegment())
+ if err != nil {
+ return nil, err
+ }
+ return unm, nil
}
// Interface guard
diff --git a/caddyconfig/caddyfile/dispenser.go b/caddyconfig/caddyfile/dispenser.go
old mode 100755
new mode 100644
index 932ab613..325bb54d
--- a/caddyconfig/caddyfile/dispenser.go
+++ b/caddyconfig/caddyfile/dispenser.go
@@ -17,6 +17,9 @@ package caddyfile
import (
"errors"
"fmt"
+ "io"
+ "log"
+ "strconv"
"strings"
)
@@ -27,6 +30,10 @@ type Dispenser struct {
tokens []Token
cursor int
nesting int
+
+ // A map of arbitrary context data that can be used
+ // to pass through some information to unmarshalers.
+ context map[string]any
}
// NewDispenser returns a Dispenser filled with the given tokens.
@@ -37,6 +44,16 @@ func NewDispenser(tokens []Token) *Dispenser {
}
}
+// NewTestDispenser parses input into tokens and creates a new
+// Dispenser for test purposes only; any errors are fatal.
+func NewTestDispenser(input string) *Dispenser {
+ tokens, err := allTokens("Testfile", []byte(input))
+ if err != nil && err != io.EOF {
+ log.Fatalf("getting all tokens from input: %v", err)
+ }
+ return NewDispenser(tokens)
+}
+
// Next loads the next token. Returns true if a token
// was loaded; false otherwise. If false, all tokens
// have been consumed.
@@ -88,12 +105,12 @@ func (d *Dispenser) nextOnSameLine() bool {
d.cursor++
return true
}
- if d.cursor >= len(d.tokens) {
+ if d.cursor >= len(d.tokens)-1 {
return false
}
- if d.cursor < len(d.tokens)-1 &&
- d.tokens[d.cursor].File == d.tokens[d.cursor+1].File &&
- d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) == d.tokens[d.cursor+1].Line {
+ curr := d.tokens[d.cursor]
+ next := d.tokens[d.cursor+1]
+ if !isNextOnNewLine(curr, next) {
d.cursor++
return true
}
@@ -109,12 +126,12 @@ func (d *Dispenser) NextLine() bool {
d.cursor++
return true
}
- if d.cursor >= len(d.tokens) {
+ if d.cursor >= len(d.tokens)-1 {
return false
}
- if d.cursor < len(d.tokens)-1 &&
- (d.tokens[d.cursor].File != d.tokens[d.cursor+1].File ||
- d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) < d.tokens[d.cursor+1].Line) {
+ curr := d.tokens[d.cursor]
+ next := d.tokens[d.cursor+1]
+ if isNextOnNewLine(curr, next) {
d.cursor++
return true
}
@@ -133,15 +150,15 @@ func (d *Dispenser) NextLine() bool {
//
// Proper use of this method looks like this:
//
-// for nesting := d.Nesting(); d.NextBlock(nesting); {
-// }
+// for nesting := d.Nesting(); d.NextBlock(nesting); {
+// }
//
// However, in simple cases where it is known that the
// Dispenser is new and has not already traversed state
// by a loop over NextBlock(), this will do:
//
-// for d.NextBlock(0) {
-// }
+// for d.NextBlock(0) {
+// }
//
// As with other token parsing logic, a loop over
// NextBlock() should be contained within a loop over
@@ -189,6 +206,46 @@ func (d *Dispenser) Val() string {
return d.tokens[d.cursor].Text
}
+// ValRaw gets the raw text of the current token (including quotes).
+// If the token was a heredoc, then the delimiter is not included,
+// because that is not relevant to any unmarshaling logic at this time.
+// If there is no token loaded, it returns empty string.
+func (d *Dispenser) ValRaw() string {
+ if d.cursor < 0 || d.cursor >= len(d.tokens) {
+ return ""
+ }
+ quote := d.tokens[d.cursor].wasQuoted
+ if quote > 0 && quote != '<' {
+ // string literal
+ return string(quote) + d.tokens[d.cursor].Text + string(quote)
+ }
+ return d.tokens[d.cursor].Text
+}
+
+// ScalarVal gets value of the current token, converted to the closest
+// scalar type. If there is no token loaded, it returns nil.
+func (d *Dispenser) ScalarVal() any {
+ if d.cursor < 0 || d.cursor >= len(d.tokens) {
+ return nil
+ }
+ quote := d.tokens[d.cursor].wasQuoted
+ text := d.tokens[d.cursor].Text
+
+ if quote > 0 {
+ return text // string literal
+ }
+ if num, err := strconv.Atoi(text); err == nil {
+ return num
+ }
+ if num, err := strconv.ParseFloat(text, 64); err == nil {
+ return num
+ }
+ if bool, err := strconv.ParseBool(text); err == nil {
+ return bool
+ }
+ return text
+}
+
// Line gets the line number of the current token.
// If there is no token loaded, it returns 0.
func (d *Dispenser) Line() int {
@@ -237,6 +294,19 @@ func (d *Dispenser) AllArgs(targets ...*string) bool {
return true
}
+// CountRemainingArgs counts the number of remaining arguments
+// (tokens on the same line) without consuming the tokens.
+func (d *Dispenser) CountRemainingArgs() int {
+ count := 0
+ for d.NextArg() {
+ count++
+ }
+ for i := 0; i < count; i++ {
+ d.Prev()
+ }
+ return count
+}
+
// RemainingArgs loads any more arguments (tokens on the same line)
// into a slice and returns them. Open curly brace tokens also indicate
// the end of arguments, and the curly brace is not included in
@@ -249,6 +319,18 @@ func (d *Dispenser) RemainingArgs() []string {
return args
}
+// RemainingArgsRaw loads any more arguments (tokens on the same line,
+// retaining quotes) into a slice and returns them. Open curly brace
+// tokens also indicate the end of arguments, and the curly brace is
+// not included in the return value nor is it loaded.
+func (d *Dispenser) RemainingArgsRaw() []string {
+ var args []string
+ for d.NextArg() {
+ args = append(args, d.ValRaw())
+ }
+ return args
+}
+
// NewFromNextSegment returns a new dispenser with a copy of
// the tokens from the current token until the end of the
// "directive" whether that be to the end of the line or
@@ -313,33 +395,40 @@ func (d *Dispenser) Reset() {
// an argument.
func (d *Dispenser) ArgErr() error {
if d.Val() == "{" {
- return d.Err("Unexpected token '{', expecting argument")
+ return d.Err("unexpected token '{', expecting argument")
}
- return d.Errf("Wrong argument count or unexpected line ending after '%s'", d.Val())
+ return d.Errf("wrong argument count or unexpected line ending after '%s'", d.Val())
}
// SyntaxErr creates a generic syntax error which explains what was
// found and what was expected.
func (d *Dispenser) SyntaxErr(expected string) error {
- msg := fmt.Sprintf("%s:%d - Syntax error: Unexpected token '%s', expecting '%s'", d.File(), d.Line(), d.Val(), expected)
+ msg := fmt.Sprintf("syntax error: unexpected token '%s', expecting '%s', at %s:%d import chain: ['%s']", d.Val(), expected, d.File(), d.Line(), strings.Join(d.Token().imports, "','"))
return errors.New(msg)
}
// EOFErr returns an error indicating that the dispenser reached
// the end of the input when searching for the next token.
func (d *Dispenser) EOFErr() error {
- return d.Errf("Unexpected EOF")
+ return d.Errf("unexpected EOF")
}
// Err generates a custom parse-time error with a message of msg.
func (d *Dispenser) Err(msg string) error {
- msg = fmt.Sprintf("%s:%d - Error during parsing: %s", d.File(), d.Line(), msg)
- return errors.New(msg)
+ return d.WrapErr(errors.New(msg))
}
// Errf is like Err, but for formatted error messages
-func (d *Dispenser) Errf(format string, args ...interface{}) error {
- return d.Err(fmt.Sprintf(format, args...))
+func (d *Dispenser) Errf(format string, args ...any) error {
+ return d.WrapErr(fmt.Errorf(format, args...))
+}
+
+// WrapErr takes an existing error and adds the Caddyfile file and line number.
+func (d *Dispenser) WrapErr(err error) error {
+ if len(d.Token().imports) > 0 {
+ return fmt.Errorf("%w, at %s:%d import chain ['%s']", err, d.File(), d.Line(), strings.Join(d.Token().imports, "','"))
+ }
+ return fmt.Errorf("%w, at %s:%d", err, d.File(), d.Line())
}
// Delete deletes the current token and returns the updated slice
@@ -359,14 +448,42 @@ func (d *Dispenser) Delete() []Token {
return d.tokens
}
-// numLineBreaks counts how many line breaks are in the token
-// value given by the token index tknIdx. It returns 0 if the
-// token does not exist or there are no line breaks.
-func (d *Dispenser) numLineBreaks(tknIdx int) int {
- if tknIdx < 0 || tknIdx >= len(d.tokens) {
- return 0
+// DeleteN is the same as Delete, but can delete many tokens at once.
+// If there aren't N tokens available to delete, none are deleted.
+func (d *Dispenser) DeleteN(amount int) []Token {
+ if amount > 0 && d.cursor >= (amount-1) && d.cursor <= len(d.tokens)-1 {
+ d.tokens = append(d.tokens[:d.cursor-(amount-1)], d.tokens[d.cursor+1:]...)
+ d.cursor -= amount
}
- return strings.Count(d.tokens[tknIdx].Text, "\n")
+ return d.tokens
+}
+
+// SetContext sets a key-value pair in the context map.
+func (d *Dispenser) SetContext(key string, value any) {
+ if d.context == nil {
+ d.context = make(map[string]any)
+ }
+ d.context[key] = value
+}
+
+// GetContext gets the value of a key in the context map.
+func (d *Dispenser) GetContext(key string) any {
+ if d.context == nil {
+ return nil
+ }
+ return d.context[key]
+}
+
+// GetContextString gets the value of a key in the context map
+// as a string, or an empty string if the key does not exist.
+func (d *Dispenser) GetContextString(key string) string {
+ if d.context == nil {
+ return ""
+ }
+ if val, ok := d.context[key].(string); ok {
+ return val
+ }
+ return ""
}
// isNewLine determines whether the current token is on a different
@@ -379,6 +496,26 @@ func (d *Dispenser) isNewLine() bool {
if d.cursor > len(d.tokens)-1 {
return false
}
- return d.tokens[d.cursor-1].File != d.tokens[d.cursor].File ||
- d.tokens[d.cursor-1].Line+d.numLineBreaks(d.cursor-1) < d.tokens[d.cursor].Line
+
+ prev := d.tokens[d.cursor-1]
+ curr := d.tokens[d.cursor]
+ return isNextOnNewLine(prev, curr)
}
+
+// isNextOnNewLine determines whether the current token is on a different
+// line (higher line number) than the next token. It handles imported
+// tokens correctly. If there isn't a next token, it returns true.
+func (d *Dispenser) isNextOnNewLine() bool {
+ if d.cursor < 0 {
+ return false
+ }
+ if d.cursor >= len(d.tokens)-1 {
+ return true
+ }
+
+ curr := d.tokens[d.cursor]
+ next := d.tokens[d.cursor+1]
+ return isNextOnNewLine(curr, next)
+}
+
+const MatcherNameCtxKey = "matcher_name"
diff --git a/caddyconfig/caddyfile/dispenser_test.go b/caddyconfig/caddyfile/dispenser_test.go
old mode 100755
new mode 100644
index 4970f9d3..0f6ee504
--- a/caddyconfig/caddyfile/dispenser_test.go
+++ b/caddyconfig/caddyfile/dispenser_test.go
@@ -15,8 +15,7 @@
package caddyfile
import (
- "io"
- "log"
+ "errors"
"reflect"
"strings"
"testing"
@@ -305,14 +304,10 @@ func TestDispenser_ArgErr_Err(t *testing.T) {
if !strings.Contains(err.Error(), "foobar") {
t.Errorf("Expected error message with custom message in it ('foobar'); got '%v'", err)
}
-}
-// NewTestDispenser parses input into tokens and creates a new
-// Disenser for test purposes only; any errors are fatal.
-func NewTestDispenser(input string) *Dispenser {
- tokens, err := allTokens("Testfile", []byte(input))
- if err != nil && err != io.EOF {
- log.Fatalf("getting all tokens from input: %v", err)
+ ErrBarIsFull := errors.New("bar is full")
+ bookingError := d.Errf("unable to reserve: %w", ErrBarIsFull)
+ if !errors.Is(bookingError, ErrBarIsFull) {
+ t.Errorf("Errf(): should be able to unwrap the error chain")
}
- return NewDispenser(tokens)
}
diff --git a/caddyconfig/caddyfile/formatter.go b/caddyconfig/caddyfile/formatter.go
index 2c97f3b0..d35f0ac6 100644
--- a/caddyconfig/caddyfile/formatter.go
+++ b/caddyconfig/caddyfile/formatter.go
@@ -17,6 +17,7 @@ package caddyfile
import (
"bytes"
"io"
+ "slices"
"unicode"
)
@@ -31,6 +32,14 @@ func Format(input []byte) []byte {
out := new(bytes.Buffer)
rdr := bytes.NewReader(input)
+ type heredocState int
+
+ const (
+ heredocClosed heredocState = 0
+ heredocOpening heredocState = 1
+ heredocOpened heredocState = 2
+ )
+
var (
last rune // the last character that was written to the result
@@ -47,6 +56,11 @@ func Format(input []byte) []byte {
quoted bool // whether we're in a quoted segment
escaped bool // whether current char is escaped
+ heredoc heredocState // whether we're in a heredoc
+ heredocEscaped bool // whether heredoc is escaped
+ heredocMarker []rune
+ heredocClosingMarker []rune
+
nesting int // indentation level
)
@@ -75,9 +89,68 @@ func Format(input []byte) []byte {
panic(err)
}
+ // detect whether we have the start of a heredoc
+ if !quoted && !(heredoc != heredocClosed || heredocEscaped) &&
+ space && last == '<' && ch == '<' {
+ write(ch)
+ heredoc = heredocOpening
+ space = false
+ continue
+ }
+
+ if heredoc == heredocOpening {
+ if ch == '\n' {
+ if len(heredocMarker) > 0 && heredocMarkerRegexp.MatchString(string(heredocMarker)) {
+ heredoc = heredocOpened
+ } else {
+ heredocMarker = nil
+ heredoc = heredocClosed
+ nextLine()
+ continue
+ }
+ write(ch)
+ continue
+ }
+ if unicode.IsSpace(ch) {
+ // a space means it's just a regular token and not a heredoc
+ heredocMarker = nil
+ heredoc = heredocClosed
+ } else {
+ heredocMarker = append(heredocMarker, ch)
+ write(ch)
+ continue
+ }
+ }
+ // if we're in a heredoc, all characters are read&write as-is
+ if heredoc == heredocOpened {
+ heredocClosingMarker = append(heredocClosingMarker, ch)
+ if len(heredocClosingMarker) > len(heredocMarker)+1 { // We assert that the heredocClosingMarker is followed by a unicode.Space
+ heredocClosingMarker = heredocClosingMarker[1:]
+ }
+ // check if we're done
+ if unicode.IsSpace(ch) && slices.Equal(heredocClosingMarker[:len(heredocClosingMarker)-1], heredocMarker) {
+ heredocMarker = nil
+ heredocClosingMarker = nil
+ heredoc = heredocClosed
+ } else {
+ write(ch)
+ if ch == '\n' {
+ heredocClosingMarker = heredocClosingMarker[:0]
+ }
+ continue
+ }
+ }
+
+ if last == '<' && space {
+ space = false
+ }
+
if comment {
if ch == '\n' {
comment = false
+ space = true
+ nextLine()
+ continue
} else {
write(ch)
continue
@@ -95,6 +168,9 @@ func Format(input []byte) []byte {
}
if escaped {
+ if ch == '<' {
+ heredocEscaped = true
+ }
write(ch)
escaped = false
continue
@@ -114,6 +190,7 @@ func Format(input []byte) []byte {
if unicode.IsSpace(ch) {
space = true
+ heredocEscaped = false
if ch == '\n' {
newLines++
}
@@ -131,9 +208,6 @@ func Format(input []byte) []byte {
//////////////////////////////////////////////////////////
if ch == '#' {
- if !spacePrior && !beginningOfLine {
- write(' ')
- }
comment = true
}
@@ -153,7 +227,10 @@ func Format(input []byte) []byte {
openBraceWritten = true
nextLine()
newLines = 0
- nesting++
+ // prevent infinite nesting from ridiculous inputs (issue #4169)
+ if nesting < 10 {
+ nesting++
+ }
}
switch {
@@ -202,6 +279,11 @@ func Format(input []byte) []byte {
write('{')
openBraceWritten = true
}
+
+ if spacePrior && ch == '<' {
+ space = true
+ }
+
write(ch)
beginningOfLine = false
diff --git a/caddyconfig/caddyfile/formatter_fuzz.go b/caddyconfig/caddyfile/formatter_fuzz.go
new file mode 100644
index 00000000..7c1fc643
--- /dev/null
+++ b/caddyconfig/caddyfile/formatter_fuzz.go
@@ -0,0 +1,27 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build gofuzz
+
+package caddyfile
+
+import "bytes"
+
+func FuzzFormat(input []byte) int {
+ formatted := Format(input)
+ if bytes.Equal(formatted, Format(formatted)) {
+ return 1
+ }
+ return 0
+}
diff --git a/caddyconfig/caddyfile/formatter_test.go b/caddyconfig/caddyfile/formatter_test.go
index 25bd7fa8..6eec822f 100644
--- a/caddyconfig/caddyfile/formatter_test.go
+++ b/caddyconfig/caddyfile/formatter_test.go
@@ -179,6 +179,11 @@ d {
{$F}
}`,
},
+ {
+ description: "env var placeholders with port",
+ input: `:{$PORT}`,
+ expect: `:{$PORT}`,
+ },
{
description: "comments",
input: `#a "\n"
@@ -201,7 +206,7 @@ c
}
d {
- e #f
+ e#f
# g
}
@@ -229,7 +234,7 @@ bar"
j {
"\"k\" l m"
}`,
- expect: `"a \"b\" " #c
+ expect: `"a \"b\" "#c
d
e {
@@ -305,6 +310,130 @@ bar "{\"key\":34}"`,
baz`,
},
+ {
+ description: "hash within string is not a comment",
+ input: `redir / /some/#/path`,
+ expect: `redir / /some/#/path`,
+ },
+ {
+ description: "brace does not fold into comment above",
+ input: `# comment
+{
+ foo
+}`,
+ expect: `# comment
+{
+ foo
+}`,
+ },
+ {
+ description: "matthewpi/vscode-caddyfile-support#13",
+ input: `{
+ email {$ACMEEMAIL}
+ #debug
+}
+
+block {
+}
+`,
+ expect: `{
+ email {$ACMEEMAIL}
+ #debug
+}
+
+block {
+}
+`,
+ },
+ {
+ description: "matthewpi/vscode-caddyfile-support#13 - bad formatting",
+ input: `{
+ email {$ACMEEMAIL}
+ #debug
+ }
+
+ block {
+ }
+`,
+ expect: `{
+ email {$ACMEEMAIL}
+ #debug
+}
+
+block {
+}
+`,
+ },
+ {
+ description: "keep heredoc as-is",
+ input: `block {
+	if startIndex < 0 || startIndex > endIndex || endIndex > argCount {
+ caddy.Log().Named("caddyfile").Warn(
+ "Variadic placeholder "+token.Text+" indices are out of bounds, only "+strconv.Itoa(argCount)+" argument(s) exist",
+ zap.String("file", token.File+":"+strconv.Itoa(token.Line)), zap.Strings("import_chain", token.imports))
+ return false, 0, 0
+ }
+ return true, startIndex, endIndex
+}
+
+// makeArgsReplacer prepares a Replacer which can replace
+// non-variadic args placeholders in imported tokens.
+func makeArgsReplacer(args []string) *caddy.Replacer {
+ repl := caddy.NewEmptyReplacer()
+ repl.Map(func(key string) (any, bool) {
+ // TODO: Remove the deprecated {args.*} placeholder
+ // support at some point in the future
+ if matches := argsRegexpIndexDeprecated.FindStringSubmatch(key); len(matches) > 0 {
+ // What's matched may be a substring of the key
+ if matches[0] != key {
+ return nil, false
+ }
+
+ value, err := strconv.Atoi(matches[1])
+ if err != nil {
+ caddy.Log().Named("caddyfile").Warn(
+ "Placeholder {args." + matches[1] + "} has an invalid index")
+ return nil, false
+ }
+ if value >= len(args) {
+ caddy.Log().Named("caddyfile").Warn(
+ "Placeholder {args." + matches[1] + "} index is out of bounds, only " + strconv.Itoa(len(args)) + " argument(s) exist")
+ return nil, false
+ }
+ caddy.Log().Named("caddyfile").Warn(
+ "Placeholder {args." + matches[1] + "} deprecated, use {args[" + matches[1] + "]} instead")
+ return args[value], true
+ }
+
+ // Handle args[*] form
+ if matches := argsRegexpIndex.FindStringSubmatch(key); len(matches) > 0 {
+ // What's matched may be a substring of the key
+ if matches[0] != key {
+ return nil, false
+ }
+
+ if strings.Contains(matches[1], ":") {
+ caddy.Log().Named("caddyfile").Warn(
+ "Variadic placeholder {args[" + matches[1] + "]} must be a token on its own")
+ return nil, false
+ }
+ value, err := strconv.Atoi(matches[1])
+ if err != nil {
+ caddy.Log().Named("caddyfile").Warn(
+ "Placeholder {args[" + matches[1] + "]} has an invalid index")
+ return nil, false
+ }
+ if value >= len(args) {
+ caddy.Log().Named("caddyfile").Warn(
+ "Placeholder {args[" + matches[1] + "]} index is out of bounds, only " + strconv.Itoa(len(args)) + " argument(s) exist")
+ return nil, false
+ }
+ return args[value], true
+ }
+
+ // Not an args placeholder, ignore
+ return nil, false
+ })
+ return repl
+}
+
+var (
+ argsRegexpIndexDeprecated = regexp.MustCompile(`args\.(.+)`)
+ argsRegexpIndex = regexp.MustCompile(`args\[(.+)]`)
+)
diff --git a/caddyconfig/caddyfile/importgraph.go b/caddyconfig/caddyfile/importgraph.go
new file mode 100644
index 00000000..ca859299
--- /dev/null
+++ b/caddyconfig/caddyfile/importgraph.go
@@ -0,0 +1,126 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "fmt"
+ "slices"
+)
+
+type adjacency map[string][]string
+
+type importGraph struct {
+ nodes map[string]struct{}
+ edges adjacency
+}
+
+func (i *importGraph) addNode(name string) {
+ if i.nodes == nil {
+ i.nodes = make(map[string]struct{})
+ }
+ if _, exists := i.nodes[name]; exists {
+ return
+ }
+ i.nodes[name] = struct{}{}
+}
+
+func (i *importGraph) addNodes(names []string) {
+ for _, name := range names {
+ i.addNode(name)
+ }
+}
+
+func (i *importGraph) removeNode(name string) {
+ delete(i.nodes, name)
+}
+
+func (i *importGraph) removeNodes(names []string) {
+ for _, name := range names {
+ i.removeNode(name)
+ }
+}
+
+func (i *importGraph) addEdge(from, to string) error {
+ if !i.exists(from) || !i.exists(to) {
+ return fmt.Errorf("one of the nodes does not exist")
+ }
+
+ if i.willCycle(to, from) {
+ return fmt.Errorf("a cycle of imports exists between %s and %s", from, to)
+ }
+
+ if i.areConnected(from, to) {
+ // if connected, there's nothing to do
+ return nil
+ }
+
+ if i.nodes == nil {
+ i.nodes = make(map[string]struct{})
+ }
+ if i.edges == nil {
+ i.edges = make(adjacency)
+ }
+
+ i.edges[from] = append(i.edges[from], to)
+ return nil
+}
+
+func (i *importGraph) addEdges(from string, tos []string) error {
+ for _, to := range tos {
+ err := i.addEdge(from, to)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (i *importGraph) areConnected(from, to string) bool {
+ al, ok := i.edges[from]
+ if !ok {
+ return false
+ }
+ return slices.Contains(al, to)
+}
+
+func (i *importGraph) willCycle(from, to string) bool {
+ collector := make(map[string]bool)
+
+ var visit func(string)
+ visit = func(start string) {
+ if !collector[start] {
+ collector[start] = true
+ for _, v := range i.edges[start] {
+ visit(v)
+ }
+ }
+ }
+
+ for _, v := range i.edges[from] {
+ visit(v)
+ }
+ for k := range collector {
+ if to == k {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (i *importGraph) exists(key string) bool {
+ _, exists := i.nodes[key]
+ return exists
+}
diff --git a/caddyconfig/caddyfile/lexer.go b/caddyconfig/caddyfile/lexer.go
old mode 100755
new mode 100644
index 687ff90e..9b523f39
--- a/caddyconfig/caddyfile/lexer.go
+++ b/caddyconfig/caddyfile/lexer.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Light Code Labs, LLC
+// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,7 +16,11 @@ package caddyfile
import (
"bufio"
+ "bytes"
+ "fmt"
"io"
+ "regexp"
+ "strings"
"unicode"
)
@@ -34,12 +38,41 @@ type (
// Token represents a single parsable unit.
Token struct {
- File string
- Line int
- Text string
+ File string
+ imports []string
+ Line int
+ Text string
+ wasQuoted rune // enclosing quote character, if any
+ heredocMarker string
+ snippetName string
}
)
+// Tokenize takes bytes as input and lexes it into
+// a list of tokens that can be parsed as a Caddyfile.
+// Also takes a filename to fill the token's File as
+// the source of the tokens, which is important to
+// determine relative paths for `import` directives.
+func Tokenize(input []byte, filename string) ([]Token, error) {
+ l := lexer{}
+ if err := l.load(bytes.NewReader(input)); err != nil {
+ return nil, err
+ }
+ var tokens []Token
+ for {
+ found, err := l.next()
+ if err != nil {
+ return nil, err
+ }
+ if !found {
+ break
+ }
+ l.token.File = filename
+ tokens = append(tokens, l.token)
+ }
+ return tokens, nil
+}
+
// load prepares the lexer to scan an input for tokens.
// It discards any leading byte order mark.
func (l *lexer) load(input io.Reader) error {
@@ -71,34 +104,114 @@ func (l *lexer) load(input io.Reader) error {
// may be escaped. The rest of the line is skipped
// if a "#" character is read in. Returns true if
// a token was loaded; false otherwise.
-func (l *lexer) next() bool {
+func (l *lexer) next() (bool, error) {
var val []rune
- var comment, quoted, escaped bool
+ var comment, quoted, btQuoted, inHeredoc, heredocEscaped, escaped bool
+ var heredocMarker string
- makeToken := func() bool {
+ makeToken := func(quoted rune) bool {
l.token.Text = string(val)
+ l.token.wasQuoted = quoted
+ l.token.heredocMarker = heredocMarker
return true
}
for {
+ // Read a character in; if err then if we had
+ // read some characters, make a token. If we
+ // reached EOF, then no more tokens to read.
+ // If no EOF, then we had a problem.
ch, _, err := l.reader.ReadRune()
if err != nil {
if len(val) > 0 {
- return makeToken()
+ if inHeredoc {
+ return false, fmt.Errorf("incomplete heredoc <<%s on line #%d, expected ending marker %s", heredocMarker, l.line+l.skippedLines, heredocMarker)
+ }
+
+ return makeToken(0), nil
}
if err == io.EOF {
- return false
+ return false, nil
}
- panic(err)
+ return false, err
}
- if !escaped && ch == '\\' {
+ // detect whether we have the start of a heredoc
+ if !(quoted || btQuoted) && !(inHeredoc || heredocEscaped) &&
+ len(val) > 1 && string(val[:2]) == "<<" {
+ // a space means it's just a regular token and not a heredoc
+ if ch == ' ' {
+ return makeToken(0), nil
+ }
+
+ // skip CR, we only care about LF
+ if ch == '\r' {
+ continue
+ }
+
+ // after hitting a newline, we know that the heredoc marker
+ // is the characters after the two << and the newline.
+ // we reset the val because the heredoc is syntax we don't
+ // want to keep.
+ if ch == '\n' {
+ if len(val) == 2 {
+ return false, fmt.Errorf("missing opening heredoc marker on line #%d; must contain only alpha-numeric characters, dashes and underscores; got empty string", l.line)
+ }
+
+ // check if there's too many <
+ if string(val[:3]) == "<<<" {
+					return false, fmt.Errorf("too many '<' for heredoc on line #%d; only use two, for example <<END", l.line)
+				}
+
+				// the marker is the characters after the two <<
+				heredocMarker = string(val[2:])
+				if !heredocMarkerRegexp.Match([]byte(heredocMarker)) {
+					return false, fmt.Errorf("heredoc marker on line #%d must contain only alpha-numeric characters, dashes and underscores; got '%s'", l.line, heredocMarker)
+				}
+
+				// we have a heredoc marker; reset the value and
+				// start accumulating the heredoc body
+				inHeredoc = true
+				l.skippedLines++
+				val = nil
+				continue
+			}
+
+			val = append(val, ch)
+			continue
+		}
+
+		// if we're in a heredoc, all characters are read as-is
+		if inHeredoc {
+			val = append(val, ch)
+
+			if ch == '\n' {
+				l.skippedLines++
+			}
+
+			// check if we're done, i.e. the last characters are the marker
+			if len(val) >= len(heredocMarker) && heredocMarker == string(val[len(val)-len(heredocMarker):]) {
+ // set the final value
+ val, err = l.finalizeHeredoc(val, heredocMarker)
+ if err != nil {
+ return false, err
+ }
+
+ // set the line counter, and make the token
+ l.line += l.skippedLines
+ l.skippedLines = 0
+ return makeToken('<'), nil
+ }
+
+ // stay in the heredoc until we find the ending marker
+ continue
+ }
+
+ // track whether we found an escape '\' for the next
+ // iteration to be contextually aware
+ if !escaped && !btQuoted && ch == '\\' {
escaped = true
continue
}
- if quoted {
- if escaped {
+ if quoted || btQuoted {
+ if quoted && escaped {
// all is literal in quoted area,
// so only escape quotes
if ch != '"' {
@@ -106,23 +219,29 @@ func (l *lexer) next() bool {
}
escaped = false
} else {
- if ch == '"' {
- return makeToken()
+ if (quoted && ch == '"') || (btQuoted && ch == '`') {
+ return makeToken(ch), nil
}
}
+			// allow quoted text to continue on multiple lines
if ch == '\n' {
l.line += 1 + l.skippedLines
l.skippedLines = 0
}
+ // collect this character as part of the quoted token
val = append(val, ch)
continue
}
if unicode.IsSpace(ch) {
+ // ignore CR altogether, we only actually care about LF (\n)
if ch == '\r' {
continue
}
+ // end of the line
if ch == '\n' {
+ // newlines can be escaped to chain arguments
+ // onto multiple lines; else, increment the line count
if escaped {
l.skippedLines++
escaped = false
@@ -130,15 +249,19 @@ func (l *lexer) next() bool {
l.line += 1 + l.skippedLines
l.skippedLines = 0
}
+ // comments (#) are single-line only
comment = false
}
+ // any kind of space means we're at the end of this token
if len(val) > 0 {
- return makeToken()
+ return makeToken(0), nil
}
continue
}
- if ch == '#' {
+ // comments must be at the start of a token,
+ // in other words, preceded by space or newline
+ if ch == '#' && len(val) == 0 {
comment = true
}
if comment {
@@ -151,13 +274,126 @@ func (l *lexer) next() bool {
quoted = true
continue
}
+ if ch == '`' {
+ btQuoted = true
+ continue
+ }
}
if escaped {
- val = append(val, '\\')
+ // allow escaping the first < to skip the heredoc syntax
+ if ch == '<' {
+ heredocEscaped = true
+ } else {
+ val = append(val, '\\')
+ }
escaped = false
}
val = append(val, ch)
}
}
+
+// finalizeHeredoc takes the runes read as the heredoc text and the marker,
+// and processes the text to strip leading whitespace, returning the final
+// value without the leading whitespace.
+func (l *lexer) finalizeHeredoc(val []rune, marker string) ([]rune, error) {
+ stringVal := string(val)
+
+ // find the last newline of the heredoc, which is where the contents end
+ lastNewline := strings.LastIndex(stringVal, "\n")
+
+ // collapse the content, then split into separate lines
+ lines := strings.Split(stringVal[:lastNewline+1], "\n")
+
+ // figure out how much whitespace we need to strip from the front of every line
+ // by getting the string that precedes the marker, on the last line
+ paddingToStrip := stringVal[lastNewline+1 : len(stringVal)-len(marker)]
+
+ // iterate over each line and strip the whitespace from the front
+ var out string
+ for lineNum, lineText := range lines[:len(lines)-1] {
+ if lineText == "" || lineText == "\r" {
+ out += "\n"
+ continue
+ }
+
+ // find an exact match for the padding
+ index := strings.Index(lineText, paddingToStrip)
+
+ // if the padding doesn't match exactly at the start then we can't safely strip
+ if index != 0 {
+ return nil, fmt.Errorf("mismatched leading whitespace in heredoc <<%s on line #%d [%s], expected whitespace [%s] to match the closing marker", marker, l.line+lineNum+1, lineText, paddingToStrip)
+ }
+
+ // strip, then append the line, with the newline, to the output.
+ // also removes all "\r" because Windows.
+ out += strings.ReplaceAll(lineText[len(paddingToStrip):]+"\n", "\r", "")
+ }
+
+ // Remove the trailing newline from the loop
+ if len(out) > 0 && out[len(out)-1] == '\n' {
+ out = out[:len(out)-1]
+ }
+
+ // return the final value
+ return []rune(out), nil
+}
+
+// Quoted returns true if the token was enclosed in quotes
+// (i.e. double quotes, backticks, or heredoc).
+func (t Token) Quoted() bool {
+ return t.wasQuoted > 0
+}
+
+// NumLineBreaks counts how many line breaks are in the token text.
+func (t Token) NumLineBreaks() int {
+ lineBreaks := strings.Count(t.Text, "\n")
+ if t.wasQuoted == '<' {
+ // heredocs have an extra linebreak because the opening
+ // delimiter is on its own line and is not included in the
+ // token Text itself, and the trailing newline is removed.
+ lineBreaks += 2
+ }
+ return lineBreaks
+}
+
+// Clone returns a deep copy of the token.
+func (t Token) Clone() Token {
+ return Token{
+ File: t.File,
+ imports: append([]string{}, t.imports...),
+ Line: t.Line,
+ Text: t.Text,
+ wasQuoted: t.wasQuoted,
+ heredocMarker: t.heredocMarker,
+ snippetName: t.snippetName,
+ }
+}
+
+var heredocMarkerRegexp = regexp.MustCompile("^[A-Za-z0-9_-]+$")
+
+// isNextOnNewLine tests whether t2 is on a different line from t1
+func isNextOnNewLine(t1, t2 Token) bool {
+ // If the second token is from a different file,
+ // we can assume it's from a different line
+ if t1.File != t2.File {
+ return true
+ }
+
+ // If the second token is from a different import chain,
+ // we can assume it's from a different line
+ if len(t1.imports) != len(t2.imports) {
+ return true
+ }
+ for i, im := range t1.imports {
+ if im != t2.imports[i] {
+ return true
+ }
+ }
+
+ // If the first token (incl line breaks) ends
+ // on a line earlier than the next token,
+ // then the second token is on a new line
+ return t1.Line+t1.NumLineBreaks() < t2.Line
+}
diff --git a/caddyconfig/caddyfile/lexer_fuzz.go b/caddyconfig/caddyfile/lexer_fuzz.go
new file mode 100644
index 00000000..6f75694b
--- /dev/null
+++ b/caddyconfig/caddyfile/lexer_fuzz.go
@@ -0,0 +1,28 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build gofuzz
+
+package caddyfile
+
+func FuzzTokenize(input []byte) int {
+ tokens, err := Tokenize(input, "Caddyfile")
+ if err != nil {
+ return 0
+ }
+ if len(tokens) == 0 {
+ return -1
+ }
+ return 1
+}
diff --git a/caddyconfig/caddyfile/lexer_test.go b/caddyconfig/caddyfile/lexer_test.go
old mode 100755
new mode 100644
index 9105eb57..7389af79
--- a/caddyconfig/caddyfile/lexer_test.go
+++ b/caddyconfig/caddyfile/lexer_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Light Code Labs, LLC
+// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,37 +15,35 @@
package caddyfile
import (
- "log"
- "strings"
"testing"
)
-type lexerTestCase struct {
- input string
- expected []Token
-}
-
func TestLexer(t *testing.T) {
- testCases := []lexerTestCase{
+ testCases := []struct {
+ input []byte
+ expected []Token
+ expectErr bool
+ errorMessage string
+ }{
{
- input: `host:123`,
+ input: []byte(`host:123`),
expected: []Token{
{Line: 1, Text: "host:123"},
},
},
{
- input: `host:123
+ input: []byte(`host:123
- directive`,
+ directive`),
expected: []Token{
{Line: 1, Text: "host:123"},
{Line: 3, Text: "directive"},
},
},
{
- input: `host:123 {
+ input: []byte(`host:123 {
directive
- }`,
+ }`),
expected: []Token{
{Line: 1, Text: "host:123"},
{Line: 1, Text: "{"},
@@ -54,7 +52,7 @@ func TestLexer(t *testing.T) {
},
},
{
- input: `host:123 { directive }`,
+ input: []byte(`host:123 { directive }`),
expected: []Token{
{Line: 1, Text: "host:123"},
{Line: 1, Text: "{"},
@@ -63,12 +61,12 @@ func TestLexer(t *testing.T) {
},
},
{
- input: `host:123 {
+ input: []byte(`host:123 {
#comment
directive
# comment
foobar # another comment
- }`,
+ }`),
expected: []Token{
{Line: 1, Text: "host:123"},
{Line: 1, Text: "{"},
@@ -78,8 +76,28 @@ func TestLexer(t *testing.T) {
},
},
{
- input: `a "quoted value" b
- foobar`,
+ input: []byte(`host:123 {
+ # hash inside string is not a comment
+ redir / /some/#/path
+ }`),
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ {Line: 1, Text: "{"},
+ {Line: 3, Text: "redir"},
+ {Line: 3, Text: "/"},
+ {Line: 3, Text: "/some/#/path"},
+ {Line: 4, Text: "}"},
+ },
+ },
+ {
+ input: []byte("# comment at beginning of file\n# comment at beginning of line\nhost:123"),
+ expected: []Token{
+ {Line: 3, Text: "host:123"},
+ },
+ },
+ {
+ input: []byte(`a "quoted value" b
+ foobar`),
expected: []Token{
{Line: 1, Text: "a"},
{Line: 1, Text: "quoted value"},
@@ -88,7 +106,7 @@ func TestLexer(t *testing.T) {
},
},
{
- input: `A "quoted \"value\" inside" B`,
+ input: []byte(`A "quoted \"value\" inside" B`),
expected: []Token{
{Line: 1, Text: "A"},
{Line: 1, Text: `quoted "value" inside`},
@@ -96,7 +114,7 @@ func TestLexer(t *testing.T) {
},
},
{
- input: "An escaped \"newline\\\ninside\" quotes",
+ input: []byte("An escaped \"newline\\\ninside\" quotes"),
expected: []Token{
{Line: 1, Text: "An"},
{Line: 1, Text: "escaped"},
@@ -105,7 +123,7 @@ func TestLexer(t *testing.T) {
},
},
{
- input: "An escaped newline\\\noutside quotes",
+ input: []byte("An escaped newline\\\noutside quotes"),
expected: []Token{
{Line: 1, Text: "An"},
{Line: 1, Text: "escaped"},
@@ -115,7 +133,7 @@ func TestLexer(t *testing.T) {
},
},
{
- input: "line1\\\nescaped\nline2\nline3",
+ input: []byte("line1\\\nescaped\nline2\nline3"),
expected: []Token{
{Line: 1, Text: "line1"},
{Line: 1, Text: "escaped"},
@@ -124,7 +142,7 @@ func TestLexer(t *testing.T) {
},
},
{
- input: "line1\\\nescaped1\\\nescaped2\nline4\nline5",
+ input: []byte("line1\\\nescaped1\\\nescaped2\nline4\nline5"),
expected: []Token{
{Line: 1, Text: "line1"},
{Line: 1, Text: "escaped1"},
@@ -134,34 +152,34 @@ func TestLexer(t *testing.T) {
},
},
{
- input: `"unescapable\ in quotes"`,
+ input: []byte(`"unescapable\ in quotes"`),
expected: []Token{
{Line: 1, Text: `unescapable\ in quotes`},
},
},
{
- input: `"don't\escape"`,
+ input: []byte(`"don't\escape"`),
expected: []Token{
{Line: 1, Text: `don't\escape`},
},
},
{
- input: `"don't\\escape"`,
+ input: []byte(`"don't\\escape"`),
expected: []Token{
{Line: 1, Text: `don't\\escape`},
},
},
{
- input: `un\escapable`,
+ input: []byte(`un\escapable`),
expected: []Token{
{Line: 1, Text: `un\escapable`},
},
},
{
- input: `A "quoted value with line
+ input: []byte(`A "quoted value with line
break inside" {
foobar
- }`,
+ }`),
expected: []Token{
{Line: 1, Text: "A"},
{Line: 1, Text: "quoted value with line\n\t\t\t\t\tbreak inside"},
@@ -171,13 +189,13 @@ func TestLexer(t *testing.T) {
},
},
{
- input: `"C:\php\php-cgi.exe"`,
+ input: []byte(`"C:\php\php-cgi.exe"`),
expected: []Token{
{Line: 1, Text: `C:\php\php-cgi.exe`},
},
},
{
- input: `empty "" string`,
+ input: []byte(`empty "" string`),
expected: []Token{
{Line: 1, Text: `empty`},
{Line: 1, Text: ``},
@@ -185,7 +203,7 @@ func TestLexer(t *testing.T) {
},
},
{
- input: "skip those\r\nCR characters",
+ input: []byte("skip those\r\nCR characters"),
expected: []Token{
{Line: 1, Text: "skip"},
{Line: 1, Text: "those"},
@@ -194,43 +212,328 @@ func TestLexer(t *testing.T) {
},
},
{
- input: "\xEF\xBB\xBF:8080", // test with leading byte order mark
+ input: []byte("\xEF\xBB\xBF:8080"), // test with leading byte order mark
expected: []Token{
{Line: 1, Text: ":8080"},
},
},
+ {
+ input: []byte("simple `backtick quoted` string"),
+ expected: []Token{
+ {Line: 1, Text: `simple`},
+ {Line: 1, Text: `backtick quoted`},
+ {Line: 1, Text: `string`},
+ },
+ },
+ {
+ input: []byte("multiline `backtick\nquoted\n` string"),
+ expected: []Token{
+ {Line: 1, Text: `multiline`},
+ {Line: 1, Text: "backtick\nquoted\n"},
+ {Line: 3, Text: `string`},
+ },
+ },
+ {
+ input: []byte("nested `\"quotes inside\" backticks` string"),
+ expected: []Token{
+ {Line: 1, Text: `nested`},
+ {Line: 1, Text: `"quotes inside" backticks`},
+ {Line: 1, Text: `string`},
+ },
+ },
+ {
+ input: []byte("reverse-nested \"`backticks` inside\" quotes"),
+ expected: []Token{
+ {Line: 1, Text: `reverse-nested`},
+ {Line: 1, Text: "`backticks` inside"},
+ {Line: 1, Text: `quotes`},
+ },
+ },
+ {
+ input: []byte(`heredoc <>`),
+ expected: []Token{
+ {Line: 1, Text: `escaped-heredoc`},
+ {Line: 1, Text: `<<`},
+ {Line: 1, Text: `>>`},
+ },
+ },
+ {
+ input: []byte(`not-a-heredoc >"`),
+ expected: []Token{
+ {Line: 1, Text: `not-a-heredoc`},
+ {Line: 1, Text: `<<`},
+ {Line: 1, Text: `>>`},
+ },
+ },
+ {
+ input: []byte(`not-a-heredoc << >>`),
+ expected: []Token{
+ {Line: 1, Text: `not-a-heredoc`},
+ {Line: 1, Text: `<<`},
+ {Line: 1, Text: `>>`},
+ },
+ },
+ {
+ input: []byte(`not-a-heredoc < 0 {
+ // use such tokens to create a new dispenser, and then use it to parse each block
+ bd := NewDispenser(blockTokens)
+ for bd.Next() {
+ // see if we can grab a key
+ var currentMappingKey string
+ if bd.Val() == "{" {
+ return p.Err("anonymous blocks are not supported")
+ }
+ currentMappingKey = bd.Val()
+ currentMappingTokens := []Token{}
+ // read all args until end of line / {
+ if bd.NextArg() {
+ currentMappingTokens = append(currentMappingTokens, bd.Token())
+ for bd.NextArg() {
+ currentMappingTokens = append(currentMappingTokens, bd.Token())
+ }
+ // TODO(elee1766): we don't enter another mapping here because it's annoying to extract the { and } properly.
+ // maybe someone can do that in the future
+ } else {
+ // attempt to enter a block and add tokens to the currentMappingTokens
+ for mappingNesting := bd.Nesting(); bd.NextBlock(mappingNesting); {
+ currentMappingTokens = append(currentMappingTokens, bd.Token())
+ }
+ }
+ blockMapping[currentMappingKey] = currentMappingTokens
+ }
+ }
+
+ // splice out the import directive and its arguments
+ // (2 tokens, plus the length of args)
+ tokensBefore := p.tokens[:p.cursor-1-len(args)-len(blockTokens)]
tokensAfter := p.tokens[p.cursor+1:]
var importedTokens []Token
+ var nodes []string
// first check snippets. That is a simple, non-recursive replacement
if p.definedSnippets != nil && p.definedSnippets[importPattern] != nil {
importedTokens = p.definedSnippets[importPattern]
+ if len(importedTokens) > 0 {
+ // just grab the first one
+ nodes = append(nodes, fmt.Sprintf("%s:%s", importedTokens[0].File, importedTokens[0].snippetName))
+ }
} else {
// make path relative to the file of the _token_ being processed rather
// than current working directory (issue #867) and then use glob to get
// list of matching filenames
- absFile, err := filepath.Abs(p.Dispenser.File())
+ absFile, err := caddy.FastAbs(p.Dispenser.File())
if err != nil {
return p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.File(), err)
}
@@ -325,20 +441,32 @@ func (p *parser) doImport() error {
return p.Errf("Glob pattern may only contain one wildcard (*), but has others: %s", globPattern)
}
matches, err = filepath.Glob(globPattern)
-
if err != nil {
return p.Errf("Failed to use import pattern %s: %v", importPattern, err)
}
if len(matches) == 0 {
if strings.ContainsAny(globPattern, "*?[]") {
- log.Printf("[WARNING] No files matching import glob pattern: %s", importPattern)
+ caddy.Log().Warn("No files matching import glob pattern", zap.String("pattern", importPattern))
} else {
return p.Errf("File to import not found: %s", importPattern)
}
+ } else {
+ // See issue #5295 - should skip any files that start with a . when iterating over them.
+ sep := string(filepath.Separator)
+ segGlobPattern := strings.Split(globPattern, sep)
+ if strings.HasPrefix(segGlobPattern[len(segGlobPattern)-1], "*") {
+ var tmpMatches []string
+ for _, m := range matches {
+ seg := strings.Split(m, sep)
+ if !strings.HasPrefix(seg[len(seg)-1], ".") {
+ tmpMatches = append(tmpMatches, m)
+ }
+ }
+ matches = tmpMatches
+ }
}
// collect all the imported tokens
-
for _, importFile := range matches {
newTokens, err := p.doSingleImport(importFile)
if err != nil {
@@ -346,12 +474,117 @@ func (p *parser) doImport() error {
}
importedTokens = append(importedTokens, newTokens...)
}
+ nodes = matches
+ }
+
+ nodeName := p.File()
+ if p.Token().snippetName != "" {
+ nodeName += fmt.Sprintf(":%s", p.Token().snippetName)
+ }
+ p.importGraph.addNode(nodeName)
+ p.importGraph.addNodes(nodes)
+ if err := p.importGraph.addEdges(nodeName, nodes); err != nil {
+ p.importGraph.removeNodes(nodes)
+ return err
+ }
+
+ // copy the tokens so we don't overwrite p.definedSnippets
+ tokensCopy := make([]Token, 0, len(importedTokens))
+
+ var (
+ maybeSnippet bool
+ maybeSnippetId bool
+ index int
+ )
+
+ // run the argument replacer on the tokens
+ // golang for range slice return a copy of value
+ // similarly, append also copy value
+ for i, token := range importedTokens {
+ // update the token's imports to refer to import directive filename, line number and snippet name if there is one
+ if token.snippetName != "" {
+ token.imports = append(token.imports, fmt.Sprintf("%s:%d (import %s)", p.File(), p.Line(), token.snippetName))
+ } else {
+ token.imports = append(token.imports, fmt.Sprintf("%s:%d (import)", p.File(), p.Line()))
+ }
+
+ // naive way of determine snippets, as snippets definition can only follow name + block
+ // format, won't check for nesting correctness or any other error, that's what parser does.
+ if !maybeSnippet && nesting == 0 {
+ // first of the line
+ if i == 0 || isNextOnNewLine(tokensCopy[i-1], token) {
+ index = 0
+ } else {
+ index++
+ }
+
+ if index == 0 && len(token.Text) >= 3 && strings.HasPrefix(token.Text, "(") && strings.HasSuffix(token.Text, ")") {
+ maybeSnippetId = true
+ }
+ }
+
+ switch token.Text {
+ case "{":
+ nesting++
+ if index == 1 && maybeSnippetId && nesting == 1 {
+ maybeSnippet = true
+ maybeSnippetId = false
+ }
+ case "}":
+ nesting--
+ if nesting == 0 && maybeSnippet {
+ maybeSnippet = false
+ }
+ }
+ // if it is {block}, we substitute with all tokens in the block
+ // if it is {blocks.*}, we substitute with the tokens in the mapping for the *
+ var skip bool
+ var tokensToAdd []Token
+ switch {
+ case token.Text == "{block}":
+ tokensToAdd = blockTokens
+ case strings.HasPrefix(token.Text, "{blocks.") && strings.HasSuffix(token.Text, "}"):
+ // {blocks.foo.bar} will be extracted to key `foo.bar`
+ blockKey := strings.TrimPrefix(strings.TrimSuffix(token.Text, "}"), "{blocks.")
+ val, ok := blockMapping[blockKey]
+ if ok {
+ tokensToAdd = val
+ }
+ default:
+ skip = true
+ }
+ if !skip {
+ if len(tokensToAdd) == 0 {
+ // if there is no content in the snippet block, don't do any replacement
+ // this allows snippets which contained {block}/{block.*} before this change to continue functioning as normal
+ tokensCopy = append(tokensCopy, token)
+ } else {
+ tokensCopy = append(tokensCopy, tokensToAdd...)
+ }
+ continue
+ }
+
+ if maybeSnippet {
+ tokensCopy = append(tokensCopy, token)
+ continue
+ }
+
+ foundVariadic, startIndex, endIndex := parseVariadic(token, len(args))
+ if foundVariadic {
+ for _, arg := range args[startIndex:endIndex] {
+ token.Text = arg
+ tokensCopy = append(tokensCopy, token)
+ }
+ } else {
+ token.Text = repl.ReplaceKnown(token.Text, "")
+ tokensCopy = append(tokensCopy, token)
+ }
}
// splice the imported tokens in the place of the import statement
// and rewind cursor so Next() will land on first imported token
- p.tokens = append(tokensBefore, append(importedTokens, tokensAfter...)...)
- p.cursor--
+ p.tokens = append(tokensBefore, append(tokensCopy, tokensAfter...)...)
+ p.cursor -= len(args) + len(blockTokens) + 1
return nil
}
@@ -371,11 +604,17 @@ func (p *parser) doSingleImport(importFile string) ([]Token, error) {
return nil, p.Errf("Could not import %s: is a directory", importFile)
}
- input, err := ioutil.ReadAll(file)
+ input, err := io.ReadAll(file)
if err != nil {
return nil, p.Errf("Could not read imported file %s: %v", importFile, err)
}
+ // only warning in case of empty files
+ if len(input) == 0 || len(strings.TrimSpace(string(input))) == 0 {
+ caddy.Log().Warn("Import file is empty", zap.String("file", importFile))
+ return []Token{}, nil
+ }
+
importedTokens, err := allTokens(importFile, input)
if err != nil {
return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err)
@@ -383,7 +622,7 @@ func (p *parser) doSingleImport(importFile string) ([]Token, error) {
// Tack the file path onto these tokens so errors show the imported file's name
// (we use full, absolute path to avoid bugs: issue #1892)
- filename, err := filepath.Abs(importFile)
+ filename, err := caddy.FastAbs(importFile)
if err != nil {
return nil, p.Errf("Failed to get absolute path of file: %s: %v", importFile, err)
}
@@ -401,7 +640,6 @@ func (p *parser) doSingleImport(importFile string) ([]Token, error) {
// are loaded into the current server block for later use
// by directive setup functions.
func (p *parser) directive() error {
-
// a segment is a list of tokens associated with this directive
var segment Segment
@@ -411,6 +649,16 @@ func (p *parser) directive() error {
for p.Next() {
if p.Val() == "{" {
p.nesting++
+ if !p.isNextOnNewLine() && p.Token().wasQuoted == 0 {
+ return p.Err("Unexpected next token after '{' on same line")
+ }
+ if p.isNewLine() {
+ return p.Err("Unexpected '{' on a new line; did you mean to place the '{' on the previous line?")
+ }
+ } else if p.Val() == "{}" {
+ if p.isNextOnNewLine() && p.Token().wasQuoted == 0 {
+ return p.Err("Unexpected '{}' at end of line")
+ }
} else if p.isNewLine() && p.nesting == 0 {
p.cursor-- // read too far
break
@@ -419,7 +667,7 @@ func (p *parser) directive() error {
} else if p.Val() == "}" && p.nesting == 0 {
return p.Err("Unexpected '}' because no matching opening brace")
} else if p.Val() == "import" && p.isNewLine() {
- if err := p.doImport(); err != nil {
+ if err := p.doImport(1); err != nil {
return err
}
p.cursor-- // cursor is advanced when we continue, so roll back one more
@@ -460,28 +708,43 @@ func (p *parser) closeCurlyBrace() error {
return nil
}
+func (p *parser) isNamedRoute() (bool, string) {
+ keys := p.block.Keys
+ // A named route block is a single key with parens, prefixed with &.
+ if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "&(") && strings.HasSuffix(keys[0].Text, ")") {
+ return true, strings.TrimSuffix(keys[0].Text[2:], ")")
+ }
+ return false, ""
+}
+
func (p *parser) isSnippet() (bool, string) {
keys := p.block.Keys
// A snippet block is a single key with parens. Nothing else qualifies.
- if len(keys) == 1 && strings.HasPrefix(keys[0], "(") && strings.HasSuffix(keys[0], ")") {
- return true, strings.TrimSuffix(keys[0][1:], ")")
+ if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "(") && strings.HasSuffix(keys[0].Text, ")") {
+ return true, strings.TrimSuffix(keys[0].Text[1:], ")")
}
return false, ""
}
// read and store everything in a block for later replay.
-func (p *parser) snippetTokens() ([]Token, error) {
- // snippet must have curlies.
+func (p *parser) blockTokens(retainCurlies bool) ([]Token, error) {
+ // block must have curlies.
err := p.openCurlyBrace()
if err != nil {
return nil, err
}
- nesting := 1 // count our own nesting in snippets
+ nesting := 1 // count our own nesting
tokens := []Token{}
+ if retainCurlies {
+ tokens = append(tokens, p.Token())
+ }
for p.Next() {
if p.Val() == "}" {
nesting--
if nesting == 0 {
+ if retainCurlies {
+ tokens = append(tokens, p.Token())
+ }
break
}
}
@@ -501,8 +764,18 @@ func (p *parser) snippetTokens() ([]Token, error) {
// head of the server block with tokens, which are
// grouped by segments.
type ServerBlock struct {
- Keys []string
- Segments []Segment
+ HasBraces bool
+ Keys []Token
+ Segments []Segment
+ IsNamedRoute bool
+}
+
+func (sb ServerBlock) GetKeysText() []string {
+ res := []string{}
+ for _, k := range sb.Keys {
+ res = append(res, k.Text)
+ }
+ return res
}
// DispenseDirective returns a dispenser that contains
@@ -533,4 +806,7 @@ func (s Segment) Directive() string {
// spanOpen and spanClose are used to bound spans that
// contain the name of an environment variable.
-var spanOpen, spanClose = []byte{'{', '$'}, []byte{'}'}
+var (
+ spanOpen, spanClose = []byte{'{', '$'}, []byte{'}'}
+ envVarDefaultDelimiter = ":"
+)
diff --git a/caddyconfig/caddyfile/parse_test.go b/caddyconfig/caddyfile/parse_test.go
old mode 100755
new mode 100644
index ed153674..d3fada4e
--- a/caddyconfig/caddyfile/parse_test.go
+++ b/caddyconfig/caddyfile/parse_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Light Code Labs, LLC
+// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,17 +16,101 @@ package caddyfile
import (
"bytes"
- "io/ioutil"
"os"
"path/filepath"
"testing"
)
+func TestParseVariadic(t *testing.T) {
+ args := make([]string, 10)
+ for i, tc := range []struct {
+ input string
+ result bool
+ }{
+ {
+ input: "",
+ result: false,
+ },
+ {
+ input: "{args[1",
+ result: false,
+ },
+ {
+ input: "1]}",
+ result: false,
+ },
+ {
+ input: "{args[:]}aaaaa",
+ result: false,
+ },
+ {
+ input: "aaaaa{args[:]}",
+ result: false,
+ },
+ {
+ input: "{args.}",
+ result: false,
+ },
+ {
+ input: "{args.1}",
+ result: false,
+ },
+ {
+ input: "{args[]}",
+ result: false,
+ },
+ {
+ input: "{args[:]}",
+ result: true,
+ },
+ {
+ input: "{args[:]}",
+ result: true,
+ },
+ {
+ input: "{args[0:]}",
+ result: true,
+ },
+ {
+ input: "{args[:0]}",
+ result: true,
+ },
+ {
+ input: "{args[-1:]}",
+ result: false,
+ },
+ {
+ input: "{args[:11]}",
+ result: false,
+ },
+ {
+ input: "{args[10:0]}",
+ result: false,
+ },
+ {
+ input: "{args[0:10]}",
+ result: true,
+ },
+ {
+ input: "{args[0]}:{args[1]}:{args[2]}",
+ result: false,
+ },
+ } {
+ token := Token{
+ File: "test",
+ Line: 1,
+ Text: tc.input,
+ }
+ if v, _, _ := parseVariadic(token, len(args)); v != tc.result {
+ t.Errorf("Test %d error expectation failed Expected: %t, got %t", i, tc.result, v)
+ }
+ }
+}
+
func TestAllTokens(t *testing.T) {
input := []byte("a b c\nd e")
expected := []string{"a", "b", "c", "d", "e"}
tokens, err := allTokens("TestAllTokens", input)
-
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
@@ -64,10 +148,11 @@ func TestParseOneAndImport(t *testing.T) {
"localhost",
}, []int{1}},
- {`localhost:1234
+ {
+ `localhost:1234
dir1 foo bar`, false, []string{
- "localhost:1234",
- }, []int{3},
+ "localhost:1234",
+ }, []int{3},
},
{`localhost {
@@ -160,6 +245,10 @@ func TestParseOneAndImport(t *testing.T) {
"localhost",
}, []int{}},
+ {`localhost{
+ dir1
+ }`, true, []string{}, []int{}},
+
{`localhost
dir1 {
nested {
@@ -182,14 +271,56 @@ func TestParseOneAndImport(t *testing.T) {
"host1",
}, []int{1, 2}},
- {`import testdata/import_test1.txt testdata/import_test2.txt`, true, []string{}, []int{}},
-
{`import testdata/not_found.txt`, true, []string{}, []int{}},
+ // empty file should just log a warning, and result in no tokens
+ {`import testdata/empty.txt`, false, []string{}, []int{}},
+
+ {`import testdata/only_white_space.txt`, false, []string{}, []int{}},
+
+ // import path/to/dir/* should skip any files that start with a . when iterating over them.
+ {`localhost
+ dir1 arg1
+ import testdata/glob/*`, false, []string{
+ "localhost",
+ }, []int{2, 3, 1}},
+
+ // import path/to/dir/.* should continue to read all dotfiles in a dir.
+ {`import testdata/glob/.*`, false, []string{
+ "host1",
+ }, []int{1, 2}},
+
{`""`, false, []string{}, []int{}},
{``, false, []string{}, []int{}},
+ // Unexpected next token after '{' on same line
+ {`localhost
+ dir1 { a b }`, true, []string{"localhost"}, []int{}},
+
+ // Unexpected '{' on a new line
+ {`localhost
+ dir1
+ {
+ a b
+ }`, true, []string{"localhost"}, []int{}},
+
+ // Workaround with quotes
+ {`localhost
+ dir1 "{" a b "}"`, false, []string{"localhost"}, []int{5}},
+
+ // Unexpected '{}' at end of line
+ {`localhost
+ dir1 {}`, true, []string{"localhost"}, []int{}},
+ // Workaround with quotes
+ {`localhost
+ dir1 "{}"`, false, []string{"localhost"}, []int{2}},
+
+ // import with args
+ {`import testdata/import_args0.txt a`, false, []string{"a"}, []int{}},
+ {`import testdata/import_args1.txt a b`, false, []string{"a", "b"}, []int{}},
+ {`import testdata/import_args*.txt a b`, false, []string{"a"}, []int{2}},
+
// test cases found by fuzzing!
{`import }{$"`, true, []string{}, []int{}},
{`import /*/*.txt`, true, []string{}, []int{}},
@@ -210,12 +341,13 @@ func TestParseOneAndImport(t *testing.T) {
t.Errorf("Test %d: Expected no error, but got: %v", i, err)
}
+ // t.Logf("%+v\n", result)
if len(result.Keys) != len(test.keys) {
t.Errorf("Test %d: Expected %d keys, got %d",
i, len(test.keys), len(result.Keys))
continue
}
- for j, addr := range result.Keys {
+ for j, addr := range result.GetKeysText() {
if addr != test.keys[j] {
t.Errorf("Test %d, key %d: Expected '%s', but was '%s'",
i, j, test.keys[j], addr)
@@ -247,8 +379,9 @@ func TestRecursiveImport(t *testing.T) {
}
isExpected := func(got ServerBlock) bool {
- if len(got.Keys) != 1 || got.Keys[0] != "localhost" {
- t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys)
+ textKeys := got.GetKeysText()
+ if len(textKeys) != 1 || textKeys[0] != "localhost" {
+ t.Errorf("got keys unexpected: expect localhost, got %v", textKeys)
return false
}
if len(got.Segments) != 2 {
@@ -272,16 +405,16 @@ func TestRecursiveImport(t *testing.T) {
}
// test relative recursive import
- err = ioutil.WriteFile(recursiveFile1, []byte(
+ err = os.WriteFile(recursiveFile1, []byte(
`localhost
dir1
- import recursive_import_test2`), 0644)
+ import recursive_import_test2`), 0o644)
if err != nil {
t.Fatal(err)
}
defer os.Remove(recursiveFile1)
- err = ioutil.WriteFile(recursiveFile2, []byte("dir2 1"), 0644)
+ err = os.WriteFile(recursiveFile2, []byte("dir2 1"), 0o644)
if err != nil {
t.Fatal(err)
}
@@ -306,10 +439,10 @@ func TestRecursiveImport(t *testing.T) {
}
// test absolute recursive import
- err = ioutil.WriteFile(recursiveFile1, []byte(
+ err = os.WriteFile(recursiveFile1, []byte(
`localhost
dir1
- import `+recursiveFile2), 0644)
+ import `+recursiveFile2), 0o644)
if err != nil {
t.Fatal(err)
}
@@ -342,8 +475,9 @@ func TestDirectiveImport(t *testing.T) {
}
isExpected := func(got ServerBlock) bool {
- if len(got.Keys) != 1 || got.Keys[0] != "localhost" {
- t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys)
+ textKeys := got.GetKeysText()
+ if len(textKeys) != 1 || textKeys[0] != "localhost" {
+ t.Errorf("got keys unexpected: expect localhost, got %v", textKeys)
return false
}
if len(got.Segments) != 2 {
@@ -362,8 +496,8 @@ func TestDirectiveImport(t *testing.T) {
t.Fatal(err)
}
- err = ioutil.WriteFile(directiveFile, []byte(`prop1 1
- prop2 2`), 0644)
+ err = os.WriteFile(directiveFile, []byte(`prop1 1
+ prop2 2`), 0o644)
if err != nil {
t.Fatal(err)
}
@@ -421,6 +555,10 @@ func TestParseAll(t *testing.T) {
{"localhost:1234", "http://host2"},
}},
+ {`foo.example.com , example.com`, false, [][]string{
+ {"foo.example.com", "example.com"},
+ }},
+
{`localhost:1234, http://host2,`, true, [][]string{}},
{`http://host1.com, http://host2.com {
@@ -440,6 +578,28 @@ func TestParseAll(t *testing.T) {
{`import notfound/*`, false, [][]string{}}, // glob needn't error with no matches
{`import notfound/file.conf`, true, [][]string{}}, // but a specific file should
+
+ // recursive self-import
+ {`import testdata/import_recursive0.txt`, true, [][]string{}},
+ {`import testdata/import_recursive3.txt
+ import testdata/import_recursive1.txt`, true, [][]string{}},
+
+ // cyclic imports
+ {`(A) {
+ import A
+ }
+ :80
+ import A
+ `, true, [][]string{}},
+ {`(A) {
+ import B
+ }
+ (B) {
+ import A
+ }
+ :80
+ import A
+ `, true, [][]string{}},
} {
p := testParser(test.input)
blocks, err := p.parseAll()
@@ -458,11 +618,11 @@ func TestParseAll(t *testing.T) {
}
for j, block := range blocks {
if len(block.Keys) != len(test.keys[j]) {
- t.Errorf("Test %d: Expected %d keys in block %d, got %d",
- i, len(test.keys[j]), j, len(block.Keys))
+ t.Errorf("Test %d: Expected %d keys in block %d, got %d: %v",
+ i, len(test.keys[j]), j, len(block.Keys), block.Keys)
continue
}
- for k, addr := range block.Keys {
+ for k, addr := range block.GetKeysText() {
if addr != test.keys[j][k] {
t.Errorf("Test %d, block %d, key %d: Expected '%s', but got '%s'",
i, j, k, test.keys[j][k], addr)
@@ -474,6 +634,7 @@ func TestParseAll(t *testing.T) {
func TestEnvironmentReplacement(t *testing.T) {
os.Setenv("FOOBAR", "foobar")
+ os.Setenv("CHAINED", "$FOOBAR")
for i, test := range []struct {
input string
@@ -519,6 +680,22 @@ func TestEnvironmentReplacement(t *testing.T) {
input: "{$FOOBAR}{$FOOBAR}",
expect: "foobarfoobar",
},
+ {
+ input: "{$CHAINED}",
+ expect: "$FOOBAR", // should not chain env expands
+ },
+ {
+ input: "{$FOO:default}",
+ expect: "default",
+ },
+ {
+ input: "foo{$BAR:bar}baz",
+ expect: "foobarbaz",
+ },
+ {
+ input: "foo{$BAR:$FOOBAR}baz",
+ expect: "foo$FOOBARbaz", // should not chain env expands
+ },
{
input: "{$FOOBAR",
expect: "{$FOOBAR",
@@ -544,16 +721,43 @@ func TestEnvironmentReplacement(t *testing.T) {
expect: "}{$",
},
} {
- actual, err := replaceEnvVars([]byte(test.input))
- if err != nil {
- t.Fatal(err)
- }
+ actual := replaceEnvVars([]byte(test.input))
if !bytes.Equal(actual, []byte(test.expect)) {
t.Errorf("Test %d: Expected: '%s' but got '%s'", i, test.expect, actual)
}
}
}
+func TestImportReplacementInJSONWithBrace(t *testing.T) {
+ for i, test := range []struct {
+ args []string
+ input string
+ expect string
+ }{
+ {
+ args: []string{"123"},
+ input: "{args[0]}",
+ expect: "123",
+ },
+ {
+ args: []string{"123"},
+ input: `{"key":"{args[0]}"}`,
+ expect: `{"key":"123"}`,
+ },
+ {
+ args: []string{"123", "123"},
+ input: `{"key":[{args[0]},{args[1]}]}`,
+ expect: `{"key":[123,123]}`,
+ },
+ } {
+ repl := makeArgsReplacer(test.args)
+ actual := repl.ReplaceKnown(test.input, "")
+ if actual != test.expect {
+ t.Errorf("Test %d: Expected: '%s' but got '%s'", i, test.expect, actual)
+ }
+ }
+}
+
func TestSnippets(t *testing.T) {
p := testParser(`
(common) {
@@ -571,7 +775,7 @@ func TestSnippets(t *testing.T) {
if len(blocks) != 1 {
t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
}
- if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual {
+ if actual, expected := blocks[0].GetKeysText()[0], "http://example.com"; expected != actual {
t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
}
if len(blocks[0].Segments) != 2 {
@@ -586,7 +790,7 @@ func TestSnippets(t *testing.T) {
}
func writeStringToTempFileOrDie(t *testing.T, str string) (pathToFile string) {
- file, err := ioutil.TempFile("", t.Name())
+ file, err := os.CreateTemp("", t.Name())
if err != nil {
panic(err) // get a stack trace so we know where this was called from.
}
@@ -603,7 +807,7 @@ func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) {
fileName := writeStringToTempFileOrDie(t, `
http://example.com {
# This isn't an import directive, it's just an arg with value 'import'
- basicauth / import password
+ basic_auth / import password
}
`)
// Parse the root file that imports the other one.
@@ -614,12 +818,12 @@ func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) {
}
auth := blocks[0].Segments[0]
line := auth[0].Text + " " + auth[1].Text + " " + auth[2].Text + " " + auth[3].Text
- if line != "basicauth / import password" {
+ if line != "basic_auth / import password" {
// Previously, it would be changed to:
- // basicauth / import /path/to/test/dir/password
+ // basic_auth / import /path/to/test/dir/password
// referencing a file that (probably) doesn't exist and changing the
// password!
- t.Errorf("Expected basicauth tokens to be 'basicauth / import password' but got %#q", line)
+ t.Errorf("Expected basic_auth tokens to be 'basic_auth / import password' but got %#q", line)
}
}
@@ -646,7 +850,7 @@ func TestSnippetAcrossMultipleFiles(t *testing.T) {
if len(blocks) != 1 {
t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
}
- if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual {
+ if actual, expected := blocks[0].GetKeysText()[0], "http://example.com"; expected != actual {
t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
}
if len(blocks[0].Segments) != 1 {
@@ -657,6 +861,29 @@ func TestSnippetAcrossMultipleFiles(t *testing.T) {
}
}
+func TestRejectsGlobalMatcher(t *testing.T) {
+ p := testParser(`
+ @rejected path /foo
+
+ (common) {
+ gzip foo
+ errors stderr
+ }
+
+ http://example.com {
+ import common
+ }
+ `)
+ _, err := p.parseAll()
+ if err == nil {
+ t.Fatal("Expected an error, but got nil")
+ }
+ expected := "request matchers may not be defined globally, they must be in a site block; found @rejected, at Testfile:2"
+ if err.Error() != expected {
+ t.Errorf("Expected error to be '%s' but got '%v'", expected, err)
+ }
+}
+
func testParser(input string) parser {
return parser{Dispenser: NewTestDispenser(input)}
}
diff --git a/caddyconfig/caddyfile/testdata/empty.txt b/caddyconfig/caddyfile/testdata/empty.txt
new file mode 100644
index 00000000..e69de29b
diff --git a/caddyconfig/caddyfile/testdata/glob/.dotfile.txt b/caddyconfig/caddyfile/testdata/glob/.dotfile.txt
new file mode 100644
index 00000000..faab100c
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/glob/.dotfile.txt
@@ -0,0 +1,4 @@
+host1 {
+ dir1
+ dir2 arg1
+}
diff --git a/caddyconfig/caddyfile/testdata/glob/import_test1.txt b/caddyconfig/caddyfile/testdata/glob/import_test1.txt
new file mode 100644
index 00000000..dac7b29b
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/glob/import_test1.txt
@@ -0,0 +1,2 @@
+dir2 arg1 arg2
+dir3
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_args0.txt b/caddyconfig/caddyfile/testdata/import_args0.txt
new file mode 100644
index 00000000..add211e3
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_args0.txt
@@ -0,0 +1 @@
+{args[0]}
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_args1.txt b/caddyconfig/caddyfile/testdata/import_args1.txt
new file mode 100644
index 00000000..422692a2
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_args1.txt
@@ -0,0 +1 @@
+{args[0]} {args[1]}
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_glob0.txt b/caddyconfig/caddyfile/testdata/import_glob0.txt
old mode 100755
new mode 100644
diff --git a/caddyconfig/caddyfile/testdata/import_glob1.txt b/caddyconfig/caddyfile/testdata/import_glob1.txt
old mode 100755
new mode 100644
diff --git a/caddyconfig/caddyfile/testdata/import_glob2.txt b/caddyconfig/caddyfile/testdata/import_glob2.txt
old mode 100755
new mode 100644
diff --git a/caddyconfig/caddyfile/testdata/import_recursive0.txt b/caddyconfig/caddyfile/testdata/import_recursive0.txt
new file mode 100644
index 00000000..4d827b33
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_recursive0.txt
@@ -0,0 +1 @@
+import import_recursive0.txt
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_recursive1.txt b/caddyconfig/caddyfile/testdata/import_recursive1.txt
new file mode 100644
index 00000000..9b6102ed
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_recursive1.txt
@@ -0,0 +1 @@
+import import_recursive2.txt
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_recursive2.txt b/caddyconfig/caddyfile/testdata/import_recursive2.txt
new file mode 100644
index 00000000..5553dea3
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_recursive2.txt
@@ -0,0 +1 @@
+import import_recursive3.txt
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_recursive3.txt b/caddyconfig/caddyfile/testdata/import_recursive3.txt
new file mode 100644
index 00000000..fcf0237f
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_recursive3.txt
@@ -0,0 +1 @@
+import import_recursive1.txt
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_test1.txt b/caddyconfig/caddyfile/testdata/import_test1.txt
old mode 100755
new mode 100644
diff --git a/caddyconfig/caddyfile/testdata/import_test2.txt b/caddyconfig/caddyfile/testdata/import_test2.txt
old mode 100755
new mode 100644
diff --git a/caddyconfig/caddyfile/testdata/only_white_space.txt b/caddyconfig/caddyfile/testdata/only_white_space.txt
new file mode 100644
index 00000000..705327cd
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/only_white_space.txt
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/caddyconfig/configadapters.go b/caddyconfig/configadapters.go
index 2c466c42..0ca3c3af 100644
--- a/caddyconfig/configadapters.go
+++ b/caddyconfig/configadapters.go
@@ -24,7 +24,7 @@ import (
// Adapter is a type which can adapt a configuration to Caddy JSON.
// It returns the results and any warnings, or an error.
type Adapter interface {
- Adapt(body []byte, options map[string]interface{}) ([]byte, []Warning, error)
+ Adapt(body []byte, options map[string]any) ([]byte, []Warning, error)
}
// Warning represents a warning or notice related to conversion.
@@ -35,12 +35,20 @@ type Warning struct {
Message string `json:"message,omitempty"`
}
+func (w Warning) String() string {
+ var directive string
+ if w.Directive != "" {
+ directive = fmt.Sprintf(" (%s)", w.Directive)
+ }
+ return fmt.Sprintf("%s:%d%s: %s", w.File, w.Line, directive, w.Message)
+}
+
// JSON encodes val as JSON, returning it as a json.RawMessage. Any
// marshaling errors (which are highly unlikely with correct code)
// are converted to warnings. This is convenient when filling config
// structs that require a json.RawMessage, without having to worry
// about errors.
-func JSON(val interface{}, warnings *[]Warning) json.RawMessage {
+func JSON(val any, warnings *[]Warning) json.RawMessage {
b, err := json.Marshal(val)
if err != nil {
if warnings != nil {
@@ -51,15 +59,14 @@ func JSON(val interface{}, warnings *[]Warning) json.RawMessage {
return b
}
-// JSONModuleObject is like JSON, except it marshals val into a JSON object
-// and then adds a key to that object named fieldName with the value fieldVal.
-// This is useful for JSON-encoding module values where the module name has to
-// be described within the object by a certain key; for example,
-// "responder": "file_server" for a file server HTTP responder. The val must
-// encode into a map[string]interface{} (i.e. it must be a struct or map),
-// and any errors are converted into warnings, so this can be conveniently
-// used when filling a struct. For correct code, there should be no errors.
-func JSONModuleObject(val interface{}, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage {
+// JSONModuleObject is like JSON(), except it marshals val into a JSON object
+// with an added key named fieldName with the value fieldVal. This is useful
+// for encoding module values where the module name has to be described within
+// the object by a certain key; for example, `"handler": "file_server"` for a
+// file server HTTP handler (fieldName="handler" and fieldVal="file_server").
+// The val parameter must encode into a map[string]any (i.e. it must be
+// a struct or map). Any errors are converted into warnings.
+func JSONModuleObject(val any, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage {
// encode to a JSON object first
enc, err := json.Marshal(val)
if err != nil {
@@ -70,7 +77,7 @@ func JSONModuleObject(val interface{}, fieldName, fieldVal string, warnings *[]W
}
// then decode the object
- var tmp map[string]interface{}
+ var tmp map[string]any
err = json.Unmarshal(enc, &tmp)
if err != nil {
if warnings != nil {
@@ -94,12 +101,6 @@ func JSONModuleObject(val interface{}, fieldName, fieldVal string, warnings *[]W
return result
}
-// JSONIndent is used to JSON-marshal the final resulting Caddy
-// configuration in a consistent, human-readable way.
-func JSONIndent(val interface{}) ([]byte, error) {
- return json.MarshalIndent(val, "", "\t")
-}
-
// RegisterAdapter registers a config adapter with the given name.
// This should usually be done at init-time. It panics if the
// adapter cannot be registered successfully.
diff --git a/caddyconfig/httpcaddyfile/addresses.go b/caddyconfig/httpcaddyfile/addresses.go
index 51411a9a..1121776d 100644
--- a/caddyconfig/httpcaddyfile/addresses.go
+++ b/caddyconfig/httpcaddyfile/addresses.go
@@ -17,29 +17,32 @@ package httpcaddyfile
import (
"fmt"
"net"
+ "net/netip"
"reflect"
+ "sort"
"strconv"
"strings"
"unicode"
+ "github.com/caddyserver/certmagic"
+
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
- "github.com/caddyserver/certmagic"
)
-// mapAddressToServerBlocks returns a map of listener address to list of server
+// mapAddressToProtocolToServerBlocks returns a map of listener address to list of server
// blocks that will be served on that address. To do this, each server block is
// expanded so that each one is considered individually, although keys of a
// server block that share the same address stay grouped together so the config
// isn't repeated unnecessarily. For example, this Caddyfile:
//
-// example.com {
-// bind 127.0.0.1
-// }
-// www.example.com, example.net/path, localhost:9999 {
-// bind 127.0.0.1 1.2.3.4
-// }
+// example.com {
+// bind 127.0.0.1
+// }
+// www.example.com, example.net/path, localhost:9999 {
+// bind 127.0.0.1 1.2.3.4
+// }
//
// has two server blocks to start with. But expressed in this Caddyfile are
// actually 4 listener addresses: 127.0.0.1:443, 1.2.3.4:443, 127.0.0.1:9999,
@@ -74,9 +77,15 @@ import (
// repetition may be undesirable, so call consolidateAddrMappings() to map
// multiple addresses to the same lists of server blocks (a many:many mapping).
// (Doing this is essentially a map-reduce technique.)
-func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBlock,
- options map[string]interface{}) (map[string][]serverBlock, error) {
- sbmap := make(map[string][]serverBlock)
+func (st *ServerType) mapAddressToProtocolToServerBlocks(originalServerBlocks []serverBlock,
+ options map[string]any,
+) (map[string]map[string][]serverBlock, error) {
+ addrToProtocolToServerBlocks := map[string]map[string][]serverBlock{}
+
+ type keyWithParsedKey struct {
+ key caddyfile.Token
+ parsedKey Address
+ }
for i, sblock := range originalServerBlocks {
// within a server block, we need to map all the listener addresses
@@ -84,95 +93,193 @@ func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBloc
// will be served by them; this has the effect of treating each
// key of a server block as its own, but without having to repeat its
// contents in cases where multiple keys really can be served together
- addrToKeys := make(map[string][]string)
+ addrToProtocolToKeyWithParsedKeys := map[string]map[string][]keyWithParsedKey{}
for j, key := range sblock.block.Keys {
+ parsedKey, err := ParseAddress(key.Text)
+ if err != nil {
+ return nil, fmt.Errorf("parsing key: %v", err)
+ }
+ parsedKey = parsedKey.Normalize()
+
// a key can have multiple listener addresses if there are multiple
// arguments to the 'bind' directive (although they will all have
// the same port, since the port is defined by the key or is implicit
// through automatic HTTPS)
- addrs, err := st.listenerAddrsForServerBlockKey(sblock, key, options)
+ listeners, err := st.listenersForServerBlockAddress(sblock, parsedKey, options)
if err != nil {
- return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key, err)
+ return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key.Text, err)
}
- // associate this key with each listener address it is served on
- for _, addr := range addrs {
- addrToKeys[addr] = append(addrToKeys[addr], key)
+ // associate this key with its protocols and each listener address served with them
+ kwpk := keyWithParsedKey{key, parsedKey}
+ for addr, protocols := range listeners {
+ protocolToKeyWithParsedKeys, ok := addrToProtocolToKeyWithParsedKeys[addr]
+ if !ok {
+ protocolToKeyWithParsedKeys = map[string][]keyWithParsedKey{}
+ addrToProtocolToKeyWithParsedKeys[addr] = protocolToKeyWithParsedKeys
+ }
+
+ // an empty protocol indicates the default, a nil or empty value in the ListenProtocols array
+ if len(protocols) == 0 {
+ protocols[""] = struct{}{}
+ }
+ for prot := range protocols {
+ protocolToKeyWithParsedKeys[prot] = append(
+ protocolToKeyWithParsedKeys[prot],
+ kwpk)
+ }
}
}
+ // make a slice of the map keys so we can iterate in sorted order
+ addrs := make([]string, 0, len(addrToProtocolToKeyWithParsedKeys))
+ for addr := range addrToProtocolToKeyWithParsedKeys {
+ addrs = append(addrs, addr)
+ }
+ sort.Strings(addrs)
+
// now that we know which addresses serve which keys of this
// server block, we iterate that mapping and create a list of
// new server blocks for each address where the keys of the
// server block are only the ones which use the address; but
// the contents (tokens) are of course the same
- for addr, keys := range addrToKeys {
- // parse keys so that we only have to do it once
- parsedKeys := make([]Address, 0, len(keys))
- for _, key := range keys {
- addr, err := ParseAddress(key)
- if err != nil {
- return nil, fmt.Errorf("parsing key '%s': %v", key, err)
- }
- parsedKeys = append(parsedKeys, addr.Normalize())
+ for _, addr := range addrs {
+ protocolToKeyWithParsedKeys := addrToProtocolToKeyWithParsedKeys[addr]
+
+ prots := make([]string, 0, len(protocolToKeyWithParsedKeys))
+ for prot := range protocolToKeyWithParsedKeys {
+ prots = append(prots, prot)
}
- sbmap[addr] = append(sbmap[addr], serverBlock{
- block: caddyfile.ServerBlock{
- Keys: keys,
- Segments: sblock.block.Segments,
- },
- pile: sblock.pile,
- keys: parsedKeys,
+ sort.Strings(prots)
+
+ protocolToServerBlocks, ok := addrToProtocolToServerBlocks[addr]
+ if !ok {
+ protocolToServerBlocks = map[string][]serverBlock{}
+ addrToProtocolToServerBlocks[addr] = protocolToServerBlocks
+ }
+
+ for _, prot := range prots {
+ keyWithParsedKeys := protocolToKeyWithParsedKeys[prot]
+
+ keys := make([]caddyfile.Token, len(keyWithParsedKeys))
+ parsedKeys := make([]Address, len(keyWithParsedKeys))
+
+ for k, keyWithParsedKey := range keyWithParsedKeys {
+ keys[k] = keyWithParsedKey.key
+ parsedKeys[k] = keyWithParsedKey.parsedKey
+ }
+
+ protocolToServerBlocks[prot] = append(protocolToServerBlocks[prot], serverBlock{
+ block: caddyfile.ServerBlock{
+ Keys: keys,
+ Segments: sblock.block.Segments,
+ },
+ pile: sblock.pile,
+ parsedKeys: parsedKeys,
+ })
+ }
+ }
+ }
+
+ return addrToProtocolToServerBlocks, nil
+}
+
+// consolidateAddrMappings eliminates repetition of identical server blocks in a mapping of
+// single listener addresses to protocols to lists of server blocks. Since multiple addresses
+// may serve multiple protocols to identical sites (server block contents), this function turns
+// a 1:many mapping into a many:many mapping. Server block contents (tokens) must be
+// exactly identical so that reflect.DeepEqual returns true in order for the addresses to be combined.
+// Identical entries are deleted from the addrToServerBlocks map. Essentially, each pairing (each
+// association from multiple addresses to multiple server blocks; i.e. each element of
+// the returned slice) becomes a server definition in the output JSON.
+func (st *ServerType) consolidateAddrMappings(addrToProtocolToServerBlocks map[string]map[string][]serverBlock) []sbAddrAssociation {
+ sbaddrs := make([]sbAddrAssociation, 0, len(addrToProtocolToServerBlocks))
+
+ addrs := make([]string, 0, len(addrToProtocolToServerBlocks))
+ for addr := range addrToProtocolToServerBlocks {
+ addrs = append(addrs, addr)
+ }
+ sort.Strings(addrs)
+
+ for _, addr := range addrs {
+ protocolToServerBlocks := addrToProtocolToServerBlocks[addr]
+
+ prots := make([]string, 0, len(protocolToServerBlocks))
+ for prot := range protocolToServerBlocks {
+ prots = append(prots, prot)
+ }
+ sort.Strings(prots)
+
+ for _, prot := range prots {
+ serverBlocks := protocolToServerBlocks[prot]
+
+ // now find other addresses that map to identical
+ // server blocks and add them to our map of listener
+ // addresses and protocols, while removing them from
+ // the original map
+ listeners := map[string]map[string]struct{}{}
+
+ for otherAddr, otherProtocolToServerBlocks := range addrToProtocolToServerBlocks {
+ for otherProt, otherServerBlocks := range otherProtocolToServerBlocks {
+ if addr == otherAddr && prot == otherProt || reflect.DeepEqual(serverBlocks, otherServerBlocks) {
+ listener, ok := listeners[otherAddr]
+ if !ok {
+ listener = map[string]struct{}{}
+ listeners[otherAddr] = listener
+ }
+ listener[otherProt] = struct{}{}
+ delete(otherProtocolToServerBlocks, otherProt)
+ }
+ }
+ }
+
+ addresses := make([]string, 0, len(listeners))
+ for lnAddr := range listeners {
+ addresses = append(addresses, lnAddr)
+ }
+ sort.Strings(addresses)
+
+ addressesWithProtocols := make([]addressWithProtocols, 0, len(listeners))
+
+ for _, lnAddr := range addresses {
+ lnProts := listeners[lnAddr]
+ prots := make([]string, 0, len(lnProts))
+ for prot := range lnProts {
+ prots = append(prots, prot)
+ }
+ sort.Strings(prots)
+
+ addressesWithProtocols = append(addressesWithProtocols, addressWithProtocols{
+ address: lnAddr,
+ protocols: prots,
+ })
+ }
+
+ sbaddrs = append(sbaddrs, sbAddrAssociation{
+ addressesWithProtocols: addressesWithProtocols,
+ serverBlocks: serverBlocks,
})
}
}
- return sbmap, nil
-}
-
-// consolidateAddrMappings eliminates repetition of identical server blocks in a mapping of
-// single listener addresses to lists of server blocks. Since multiple addresses may serve
-// identical sites (server block contents), this function turns a 1:many mapping into a
-// many:many mapping. Server block contents (tokens) must be exactly identical so that
-// reflect.DeepEqual returns true in order for the addresses to be combined. Identical
-// entries are deleted from the addrToServerBlocks map. Essentially, each pairing (each
-// association from multiple addresses to multiple server blocks; i.e. each element of
-// the returned slice) becomes a server definition in the output JSON.
-func (st *ServerType) consolidateAddrMappings(addrToServerBlocks map[string][]serverBlock) []sbAddrAssociation {
- sbaddrs := make([]sbAddrAssociation, 0, len(addrToServerBlocks))
- for addr, sblocks := range addrToServerBlocks {
- // we start with knowing that at least this address
- // maps to these server blocks
- a := sbAddrAssociation{
- addresses: []string{addr},
- serverBlocks: sblocks,
- }
-
- // now find other addresses that map to identical
- // server blocks and add them to our list of
- // addresses, while removing them from the map
- for otherAddr, otherSblocks := range addrToServerBlocks {
- if addr == otherAddr {
- continue
- }
- if reflect.DeepEqual(sblocks, otherSblocks) {
- a.addresses = append(a.addresses, otherAddr)
- delete(addrToServerBlocks, otherAddr)
- }
- }
-
- sbaddrs = append(sbaddrs, a)
- }
return sbaddrs
}
-func (st *ServerType) listenerAddrsForServerBlockKey(sblock serverBlock, key string,
- options map[string]interface{}) ([]string, error) {
- addr, err := ParseAddress(key)
- if err != nil {
- return nil, fmt.Errorf("parsing key: %v", err)
+// listenersForServerBlockAddress essentially converts the Caddyfile site addresses to a map from
+// Caddy listener addresses and the protocols to serve them with to the parsed address for each server block.
+func (st *ServerType) listenersForServerBlockAddress(sblock serverBlock, addr Address,
+ options map[string]any,
+) (map[string]map[string]struct{}, error) {
+ switch addr.Scheme {
+ case "wss":
+ return nil, fmt.Errorf("the scheme wss:// is only supported in browsers; use https:// instead")
+ case "ws":
+ return nil, fmt.Errorf("the scheme ws:// is only supported in browsers; use http:// instead")
+ case "https", "http", "":
+		// valid schemes; nothing to do
+ default:
+ return nil, fmt.Errorf("unsupported URL scheme %s://", addr.Scheme)
}
- addr = addr.Normalize()
// figure out the HTTP and HTTPS ports; either
// use defaults, or override with user config
@@ -196,36 +303,58 @@ func (st *ServerType) listenerAddrsForServerBlockKey(sblock serverBlock, key str
// error if scheme and port combination violate convention
if (addr.Scheme == "http" && lnPort == httpsPort) || (addr.Scheme == "https" && lnPort == httpPort) {
- return nil, fmt.Errorf("[%s] scheme and port violate convention", key)
+ return nil, fmt.Errorf("[%s] scheme and port violate convention", addr.String())
}
- // the bind directive specifies hosts, but is optional
- lnHosts := make([]string, 0, len(sblock.pile))
+ // the bind directive specifies hosts (and potentially network), and the protocols to serve them with, but is optional
+ lnCfgVals := make([]addressesWithProtocols, 0, len(sblock.pile["bind"]))
for _, cfgVal := range sblock.pile["bind"] {
- lnHosts = append(lnHosts, cfgVal.Value.([]string)...)
+ if val, ok := cfgVal.Value.(addressesWithProtocols); ok {
+ lnCfgVals = append(lnCfgVals, val)
+ }
}
- if len(lnHosts) == 0 {
- lnHosts = []string{""}
- }
-
- // use a map to prevent duplication
- listeners := make(map[string]struct{})
- for _, host := range lnHosts {
- addr, err := caddy.ParseNetworkAddress(host)
- if err == nil && addr.IsUnixNetwork() {
- listeners[host] = struct{}{}
+ if len(lnCfgVals) == 0 {
+ if defaultBindValues, ok := options["default_bind"].([]ConfigValue); ok {
+ for _, defaultBindValue := range defaultBindValues {
+ lnCfgVals = append(lnCfgVals, defaultBindValue.Value.(addressesWithProtocols))
+ }
} else {
- listeners[net.JoinHostPort(host, lnPort)] = struct{}{}
+ lnCfgVals = []addressesWithProtocols{{
+ addresses: []string{""},
+ protocols: nil,
+ }}
}
}
- // now turn map into list
- listenersList := make([]string, 0, len(listeners))
- for lnStr := range listeners {
- listenersList = append(listenersList, lnStr)
+ // use a map to prevent duplication
+ listeners := map[string]map[string]struct{}{}
+ for _, lnCfgVal := range lnCfgVals {
+ for _, lnAddr := range lnCfgVal.addresses {
+ lnNetw, lnHost, _, err := caddy.SplitNetworkAddress(lnAddr)
+ if err != nil {
+ return nil, fmt.Errorf("splitting listener address: %v", err)
+ }
+ networkAddr, err := caddy.ParseNetworkAddress(caddy.JoinNetworkAddress(lnNetw, lnHost, lnPort))
+ if err != nil {
+ return nil, fmt.Errorf("parsing network address: %v", err)
+ }
+			if _, ok := listeners[networkAddr.String()]; !ok {
+ listeners[networkAddr.String()] = map[string]struct{}{}
+ }
+ for _, protocol := range lnCfgVal.protocols {
+ listeners[networkAddr.String()][protocol] = struct{}{}
+ }
+ }
}
- return listenersList, nil
+ return listeners, nil
+}
+
+// addressesWithProtocols associates a list of listen addresses
+// with a list of protocols to serve them with
+type addressesWithProtocols struct {
+ addresses []string
+ protocols []string
}
// Address represents a site address. It contains
@@ -328,8 +457,10 @@ func (a Address) Normalize() Address {
// ensure host is normalized if it's an IP address
host := strings.TrimSpace(a.Host)
- if ip := net.ParseIP(host); ip != nil {
- host = ip.String()
+ if ip, err := netip.ParseAddr(host); err == nil {
+ if ip.Is6() && !ip.Is4() && !ip.Is4In6() {
+ host = ip.String()
+ }
}
return Address{
@@ -341,28 +472,6 @@ func (a Address) Normalize() Address {
}
}
-// Key returns a string form of a, much like String() does, but this
-// method doesn't add anything default that wasn't in the original.
-func (a Address) Key() string {
- res := ""
- if a.Scheme != "" {
- res += a.Scheme + "://"
- }
- if a.Host != "" {
- res += a.Host
- }
- // insert port only if the original has its own explicit port
- if a.Port != "" &&
- len(a.Original) >= len(res) &&
- strings.HasPrefix(a.Original[len(res):], ":"+a.Port) {
- res += ":" + a.Port
- }
- if a.Path != "" {
- res += a.Path
- }
- return res
-}
-
// lowerExceptPlaceholders lowercases s except within
// placeholders (substrings in non-escaped '{ }' spans).
// See https://github.com/caddyserver/caddy/issues/3264
diff --git a/caddyconfig/httpcaddyfile/addresses_fuzz.go b/caddyconfig/httpcaddyfile/addresses_fuzz.go
index 4ab62984..364ff971 100644
--- a/caddyconfig/httpcaddyfile/addresses_fuzz.go
+++ b/caddyconfig/httpcaddyfile/addresses_fuzz.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build gofuzz
+//go:build gofuzz
package httpcaddyfile
diff --git a/caddyconfig/httpcaddyfile/addresses_test.go b/caddyconfig/httpcaddyfile/addresses_test.go
index 612ef67e..232460d0 100644
--- a/caddyconfig/httpcaddyfile/addresses_test.go
+++ b/caddyconfig/httpcaddyfile/addresses_test.go
@@ -106,67 +106,128 @@ func TestAddressString(t *testing.T) {
func TestKeyNormalization(t *testing.T) {
testCases := []struct {
input string
- expect string
+ expect Address
}{
{
- input: "example.com",
- expect: "example.com",
+ input: "example.com",
+ expect: Address{
+ Host: "example.com",
+ },
},
{
- input: "http://host:1234/path",
- expect: "http://host:1234/path",
+ input: "http://host:1234/path",
+ expect: Address{
+ Scheme: "http",
+ Host: "host",
+ Port: "1234",
+ Path: "/path",
+ },
},
{
- input: "HTTP://A/ABCDEF",
- expect: "http://a/ABCDEF",
+ input: "HTTP://A/ABCDEF",
+ expect: Address{
+ Scheme: "http",
+ Host: "a",
+ Path: "/ABCDEF",
+ },
},
{
- input: "A/ABCDEF",
- expect: "a/ABCDEF",
+ input: "A/ABCDEF",
+ expect: Address{
+ Host: "a",
+ Path: "/ABCDEF",
+ },
},
{
- input: "A:2015/Path",
- expect: "a:2015/Path",
+ input: "A:2015/Path",
+ expect: Address{
+ Host: "a",
+ Port: "2015",
+ Path: "/Path",
+ },
},
{
- input: "sub.{env.MY_DOMAIN}",
- expect: "sub.{env.MY_DOMAIN}",
+ input: "sub.{env.MY_DOMAIN}",
+ expect: Address{
+ Host: "sub.{env.MY_DOMAIN}",
+ },
},
{
- input: "sub.ExAmPle",
- expect: "sub.example",
+ input: "sub.ExAmPle",
+ expect: Address{
+ Host: "sub.example",
+ },
},
{
- input: "sub.\\{env.MY_DOMAIN\\}",
- expect: "sub.\\{env.my_domain\\}",
+ input: "sub.\\{env.MY_DOMAIN\\}",
+ expect: Address{
+ Host: "sub.\\{env.my_domain\\}",
+ },
},
{
- input: "sub.{env.MY_DOMAIN}.com",
- expect: "sub.{env.MY_DOMAIN}.com",
+ input: "sub.{env.MY_DOMAIN}.com",
+ expect: Address{
+ Host: "sub.{env.MY_DOMAIN}.com",
+ },
},
{
- input: ":80",
- expect: ":80",
+ input: ":80",
+ expect: Address{
+ Port: "80",
+ },
},
{
- input: ":443",
- expect: ":443",
+ input: ":443",
+ expect: Address{
+ Port: "443",
+ },
},
{
- input: ":1234",
- expect: ":1234",
+ input: ":1234",
+ expect: Address{
+ Port: "1234",
+ },
},
{
input: "",
- expect: "",
+ expect: Address{},
},
{
input: ":",
- expect: "",
+ expect: Address{},
},
{
- input: "[::]",
- expect: "::",
+ input: "[::]",
+ expect: Address{
+ Host: "::",
+ },
+ },
+ {
+ input: "127.0.0.1",
+ expect: Address{
+ Host: "127.0.0.1",
+ },
+ },
+ {
+ input: "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234",
+ expect: Address{
+ Host: "2001:db8:85a3:8d3:1319:8a2e:370:7348",
+ Port: "1234",
+ },
+ },
+ {
+ // IPv4 address in IPv6 form (#4381)
+ input: "[::ffff:cff4:e77d]:1234",
+ expect: Address{
+ Host: "::ffff:cff4:e77d",
+ Port: "1234",
+ },
+ },
+ {
+ input: "::ffff:cff4:e77d",
+ expect: Address{
+ Host: "::ffff:cff4:e77d",
+ },
},
}
for i, tc := range testCases {
@@ -175,9 +236,18 @@ func TestKeyNormalization(t *testing.T) {
t.Errorf("Test %d: Parsing address '%s': %v", i, tc.input, err)
continue
}
- if actual := addr.Normalize().Key(); actual != tc.expect {
- t.Errorf("Test %d: Input '%s': Expected '%s' but got '%s'", i, tc.input, tc.expect, actual)
+ actual := addr.Normalize()
+ if actual.Scheme != tc.expect.Scheme {
+ t.Errorf("Test %d: Input '%s': Expected Scheme='%s' but got Scheme='%s'", i, tc.input, tc.expect.Scheme, actual.Scheme)
+ }
+ if actual.Host != tc.expect.Host {
+ t.Errorf("Test %d: Input '%s': Expected Host='%s' but got Host='%s'", i, tc.input, tc.expect.Host, actual.Host)
+ }
+ if actual.Port != tc.expect.Port {
+ t.Errorf("Test %d: Input '%s': Expected Port='%s' but got Port='%s'", i, tc.input, tc.expect.Port, actual.Port)
+ }
+ if actual.Path != tc.expect.Path {
+ t.Errorf("Test %d: Input '%s': Expected Path='%s' but got Path='%s'", i, tc.input, tc.expect.Path, actual.Path)
}
-
}
}
diff --git a/caddyconfig/httpcaddyfile/builtins.go b/caddyconfig/httpcaddyfile/builtins.go
index 1efe5acc..eca6a2d6 100644
--- a/caddyconfig/httpcaddyfile/builtins.go
+++ b/caddyconfig/httpcaddyfile/builtins.go
@@ -19,277 +19,534 @@ import (
"html"
"net/http"
"reflect"
+ "strconv"
"strings"
+ "time"
+
+ "github.com/caddyserver/certmagic"
+ "github.com/mholt/acmez/v2/acme"
+ "go.uber.org/zap/zapcore"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddytls"
- "go.uber.org/zap/zapcore"
)
func init() {
RegisterDirective("bind", parseBind)
RegisterDirective("tls", parseTLS)
- RegisterHandlerDirective("root", parseRoot)
+ RegisterHandlerDirective("fs", parseFilesystem)
+ RegisterDirective("root", parseRoot)
+ RegisterHandlerDirective("vars", parseVars)
RegisterHandlerDirective("redir", parseRedir)
RegisterHandlerDirective("respond", parseRespond)
+ RegisterHandlerDirective("abort", parseAbort)
+ RegisterHandlerDirective("error", parseError)
RegisterHandlerDirective("route", parseRoute)
RegisterHandlerDirective("handle", parseHandle)
RegisterDirective("handle_errors", parseHandleErrors)
+ RegisterHandlerDirective("invoke", parseInvoke)
RegisterDirective("log", parseLog)
+ RegisterHandlerDirective("skip_log", parseLogSkip)
+ RegisterHandlerDirective("log_skip", parseLogSkip)
+ RegisterHandlerDirective("log_name", parseLogName)
}
// parseBind parses the bind directive. Syntax:
//
-// bind
-//
+// bind [{
+// protocols [h1|h2|h2c|h3] [...]
+// }]
func parseBind(h Helper) ([]ConfigValue, error) {
- var lnHosts []string
- for h.Next() {
- lnHosts = append(lnHosts, h.RemainingArgs()...)
+ h.Next() // consume directive name
+ var addresses, protocols []string
+ addresses = h.RemainingArgs()
+
+ for h.NextBlock(0) {
+ switch h.Val() {
+ case "protocols":
+ protocols = h.RemainingArgs()
+ if len(protocols) == 0 {
+ return nil, h.Errf("protocols requires one or more arguments")
+ }
+ default:
+ return nil, h.Errf("unknown subdirective: %s", h.Val())
+ }
}
- return h.NewBindAddresses(lnHosts), nil
+
+ return []ConfigValue{{Class: "bind", Value: addressesWithProtocols{
+ addresses: addresses,
+ protocols: protocols,
+ }}}, nil
}
// parseTLS parses the tls directive. Syntax:
//
-// tls [|internal]|[ ] {
-// protocols []
-// ciphers
-// curves
-// alpn
-// load
-// ca
-// ca_root
-// dns
-// on_demand
-// }
-//
+// tls [|internal]|[ ] {
+// protocols []
+// ciphers
+// curves
+// client_auth {
+// mode [request|require|verify_if_given|require_and_verify]
+// trust_pool [...]
+// trusted_leaf_cert
+// trusted_leaf_cert_file
+// }
+// alpn
+// load
+// ca
+// ca_root
+// key_type [ed25519|p256|p384|rsa2048|rsa4096]
+// dns [...]
+// propagation_delay
+// propagation_timeout
+// resolvers
+// dns_ttl
+// dns_challenge_override_domain
+// on_demand
+// reuse_private_keys
+// eab
+// issuer [...]
+// get_certificate [...]
+// insecure_secrets_log
+// }
func parseTLS(h Helper) ([]ConfigValue, error) {
+ h.Next() // consume directive name
+
cp := new(caddytls.ConnectionPolicy)
var fileLoader caddytls.FileLoader
var folderLoader caddytls.FolderLoader
var certSelector caddytls.CustomCertSelectionPolicy
var acmeIssuer *caddytls.ACMEIssuer
+ var keyType string
var internalIssuer *caddytls.InternalIssuer
+ var issuers []certmagic.Issuer
+ var certManagers []certmagic.Manager
var onDemand bool
+ var reusePrivateKeys bool
- for h.Next() {
+ firstLine := h.RemainingArgs()
+ switch len(firstLine) {
+ case 0:
+ case 1:
+ if firstLine[0] == "internal" {
+ internalIssuer = new(caddytls.InternalIssuer)
+ } else if !strings.Contains(firstLine[0], "@") {
+ return nil, h.Err("single argument must either be 'internal' or an email address")
+ } else {
+ acmeIssuer = &caddytls.ACMEIssuer{
+ Email: firstLine[0],
+ }
+ }
+
+ case 2:
// file certificate loader
- firstLine := h.RemainingArgs()
- switch len(firstLine) {
- case 0:
- case 1:
- if firstLine[0] == "internal" {
- internalIssuer = new(caddytls.InternalIssuer)
- } else if !strings.Contains(firstLine[0], "@") {
- return nil, h.Err("single argument must either be 'internal' or an email address")
- } else {
- if acmeIssuer == nil {
- acmeIssuer = new(caddytls.ACMEIssuer)
+ certFilename := firstLine[0]
+ keyFilename := firstLine[1]
+
+ // tag this certificate so if multiple certs match, specifically
+ // this one that the user has provided will be used, see #2588:
+ // https://github.com/caddyserver/caddy/issues/2588 ... but we
+ // must be careful about how we do this; being careless will
+ // lead to failed handshakes
+ //
+ // we need to remember which cert files we've seen, since we
+ // must load each cert only once; otherwise, they each get a
+ // different tag... since a cert loaded twice has the same
+ // bytes, it will overwrite the first one in the cache, and
+ // only the last cert (and its tag) will survive, so any conn
+ // policy that is looking for any tag other than the last one
+ // to be loaded won't find it, and TLS handshakes will fail
+ // (see end of issue #3004)
+ //
+ // tlsCertTags maps certificate filenames to their tag.
+ // This is used to remember which tag is used for each
+ // certificate file, since we need to avoid loading
+ // the same certificate files more than once, overwriting
+ // previous tags
+ tlsCertTags, ok := h.State["tlsCertTags"].(map[string]string)
+ if !ok {
+ tlsCertTags = make(map[string]string)
+ h.State["tlsCertTags"] = tlsCertTags
+ }
+
+ tag, ok := tlsCertTags[certFilename]
+ if !ok {
+ // haven't seen this cert file yet, let's give it a tag
+ // and add a loader for it
+ tag = fmt.Sprintf("cert%d", len(tlsCertTags))
+ fileLoader = append(fileLoader, caddytls.CertKeyFilePair{
+ Certificate: certFilename,
+ Key: keyFilename,
+ Tags: []string{tag},
+ })
+ // remember this for next time we see this cert file
+ tlsCertTags[certFilename] = tag
+ }
+ certSelector.AnyTag = append(certSelector.AnyTag, tag)
+
+ default:
+ return nil, h.ArgErr()
+ }
+
+ var hasBlock bool
+ for h.NextBlock(0) {
+ hasBlock = true
+
+ switch h.Val() {
+ case "protocols":
+ args := h.RemainingArgs()
+ if len(args) == 0 {
+ return nil, h.Errf("protocols requires one or two arguments")
+ }
+ if len(args) > 0 {
+ if _, ok := caddytls.SupportedProtocols[args[0]]; !ok {
+ return nil, h.Errf("wrong protocol name or protocol not supported: '%s'", args[0])
}
- acmeIssuer.Email = firstLine[0]
+ cp.ProtocolMin = args[0]
+ }
+ if len(args) > 1 {
+ if _, ok := caddytls.SupportedProtocols[args[1]]; !ok {
+ return nil, h.Errf("wrong protocol name or protocol not supported: '%s'", args[1])
+ }
+ cp.ProtocolMax = args[1]
}
- case 2:
- certFilename := firstLine[0]
- keyFilename := firstLine[1]
+ case "ciphers":
+ for h.NextArg() {
+ if !caddytls.CipherSuiteNameSupported(h.Val()) {
+ return nil, h.Errf("wrong cipher suite name or cipher suite not supported: '%s'", h.Val())
+ }
+ cp.CipherSuites = append(cp.CipherSuites, h.Val())
+ }
- // tag this certificate so if multiple certs match, specifically
- // this one that the user has provided will be used, see #2588:
- // https://github.com/caddyserver/caddy/issues/2588 ... but we
- // must be careful about how we do this; being careless will
- // lead to failed handshakes
- //
- // we need to remember which cert files we've seen, since we
- // must load each cert only once; otherwise, they each get a
- // different tag... since a cert loaded twice has the same
- // bytes, it will overwrite the first one in the cache, and
- // only the last cert (and its tag) will survive, so a any conn
- // policy that is looking for any tag but the last one to be
- // loaded won't find it, and TLS handshakes will fail (see end)
- // of issue #3004)
- //
- // tlsCertTags maps certificate filenames to their tag.
- // This is used to remember which tag is used for each
- // certificate files, since we need to avoid loading
- // the same certificate files more than once, overwriting
- // previous tags
- tlsCertTags, ok := h.State["tlsCertTags"].(map[string]string)
+ case "curves":
+ for h.NextArg() {
+ if _, ok := caddytls.SupportedCurves[h.Val()]; !ok {
+ return nil, h.Errf("Wrong curve name or curve not supported: '%s'", h.Val())
+ }
+ cp.Curves = append(cp.Curves, h.Val())
+ }
+
+ case "client_auth":
+ cp.ClientAuthentication = &caddytls.ClientAuthentication{}
+ if err := cp.ClientAuthentication.UnmarshalCaddyfile(h.NewFromNextSegment()); err != nil {
+ return nil, err
+ }
+ case "alpn":
+ args := h.RemainingArgs()
+ if len(args) == 0 {
+ return nil, h.ArgErr()
+ }
+ cp.ALPN = args
+
+ case "load":
+ folderLoader = append(folderLoader, h.RemainingArgs()...)
+
+ case "ca":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ acmeIssuer.CA = arg[0]
+
+ case "key_type":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ keyType = arg[0]
+
+ case "eab":
+ arg := h.RemainingArgs()
+ if len(arg) != 2 {
+ return nil, h.ArgErr()
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ acmeIssuer.ExternalAccount = &acme.EAB{
+ KeyID: arg[0],
+ MACKey: arg[1],
+ }
+
+ case "issuer":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ modName := h.Val()
+ modID := "tls.issuance." + modName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
+ if err != nil {
+ return nil, err
+ }
+ issuer, ok := unm.(certmagic.Issuer)
if !ok {
- tlsCertTags = make(map[string]string)
- h.State["tlsCertTags"] = tlsCertTags
+ return nil, h.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm)
}
+ issuers = append(issuers, issuer)
- tag, ok := tlsCertTags[certFilename]
- if !ok {
- // haven't seen this cert file yet, let's give it a tag
- // and add a loader for it
- tag = fmt.Sprintf("cert%d", len(tlsCertTags))
- fileLoader = append(fileLoader, caddytls.CertKeyFilePair{
- Certificate: certFilename,
- Key: keyFilename,
- Tags: []string{tag},
- })
- // remember this for next time we see this cert file
- tlsCertTags[certFilename] = tag
+ case "get_certificate":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
}
- certSelector.AnyTag = append(certSelector.AnyTag, tag)
+ modName := h.Val()
+ modID := "tls.get_certificate." + modName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
+ if err != nil {
+ return nil, err
+ }
+ certManager, ok := unm.(certmagic.Manager)
+ if !ok {
+ return nil, h.Errf("module %s (%T) is not a certmagic.CertificateManager", modID, unm)
+ }
+ certManagers = append(certManagers, certManager)
+
+ case "dns":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ provName := h.Val()
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ modID := "dns.providers." + provName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
+ if err != nil {
+ return nil, err
+ }
+ acmeIssuer.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(unm, "name", provName, h.warnings)
+
+ case "resolvers":
+ args := h.RemainingArgs()
+ if len(args) == 0 {
+ return nil, h.ArgErr()
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ acmeIssuer.Challenges.DNS.Resolvers = args
+
+ case "propagation_delay":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ delayStr := arg[0]
+ delay, err := caddy.ParseDuration(delayStr)
+ if err != nil {
+ return nil, h.Errf("invalid propagation_delay duration %s: %v", delayStr, err)
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ acmeIssuer.Challenges.DNS.PropagationDelay = caddy.Duration(delay)
+
+ case "propagation_timeout":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ timeoutStr := arg[0]
+ var timeout time.Duration
+ if timeoutStr == "-1" {
+ timeout = time.Duration(-1)
+ } else {
+ var err error
+ timeout, err = caddy.ParseDuration(timeoutStr)
+ if err != nil {
+ return nil, h.Errf("invalid propagation_timeout duration %s: %v", timeoutStr, err)
+ }
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ acmeIssuer.Challenges.DNS.PropagationTimeout = caddy.Duration(timeout)
+
+ case "dns_ttl":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ ttlStr := arg[0]
+ ttl, err := caddy.ParseDuration(ttlStr)
+ if err != nil {
+ return nil, h.Errf("invalid dns_ttl duration %s: %v", ttlStr, err)
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ acmeIssuer.Challenges.DNS.TTL = caddy.Duration(ttl)
+
+ case "dns_challenge_override_domain":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ acmeIssuer.Challenges.DNS.OverrideDomain = arg[0]
+
+ case "ca_root":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, arg[0])
+
+ case "on_demand":
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ onDemand = true
+
+ case "reuse_private_keys":
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ reusePrivateKeys = true
+
+ case "insecure_secrets_log":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ cp.InsecureSecretsLog = h.Val()
default:
- return nil, h.ArgErr()
- }
-
- var hasBlock bool
- for h.NextBlock(0) {
- hasBlock = true
-
- switch h.Val() {
- case "protocols":
- args := h.RemainingArgs()
- if len(args) == 0 {
- return nil, h.SyntaxErr("one or two protocols")
- }
- if len(args) > 0 {
- if _, ok := caddytls.SupportedProtocols[args[0]]; !ok {
- return nil, h.Errf("Wrong protocol name or protocol not supported: '%s'", args[0])
- }
- cp.ProtocolMin = args[0]
- }
- if len(args) > 1 {
- if _, ok := caddytls.SupportedProtocols[args[1]]; !ok {
- return nil, h.Errf("Wrong protocol name or protocol not supported: '%s'", args[1])
- }
- cp.ProtocolMax = args[1]
- }
-
- case "ciphers":
- for h.NextArg() {
- if !caddytls.CipherSuiteNameSupported(h.Val()) {
- return nil, h.Errf("Wrong cipher suite name or cipher suite not supported: '%s'", h.Val())
- }
- cp.CipherSuites = append(cp.CipherSuites, h.Val())
- }
-
- case "curves":
- for h.NextArg() {
- if _, ok := caddytls.SupportedCurves[h.Val()]; !ok {
- return nil, h.Errf("Wrong curve name or curve not supported: '%s'", h.Val())
- }
- cp.Curves = append(cp.Curves, h.Val())
- }
-
- case "alpn":
- args := h.RemainingArgs()
- if len(args) == 0 {
- return nil, h.ArgErr()
- }
- cp.ALPN = args
-
- case "load":
- folderLoader = append(folderLoader, h.RemainingArgs()...)
-
- case "ca":
- arg := h.RemainingArgs()
- if len(arg) != 1 {
- return nil, h.ArgErr()
- }
- if acmeIssuer == nil {
- acmeIssuer = new(caddytls.ACMEIssuer)
- }
- acmeIssuer.CA = arg[0]
-
- case "dns":
- if !h.Next() {
- return nil, h.ArgErr()
- }
- if acmeIssuer == nil {
- acmeIssuer = new(caddytls.ACMEIssuer)
- }
- provName := h.Val()
- if acmeIssuer.Challenges == nil {
- acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
- }
- dnsProvModule, err := caddy.GetModule("tls.dns." + provName)
- if err != nil {
- return nil, h.Errf("getting DNS provider module named '%s': %v", provName, err)
- }
- acmeIssuer.Challenges.DNSRaw = caddyconfig.JSONModuleObject(dnsProvModule.New(), "provider", provName, h.warnings)
-
- case "ca_root":
- arg := h.RemainingArgs()
- if len(arg) != 1 {
- return nil, h.ArgErr()
- }
- if acmeIssuer == nil {
- acmeIssuer = new(caddytls.ACMEIssuer)
- }
- acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, arg[0])
-
- case "on_demand":
- if h.NextArg() {
- return nil, h.ArgErr()
- }
- onDemand = true
-
- default:
- return nil, h.Errf("unknown subdirective: %s", h.Val())
- }
- }
-
- // a naked tls directive is not allowed
- if len(firstLine) == 0 && !hasBlock {
- return nil, h.ArgErr()
+ return nil, h.Errf("unknown subdirective: %s", h.Val())
}
}
+ // a naked tls directive is not allowed
+ if len(firstLine) == 0 && !hasBlock {
+ return nil, h.ArgErr()
+ }
+
// begin building the final config values
- var configVals []ConfigValue
+ configVals := []ConfigValue{}
// certificate loaders
if len(fileLoader) > 0 {
configVals = append(configVals, ConfigValue{
- Class: "tls.certificate_loader",
+ Class: "tls.cert_loader",
Value: fileLoader,
})
}
if len(folderLoader) > 0 {
configVals = append(configVals, ConfigValue{
- Class: "tls.certificate_loader",
+ Class: "tls.cert_loader",
Value: folderLoader,
})
}
- // issuer
- if acmeIssuer != nil && internalIssuer != nil {
- // the logic to support this would be complex
- return nil, h.Err("cannot use both ACME and internal issuers in same server block")
+ // some tls subdirectives are shortcuts that implicitly configure issuers, and the
+ // user can also configure issuers explicitly using the issuer subdirective; the
+ // logic to support both would likely be complex, or at least unintuitive
+ if len(issuers) > 0 && (acmeIssuer != nil || internalIssuer != nil) {
+ return nil, h.Err("cannot mix issuer subdirective (explicit issuers) with other issuer-specific subdirectives (implicit issuers)")
}
- if acmeIssuer != nil {
- // fill in global defaults, if configured
- if email := h.Option("email"); email != nil && acmeIssuer.Email == "" {
- acmeIssuer.Email = email.(string)
- }
- if acmeCA := h.Option("acme_ca"); acmeCA != nil && acmeIssuer.CA == "" {
- acmeIssuer.CA = acmeCA.(string)
- }
- if caPemFile := h.Option("acme_ca_root"); caPemFile != nil {
- acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, caPemFile.(string))
+ if acmeIssuer != nil && internalIssuer != nil {
+ return nil, h.Err("cannot create both ACME and internal certificate issuers")
+ }
+
+ // now we should either have: explicitly-created issuers, or an implicitly-created
+ // ACME or internal issuer, or no issuers at all
+ switch {
+ case len(issuers) > 0:
+ for _, issuer := range issuers {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.cert_issuer",
+ Value: issuer,
+ })
}
- configVals = append(configVals, ConfigValue{
- Class: "tls.cert_issuer",
- Value: acmeIssuer,
- })
- } else if internalIssuer != nil {
+ case acmeIssuer != nil:
+ // implicit ACME issuers (from various subdirectives) - use defaults; there might be more than one
+ defaultIssuers := caddytls.DefaultIssuers(acmeIssuer.Email)
+
+ // if an ACME CA endpoint was set, the user expects to use that specific one,
+ // not any others that may be defaults, so replace all defaults with that ACME CA
+ if acmeIssuer.CA != "" {
+ defaultIssuers = []certmagic.Issuer{acmeIssuer}
+ }
+
+ for _, issuer := range defaultIssuers {
+ // apply settings from the implicitly-configured ACMEIssuer to any
+ // default ACMEIssuers, but preserve each default issuer's CA endpoint,
+ // because, for example, if you configure the DNS challenge, it should
+ // apply to any of the default ACMEIssuers, but you don't want to trample
+ // out their unique CA endpoints
+ if iss, ok := issuer.(*caddytls.ACMEIssuer); ok && iss != nil {
+ acmeCopy := *acmeIssuer
+ acmeCopy.CA = iss.CA
+ issuer = &acmeCopy
+ }
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.cert_issuer",
+ Value: issuer,
+ })
+ }
+
+ case internalIssuer != nil:
configVals = append(configVals, ConfigValue{
Class: "tls.cert_issuer",
Value: internalIssuer,
})
}
+ // certificate key type
+ if keyType != "" {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.key_type",
+ Value: keyType,
+ })
+ }
+
// on-demand TLS
if onDemand {
configVals = append(configVals, ConfigValue{
@@ -297,6 +554,20 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
Value: true,
})
}
+ for _, certManager := range certManagers {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.cert_manager",
+ Value: certManager,
+ })
+ }
+
+ // reuse private keys
+ if reusePrivateKeys {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.reuse_private_keys",
+ Value: true,
+ })
+ }
// custom certificate selection
if len(certSelector.AnyTag) > 0 {
@@ -318,31 +589,74 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
// parseRoot parses the root directive. Syntax:
//
-// root [<matcher>] <path>
-//
-func parseRoot(h Helper) (caddyhttp.MiddlewareHandler, error) {
- var root string
- for h.Next() {
+// root [<matcher>] <path>
+func parseRoot(h Helper) ([]ConfigValue, error) {
+ h.Next() // consume directive name
+
+ // count the tokens to determine what to do
+ argsCount := h.CountRemainingArgs()
+ if argsCount == 0 {
+ return nil, h.Errf("too few arguments; must have at least a root path")
+ }
+ if argsCount > 2 {
+ return nil, h.Errf("too many arguments; should only be a matcher and a path")
+ }
+
+ // with only one arg, assume it's a root path with no matcher token
+ if argsCount == 1 {
if !h.NextArg() {
return nil, h.ArgErr()
}
- root = h.Val()
- if h.NextArg() {
- return nil, h.ArgErr()
- }
+ return h.NewRoute(nil, caddyhttp.VarsMiddleware{"root": h.Val()}), nil
}
- return caddyhttp.VarsMiddleware{"root": root}, nil
+
+ // parse the matcher token into a matcher set
+ userMatcherSet, err := h.ExtractMatcherSet()
+ if err != nil {
+ return nil, err
+ }
+ h.Next() // consume directive name again, matcher parsing does a reset
+
+ // advance to the root path
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ // make the route with the matcher
+ return h.NewRoute(userMatcherSet, caddyhttp.VarsMiddleware{"root": h.Val()}), nil
+}
+
+// parseFilesystem parses the fs directive. Syntax:
+//
+// fs <filesystem>
+func parseFilesystem(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive name
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ return caddyhttp.VarsMiddleware{"fs": h.Val()}, nil
+}
+
+// parseVars parses the vars directive. See its UnmarshalCaddyfile method for syntax.
+func parseVars(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ v := new(caddyhttp.VarsMiddleware)
+ err := v.UnmarshalCaddyfile(h.Dispenser)
+ return v, err
}
// parseRedir parses the redir directive. Syntax:
//
-// redir [<matcher>] <to> [<code>]
+// redir [<matcher>] <to> [<code>]
//
+// <code> can be "permanent" for 301, "temporary" for 302 (default),
+// a placeholder, or any number in the 3xx range or 401. The special
+// code "html" can be used to redirect only browser clients (will
+// respond with HTTP 200 and no Location header; redirect is performed
+// with JS and a meta tag).
func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
- if !h.Next() {
- return nil, h.ArgErr()
- }
-
+ h.Next() // consume directive name
if !h.NextArg() {
return nil, h.ArgErr()
}
@@ -352,14 +666,17 @@ func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
if h.NextArg() {
code = h.Val()
}
- if code == "permanent" {
- code = "301"
- }
- if code == "temporary" || code == "" {
- code = "302"
- }
+
var body string
- if code == "html" {
+ var hdr http.Header
+ switch code {
+ case "permanent":
+ code = "301"
+
+ case "temporary", "":
+ code = "302"
+
+ case "html":
// Script tag comes first since that will better imitate a redirect in the browser's
// history, but the meta tag is a fallback for most non-JS clients.
const metaRedir = `
@@ -374,11 +691,39 @@ func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
`
safeTo := html.EscapeString(to)
body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
+ hdr = http.Header{"Content-Type": []string{"text/html; charset=utf-8"}}
+ code = "200" // don't redirect non-browser clients
+
+ default:
+ // Allow placeholders for the code
+ if strings.HasPrefix(code, "{") {
+ break
+ }
+ // Try to validate as an integer otherwise
+ codeInt, err := strconv.Atoi(code)
+ if err != nil {
+ return nil, h.Errf("Not a supported redir code type or not valid integer: '%s'", code)
+ }
+ // Sometimes, a 401 with Location header is desirable because
+ // requests made with XHR will "eat" the 3xx redirect; so if
+ // the intent was to redirect to an auth page, a 3xx won't
+ // work. Responding with 401 allows JS code to read the
+ // Location header and do a window.location redirect manually.
+ // see https://stackoverflow.com/a/2573589/846934
+ // see https://github.com/oauth2-proxy/oauth2-proxy/issues/1522
+ if codeInt < 300 || (codeInt > 399 && codeInt != 401) {
+ return nil, h.Errf("Redir code not in the 3xx range or 401: '%v'", codeInt)
+ }
+ }
+
+ // set the Location header for all codes except the "html" (200) case, which must not redirect
+ if code != "200" {
+ hdr = http.Header{"Location": []string{to}}
}
return caddyhttp.StaticResponse{
StatusCode: caddyhttp.WeakString(code),
- Headers: http.Header{"Location": []string{to}},
+ Headers: hdr,
Body: body,
}, nil
}
@@ -387,61 +732,109 @@ func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
func parseRespond(h Helper) (caddyhttp.MiddlewareHandler, error) {
sr := new(caddyhttp.StaticResponse)
err := sr.UnmarshalCaddyfile(h.Dispenser)
- if err != nil {
- return nil, err
+ return sr, err
+}
+
+// parseAbort parses the abort directive.
+func parseAbort(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive
+ for h.Next() || h.NextBlock(0) {
+ return nil, h.ArgErr()
}
- return sr, nil
+ return &caddyhttp.StaticResponse{Abort: true}, nil
+}
+
+// parseError parses the error directive.
+func parseError(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ se := new(caddyhttp.StaticError)
+ err := se.UnmarshalCaddyfile(h.Dispenser)
+ return se, err
}
// parseRoute parses the route directive.
func parseRoute(h Helper) (caddyhttp.MiddlewareHandler, error) {
- sr := new(caddyhttp.Subroute)
+ allResults, err := parseSegmentAsConfig(h)
+ if err != nil {
+ return nil, err
+ }
- for h.Next() {
- for nesting := h.Nesting(); h.NextBlock(nesting); {
- dir := h.Val()
-
- dirFunc, ok := registeredDirectives[dir]
- if !ok {
- return nil, h.Errf("unrecognized directive: %s", dir)
- }
-
- subHelper := h
- subHelper.Dispenser = h.NewFromNextSegment()
-
- results, err := dirFunc(subHelper)
- if err != nil {
- return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err)
- }
- for _, result := range results {
- switch handler := result.Value.(type) {
- case caddyhttp.Route:
- sr.Routes = append(sr.Routes, handler)
- case caddyhttp.Subroute:
- // directives which return a literal subroute instead of a route
- // means they intend to keep those handlers together without
- // them being reordered; we're doing that anyway since we're in
- // the route directive, so just append its handlers
- sr.Routes = append(sr.Routes, handler.Routes...)
- default:
- return nil, h.Errf("%s directive returned something other than an HTTP route or subroute: %#v (only handler directives can be used in routes)", dir, result.Value)
- }
- }
+ for _, result := range allResults {
+ switch result.Value.(type) {
+ case caddyhttp.Route, caddyhttp.Subroute:
+ default:
+ return nil, h.Errf("%s directive returned something other than an HTTP route or subroute: %#v (only handler directives can be used in routes)", result.directive, result.Value)
}
}
- return sr, nil
+ return buildSubroute(allResults, h.groupCounter, false)
}
func parseHandle(h Helper) (caddyhttp.MiddlewareHandler, error) {
- return parseSegmentAsSubroute(h)
+ return ParseSegmentAsSubroute(h)
}
func parseHandleErrors(h Helper) ([]ConfigValue, error) {
- subroute, err := parseSegmentAsSubroute(h)
+ h.Next() // consume directive name
+
+ expression := ""
+ args := h.RemainingArgs()
+ if len(args) > 0 {
+ codes := []string{}
+ for _, val := range args {
+ if len(val) != 3 {
+ return nil, h.Errf("bad status value '%s'", val)
+ }
+ if strings.HasSuffix(val, "xx") {
+ val = val[:1]
+ _, err := strconv.Atoi(val)
+ if err != nil {
+ return nil, h.Errf("bad status value '%s': %v", val, err)
+ }
+ if expression != "" {
+ expression += " || "
+ }
+ expression += fmt.Sprintf("{http.error.status_code} >= %s00 && {http.error.status_code} <= %s99", val, val)
+ continue
+ }
+ _, err := strconv.Atoi(val)
+ if err != nil {
+ return nil, h.Errf("bad status value '%s': %v", val, err)
+ }
+ codes = append(codes, val)
+ }
+ if len(codes) > 0 {
+ if expression != "" {
+ expression += " || "
+ }
+ expression += "{http.error.status_code} in [" + strings.Join(codes, ", ") + "]"
+ }
+ // Reset cursor position to get ready for ParseSegmentAsSubroute
+ h.Reset()
+ h.Next()
+ h.RemainingArgs()
+ h.Prev()
+ } else {
+ // If no arguments present reset the cursor position to get ready for ParseSegmentAsSubroute
+ h.Prev()
+ }
+
+ handler, err := ParseSegmentAsSubroute(h)
if err != nil {
return nil, err
}
+ subroute, ok := handler.(*caddyhttp.Subroute)
+ if !ok {
+ return nil, h.Errf("segment was not parsed as a subroute")
+ }
+
+ if expression != "" {
+ statusMatcher := caddy.ModuleMap{
+ "expression": h.JSON(caddyhttp.MatchExpression{Expr: expression}),
+ }
+ for i := range subroute.Routes {
+ subroute.Routes[i].MatcherSetsRaw = []caddy.ModuleMap{statusMatcher}
+ }
+ }
return []ConfigValue{
{
Class: "error_route",
@@ -450,112 +843,316 @@ func parseHandleErrors(h Helper) ([]ConfigValue, error) {
}, nil
}
+// parseInvoke parses the invoke directive.
+func parseInvoke(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ for h.Next() || h.NextBlock(0) {
+ return nil, h.ArgErr()
+ }
+
+ // remember that we're invoking this name
+ // to populate the server with these named routes
+ if h.State[namedRouteKey] == nil {
+ h.State[namedRouteKey] = map[string]struct{}{}
+ }
+ h.State[namedRouteKey].(map[string]struct{})[h.Val()] = struct{}{}
+
+ // return the handler
+ return &caddyhttp.Invoke{Name: h.Val()}, nil
+}
+
// parseLog parses the log directive. Syntax:
//
-// log {
-//     output <writer_module> ...
-//     format <encoder_module> ...
-//     level <level>
-// }
-//
+// log {
+//     hostnames <hostnames...>
+//     output <writer_module> ...
+//     core <core_module> ...
+//     format <encoder_module> ...
+//     level <level>
+// }
func parseLog(h Helper) ([]ConfigValue, error) {
+ return parseLogHelper(h, nil)
+}
+
+// parseLogHelper is used both for the parseLog directive within Server Blocks,
+// as well as the global "log" option for configuring loggers at the global
+// level. The parseAsGlobalOption parameter is used to distinguish any differing logic
+// between the two.
+func parseLogHelper(h Helper, globalLogNames map[string]struct{}) ([]ConfigValue, error) {
+ h.Next() // consume option name
+
+ // When the globalLogNames parameter is passed in, we make
+ // modifications to the parsing behavior.
+ parseAsGlobalOption := globalLogNames != nil
+
var configValues []ConfigValue
- for h.Next() {
- cl := new(caddy.CustomLog)
- for h.NextBlock(0) {
- switch h.Val() {
- case "output":
- if !h.NextArg() {
- return nil, h.ArgErr()
- }
- moduleName := h.Val()
+ // Logic below expects that a name is always present when a
+ // global option is being parsed; or an optional override
+ // is supported for access logs.
+ var logName string
- // can't use the usual caddyfile.Unmarshaler flow with the
- // standard writers because they are in the caddy package
- // (because they are the default) and implementing that
- // interface there would unfortunately create circular import
- var wo caddy.WriterOpener
- switch moduleName {
- case "stdout":
- wo = caddy.StdoutWriter{}
- case "stderr":
- wo = caddy.StderrWriter{}
- case "discard":
- wo = caddy.DiscardWriter{}
- default:
- mod, err := caddy.GetModule("caddy.logging.writers." + moduleName)
- if err != nil {
- return nil, h.Errf("getting log writer module named '%s': %v", moduleName, err)
- }
- unm, ok := mod.New().(caddyfile.Unmarshaler)
- if !ok {
- return nil, h.Errf("log writer module '%s' is not a Caddyfile unmarshaler", mod)
- }
- err = unm.UnmarshalCaddyfile(h.NewFromNextSegment())
- if err != nil {
- return nil, err
- }
- wo, ok = unm.(caddy.WriterOpener)
- if !ok {
- return nil, h.Errf("module %s is not a WriterOpener", mod)
- }
- }
- cl.WriterRaw = caddyconfig.JSONModuleObject(wo, "output", moduleName, h.warnings)
+ if parseAsGlobalOption {
+ if h.NextArg() {
+ logName = h.Val()
- case "format":
- if !h.NextArg() {
- return nil, h.ArgErr()
- }
- moduleName := h.Val()
- mod, err := caddy.GetModule("caddy.logging.encoders." + moduleName)
- if err != nil {
- return nil, h.Errf("getting log encoder module named '%s': %v", moduleName, err)
- }
- unm, ok := mod.New().(caddyfile.Unmarshaler)
- if !ok {
- return nil, h.Errf("log encoder module '%s' is not a Caddyfile unmarshaler", mod)
- }
- err = unm.UnmarshalCaddyfile(h.NewFromNextSegment())
+ // Only a single argument is supported.
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ } else {
+ // If there is no log name specified, we
+ // reference the default logger. See the
+ // setupNewDefault function in the logging
+ // package for where this is configured.
+ logName = caddy.DefaultLoggerName
+ }
+
+ // Verify this name is unused.
+ _, used := globalLogNames[logName]
+ if used {
+ return nil, h.Err("duplicate global log option for: " + logName)
+ }
+ globalLogNames[logName] = struct{}{}
+ } else {
+ // An optional override of the logger name can be provided;
+ // otherwise a default will be used, like "log0", "log1", etc.
+ if h.NextArg() {
+ logName = h.Val()
+
+ // Only a single argument is supported.
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ }
+ }
+
+ cl := new(caddy.CustomLog)
+
+ // allow overriding the current site block's hostnames for this logger;
+ // this is useful for setting up loggers per subdomain in a site block
+ // with a wildcard domain
+ customHostnames := []string{}
+ noHostname := false
+ for h.NextBlock(0) {
+ switch h.Val() {
+ case "hostnames":
+ if parseAsGlobalOption {
+ return nil, h.Err("hostnames is not allowed in the log global options")
+ }
+ args := h.RemainingArgs()
+ if len(args) == 0 {
+ return nil, h.ArgErr()
+ }
+ customHostnames = append(customHostnames, args...)
+
+ case "output":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ moduleName := h.Val()
+
+ // can't use the usual caddyfile.Unmarshaler flow with the
+ // standard writers because they are in the caddy package
+ // (because they are the default) and implementing that
+ // interface there would unfortunately create circular import
+ var wo caddy.WriterOpener
+ switch moduleName {
+ case "stdout":
+ wo = caddy.StdoutWriter{}
+ case "stderr":
+ wo = caddy.StderrWriter{}
+ case "discard":
+ wo = caddy.DiscardWriter{}
+ default:
+ modID := "caddy.logging.writers." + moduleName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
if err != nil {
return nil, err
}
- enc, ok := unm.(zapcore.Encoder)
+ var ok bool
+ wo, ok = unm.(caddy.WriterOpener)
if !ok {
- return nil, h.Errf("module %s is not a zapcore.Encoder", mod)
+ return nil, h.Errf("module %s (%T) is not a WriterOpener", modID, unm)
}
- cl.EncoderRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, h.warnings)
-
- case "level":
- if !h.NextArg() {
- return nil, h.ArgErr()
- }
- cl.Level = h.Val()
- if h.NextArg() {
- return nil, h.ArgErr()
- }
-
- default:
- return nil, h.Errf("unrecognized subdirective: %s", h.Val())
}
- }
+ cl.WriterRaw = caddyconfig.JSONModuleObject(wo, "output", moduleName, h.warnings)
- var val namedCustomLog
- if !reflect.DeepEqual(cl, new(caddy.CustomLog)) {
+ case "sampling":
+ d := h.Dispenser.NewFromNextSegment()
+ for d.NextArg() {
+ // consume any tokens on the same line, if any.
+ }
+
+ sampling := &caddy.LogSampling{}
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ subdir := d.Val()
+ switch subdir {
+ case "interval":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ interval, err := time.ParseDuration(d.Val() + "ns")
+ if err != nil {
+ return nil, d.Errf("failed to parse interval: %v", err)
+ }
+ sampling.Interval = interval
+ case "first":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ first, err := strconv.Atoi(d.Val())
+ if err != nil {
+ return nil, d.Errf("failed to parse first: %v", err)
+ }
+ sampling.First = first
+ case "thereafter":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ thereafter, err := strconv.Atoi(d.Val())
+ if err != nil {
+ return nil, d.Errf("failed to parse thereafter: %v", err)
+ }
+ sampling.Thereafter = thereafter
+ default:
+ return nil, d.Errf("unrecognized subdirective: %s", subdir)
+ }
+ }
+
+ cl.Sampling = sampling
+
+ case "core":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ moduleName := h.Val()
+ moduleID := "caddy.logging.cores." + moduleName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, moduleID)
+ if err != nil {
+ return nil, err
+ }
+ core, ok := unm.(zapcore.Core)
+ if !ok {
+ return nil, h.Errf("module %s (%T) is not a zapcore.Core", moduleID, unm)
+ }
+ cl.CoreRaw = caddyconfig.JSONModuleObject(core, "module", moduleName, h.warnings)
+
+ case "format":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ moduleName := h.Val()
+ moduleID := "caddy.logging.encoders." + moduleName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, moduleID)
+ if err != nil {
+ return nil, err
+ }
+ enc, ok := unm.(zapcore.Encoder)
+ if !ok {
+ return nil, h.Errf("module %s (%T) is not a zapcore.Encoder", moduleID, unm)
+ }
+ cl.EncoderRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, h.warnings)
+
+ case "level":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ cl.Level = h.Val()
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+
+ case "include":
+ if !parseAsGlobalOption {
+ return nil, h.Err("include is not allowed in the log directive")
+ }
+ for h.NextArg() {
+ cl.Include = append(cl.Include, h.Val())
+ }
+
+ case "exclude":
+ if !parseAsGlobalOption {
+ return nil, h.Err("exclude is not allowed in the log directive")
+ }
+ for h.NextArg() {
+ cl.Exclude = append(cl.Exclude, h.Val())
+ }
+
+ case "no_hostname":
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ noHostname = true
+
+ default:
+ return nil, h.Errf("unrecognized subdirective: %s", h.Val())
+ }
+ }
+
+ var val namedCustomLog
+ val.hostnames = customHostnames
+ val.noHostname = noHostname
+ isEmptyConfig := reflect.DeepEqual(cl, new(caddy.CustomLog))
+
+ // Skip handling of empty logging configs
+
+ if parseAsGlobalOption {
+ // Use indicated name for global log options
+ val.name = logName
+ } else {
+ if logName != "" {
+ val.name = logName
+ } else if !isEmptyConfig {
+ // Construct a log name for server log streams
logCounter, ok := h.State["logCounter"].(int)
if !ok {
logCounter = 0
}
val.name = fmt.Sprintf("log%d", logCounter)
- cl.Include = []string{"http.log.access." + val.name}
- val.log = cl
logCounter++
h.State["logCounter"] = logCounter
}
- configValues = append(configValues, ConfigValue{
- Class: "custom_log",
- Value: val,
- })
+ if val.name != "" {
+ cl.Include = []string{"http.log.access." + val.name}
+ }
}
+ if !isEmptyConfig {
+ val.log = cl
+ }
+ configValues = append(configValues, ConfigValue{
+ Class: "custom_log",
+ Value: val,
+ })
return configValues, nil
}
+
+// parseLogSkip parses the log_skip directive. Syntax:
+//
+//	log_skip [<matcher>]
+func parseLogSkip(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive name
+
+ // "skip_log" is deprecated, replaced by "log_skip"
+ if h.Val() == "skip_log" {
+ caddy.Log().Named("config.adapter.caddyfile").Warn("the 'skip_log' directive is deprecated, please use 'log_skip' instead!")
+ }
+
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ return caddyhttp.VarsMiddleware{"log_skip": true}, nil
+}
+
+// parseLogName parses the log_name directive. Syntax:
+//
+//	log_name <names...>
+func parseLogName(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive name
+ return caddyhttp.VarsMiddleware{
+ caddyhttp.AccessLoggerNameVarKey: h.RemainingArgs(),
+ }, nil
+}
diff --git a/caddyconfig/httpcaddyfile/builtins_test.go b/caddyconfig/httpcaddyfile/builtins_test.go
new file mode 100644
index 00000000..c23531f2
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/builtins_test.go
@@ -0,0 +1,369 @@
+package httpcaddyfile
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ _ "github.com/caddyserver/caddy/v2/modules/logging"
+)
+
+func TestLogDirectiveSyntax(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ output string
+ expectError bool
+ }{
+ {
+ input: `:8080 {
+ log
+ }
+ `,
+ output: `{"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{}}}}}}`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ log {
+ core mock
+ output file foo.log
+ }
+ }
+ `,
+ output: `{"logging":{"logs":{"default":{"exclude":["http.log.access.log0"]},"log0":{"writer":{"filename":"foo.log","output":"file"},"core":{"module":"mock"},"include":["http.log.access.log0"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"log0"}}}}}}`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ log {
+ format filter {
+ wrap console
+ fields {
+ request>remote_ip ip_mask {
+ ipv4 24
+ ipv6 32
+ }
+ }
+ }
+ }
+ }
+ `,
+ output: `{"logging":{"logs":{"default":{"exclude":["http.log.access.log0"]},"log0":{"encoder":{"fields":{"request\u003eremote_ip":{"filter":"ip_mask","ipv4_cidr":24,"ipv6_cidr":32}},"format":"filter","wrap":{"format":"console"}},"include":["http.log.access.log0"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"log0"}}}}}}`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ log name-override {
+ core mock
+ output file foo.log
+ }
+ }
+ `,
+ output: `{"logging":{"logs":{"default":{"exclude":["http.log.access.name-override"]},"name-override":{"writer":{"filename":"foo.log","output":"file"},"core":{"module":"mock"},"include":["http.log.access.name-override"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"name-override"}}}}}}`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ log {
+ sampling {
+ interval 2
+ first 3
+ thereafter 4
+ }
+ }
+ }
+ `,
+ output: `{"logging":{"logs":{"default":{"exclude":["http.log.access.log0"]},"log0":{"sampling":{"interval":2,"first":3,"thereafter":4},"include":["http.log.access.log0"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"log0"}}}}}}`,
+ expectError: false,
+ },
+ } {
+
+ adapter := caddyfile.Adapter{
+ ServerType: ServerType{},
+ }
+
+ out, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+ if err != nil != tc.expectError {
+ t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err)
+ continue
+ }
+
+ if string(out) != tc.output {
+ t.Errorf("Test %d error output mismatch Expected: %s, got %s", i, tc.output, out)
+ }
+ }
+}
+
+func TestRedirDirectiveSyntax(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ expectError bool
+ }{
+ {
+ input: `:8080 {
+ redir :8081
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir * :8081
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir /api/* :8081 300
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir :8081 300
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir /api/* :8081 399
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir :8081 399
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir /old.html /new.html
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir /old.html /new.html temporary
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir https://example.com{uri} permanent
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir /old.html /new.html permanent
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir /old.html /new.html html
+ }`,
+ expectError: false,
+ },
+ {
+ // this is now allowed so a Location header
+ // can be written and consumed by JS
+ // in the case of XHR requests
+ input: `:8080 {
+ redir * :8081 401
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir * :8081 402
+ }`,
+ expectError: true,
+ },
+ {
+ input: `:8080 {
+ redir * :8081 {http.reverse_proxy.status_code}
+ }`,
+ expectError: false,
+ },
+ {
+ input: `:8080 {
+ redir /old.html /new.html htlm
+ }`,
+ expectError: true,
+ },
+ {
+ input: `:8080 {
+ redir * :8081 200
+ }`,
+ expectError: true,
+ },
+ {
+ input: `:8080 {
+ redir * :8081 temp
+ }`,
+ expectError: true,
+ },
+ {
+ input: `:8080 {
+ redir * :8081 perm
+ }`,
+ expectError: true,
+ },
+ {
+ input: `:8080 {
+ redir * :8081 php
+ }`,
+ expectError: true,
+ },
+ } {
+
+ adapter := caddyfile.Adapter{
+ ServerType: ServerType{},
+ }
+
+ _, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+ if err != nil != tc.expectError {
+ t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err)
+ continue
+ }
+ }
+}
+
+func TestImportErrorLine(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ errorFunc func(err error) bool
+ }{
+ {
+ input: `(t1) {
+ abort {args[:]}
+ }
+ :8080 {
+ import t1
+ import t1 true
+ }`,
+ errorFunc: func(err error) bool {
+ return err != nil && strings.Contains(err.Error(), "Caddyfile:6 (import t1)")
+ },
+ },
+ {
+ input: `(t1) {
+ abort {args[:]}
+ }
+ :8080 {
+ import t1 true
+ }`,
+ errorFunc: func(err error) bool {
+ return err != nil && strings.Contains(err.Error(), "Caddyfile:5 (import t1)")
+ },
+ },
+ {
+ input: `
+ import testdata/import_variadic_snippet.txt
+ :8080 {
+ import t1 true
+ }`,
+ errorFunc: func(err error) bool {
+ return err == nil
+ },
+ },
+ {
+ input: `
+ import testdata/import_variadic_with_import.txt
+ :8080 {
+ import t1 true
+ import t2 true
+ }`,
+ errorFunc: func(err error) bool {
+ return err == nil
+ },
+ },
+ } {
+ adapter := caddyfile.Adapter{
+ ServerType: ServerType{},
+ }
+
+ _, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+ if !tc.errorFunc(err) {
+ t.Errorf("Test %d error expectation failed, got %s", i, err)
+ continue
+ }
+ }
+}
+
+func TestNestedImport(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ errorFunc func(err error) bool
+ }{
+ {
+ input: `(t1) {
+ respond {args[0]} {args[1]}
+ }
+
+ (t2) {
+ import t1 {args[0]} 202
+ }
+
+ :8080 {
+ handle {
+ import t2 "foobar"
+ }
+ }`,
+ errorFunc: func(err error) bool {
+ return err == nil
+ },
+ },
+ {
+ input: `(t1) {
+ respond {args[:]}
+ }
+
+ (t2) {
+ import t1 {args[0]} {args[1]}
+ }
+
+ :8080 {
+ handle {
+ import t2 "foobar" 202
+ }
+ }`,
+ errorFunc: func(err error) bool {
+ return err == nil
+ },
+ },
+ {
+ input: `(t1) {
+ respond {args[0]} {args[1]}
+ }
+
+ (t2) {
+ import t1 {args[:]}
+ }
+
+ :8080 {
+ handle {
+ import t2 "foobar" 202
+ }
+ }`,
+ errorFunc: func(err error) bool {
+ return err == nil
+ },
+ },
+ } {
+ adapter := caddyfile.Adapter{
+ ServerType: ServerType{},
+ }
+
+ _, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+ if !tc.errorFunc(err) {
+ t.Errorf("Test %d error expectation failed, got %s", i, err)
+ continue
+ }
+ }
+}
diff --git a/caddyconfig/httpcaddyfile/directives.go b/caddyconfig/httpcaddyfile/directives.go
index ff248bfe..f0687a7e 100644
--- a/caddyconfig/httpcaddyfile/directives.go
+++ b/caddyconfig/httpcaddyfile/directives.go
@@ -17,6 +17,7 @@ package httpcaddyfile
import (
"encoding/json"
"net"
+ "slices"
"sort"
"strconv"
"strings"
@@ -27,54 +28,78 @@ import (
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
-// directiveOrder specifies the order
-// to apply directives in HTTP routes.
+// defaultDirectiveOrder specifies the default order
+// to apply directives in HTTP routes. This must only
+// consist of directives that are included in Caddy's
+// standard distribution.
//
-// The root directive goes first in case rewrites or
-// redirects depend on existence of files, i.e. the
-// file matcher, which must know the root first.
+// e.g. The 'root' directive goes near the start in
+// case rewrites or redirects depend on existence of
+// files, i.e. the file matcher, which must know the
+// root first.
//
-// The header directive goes second so that headers
-// can be manipulated before doing redirects.
-var directiveOrder = []string{
+// e.g. The 'header' directive goes before 'redir' so
+// that headers can be manipulated before doing redirects.
+//
+// e.g. The 'respond' directive is near the end because it
+// writes a response and terminates the middleware chain.
+var defaultDirectiveOrder = []string{
+ "tracing",
+
+ // set variables that may be used by other directives
+ "map",
+ "vars",
+ "fs",
"root",
+ "log_append",
+ "skip_log", // TODO: deprecated, renamed to log_skip
+ "log_skip",
+ "log_name",
"header",
+ "copy_response_headers", // only in reverse_proxy's handle_response
+ "request_body",
"redir",
- "rewrite",
- // URI manipulation
+ // incoming request manipulation
+ "method",
+ "rewrite",
"uri",
"try_files",
// middleware handlers; some wrap responses
- "basicauth",
+ "basicauth", // TODO: deprecated, renamed to basic_auth
+ "basic_auth",
+ "forward_auth",
"request_header",
"encode",
+ "push",
+ "intercept",
"templates",
- // special routing directives
+ // special routing & dispatching directives
+ "invoke",
"handle",
+ "handle_path",
"route",
// handlers that typically respond to requests
+ "abort",
+ "error",
+ "copy_response", // only in reverse_proxy's handle_response
"respond",
+ "metrics",
"reverse_proxy",
"php_fastcgi",
"file_server",
+ "acme_server",
}
-// directiveIsOrdered returns true if dir is
-// a known, ordered (sorted) directive.
-func directiveIsOrdered(dir string) bool {
- for _, d := range directiveOrder {
- if d == dir {
- return true
- }
- }
- return false
-}
+// directiveOrder specifies the order to apply directives
+// in HTTP routes, after being modified by either the
+// plugins or by the user via the "order" global option.
+var directiveOrder = defaultDirectiveOrder
// RegisterDirective registers a unique directive dir with an
// associated unmarshaling (setup) function. When directive dir
@@ -97,20 +122,11 @@ func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) {
return nil, h.ArgErr()
}
- matcherSet, ok, err := h.MatcherToken()
+ matcherSet, err := h.ExtractMatcherSet()
if err != nil {
return nil, err
}
- if ok {
- // strip matcher token; we don't need to
- // use the return value here because a
- // new dispenser should have been made
- // solely for this directive's tokens,
- // with no other uses of same slice
- h.Dispenser.Delete()
- }
- h.Dispenser.Reset() // pretend this lookahead never happened
val, err := setupFunc(h)
if err != nil {
return nil, err
@@ -120,13 +136,71 @@ func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) {
})
}
+// RegisterDirectiveOrder registers the default order for a
+// directive from a plugin.
+//
+// This is useful when a plugin has a well-understood place
+// it should run in the middleware pipeline, and it allows
+// users to avoid having to define the order themselves.
+//
+// The directive dir may be placed in the position relative
+// to ('before' or 'after') a directive included in Caddy's
+// standard distribution. It cannot be relative to another
+// plugin's directive.
+//
+// EXPERIMENTAL: This API may change or be removed.
+func RegisterDirectiveOrder(dir string, position Positional, standardDir string) {
+ // check if directive was already ordered
+ if slices.Contains(directiveOrder, dir) {
+ panic("directive '" + dir + "' already ordered")
+ }
+
+ if position != Before && position != After {
+ panic("the 2nd argument must be either 'before' or 'after', got '" + position + "'")
+ }
+
+ // check if directive exists in standard distribution, since
+ // we can't allow plugins to depend on one another; we can't
+ // guarantee the order that plugins are loaded in.
+ foundStandardDir := slices.Contains(defaultDirectiveOrder, standardDir)
+ if !foundStandardDir {
+ panic("the 3rd argument '" + standardDir + "' must be a directive that exists in the standard distribution of Caddy")
+ }
+
+ // insert directive into proper position
+ newOrder := directiveOrder
+ for i, d := range newOrder {
+ if d != standardDir {
+ continue
+ }
+ if position == Before {
+ newOrder = append(newOrder[:i], append([]string{dir}, newOrder[i:]...)...)
+ } else if position == After {
+ newOrder = append(newOrder[:i+1], append([]string{dir}, newOrder[i+1:]...)...)
+ }
+ break
+ }
+ directiveOrder = newOrder
+}
+
+// RegisterGlobalOption registers a unique global option opt with
+// an associated unmarshaling (setup) function. When the global
+// option opt is encountered in a Caddyfile, setupFunc will be
+// called to unmarshal its tokens.
+func RegisterGlobalOption(opt string, setupFunc UnmarshalGlobalFunc) {
+ if _, ok := registeredGlobalOptions[opt]; ok {
+ panic("global option " + opt + " already registered")
+ }
+ registeredGlobalOptions[opt] = setupFunc
+}
+
// Helper is a type which helps setup a value from
// Caddyfile tokens.
type Helper struct {
*caddyfile.Dispenser
// State stores intermediate variables during caddyfile adaptation.
- State map[string]interface{}
- options map[string]interface{}
+ State map[string]any
+ options map[string]any
warnings *[]caddyconfig.Warning
matcherDefs map[string]caddy.ModuleMap
parentBlock caddyfile.ServerBlock
@@ -134,7 +208,7 @@ type Helper struct {
}
// Option gets the option keyed by name.
-func (h Helper) Option(name string) interface{} {
+func (h Helper) Option(name string) any {
return h.options[name]
}
@@ -154,11 +228,12 @@ func (h Helper) Caddyfiles() []string {
for file := range files {
filesSlice = append(filesSlice, file)
}
+ sort.Strings(filesSlice)
return filesSlice
}
// JSON converts val into JSON. Any errors are added to warnings.
-func (h Helper) JSON(val interface{}) json.RawMessage {
+func (h Helper) JSON(val any) json.RawMessage {
return caddyconfig.JSON(val, h.warnings)
}
@@ -184,7 +259,12 @@ func (h Helper) ExtractMatcherSet() (caddy.ModuleMap, error) {
return nil, err
}
if hasMatcher {
- h.Dispenser.Delete() // strip matcher token
+ // strip matcher token; we don't need to
+ // use the return value here because a
+ // new dispenser should have been made
+ // solely for this directive's tokens,
+ // with no other uses of same slice
+ h.Dispenser.Delete()
}
h.Dispenser.Reset() // pretend this lookahead never happened
return matcherSet, nil
@@ -192,7 +272,8 @@ func (h Helper) ExtractMatcherSet() (caddy.ModuleMap, error) {
// NewRoute returns config values relevant to creating a new HTTP route.
func (h Helper) NewRoute(matcherSet caddy.ModuleMap,
- handler caddyhttp.MiddlewareHandler) []ConfigValue {
+ handler caddyhttp.MiddlewareHandler,
+) []ConfigValue {
mod, err := caddy.GetModule(caddy.GetModuleID(handler))
if err != nil {
*h.warnings = append(*h.warnings, caddyconfig.Warning{
@@ -244,10 +325,92 @@ func (h Helper) GroupRoutes(vals []ConfigValue) {
}
}
-// NewBindAddresses returns config values relevant to adding
-// listener bind addresses to the config.
-func (h Helper) NewBindAddresses(addrs []string) []ConfigValue {
- return []ConfigValue{{Class: "bind", Value: addrs}}
+// WithDispenser returns a new instance based on d. All other Helper
+// fields are copied, so typically maps are shared with this new instance.
+func (h Helper) WithDispenser(d *caddyfile.Dispenser) Helper {
+ h.Dispenser = d
+ return h
+}
+
+// ParseSegmentAsSubroute parses the segment such that its subdirectives
+// are themselves treated as directives, from which a subroute is built
+// and returned.
+func ParseSegmentAsSubroute(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ allResults, err := parseSegmentAsConfig(h)
+ if err != nil {
+ return nil, err
+ }
+
+ return buildSubroute(allResults, h.groupCounter, true)
+}
+
+// parseSegmentAsConfig parses the segment such that its subdirectives
+// are themselves treated as directives, including named matcher definitions,
+// and the raw Config structs are returned.
+func parseSegmentAsConfig(h Helper) ([]ConfigValue, error) {
+ var allResults []ConfigValue
+
+ for h.Next() {
+ // don't allow non-matcher args on the first line
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+
+ // slice the linear list of tokens into top-level segments
+ var segments []caddyfile.Segment
+ for nesting := h.Nesting(); h.NextBlock(nesting); {
+ segments = append(segments, h.NextSegment())
+ }
+
+ // copy existing matcher definitions so we can augment
+ // new ones that are defined only in this scope
+ matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs))
+ for key, val := range h.matcherDefs {
+ matcherDefs[key] = val
+ }
+
+ // find and extract any embedded matcher definitions in this scope
+ for i := 0; i < len(segments); i++ {
+ seg := segments[i]
+ if strings.HasPrefix(seg.Directive(), matcherPrefix) {
+ // parse, then add the matcher to matcherDefs
+ err := parseMatcherDefinitions(caddyfile.NewDispenser(seg), matcherDefs)
+ if err != nil {
+ return nil, err
+ }
+ // remove the matcher segment (consumed), then step back the loop
+ segments = append(segments[:i], segments[i+1:]...)
+ i--
+ }
+ }
+
+ // with matchers ready to go, evaluate each directive's segment
+ for _, seg := range segments {
+ dir := seg.Directive()
+ dirFunc, ok := registeredDirectives[dir]
+ if !ok {
+ return nil, h.Errf("unrecognized directive: %s - are you sure your Caddyfile structure (nesting and braces) is correct?", dir)
+ }
+
+ subHelper := h
+ subHelper.Dispenser = caddyfile.NewDispenser(seg)
+ subHelper.matcherDefs = matcherDefs
+
+ results, err := dirFunc(subHelper)
+ if err != nil {
+ return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err)
+ }
+
+ dir = normalizeDirectiveName(dir)
+
+ for _, result := range results {
+ result.directive = dir
+ allResults = append(allResults, result)
+ }
+ }
+ }
+
+ return allResults, nil
}
// ConfigValue represents a value to be added to the final
@@ -265,7 +428,7 @@ type ConfigValue struct {
// The value to be used when building the config.
// Generally its type is associated with the
// name of the Class.
- Value interface{}
+ Value any
directive string
}
@@ -276,120 +439,86 @@ func sortRoutes(routes []ConfigValue) {
dirPositions[dir] = i
}
- // while we are sorting, we will need to decode a route's path matcher
- // in order to sub-sort by path length; we can amortize this operation
- // for efficiency by storing the decoded matchers in a slice
- decodedMatchers := make([]caddyhttp.MatchPath, len(routes))
-
sort.SliceStable(routes, func(i, j int) bool {
+ // if the directives are different, just use the established directive order
iDir, jDir := routes[i].directive, routes[j].directive
- if iDir == jDir {
- // directives are the same; sub-sort by path matcher length
- // if there's only one matcher set and one path (common case)
- iRoute, ok := routes[i].Value.(caddyhttp.Route)
- if !ok {
- return false
- }
- jRoute, ok := routes[j].Value.(caddyhttp.Route)
- if !ok {
- return false
- }
-
- // use already-decoded matcher, or decode if it's the first time seeing it
- iPM, jPM := decodedMatchers[i], decodedMatchers[j]
- if iPM == nil && len(iRoute.MatcherSetsRaw) == 1 {
- var pathMatcher caddyhttp.MatchPath
- _ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &pathMatcher)
- decodedMatchers[i] = pathMatcher
- iPM = pathMatcher
- }
- if jPM == nil && len(jRoute.MatcherSetsRaw) == 1 {
- var pathMatcher caddyhttp.MatchPath
- _ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &pathMatcher)
- decodedMatchers[j] = pathMatcher
- jPM = pathMatcher
- }
-
- // sort by longer path (more specific) first; missing
- // path matchers are treated as zero-length paths
- var iPathLen, jPathLen int
- if iPM != nil {
- iPathLen = len(iPM[0])
- }
- if jPM != nil {
- jPathLen = len(jPM[0])
- }
- return iPathLen > jPathLen
+ if iDir != jDir {
+ return dirPositions[iDir] < dirPositions[jDir]
}
- return dirPositions[iDir] < dirPositions[jDir]
- })
-}
-
-// parseSegmentAsSubroute parses the segment such that its subdirectives
-// are themselves treated as directives, from which a subroute is built
-// and returned.
-func parseSegmentAsSubroute(h Helper) (caddyhttp.MiddlewareHandler, error) {
- var allResults []ConfigValue
-
- for h.Next() {
- // slice the linear list of tokens into top-level segments
- var segments []caddyfile.Segment
- for nesting := h.Nesting(); h.NextBlock(nesting); {
- segments = append(segments, h.NextSegment())
+ // directives are the same; sub-sort by path matcher length if there's
+ // only one matcher set and one path (this is a very common case and
+ // usually -- but not always -- helpful/expected, oh well; user can
+ // always take manual control of order using handler or route blocks)
+ iRoute, ok := routes[i].Value.(caddyhttp.Route)
+ if !ok {
+ return false
+ }
+ jRoute, ok := routes[j].Value.(caddyhttp.Route)
+ if !ok {
+ return false
}
- // copy existing matcher definitions so we can augment
- // new ones that are defined only in this scope
- matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs))
- for key, val := range h.matcherDefs {
- matcherDefs[key] = val
+ // decode the path matchers if there is just one matcher set
+ var iPM, jPM caddyhttp.MatchPath
+ if len(iRoute.MatcherSetsRaw) == 1 {
+ _ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &iPM)
+ }
+ if len(jRoute.MatcherSetsRaw) == 1 {
+ _ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &jPM)
}
- // find and extract any embedded matcher definitions in this scope
- for i, seg := range segments {
- if strings.HasPrefix(seg.Directive(), matcherPrefix) {
- err := parseMatcherDefinitions(caddyfile.NewDispenser(seg), matcherDefs)
- if err != nil {
- return nil, err
+ // if there is only one path in the path matcher, sort by longer path
+ // (more specific) first; missing path matchers or multi-matchers are
+ // treated as zero-length paths
+ var iPathLen, jPathLen int
+ if len(iPM) == 1 {
+ iPathLen = len(iPM[0])
+ }
+ if len(jPM) == 1 {
+ jPathLen = len(jPM[0])
+ }
+
+ sortByPath := func() bool {
+ // we can only confidently compare path lengths if both
+ // directives have a single path to match (issue #5037)
+ if iPathLen > 0 && jPathLen > 0 {
+ // if both paths are the same except for a trailing wildcard,
+ // sort by the shorter path first (which is more specific)
+ if strings.TrimSuffix(iPM[0], "*") == strings.TrimSuffix(jPM[0], "*") {
+ return iPathLen < jPathLen
}
- segments = append(segments[:i], segments[i+1:]...)
+
+ // sort most-specific (longest) path first
+ return iPathLen > jPathLen
}
+
+ // if both directives don't have a single path to compare,
+ // sort whichever one has a matcher first; if both have
+ // a matcher, sort equally (stable sort preserves order)
+ return len(iRoute.MatcherSetsRaw) > 0 && len(jRoute.MatcherSetsRaw) == 0
+ }()
+
+ // some directives involve setting values which can overwrite
+ // each other, so it makes most sense to reverse the order so
+ // that the least-specific matcher is first, allowing the last
+ // matching one to win
+ if iDir == "vars" {
+ return !sortByPath
}
- // with matchers ready to go, evaluate each directive's segment
- for _, seg := range segments {
- dir := seg.Directive()
- dirFunc, ok := registeredDirectives[dir]
- if !ok {
- return nil, h.Errf("unrecognized directive: %s", dir)
- }
-
- subHelper := h
- subHelper.Dispenser = caddyfile.NewDispenser(seg)
- subHelper.matcherDefs = matcherDefs
-
- results, err := dirFunc(subHelper)
- if err != nil {
- return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err)
- }
- for _, result := range results {
- result.directive = dir
- allResults = append(allResults, result)
- }
- }
- }
-
- return buildSubroute(allResults, h.groupCounter)
+ // everything else is most-specific matcher first
+ return sortByPath
+ })
}
// serverBlock pairs a Caddyfile server block with
// a "pile" of config values, keyed by class name,
// as well as its parsed keys for convenience.
type serverBlock struct {
- block caddyfile.ServerBlock
- pile map[string][]ConfigValue // config values obtained from directives
- keys []Address
+ block caddyfile.ServerBlock
+ pile map[string][]ConfigValue // config values obtained from directives
+ parsedKeys []Address
}
// hostsFromKeys returns a list of all the non-empty hostnames found in
@@ -406,7 +535,7 @@ type serverBlock struct {
func (sb serverBlock) hostsFromKeys(loggerMode bool) []string {
// ensure each entry in our list is unique
hostMap := make(map[string]struct{})
- for _, addr := range sb.keys {
+ for _, addr := range sb.parsedKeys {
if addr.Host == "" {
if !loggerMode {
// server block contains a key like ":443", i.e. the host portion
@@ -435,17 +564,53 @@ func (sb serverBlock) hostsFromKeys(loggerMode bool) []string {
return sblockHosts
}
+func (sb serverBlock) hostsFromKeysNotHTTP(httpPort string) []string {
+ // ensure each entry in our list is unique
+ hostMap := make(map[string]struct{})
+ for _, addr := range sb.parsedKeys {
+ if addr.Host == "" {
+ continue
+ }
+ if addr.Scheme != "http" && addr.Port != httpPort {
+ hostMap[addr.Host] = struct{}{}
+ }
+ }
+
+ // convert map to slice
+ sblockHosts := make([]string, 0, len(hostMap))
+ for host := range hostMap {
+ sblockHosts = append(sblockHosts, host)
+ }
+
+ return sblockHosts
+}
+
// hasHostCatchAllKey returns true if sb has a key that
// omits a host portion, i.e. it "catches all" hosts.
func (sb serverBlock) hasHostCatchAllKey() bool {
- for _, addr := range sb.keys {
- if addr.Host == "" {
- return true
- }
- }
- return false
+ return slices.ContainsFunc(sb.parsedKeys, func(addr Address) bool {
+ return addr.Host == ""
+ })
}
+// isAllHTTP returns true if all sb keys explicitly specify
+// the http:// scheme
+func (sb serverBlock) isAllHTTP() bool {
+ return !slices.ContainsFunc(sb.parsedKeys, func(addr Address) bool {
+ return addr.Scheme != "http"
+ })
+}
+
+// Positional are the supported modes for ordering directives.
+type Positional string
+
+const (
+ Before Positional = "before"
+ After Positional = "after"
+ First Positional = "first"
+ Last Positional = "last"
+)
+
type (
// UnmarshalFunc is a function which can unmarshal Caddyfile
// tokens into zero or more config values using a Helper type.
@@ -462,6 +627,14 @@ type (
// for you. These are passed to a call to
// RegisterHandlerDirective.
UnmarshalHandlerFunc func(h Helper) (caddyhttp.MiddlewareHandler, error)
+
+ // UnmarshalGlobalFunc is a function which can unmarshal Caddyfile
+ // tokens from a global option. It is passed the tokens to parse and
+ // existing value from the previous instance of this global option
+ // (if any). It returns the value to associate with this global option.
+ UnmarshalGlobalFunc func(d *caddyfile.Dispenser, existingVal any) (any, error)
)
var registeredDirectives = make(map[string]UnmarshalFunc)
+
+var registeredGlobalOptions = make(map[string]UnmarshalGlobalFunc)
diff --git a/caddyconfig/httpcaddyfile/directives_test.go b/caddyconfig/httpcaddyfile/directives_test.go
index e46a6d2a..2b4d3e6c 100644
--- a/caddyconfig/httpcaddyfile/directives_test.go
+++ b/caddyconfig/httpcaddyfile/directives_test.go
@@ -31,20 +31,23 @@ func TestHostsFromKeys(t *testing.T) {
[]Address{
{Original: ":2015", Port: "2015"},
},
- []string{}, []string{},
+ []string{},
+ []string{},
},
{
[]Address{
{Original: ":443", Port: "443"},
},
- []string{}, []string{},
+ []string{},
+ []string{},
},
{
[]Address{
{Original: "foo", Host: "foo"},
{Original: ":2015", Port: "2015"},
},
- []string{}, []string{"foo"},
+ []string{},
+ []string{"foo"},
},
{
[]Address{
@@ -75,7 +78,7 @@ func TestHostsFromKeys(t *testing.T) {
[]string{"example.com:2015"},
},
} {
- sb := serverBlock{keys: tc.keys}
+ sb := serverBlock{parsedKeys: tc.keys}
// test in normal mode
actual := sb.hostsFromKeys(false)
diff --git a/caddyconfig/httpcaddyfile/httptype.go b/caddyconfig/httpcaddyfile/httptype.go
index eb067bcd..c169b92a 100644
--- a/caddyconfig/httpcaddyfile/httptype.go
+++ b/caddyconfig/httpcaddyfile/httptype.go
@@ -15,17 +15,23 @@
package httpcaddyfile
import (
+ "cmp"
"encoding/json"
"fmt"
+ "net"
"reflect"
+ "slices"
"sort"
"strconv"
"strings"
+ "go.uber.org/zap"
+
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddypki"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
@@ -33,30 +39,39 @@ func init() {
caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}})
}
-// ServerType can set up a config from an HTTP Caddyfile.
-type ServerType struct {
+// App represents the configuration for a non-standard
+// Caddy app module (e.g. third-party plugin) which was
+// parsed from a global options block.
+type App struct {
+ // The JSON key for the app being configured
+ Name string
+
+ // The raw app config as JSON
+ Value json.RawMessage
}
+// ServerType can set up a config from an HTTP Caddyfile.
+type ServerType struct{}
+
// Setup makes a config from the tokens.
-func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
- options map[string]interface{}) (*caddy.Config, []caddyconfig.Warning, error) {
+func (st ServerType) Setup(
+ inputServerBlocks []caddyfile.ServerBlock,
+ options map[string]any,
+) (*caddy.Config, []caddyconfig.Warning, error) {
var warnings []caddyconfig.Warning
gc := counter{new(int)}
- state := make(map[string]interface{})
+ state := make(map[string]any)
- // load all the server blocks and associate them with a "pile"
- // of config values; also prohibit duplicate keys because they
- // can make a config confusing if more than one server block is
- // chosen to handle a request - we actually will make each
- // server block's route terminal so that only one will run
- sbKeys := make(map[string]struct{})
+ // load all the server blocks and associate them with a "pile" of config values
originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks))
- for i, sblock := range inputServerBlocks {
+ for _, sblock := range inputServerBlocks {
for j, k := range sblock.Keys {
- if _, ok := sbKeys[k]; ok {
- return nil, warnings, fmt.Errorf("duplicate site address not allowed: '%s' in %v (site block %d, key %d)", k, sblock.Keys, i, j)
+ if j == 0 && strings.HasPrefix(k.Text, "@") {
+ return nil, warnings, fmt.Errorf("%s:%d: cannot define a matcher outside of a site block: '%s'", k.File, k.Line, k.Text)
+ }
+ if _, ok := registeredDirectives[k.Text]; ok {
+ return nil, warnings, fmt.Errorf("%s:%d: parsed '%s' as a site address, but it is a known directive; directives must appear in a site block", k.File, k.Line, k.Text)
}
- sbKeys[k] = struct{}{}
}
originalServerBlocks = append(originalServerBlocks, serverBlock{
block: sblock,
@@ -71,35 +86,18 @@ func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
return nil, warnings, err
}
+ // this will replace both static and user-defined placeholder shorthands
+ // with actual identifiers used by Caddy
+ replacer := NewShorthandReplacer()
+
+ originalServerBlocks, err = st.extractNamedRoutes(originalServerBlocks, options, &warnings, replacer)
+ if err != nil {
+ return nil, warnings, err
+ }
+
for _, sb := range originalServerBlocks {
- // replace shorthand placeholders (which are
- // convenient when writing a Caddyfile) with
- // their actual placeholder identifiers or
- // variable names
- replacer := strings.NewReplacer(
- "{dir}", "{http.request.uri.path.dir}",
- "{file}", "{http.request.uri.path.file}",
- "{host}", "{http.request.host}",
- "{hostport}", "{http.request.hostport}",
- "{method}", "{http.request.method}",
- "{path}", "{http.request.uri.path}",
- "{query}", "{http.request.uri.query}",
- "{remote}", "{http.request.remote}",
- "{remote_host}", "{http.request.remote.host}",
- "{remote_port}", "{http.request.remote.port}",
- "{scheme}", "{http.request.scheme}",
- "{uri}", "{http.request.uri}",
- "{tls_cipher}", "{http.request.tls.cipher_suite}",
- "{tls_version}", "{http.request.tls.version}",
- "{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
- "{tls_client_issuer}", "{http.request.tls.client.issuer}",
- "{tls_client_serial}", "{http.request.tls.client.serial}",
- "{tls_client_subject}", "{http.request.tls.client.subject}",
- )
- for _, segment := range sb.block.Segments {
- for i := 0; i < len(segment); i++ {
- segment[i].Text = replacer.Replace(segment[i].Text)
- }
+ for i := range sb.block.Segments {
+ replacer.ApplyToSegment(&sb.block.Segments[i])
}
if len(sb.block.Keys) == 0 {
@@ -130,7 +128,11 @@ func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
dirFunc, ok := registeredDirectives[dir]
if !ok {
tkn := segment[0]
- return nil, warnings, fmt.Errorf("%s:%d: unrecognized directive: %s", tkn.File, tkn.Line, dir)
+ message := "%s:%d: unrecognized directive: %s"
+ if !sb.block.HasBraces {
+ message += "\nDid you mean to define a second site? If so, you must use curly braces around each site to separate their configurations."
+ }
+ return nil, warnings, fmt.Errorf(message, tkn.File, tkn.Line, dir)
}
h := Helper{
@@ -147,15 +149,30 @@ func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
if err != nil {
return nil, warnings, fmt.Errorf("parsing caddyfile tokens for '%s': %v", dir, err)
}
+
+ dir = normalizeDirectiveName(dir)
+
for _, result := range results {
result.directive = dir
sb.pile[result.Class] = append(sb.pile[result.Class], result)
}
+
+ // specially handle named routes that were pulled out from
+ // the invoke directive, which could be nested anywhere within
+ // some subroutes in this directive; we add them to the pile
+ // for this server block
+ if state[namedRouteKey] != nil {
+ for name := range state[namedRouteKey].(map[string]struct{}) {
+ result := ConfigValue{Class: namedRouteKey, Value: name}
+ sb.pile[result.Class] = append(sb.pile[result.Class], result)
+ }
+ state[namedRouteKey] = nil
+ }
}
}
// map
- sbmap, err := st.mapAddressToServerBlocks(originalServerBlocks, options)
+ sbmap, err := st.mapAddressToProtocolToServerBlocks(originalServerBlocks, options)
if err != nil {
return nil, warnings, err
}
@@ -170,11 +187,26 @@ func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
return nil, warnings, err
}
+ // hoist the metrics config from per-server to global
+ metrics, _ := options["metrics"].(*caddyhttp.Metrics)
+ for _, s := range servers {
+ if s.Metrics != nil {
+ metrics = cmp.Or[*caddyhttp.Metrics](metrics, &caddyhttp.Metrics{})
+ metrics = &caddyhttp.Metrics{
+ PerHost: metrics.PerHost || s.Metrics.PerHost,
+ }
+ s.Metrics = nil // we don't need it anymore
+ }
+ }
+
// now that each server is configured, make the HTTP app
httpApp := caddyhttp.App{
- HTTPPort: tryInt(options["http_port"], &warnings),
- HTTPSPort: tryInt(options["https_port"], &warnings),
- Servers: servers,
+ HTTPPort: tryInt(options["http_port"], &warnings),
+ HTTPSPort: tryInt(options["https_port"], &warnings),
+ GracePeriod: tryDuration(options["grace_period"], &warnings),
+ ShutdownDelay: tryDuration(options["shutdown_delay"], &warnings),
+ Metrics: metrics,
+ Servers: servers,
}
// then make the TLS app
@@ -183,31 +215,32 @@ func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
return nil, warnings, err
}
- // if experimental HTTP/3 is enabled, enable it on each server
- if enableH3, ok := options["experimental_http3"].(bool); ok && enableH3 {
- for _, srv := range httpApp.Servers {
- srv.ExperimentalHTTP3 = true
- }
+ // then make the PKI app
+ pkiApp, warnings, err := st.buildPKIApp(pairings, options, warnings)
+ if err != nil {
+ return nil, warnings, err
}
// extract any custom logs, and enforce configured levels
var customLogs []namedCustomLog
var hasDefaultLog bool
- for _, p := range pairings {
- for _, sb := range p.serverBlocks {
- for _, clVal := range sb.pile["custom_log"] {
- ncl := clVal.Value.(namedCustomLog)
- if ncl.name == "" {
- continue
- }
- if ncl.name == "default" {
- hasDefaultLog = true
- }
- if _, ok := options["debug"]; ok && ncl.log.Level == "" {
- ncl.log.Level = "DEBUG"
- }
- customLogs = append(customLogs, ncl)
- }
+ addCustomLog := func(ncl namedCustomLog) {
+ if ncl.name == "" {
+ return
+ }
+ if ncl.name == caddy.DefaultLoggerName {
+ hasDefaultLog = true
+ }
+ if _, ok := options["debug"]; ok && ncl.log != nil && ncl.log.Level == "" {
+ ncl.log.Level = zap.DebugLevel.CapitalString()
+ }
+ customLogs = append(customLogs, ncl)
+ }
+
+ // Apply global log options, when set
+ if options["log"] != nil {
+ for _, logValue := range options["log"].([]ConfigValue) {
+ addCustomLog(logValue.Value.(namedCustomLog))
}
}
@@ -216,55 +249,114 @@ func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
// configure it with any applicable options
if _, ok := options["debug"]; ok {
customLogs = append(customLogs, namedCustomLog{
- name: "default",
- log: &caddy.CustomLog{Level: "DEBUG"},
+ name: caddy.DefaultLoggerName,
+ log: &caddy.CustomLog{
+ BaseLog: caddy.BaseLog{Level: zap.DebugLevel.CapitalString()},
+ },
})
}
}
+ // Apply server-specific log options
+ for _, p := range pairings {
+ for _, sb := range p.serverBlocks {
+ for _, clVal := range sb.pile["custom_log"] {
+ addCustomLog(clVal.Value.(namedCustomLog))
+ }
+ }
+ }
+
// annnd the top-level config, then we're done!
cfg := &caddy.Config{AppsRaw: make(caddy.ModuleMap)}
+
+ // loop through the configured options, and if any of
+ // them are an httpcaddyfile App, then we insert them
+ // into the config as raw Caddy apps
+ for _, opt := range options {
+ if app, ok := opt.(App); ok {
+ cfg.AppsRaw[app.Name] = app.Value
+ }
+ }
+
+ // insert the standard Caddy apps into the config
if len(httpApp.Servers) > 0 {
cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings)
}
if !reflect.DeepEqual(tlsApp, &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}) {
cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings)
}
+ if !reflect.DeepEqual(pkiApp, &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}) {
+ cfg.AppsRaw["pki"] = caddyconfig.JSON(pkiApp, &warnings)
+ }
+ if filesystems, ok := options["filesystem"].(caddy.Module); ok {
+ cfg.AppsRaw["caddy.filesystems"] = caddyconfig.JSON(
+ filesystems,
+ &warnings)
+ }
+
if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok {
cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr,
"module",
storageCvtr.(caddy.Module).CaddyModule().ID.Name(),
&warnings)
}
- if adminConfig, ok := options["admin"].(string); ok && adminConfig != "" {
- if adminConfig == "off" {
- cfg.Admin = &caddy.AdminConfig{Disabled: true}
- } else {
- cfg.Admin = &caddy.AdminConfig{Listen: adminConfig}
- }
+ if adminConfig, ok := options["admin"].(*caddy.AdminConfig); ok && adminConfig != nil {
+ cfg.Admin = adminConfig
}
+ if pc, ok := options["persist_config"].(string); ok && pc == "off" {
+ if cfg.Admin == nil {
+ cfg.Admin = new(caddy.AdminConfig)
+ }
+ if cfg.Admin.Config == nil {
+ cfg.Admin.Config = new(caddy.ConfigSettings)
+ }
+ cfg.Admin.Config.Persist = new(bool)
+ }
+
if len(customLogs) > 0 {
if cfg.Logging == nil {
cfg.Logging = &caddy.Logging{
Logs: make(map[string]*caddy.CustomLog),
}
}
+
+ // Add the default log first if defined, so that it doesn't
+ // accidentally get re-created below due to the Exclude logic
for _, ncl := range customLogs {
+ if ncl.name == caddy.DefaultLoggerName && ncl.log != nil {
+ cfg.Logging.Logs[caddy.DefaultLoggerName] = ncl.log
+ break
+ }
+ }
+
+ // Add the rest of the custom logs
+ for _, ncl := range customLogs {
+ if ncl.log == nil || ncl.name == caddy.DefaultLoggerName {
+ continue
+ }
if ncl.name != "" {
cfg.Logging.Logs[ncl.name] = ncl.log
}
// most users seem to prefer not writing access logs
// to the default log when they are directed to a
// file or have any other special customization
- if len(ncl.log.Include) > 0 {
- defaultLog, ok := cfg.Logging.Logs["default"]
+ if ncl.name != caddy.DefaultLoggerName && len(ncl.log.Include) > 0 {
+ defaultLog, ok := cfg.Logging.Logs[caddy.DefaultLoggerName]
if !ok {
defaultLog = new(caddy.CustomLog)
- cfg.Logging.Logs["default"] = defaultLog
+ cfg.Logging.Logs[caddy.DefaultLoggerName] = defaultLog
}
defaultLog.Exclude = append(defaultLog.Exclude, ncl.log.Include...)
+
+ // avoid duplicates by sorting + compacting
+ sort.Strings(defaultLog.Exclude)
+ defaultLog.Exclude = slices.Compact[[]string, string](defaultLog.Exclude)
}
}
+ // we may have not actually added anything, so remove if empty
+ if len(cfg.Logging.Logs) == 0 {
+ cfg.Logging = nil
+ }
}
return cfg, warnings, nil
@@ -274,65 +366,179 @@ func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
// which is expected to be the first server block if it has zero
// keys. It returns the updated list of server blocks with the
// global options block removed, and updates options accordingly.
-func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]interface{}) ([]serverBlock, error) {
+func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]any) ([]serverBlock, error) {
if len(serverBlocks) == 0 || len(serverBlocks[0].block.Keys) > 0 {
return serverBlocks, nil
}
for _, segment := range serverBlocks[0].block.Segments {
- dir := segment.Directive()
- var val interface{}
+ opt := segment.Directive()
+ var val any
var err error
disp := caddyfile.NewDispenser(segment)
- switch dir {
- case "debug":
- val = true
- case "http_port":
- val, err = parseOptHTTPPort(disp)
- case "https_port":
- val, err = parseOptHTTPSPort(disp)
- case "default_sni":
- val, err = parseOptSingleString(disp)
- case "order":
- val, err = parseOptOrder(disp)
- case "experimental_http3":
- val, err = parseOptExperimentalHTTP3(disp)
- case "storage":
- val, err = parseOptStorage(disp)
- case "acme_ca", "acme_dns", "acme_ca_root":
- val, err = parseOptSingleString(disp)
- case "email":
- val, err = parseOptSingleString(disp)
- case "admin":
- val, err = parseOptAdmin(disp)
- case "on_demand_tls":
- val, err = parseOptOnDemand(disp)
- case "local_certs":
- val = true
- case "key_type":
- val, err = parseOptSingleString(disp)
- default:
- return nil, fmt.Errorf("unrecognized parameter name: %s", dir)
+
+ optFunc, ok := registeredGlobalOptions[opt]
+ if !ok {
+ tkn := segment[0]
+ return nil, fmt.Errorf("%s:%d: unrecognized global option: %s", tkn.File, tkn.Line, opt)
}
+
+ val, err = optFunc(disp, options[opt])
if err != nil {
- return nil, fmt.Errorf("%s: %v", dir, err)
+ return nil, fmt.Errorf("parsing caddyfile tokens for '%s': %v", opt, err)
+ }
+
+ // As a special case, fold multiple "servers" options together
+ // in an array instead of overwriting a possible existing value
+ if opt == "servers" {
+ existingOpts, ok := options[opt].([]serverOptions)
+ if !ok {
+ existingOpts = []serverOptions{}
+ }
+ serverOpts, ok := val.(serverOptions)
+ if !ok {
+ return nil, fmt.Errorf("unexpected type from 'servers' global options: %T", val)
+ }
+ options[opt] = append(existingOpts, serverOpts)
+ continue
+ }
+ // Additionally, fold multiple "log" options together into an
+ // array so that multiple loggers can be configured.
+ if opt == "log" {
+ existingOpts, ok := options[opt].([]ConfigValue)
+ if !ok {
+ existingOpts = []ConfigValue{}
+ }
+ logOpts, ok := val.([]ConfigValue)
+ if !ok {
+ return nil, fmt.Errorf("unexpected type from 'log' global options: %T", val)
+ }
+ options[opt] = append(existingOpts, logOpts...)
+ continue
+ }
+ // Also fold multiple "default_bind" options together into an
+ // array so that server blocks can have multiple binds by default.
+ if opt == "default_bind" {
+ existingOpts, ok := options[opt].([]ConfigValue)
+ if !ok {
+ existingOpts = []ConfigValue{}
+ }
+ defaultBindOpts, ok := val.([]ConfigValue)
+ if !ok {
+ return nil, fmt.Errorf("unexpected type from 'default_bind' global options: %T", val)
+ }
+ options[opt] = append(existingOpts, defaultBindOpts...)
+ continue
+ }
+
+ options[opt] = val
+ }
+
+ // If we got "servers" options, we'll sort them by their listener address
+ if serverOpts, ok := options["servers"].([]serverOptions); ok {
+ sort.Slice(serverOpts, func(i, j int) bool {
+ return len(serverOpts[i].ListenerAddress) > len(serverOpts[j].ListenerAddress)
+ })
+
+	// Reject the config if there are duplicate listener addresses
+ seen := make(map[string]bool)
+ for _, entry := range serverOpts {
+ if _, alreadySeen := seen[entry.ListenerAddress]; alreadySeen {
+ return nil, fmt.Errorf("cannot have 'servers' global options with duplicate listener addresses: %s", entry.ListenerAddress)
+ }
+ seen[entry.ListenerAddress] = true
}
- options[dir] = val
}
return serverBlocks[1:], nil
}
+// extractNamedRoutes pulls out any named route server blocks
+// so they don't get parsed as sites, and stores them in options
+// for later.
+func (ServerType) extractNamedRoutes(
+ serverBlocks []serverBlock,
+ options map[string]any,
+ warnings *[]caddyconfig.Warning,
+ replacer ShorthandReplacer,
+) ([]serverBlock, error) {
+ namedRoutes := map[string]*caddyhttp.Route{}
+
+ gc := counter{new(int)}
+ state := make(map[string]any)
+
+ // copy the server blocks so we can
+ // splice out the named route ones
+ filtered := append([]serverBlock{}, serverBlocks...)
+ index := -1
+
+ for _, sb := range serverBlocks {
+ index++
+ if !sb.block.IsNamedRoute {
+ continue
+ }
+
+ // splice out this block, because we know it's not a real server
+ filtered = append(filtered[:index], filtered[index+1:]...)
+ index--
+
+ if len(sb.block.Segments) == 0 {
+ continue
+ }
+
+ wholeSegment := caddyfile.Segment{}
+ for i := range sb.block.Segments {
+ // replace user-defined placeholder shorthands in extracted named routes
+ replacer.ApplyToSegment(&sb.block.Segments[i])
+
+ // zip up all the segments since ParseSegmentAsSubroute
+ // was designed to take a directive+
+ wholeSegment = append(wholeSegment, sb.block.Segments[i]...)
+ }
+
+ h := Helper{
+ Dispenser: caddyfile.NewDispenser(wholeSegment),
+ options: options,
+ warnings: warnings,
+ matcherDefs: nil,
+ parentBlock: sb.block,
+ groupCounter: gc,
+ State: state,
+ }
+
+ handler, err := ParseSegmentAsSubroute(h)
+ if err != nil {
+ return nil, err
+ }
+ subroute := handler.(*caddyhttp.Subroute)
+ route := caddyhttp.Route{}
+
+ if len(subroute.Routes) == 1 && len(subroute.Routes[0].MatcherSetsRaw) == 0 {
+ // if there's only one route with no matcher, then we can simplify
+ route.HandlersRaw = append(route.HandlersRaw, subroute.Routes[0].HandlersRaw[0])
+ } else {
+ // otherwise we need the whole subroute
+ route.HandlersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", subroute.CaddyModule().ID.Name(), h.warnings)}
+ }
+
+ namedRoutes[sb.block.GetKeysText()[0]] = &route
+ }
+ options["named_routes"] = namedRoutes
+
+ return filtered, nil
+}
+
// serversFromPairings creates the servers for each pairing of addresses
// to server blocks. Each pairing is essentially a server definition.
func (st *ServerType) serversFromPairings(
pairings []sbAddrAssociation,
- options map[string]interface{},
+ options map[string]any,
warnings *[]caddyconfig.Warning,
groupCounter counter,
) (map[string]*caddyhttp.Server, error) {
servers := make(map[string]*caddyhttp.Server)
defaultSNI := tryString(options["default_sni"], warnings)
+ fallbackSNI := tryString(options["fallback_sni"], warnings)
httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
if hp, ok := options["http_port"].(int); ok {
@@ -342,10 +548,109 @@ func (st *ServerType) serversFromPairings(
if hsp, ok := options["https_port"].(int); ok {
httpsPort = strconv.Itoa(hsp)
}
+ autoHTTPS := []string{}
+ if ah, ok := options["auto_https"].([]string); ok {
+ autoHTTPS = ah
+ }
for i, p := range pairings {
+ // detect ambiguous site definitions: server blocks which
+ // have the same host bound to the same interface (listener
+ // address), otherwise their routes will improperly be added
+ // to the same server (see issue #4635)
+ for j, sblock1 := range p.serverBlocks {
+ for _, key := range sblock1.block.GetKeysText() {
+ for k, sblock2 := range p.serverBlocks {
+ if k == j {
+ continue
+ }
+ if slices.Contains(sblock2.block.GetKeysText(), key) {
+ return nil, fmt.Errorf("ambiguous site definition: %s", key)
+ }
+ }
+ }
+ }
+
+ var (
+ addresses []string
+ protocols [][]string
+ )
+
+ for _, addressWithProtocols := range p.addressesWithProtocols {
+ addresses = append(addresses, addressWithProtocols.address)
+ protocols = append(protocols, addressWithProtocols.protocols)
+ }
+
srv := &caddyhttp.Server{
- Listen: p.addresses,
+ Listen: addresses,
+ ListenProtocols: protocols,
+ }
+
+ // remove srv.ListenProtocols[j] if it only contains the default protocols
+ for j, lnProtocols := range srv.ListenProtocols {
+ srv.ListenProtocols[j] = nil
+ for _, lnProtocol := range lnProtocols {
+ if lnProtocol != "" {
+ srv.ListenProtocols[j] = lnProtocols
+ break
+ }
+ }
+ }
+
+ // remove srv.ListenProtocols if it only contains the default protocols for all listen addresses
+ listenProtocols := srv.ListenProtocols
+ srv.ListenProtocols = nil
+ for _, lnProtocols := range listenProtocols {
+ if lnProtocols != nil {
+ srv.ListenProtocols = listenProtocols
+ break
+ }
+ }
+
+ // handle the auto_https global option
+ for _, val := range autoHTTPS {
+ switch val {
+ case "off":
+ if srv.AutoHTTPS == nil {
+ srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+ }
+ srv.AutoHTTPS.Disabled = true
+
+ case "disable_redirects":
+ if srv.AutoHTTPS == nil {
+ srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+ }
+ srv.AutoHTTPS.DisableRedir = true
+
+ case "disable_certs":
+ if srv.AutoHTTPS == nil {
+ srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+ }
+ srv.AutoHTTPS.DisableCerts = true
+
+ case "ignore_loaded_certs":
+ if srv.AutoHTTPS == nil {
+ srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+ }
+ srv.AutoHTTPS.IgnoreLoadedCerts = true
+
+ case "prefer_wildcard":
+ if srv.AutoHTTPS == nil {
+ srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+ }
+ srv.AutoHTTPS.PreferWildcard = true
+ }
+ }
+
+ // Using paths in site addresses is deprecated
+ // See ParseAddress() where parsing should later reject paths
+ // See https://github.com/caddyserver/caddy/pull/4728 for a full explanation
+ for _, sblock := range p.serverBlocks {
+ for _, addr := range sblock.parsedKeys {
+ if addr.Path != "" {
+ caddy.Log().Named("caddyfile").Warn("Using a path in a site address is deprecated; please use the 'handle' directive instead", zap.String("address", addr.String()))
+ }
+ }
}
// sort server blocks by their keys; this is important because
@@ -358,7 +663,11 @@ func (st *ServerType) serversFromPairings(
// but I don't expect many blocks will have THAT many keys...
var iLongestPath, jLongestPath string
var iLongestHost, jLongestHost string
- for _, addr := range p.serverBlocks[i].keys {
+ var iWildcardHost, jWildcardHost bool
+ for _, addr := range p.serverBlocks[i].parsedKeys {
+ if strings.Contains(addr.Host, "*") || addr.Host == "" {
+ iWildcardHost = true
+ }
if specificity(addr.Host) > specificity(iLongestHost) {
iLongestHost = addr.Host
}
@@ -366,7 +675,10 @@ func (st *ServerType) serversFromPairings(
iLongestPath = addr.Path
}
}
- for _, addr := range p.serverBlocks[j].keys {
+ for _, addr := range p.serverBlocks[j].parsedKeys {
+ if strings.Contains(addr.Host, "*") || addr.Host == "" {
+ jWildcardHost = true
+ }
if specificity(addr.Host) > specificity(jLongestHost) {
jLongestHost = addr.Host
}
@@ -374,14 +686,66 @@ func (st *ServerType) serversFromPairings(
jLongestPath = addr.Path
}
}
+ // catch-all blocks (blocks with no hostname) should always go
+ // last, even after blocks with wildcard hosts
+ if specificity(iLongestHost) == 0 {
+ return false
+ }
+ if specificity(jLongestHost) == 0 {
+ return true
+ }
+ if iWildcardHost != jWildcardHost {
+ // site blocks that have a key with a wildcard in the hostname
+ // must always be less specific than blocks without one; see
+ // https://github.com/caddyserver/caddy/issues/3410
+ return jWildcardHost && !iWildcardHost
+ }
if specificity(iLongestHost) == specificity(jLongestHost) {
return len(iLongestPath) > len(jLongestPath)
}
return specificity(iLongestHost) > specificity(jLongestHost)
})
+ // collect all hosts that have a wildcard in them
+ wildcardHosts := []string{}
+ for _, sblock := range p.serverBlocks {
+ for _, addr := range sblock.parsedKeys {
+ if strings.HasPrefix(addr.Host, "*.") {
+ wildcardHosts = append(wildcardHosts, addr.Host[2:])
+ }
+ }
+ }
+
var hasCatchAllTLSConnPolicy, addressQualifiesForTLS bool
- autoHTTPSWillAddConnPolicy := true
+ autoHTTPSWillAddConnPolicy := srv.AutoHTTPS == nil || !srv.AutoHTTPS.Disabled
+
+ // if needed, the ServerLogConfig is initialized beforehand so
+ // that all server blocks can populate it with data, even when not
+ // coming with a log directive
+ for _, sblock := range p.serverBlocks {
+ if len(sblock.pile["custom_log"]) != 0 {
+ srv.Logs = new(caddyhttp.ServerLogConfig)
+ break
+ }
+ }
+
+ // add named routes to the server if 'invoke' was used inside of it
+ configuredNamedRoutes := options["named_routes"].(map[string]*caddyhttp.Route)
+ for _, sblock := range p.serverBlocks {
+ if len(sblock.pile[namedRouteKey]) == 0 {
+ continue
+ }
+ for _, value := range sblock.pile[namedRouteKey] {
+ if srv.NamedRoutes == nil {
+ srv.NamedRoutes = map[string]*caddyhttp.Route{}
+ }
+ name := value.Value.(string)
+ if configuredNamedRoutes[name] == nil {
+ return nil, fmt.Errorf("cannot invoke named route '%s', which was not defined", name)
+ }
+ srv.NamedRoutes[name] = configuredNamedRoutes[name]
+ }
+ }
// create a subroute for each site in the server block
for _, sblock := range p.serverBlocks {
@@ -392,6 +756,13 @@ func (st *ServerType) serversFromPairings(
hosts := sblock.hostsFromKeys(false)
+	// emit warnings if the user put unspecified IP addresses; they probably want the bind directive
+ for _, h := range hosts {
+ if h == "0.0.0.0" || h == "::" {
+ caddy.Log().Named("caddyfile").Warn("Site block has an unspecified IP address which only matches requests having that Host header; you probably want the 'bind' directive to configure the socket", zap.String("address", h))
+ }
+ }
+
// tls: connection policies
if cpVals, ok := sblock.pile["tls.connection_policy"]; ok {
// tls connection policies
@@ -405,47 +776,98 @@ func (st *ServerType) serversFromPairings(
cp.DefaultSNI = defaultSNI
break
}
+ if h == fallbackSNI {
+ hosts = append(hosts, "")
+ cp.FallbackSNI = fallbackSNI
+ break
+ }
}
if len(hosts) > 0 {
+ slices.Sort(hosts) // for deterministic JSON output
cp.MatchersRaw = caddy.ModuleMap{
"sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
}
} else {
cp.DefaultSNI = defaultSNI
- hasCatchAllTLSConnPolicy = true
+ cp.FallbackSNI = fallbackSNI
}
- srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
+ // only append this policy if it actually changes something
+ if !cp.SettingsEmpty() {
+ srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
+ hasCatchAllTLSConnPolicy = len(hosts) == 0
+ }
}
}
- for _, addr := range sblock.keys {
- // exclude any hosts that were defined explicitly with "http://"
- // in the key from automated cert management (issue #2998)
- if addr.Scheme == "http" && addr.Host != "" {
- if srv.AutoHTTPS == nil {
- srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
- }
- if !sliceContains(srv.AutoHTTPS.Skip, addr.Host) {
- srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, addr.Host)
+ for _, addr := range sblock.parsedKeys {
+ // if server only uses HTTP port, auto-HTTPS will not apply
+ if listenersUseAnyPortOtherThan(srv.Listen, httpPort) {
+ // exclude any hosts that were defined explicitly with "http://"
+ // in the key from automated cert management (issue #2998)
+ if addr.Scheme == "http" && addr.Host != "" {
+ if srv.AutoHTTPS == nil {
+ srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+ }
+ if !slices.Contains(srv.AutoHTTPS.Skip, addr.Host) {
+ srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, addr.Host)
+ }
}
}
+
+	// If TLS is specified as a directive, it will also result in one or more connection policies being created
+ // Thus, catch-all address with non-standard port, e.g. :8443, can have TLS enabled without
+ // specifying prefix "https://"
+	// The second part of the condition allows creating a TLS conn policy even though `auto_https` has been disabled,
+	// ensuring compatibility with the behavior described in the link below:
+ // https://caddy.community/t/making-sense-of-auto-https-and-why-disabling-it-still-serves-https-instead-of-http/9761
+ createdTLSConnPolicies, ok := sblock.pile["tls.connection_policy"]
+ hasTLSEnabled := (ok && len(createdTLSConnPolicies) > 0) ||
+ (addr.Host != "" && srv.AutoHTTPS != nil && !slices.Contains(srv.AutoHTTPS.Skip, addr.Host))
+
// we'll need to remember if the address qualifies for auto-HTTPS, so we
// can add a TLS conn policy if necessary
if addr.Scheme == "https" ||
- (addr.Scheme != "http" && addr.Host != "" && addr.Port != httpPort) {
+ (addr.Scheme != "http" && addr.Port != httpPort && hasTLSEnabled) {
addressQualifiesForTLS = true
}
+
+ // If prefer wildcard is enabled, then we add hosts that are
+ // already covered by the wildcard to the skip list
+ if addressQualifiesForTLS && srv.AutoHTTPS != nil && srv.AutoHTTPS.PreferWildcard {
+ baseDomain := addr.Host
+ if idx := strings.Index(baseDomain, "."); idx != -1 {
+ baseDomain = baseDomain[idx+1:]
+ }
+ if !strings.HasPrefix(addr.Host, "*.") && slices.Contains(wildcardHosts, baseDomain) {
+ srv.AutoHTTPS.SkipCerts = append(srv.AutoHTTPS.SkipCerts, addr.Host)
+ }
+ }
+
// predict whether auto-HTTPS will add the conn policy for us; if so, we
// may not need to add one for this server
autoHTTPSWillAddConnPolicy = autoHTTPSWillAddConnPolicy &&
(addr.Port == httpsPort || (addr.Port != httpPort && addr.Host != ""))
}
+ // Look for any config values that provide listener wrappers on the server block
+ for _, listenerConfig := range sblock.pile["listener_wrapper"] {
+ listenerWrapper, ok := listenerConfig.Value.(caddy.ListenerWrapper)
+ if !ok {
+ return nil, fmt.Errorf("config for a listener wrapper did not provide a value that implements caddy.ListenerWrapper")
+ }
+ jsonListenerWrapper := caddyconfig.JSONModuleObject(
+ listenerWrapper,
+ "wrapper",
+ listenerWrapper.(caddy.Module).CaddyModule().ID.Name(),
+ warnings)
+ srv.ListenerWrappersRaw = append(srv.ListenerWrappersRaw, jsonListenerWrapper)
+ }
+
// set up each handler directive, making sure to honor directive order
dirRoutes := sblock.pile["route"]
- siteSubroute, err := buildSubroute(dirRoutes, groupCounter)
+ siteSubroute, err := buildSubroute(dirRoutes, groupCounter, true)
if err != nil {
return nil, err
}
@@ -458,10 +880,19 @@ func (st *ServerType) serversFromPairings(
if srv.Errors == nil {
srv.Errors = new(caddyhttp.HTTPErrorConfig)
}
+ sort.SliceStable(errorSubrouteVals, func(i, j int) bool {
+ sri, srj := errorSubrouteVals[i].Value.(*caddyhttp.Subroute), errorSubrouteVals[j].Value.(*caddyhttp.Subroute)
+ if len(sri.Routes[0].MatcherSetsRaw) == 0 && len(srj.Routes[0].MatcherSetsRaw) != 0 {
+ return false
+ }
+ return true
+ })
+ errorsSubroute := &caddyhttp.Subroute{}
for _, val := range errorSubrouteVals {
sr := val.Value.(*caddyhttp.Subroute)
- srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, sr, matcherSetsEnc, p, warnings)
+ errorsSubroute.Routes = append(errorsSubroute.Routes, sr.Routes...)
}
+ srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, errorsSubroute, matcherSetsEnc, p, warnings)
}
// add log associations
@@ -469,28 +900,39 @@ func (st *ServerType) serversFromPairings(
sblockLogHosts := sblock.hostsFromKeys(true)
for _, cval := range sblock.pile["custom_log"] {
ncl := cval.Value.(namedCustomLog)
- if srv.Logs == nil {
- srv.Logs = new(caddyhttp.ServerLogConfig)
+
+				// if `no_hostname` is set, then this logger will not
+				// be associated with any of the site block's hostnames,
+				// and will only be usable via the `log_name` directive
+ // or the `access_logger_names` variable
+ if ncl.noHostname {
+ continue
}
- if sblock.hasHostCatchAllKey() {
+
+ if sblock.hasHostCatchAllKey() && len(ncl.hostnames) == 0 {
// all requests for hosts not able to be listed should use
// this log because it's a catch-all-hosts server block
srv.Logs.DefaultLoggerName = ncl.name
- } else {
- // map each host to the user's desired logger name
- for _, h := range sblockLogHosts {
- // if the custom logger name is non-empty, add it to
- // the map; otherwise, only map to an empty logger
- // name if the server block has a catch-all host (in
- // which case only requests with mapped hostnames will
- // be access-logged, so it'll be necessary to add them
- // to the map even if they use default logger)
- if ncl.name != "" || len(hosts) == 0 {
- if srv.Logs.LoggerNames == nil {
- srv.Logs.LoggerNames = make(map[string]string)
- }
- srv.Logs.LoggerNames[h] = ncl.name
+ } else if len(ncl.hostnames) > 0 {
+ // if the logger overrides the hostnames, map that to the logger name
+ for _, h := range ncl.hostnames {
+ if srv.Logs.LoggerNames == nil {
+ srv.Logs.LoggerNames = make(map[string]caddyhttp.StringArray)
}
+ srv.Logs.LoggerNames[h] = append(srv.Logs.LoggerNames[h], ncl.name)
+ }
+ } else {
+ // otherwise, map each host to the logger name
+ for _, h := range sblockLogHosts {
+ // strip the port from the host, if any
+ host, _, err := net.SplitHostPort(h)
+ if err != nil {
+ host = h
+ }
+ if srv.Logs.LoggerNames == nil {
+ srv.Logs.LoggerNames = make(map[string]caddyhttp.StringArray)
+ }
+ srv.Logs.LoggerNames[host] = append(srv.Logs.LoggerNames[host], ncl.name)
}
}
}
@@ -507,6 +949,11 @@ func (st *ServerType) serversFromPairings(
}
}
+ // sort for deterministic JSON output
+ if srv.Logs != nil {
+ slices.Sort(srv.Logs.SkipHosts)
+ }
+
// a server cannot (natively) serve both HTTP and HTTPS at the
// same time, so make sure the configuration isn't in conflict
err := detectConflictingSchemes(srv, p.serverBlocks, options)
@@ -528,8 +975,11 @@ func (st *ServerType) serversFromPairings(
// policy missing for any HTTPS-enabled hosts, if so, add it... maybe?
if addressQualifiesForTLS &&
!hasCatchAllTLSConnPolicy &&
- (len(srv.TLSConnPolicies) > 0 || !autoHTTPSWillAddConnPolicy || defaultSNI != "") {
- srv.TLSConnPolicies = append(srv.TLSConnPolicies, &caddytls.ConnectionPolicy{DefaultSNI: defaultSNI})
+ (len(srv.TLSConnPolicies) > 0 || !autoHTTPSWillAddConnPolicy || defaultSNI != "" || fallbackSNI != "") {
+ srv.TLSConnPolicies = append(srv.TLSConnPolicies, &caddytls.ConnectionPolicy{
+ DefaultSNI: defaultSNI,
+ FallbackSNI: fallbackSNI,
+ })
}
// tidy things up a bit
@@ -542,10 +992,14 @@ func (st *ServerType) serversFromPairings(
servers[fmt.Sprintf("srv%d", i)] = srv
}
+ if err := applyServerOptions(servers, options, warnings); err != nil {
+ return nil, fmt.Errorf("applying global server options: %v", err)
+ }
+
return servers, nil
}
-func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock, options map[string]interface{}) error {
+func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock, options map[string]any) error {
httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
if hp, ok := options["http_port"].(int); ok {
httpPort = strconv.Itoa(hp)
@@ -583,7 +1037,7 @@ func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock,
}
for _, sblock := range serverBlocks {
- for _, addr := range sblock.keys {
+ for _, addr := range sblock.parsedKeys {
if addr.Scheme == "http" || addr.Port == httpPort {
if err := checkAndSetHTTP(addr); err != nil {
return err
@@ -603,9 +1057,15 @@ func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock,
return nil
}
-// consolidateConnPolicies removes empty TLS connection policies and combines
-// equivalent ones for a cleaner overall output.
+// consolidateConnPolicies sorts any catch-all policy to the end, removes empty TLS connection
+// policies, and combines equivalent ones for a cleaner overall output.
func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.ConnectionPolicies, error) {
+	// catch-all policies (those without any matcher) should be at the
+	// end, otherwise they would nullify any more specific policies
+ sort.SliceStable(cps, func(i, j int) bool {
+ return cps[j].MatchersRaw == nil && cps[i].MatchersRaw != nil
+ })
+
for i := 0; i < len(cps); i++ {
// compare it to the others
for j := 0; j < len(cps); j++ {
@@ -706,7 +1166,7 @@ func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.Connecti
} else if cps[i].CertSelection != nil && cps[j].CertSelection != nil {
// if both have one, then combine AnyTag
for _, tag := range cps[j].CertSelection.AnyTag {
- if !sliceContains(cps[i].CertSelection.AnyTag, tag) {
+ if !slices.Contains(cps[i].CertSelection.AnyTag, tag) {
cps[i].CertSelection.AnyTag = append(cps[i].CertSelection.AnyTag, tag)
}
}
@@ -727,18 +1187,39 @@ func appendSubrouteToRouteList(routeList caddyhttp.RouteList,
subroute *caddyhttp.Subroute,
matcherSetsEnc []caddy.ModuleMap,
p sbAddrAssociation,
- warnings *[]caddyconfig.Warning) caddyhttp.RouteList {
-
+ warnings *[]caddyconfig.Warning,
+) caddyhttp.RouteList {
// nothing to do if... there's nothing to do
if len(matcherSetsEnc) == 0 && len(subroute.Routes) == 0 && subroute.Errors == nil {
return routeList
}
+ // No need to wrap the handlers in a subroute if this is the only server block
+ // and there is no matcher for it (doing so would produce unnecessarily nested
+ // JSON), *unless* there is a host matcher within this site block; if so, then
+ // we still need to wrap in a subroute because otherwise the host matcher from
+ // the inside of the site block would be a top-level host matcher, which is
+ // subject to auto-HTTPS (cert management), and using a host matcher within
+ // a site block is a valid, common pattern for excluding domains from cert
+ // management, leading to unexpected behavior; see issue #5124.
+ wrapInSubroute := true
if len(matcherSetsEnc) == 0 && len(p.serverBlocks) == 1 {
- // no need to wrap the handlers in a subroute if this is
- // the only server block and there is no matcher for it
- routeList = append(routeList, subroute.Routes...)
- } else {
+ var hasHostMatcher bool
+ outer:
+ for _, route := range subroute.Routes {
+ for _, ms := range route.MatcherSetsRaw {
+ for matcherName := range ms {
+ if matcherName == "host" {
+ hasHostMatcher = true
+ break outer
+ }
+ }
+ }
+ }
+ wrapInSubroute = hasHostMatcher
+ }
+
+ if wrapInSubroute {
route := caddyhttp.Route{
// the semantics of a site block in the Caddyfile dictate
// that only the first matching one is evaluated, since
@@ -756,20 +1237,25 @@ func appendSubrouteToRouteList(routeList caddyhttp.RouteList,
if len(route.MatcherSetsRaw) > 0 || len(route.HandlersRaw) > 0 {
routeList = append(routeList, route)
}
+ } else {
+ routeList = append(routeList, subroute.Routes...)
}
+
return routeList
}
// buildSubroute turns the config values, which are expected to be routes
// into a clean and orderly subroute that has all the routes within it.
-func buildSubroute(routes []ConfigValue, groupCounter counter) (*caddyhttp.Subroute, error) {
- for _, val := range routes {
- if !directiveIsOrdered(val.directive) {
- return nil, fmt.Errorf("directive '%s' is not ordered, so it cannot be used here", val.directive)
+func buildSubroute(routes []ConfigValue, groupCounter counter, needsSorting bool) (*caddyhttp.Subroute, error) {
+ if needsSorting {
+ for _, val := range routes {
+ if !slices.Contains(directiveOrder, val.directive) {
+ return nil, fmt.Errorf("directive '%s' is not an ordered HTTP handler, so it cannot be used here - try placing within a route block or using the order global option", val.directive)
+ }
}
- }
- sortRoutes(routes)
+ sortRoutes(routes)
+ }
subroute := new(caddyhttp.Subroute)
@@ -798,7 +1284,18 @@ func buildSubroute(routes []ConfigValue, groupCounter counter) (*caddyhttp.Subro
// root directives would overwrite previously-matched ones; they should not cascade
"root": {},
}
- for meDir, info := range mutuallyExclusiveDirs {
+
+ // we need to deterministically loop over each of these directives
+ // in order to keep the group numbers consistent
+ keys := make([]string, 0, len(mutuallyExclusiveDirs))
+ for k := range mutuallyExclusiveDirs {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, meDir := range keys {
+ info := mutuallyExclusiveDirs[meDir]
+
// see how many instances of the directive there are
for _, r := range routes {
if r.directive == meDir {
@@ -859,6 +1356,19 @@ func buildSubroute(routes []ConfigValue, groupCounter counter) (*caddyhttp.Subro
return subroute, nil
}
+// normalizeDirectiveName ensures directives that should be sorted
+// at the same level are named the same before sorting happens.
+func normalizeDirectiveName(directive string) string {
+ // As a special case, we want "handle_path" to be sorted
+ // at the same level as "handle", so we force them to use
+ // the same directive name after their parsing is complete.
+ // See https://github.com/caddyserver/caddy/issues/3675#issuecomment-678042377
+ if directive == "handle_path" {
+ directive = "handle"
+ }
+ return directive
+}
+
// consolidateRoutes combines routes with the same properties
// (same matchers, same Terminal and Group settings) for a
// cleaner overall output.
@@ -886,19 +1396,24 @@ func matcherSetFromMatcherToken(
if tkn.Text == "*" {
// match all requests == no matchers, so nothing to do
return nil, true, nil
- } else if strings.HasPrefix(tkn.Text, "/") {
- // convenient way to specify a single path match
+ }
+
+ // convenient way to specify a single path match
+ if strings.HasPrefix(tkn.Text, "/") {
return caddy.ModuleMap{
"path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings),
}, true, nil
- } else if strings.HasPrefix(tkn.Text, matcherPrefix) {
- // pre-defined matcher
+ }
+
+ // pre-defined matcher
+ if strings.HasPrefix(tkn.Text, matcherPrefix) {
m, ok := matcherDefs[tkn.Text]
if !ok {
return nil, false, fmt.Errorf("unrecognized matcher name: %+v", tkn.Text)
}
return m, true, nil
}
+
return nil, false, nil
}
@@ -912,7 +1427,7 @@ func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.Mod
var matcherPairs []*hostPathPair
var catchAllHosts bool
- for _, addr := range sblock.keys {
+ for _, addr := range sblock.parsedKeys {
// choose a matcher pair that should be shared by this
// server block; if none exists yet, create one
var chosenMatcherPair *hostPathPair
@@ -944,25 +1459,16 @@ func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.Mod
// add this server block's keys to the matcher
// pair if it doesn't already exist
- if addr.Host != "" {
- var found bool
- for _, h := range chosenMatcherPair.hostm {
- if h == addr.Host {
- found = true
- break
- }
- }
- if !found {
- chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host)
- }
+ if addr.Host != "" && !slices.Contains(chosenMatcherPair.hostm, addr.Host) {
+ chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host)
}
}
// iterate each pairing of host and path matchers and
// put them into a map for JSON encoding
- var matcherSets []map[string]caddyhttp.RequestMatcher
+ var matcherSets []map[string]caddyhttp.RequestMatcherWithError
for _, mp := range matcherPairs {
- matcherSet := make(map[string]caddyhttp.RequestMatcher)
+ matcherSet := make(map[string]caddyhttp.RequestMatcherWithError)
if len(mp.hostm) > 0 {
matcherSet["host"] = mp.hostm
}
@@ -988,47 +1494,94 @@ func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.Mod
}
func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
- for d.Next() {
- definitionName := d.Val()
+ d.Next() // advance to the first token
- if _, ok := matchers[definitionName]; ok {
- return fmt.Errorf("matcher is defined more than once: %s", definitionName)
- }
- matchers[definitionName] = make(caddy.ModuleMap)
+ // this is the "name" for "named matchers"
+ definitionName := d.Val()
- // in case there are multiple instances of the same matcher, concatenate
- // their tokens (we expect that UnmarshalCaddyfile should be able to
- // handle more than one segment); otherwise, we'd overwrite other
- // instances of the matcher in this set
- tokensByMatcherName := make(map[string][]caddyfile.Token)
- for nesting := d.Nesting(); d.NextBlock(nesting); {
- matcherName := d.Val()
- tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
+ if _, ok := matchers[definitionName]; ok {
+ return fmt.Errorf("matcher is defined more than once: %s", definitionName)
+ }
+ matchers[definitionName] = make(caddy.ModuleMap)
+
+ // given a matcher name and the tokens following it, parse
+ // the tokens as a matcher module and record it
+ makeMatcher := func(matcherName string, tokens []caddyfile.Token) error {
+ // create a new dispenser from the tokens
+ dispenser := caddyfile.NewDispenser(tokens)
+
+ // set the matcher name (without @) in the dispenser context so
+ // that matcher modules can access it to use it as their name
+ // (e.g. regexp matchers which use the name for capture groups)
+ dispenser.SetContext(caddyfile.MatcherNameCtxKey, definitionName[1:])
+
+ mod, err := caddy.GetModule("http.matchers." + matcherName)
+ if err != nil {
+ return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
}
- for matcherName, tokens := range tokensByMatcherName {
- mod, err := caddy.GetModule("http.matchers." + matcherName)
- if err != nil {
- return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
- }
- unm, ok := mod.New().(caddyfile.Unmarshaler)
- if !ok {
- return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
- }
- err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
+ unm, ok := mod.New().(caddyfile.Unmarshaler)
+ if !ok {
+ return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
+ }
+ err = unm.UnmarshalCaddyfile(dispenser)
+ if err != nil {
+ return err
+ }
+
+ if rm, ok := unm.(caddyhttp.RequestMatcherWithError); ok {
+ matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
+ return nil
+ }
+ // nolint:staticcheck
+ if rm, ok := unm.(caddyhttp.RequestMatcher); ok {
+ matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
+ return nil
+ }
+ return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
+ }
+
+ // if the next token is quoted, we can assume it's not a matcher name
+ // and that it's probably an 'expression' matcher
+ if d.NextArg() {
+ if d.Token().Quoted() {
+ // since it was missing the matcher name, we insert a token
+ // in front of the expression token itself; we use Clone() to
+ // make the new token to keep the same the import location as
+ // the next token, if this is within a snippet or imported file.
+ // see https://github.com/caddyserver/caddy/issues/6287
+ expressionToken := d.Token().Clone()
+ expressionToken.Text = "expression"
+ err := makeMatcher("expression", []caddyfile.Token{expressionToken, d.Token()})
if err != nil {
return err
}
- rm, ok := unm.(caddyhttp.RequestMatcher)
- if !ok {
- return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
- }
- matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
+ return nil
+ }
+
+ // if it wasn't quoted, then we need to rewind after calling
+ // d.NextArg() so the below properly grabs the matcher name
+ d.Prev()
+ }
+
+ // in case there are multiple instances of the same matcher, concatenate
+ // their tokens (we expect that UnmarshalCaddyfile should be able to
+ // handle more than one segment); otherwise, we'd overwrite other
+ // instances of the matcher in this set
+ tokensByMatcherName := make(map[string][]caddyfile.Token)
+ for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
+ matcherName := d.Val()
+ tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
+ }
+ for matcherName, tokens := range tokensByMatcherName {
+ err := makeMatcher(matcherName, tokens)
+ if err != nil {
+ return err
}
}
return nil
}
-func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (caddy.ModuleMap, error) {
+func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcherWithError) (caddy.ModuleMap, error) {
msEncoded := make(caddy.ModuleMap)
for matcherName, val := range matchers {
jsonBytes, err := json.Marshal(val)
@@ -1040,9 +1593,31 @@ func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (caddy.Modul
return msEncoded, nil
}
+// WasReplacedPlaceholderShorthand checks if a token string was
+// likely a replaced shorthand of the known Caddyfile placeholder
+// replacement outputs. Useful to prevent some user-defined map
+// output destinations from overlapping with one of the
+// predefined shorthands.
+func WasReplacedPlaceholderShorthand(token string) string {
+ prev := ""
+ for i, item := range placeholderShorthands() {
+ // only look at every 2nd item, which is the replacement
+ if i%2 == 0 {
+ prev = item
+ continue
+ }
+ if strings.Trim(token, "{}") == strings.Trim(item, "{}") {
+ // we return the original shorthand so it
+ // can be used for an error message
+ return prev
+ }
+ }
+ return ""
+}
+
// tryInt tries to convert val to an integer. If it fails,
// it downgrades the error to a warning and returns 0.
-func tryInt(val interface{}, warnings *[]caddyconfig.Warning) int {
+func tryInt(val any, warnings *[]caddyconfig.Warning) int {
intVal, ok := val.(int)
if val != nil && !ok && warnings != nil {
*warnings = append(*warnings, caddyconfig.Warning{Message: "not an integer type"})
@@ -1050,7 +1625,7 @@ func tryInt(val interface{}, warnings *[]caddyconfig.Warning) int {
return intVal
}
-func tryString(val interface{}, warnings *[]caddyconfig.Warning) string {
+func tryString(val any, warnings *[]caddyconfig.Warning) string {
stringVal, ok := val.(string)
if val != nil && !ok && warnings != nil {
*warnings = append(*warnings, caddyconfig.Warning{Message: "not a string type"})
@@ -1058,10 +1633,28 @@ func tryString(val interface{}, warnings *[]caddyconfig.Warning) string {
return stringVal
}
-// sliceContains returns true if needle is in haystack.
-func sliceContains(haystack []string, needle string) bool {
- for _, s := range haystack {
- if s == needle {
+func tryDuration(val any, warnings *[]caddyconfig.Warning) caddy.Duration {
+ durationVal, ok := val.(caddy.Duration)
+ if val != nil && !ok && warnings != nil {
+ *warnings = append(*warnings, caddyconfig.Warning{Message: "not a duration type"})
+ }
+ return durationVal
+}
+
+// listenersUseAnyPortOtherThan returns true if there are any
+// listeners in addresses that use a port which is not otherPort.
+// Mostly borrowed from an unexported method in the caddyhttp package.
+func listenersUseAnyPortOtherThan(addresses []string, otherPort string) bool {
+ otherPortInt, err := strconv.Atoi(otherPort)
+ if err != nil {
+ return false
+ }
+ for _, lnAddr := range addresses {
+ laddrs, err := caddy.ParseNetworkAddress(lnAddr)
+ if err != nil {
+ continue
+ }
+ if uint(otherPortInt) > laddrs.EndPort || uint(otherPortInt) < laddrs.StartPort {
return true
}
}
@@ -1105,19 +1698,31 @@ func (c counter) nextGroup() string {
}
type namedCustomLog struct {
- name string
- log *caddy.CustomLog
+ name string
+ hostnames []string
+ log *caddy.CustomLog
+ noHostname bool
+}
+
+// addressWithProtocols associates a listen address with
+// the protocols to serve it with
+type addressWithProtocols struct {
+ address string
+ protocols []string
}
// sbAddrAssociation is a mapping from a list of
-// addresses to a list of server blocks that are
-// served on those addresses.
+// addresses with protocols, and a list of server
+// blocks that are served on those addresses.
type sbAddrAssociation struct {
- addresses []string
- serverBlocks []serverBlock
+ addressesWithProtocols []addressWithProtocols
+ serverBlocks []serverBlock
}
-const matcherPrefix = "@"
+const (
+ matcherPrefix = "@"
+ namedRouteKey = "named_route"
+)
// Interface guard
var _ caddyfile.ServerType = (*ServerType)(nil)
diff --git a/caddyconfig/httpcaddyfile/httptype_test.go b/caddyconfig/httpcaddyfile/httptype_test.go
index 64f0f82a..69f55501 100644
--- a/caddyconfig/httpcaddyfile/httptype_test.go
+++ b/caddyconfig/httpcaddyfile/httptype_test.go
@@ -9,7 +9,6 @@ import (
func TestMatcherSyntax(t *testing.T) {
for i, tc := range []struct {
input string
- expectWarn bool
expectError bool
}{
{
@@ -18,7 +17,6 @@ func TestMatcherSyntax(t *testing.T) {
query showdebug=1
}
`,
- expectWarn: false,
expectError: false,
},
{
@@ -27,7 +25,6 @@ func TestMatcherSyntax(t *testing.T) {
query bad format
}
`,
- expectWarn: false,
expectError: true,
},
{
@@ -38,7 +35,6 @@ func TestMatcherSyntax(t *testing.T) {
}
}
`,
- expectWarn: false,
expectError: false,
},
{
@@ -47,21 +43,29 @@ func TestMatcherSyntax(t *testing.T) {
not path /somepath*
}
`,
- expectWarn: false,
expectError: false,
},
+ {
+ input: `http://localhost
+ @debug not path /somepath*
+ `,
+ expectError: false,
+ },
+ {
+ input: `@matcher {
+ path /matcher-not-allowed/outside-of-site-block/*
+ }
+ http://localhost
+ `,
+ expectError: true,
+ },
} {
adapter := caddyfile.Adapter{
ServerType: ServerType{},
}
- _, warnings, err := adapter.Adapt([]byte(tc.input), nil)
-
- if len(warnings) > 0 != tc.expectWarn {
- t.Errorf("Test %d warning expectation failed Expected: %v, got %v", i, tc.expectWarn, warnings)
- continue
- }
+ _, _, err := adapter.Adapt([]byte(tc.input), nil)
if err != nil != tc.expectError {
t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err)
@@ -103,7 +107,6 @@ func TestSpecificity(t *testing.T) {
func TestGlobalOptions(t *testing.T) {
for i, tc := range []struct {
input string
- expectWarn bool
expectError bool
}{
{
@@ -113,7 +116,6 @@ func TestGlobalOptions(t *testing.T) {
}
:80
`,
- expectWarn: false,
expectError: false,
},
{
@@ -123,7 +125,6 @@ func TestGlobalOptions(t *testing.T) {
}
:80
`,
- expectWarn: false,
expectError: false,
},
{
@@ -133,7 +134,6 @@ func TestGlobalOptions(t *testing.T) {
}
:80
`,
- expectWarn: false,
expectError: false,
},
{
@@ -145,7 +145,54 @@ func TestGlobalOptions(t *testing.T) {
}
:80
`,
- expectWarn: false,
+ expectError: true,
+ },
+ {
+ input: `
+ {
+ admin {
+ enforce_origin
+ origins 192.168.1.1:2020 127.0.0.1:2020
+ }
+ }
+ :80
+ `,
+ expectError: false,
+ },
+ {
+ input: `
+ {
+ admin 127.0.0.1:2020 {
+ enforce_origin
+ origins 192.168.1.1:2020 127.0.0.1:2020
+ }
+ }
+ :80
+ `,
+ expectError: false,
+ },
+ {
+ input: `
+ {
+ admin 192.168.1.1:2020 127.0.0.1:2020 {
+ enforce_origin
+ origins 192.168.1.1:2020 127.0.0.1:2020
+ }
+ }
+ :80
+ `,
+ expectError: true,
+ },
+ {
+ input: `
+ {
+ admin off {
+ enforce_origin
+ origins 192.168.1.1:2020 127.0.0.1:2020
+ }
+ }
+ :80
+ `,
expectError: true,
},
} {
@@ -154,12 +201,7 @@ func TestGlobalOptions(t *testing.T) {
ServerType: ServerType{},
}
- _, warnings, err := adapter.Adapt([]byte(tc.input), nil)
-
- if len(warnings) > 0 != tc.expectWarn {
- t.Errorf("Test %d warning expectation failed Expected: %v, got %v", i, tc.expectWarn, warnings)
- continue
- }
+ _, _, err := adapter.Adapt([]byte(tc.input), nil)
if err != nil != tc.expectError {
t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err)
diff --git a/caddyconfig/httpcaddyfile/options.go b/caddyconfig/httpcaddyfile/options.go
index 072d8f49..cfd8f709 100644
--- a/caddyconfig/httpcaddyfile/options.go
+++ b/caddyconfig/httpcaddyfile/options.go
@@ -15,156 +15,302 @@
package httpcaddyfile
import (
+ "slices"
"strconv"
- "time"
+
+ "github.com/caddyserver/certmagic"
+ "github.com/mholt/acmez/v2/acme"
"github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
-func parseOptHTTPPort(d *caddyfile.Dispenser) (int, error) {
+func init() {
+ RegisterGlobalOption("debug", parseOptTrue)
+ RegisterGlobalOption("http_port", parseOptHTTPPort)
+ RegisterGlobalOption("https_port", parseOptHTTPSPort)
+ RegisterGlobalOption("default_bind", parseOptDefaultBind)
+ RegisterGlobalOption("grace_period", parseOptDuration)
+ RegisterGlobalOption("shutdown_delay", parseOptDuration)
+ RegisterGlobalOption("default_sni", parseOptSingleString)
+ RegisterGlobalOption("fallback_sni", parseOptSingleString)
+ RegisterGlobalOption("order", parseOptOrder)
+ RegisterGlobalOption("storage", parseOptStorage)
+ RegisterGlobalOption("storage_check", parseStorageCheck)
+ RegisterGlobalOption("storage_clean_interval", parseStorageCleanInterval)
+ RegisterGlobalOption("renew_interval", parseOptDuration)
+ RegisterGlobalOption("ocsp_interval", parseOptDuration)
+ RegisterGlobalOption("acme_ca", parseOptSingleString)
+ RegisterGlobalOption("acme_ca_root", parseOptSingleString)
+ RegisterGlobalOption("acme_dns", parseOptACMEDNS)
+ RegisterGlobalOption("acme_eab", parseOptACMEEAB)
+ RegisterGlobalOption("cert_issuer", parseOptCertIssuer)
+ RegisterGlobalOption("skip_install_trust", parseOptTrue)
+ RegisterGlobalOption("email", parseOptSingleString)
+ RegisterGlobalOption("admin", parseOptAdmin)
+ RegisterGlobalOption("on_demand_tls", parseOptOnDemand)
+ RegisterGlobalOption("local_certs", parseOptTrue)
+ RegisterGlobalOption("key_type", parseOptSingleString)
+ RegisterGlobalOption("auto_https", parseOptAutoHTTPS)
+ RegisterGlobalOption("metrics", parseMetricsOptions)
+ RegisterGlobalOption("servers", parseServerOptions)
+ RegisterGlobalOption("ocsp_stapling", parseOCSPStaplingOptions)
+ RegisterGlobalOption("cert_lifetime", parseOptDuration)
+ RegisterGlobalOption("log", parseLogOptions)
+ RegisterGlobalOption("preferred_chains", parseOptPreferredChains)
+ RegisterGlobalOption("persist_config", parseOptPersistConfig)
+}
+
+func parseOptTrue(d *caddyfile.Dispenser, _ any) (any, error) { return true, nil }
+
+func parseOptHTTPPort(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
var httpPort int
- for d.Next() {
- var httpPortStr string
- if !d.AllArgs(&httpPortStr) {
- return 0, d.ArgErr()
- }
- var err error
- httpPort, err = strconv.Atoi(httpPortStr)
- if err != nil {
- return 0, d.Errf("converting port '%s' to integer value: %v", httpPortStr, err)
- }
+ var httpPortStr string
+ if !d.AllArgs(&httpPortStr) {
+ return 0, d.ArgErr()
+ }
+ var err error
+ httpPort, err = strconv.Atoi(httpPortStr)
+ if err != nil {
+ return 0, d.Errf("converting port '%s' to integer value: %v", httpPortStr, err)
}
return httpPort, nil
}
-func parseOptHTTPSPort(d *caddyfile.Dispenser) (int, error) {
+func parseOptHTTPSPort(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
var httpsPort int
- for d.Next() {
- var httpsPortStr string
- if !d.AllArgs(&httpsPortStr) {
- return 0, d.ArgErr()
- }
- var err error
- httpsPort, err = strconv.Atoi(httpsPortStr)
- if err != nil {
- return 0, d.Errf("converting port '%s' to integer value: %v", httpsPortStr, err)
- }
+ var httpsPortStr string
+ if !d.AllArgs(&httpsPortStr) {
+ return 0, d.ArgErr()
+ }
+ var err error
+ httpsPort, err = strconv.Atoi(httpsPortStr)
+ if err != nil {
+ return 0, d.Errf("converting port '%s' to integer value: %v", httpsPortStr, err)
}
return httpsPort, nil
}
-func parseOptExperimentalHTTP3(d *caddyfile.Dispenser) (bool, error) {
- return true, nil
-}
+func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
-func parseOptOrder(d *caddyfile.Dispenser) ([]string, error) {
- newOrder := directiveOrder
+ // get directive name
+ if !d.Next() {
+ return nil, d.ArgErr()
+ }
+ dirName := d.Val()
+ if _, ok := registeredDirectives[dirName]; !ok {
+ return nil, d.Errf("%s is not a registered directive", dirName)
+ }
- for d.Next() {
- // get directive name
- if !d.Next() {
- return nil, d.ArgErr()
- }
- dirName := d.Val()
- if _, ok := registeredDirectives[dirName]; !ok {
- return nil, d.Errf("%s is not a registered directive", dirName)
- }
+ // get positional token
+ if !d.Next() {
+ return nil, d.ArgErr()
+ }
+ pos := Positional(d.Val())
- // get positional token
- if !d.Next() {
- return nil, d.ArgErr()
- }
- pos := d.Val()
+ // if directive already had an order, drop it
+ newOrder := slices.DeleteFunc(directiveOrder, func(d string) bool {
+ return d == dirName
+ })
- // if directive exists, first remove it
- for i, d := range newOrder {
- if d == dirName {
- newOrder = append(newOrder[:i], newOrder[i+1:]...)
- break
- }
- }
-
- // act on the positional
- switch pos {
- case "first":
- newOrder = append([]string{dirName}, newOrder...)
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- directiveOrder = newOrder
- return newOrder, nil
- case "last":
- newOrder = append(newOrder, dirName)
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- directiveOrder = newOrder
- return newOrder, nil
- case "before":
- case "after":
- default:
- return nil, d.Errf("unknown positional '%s'", pos)
- }
-
- // get name of other directive
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- otherDir := d.Val()
+ // act on the positional; if it's First or Last, we're done right away
+ switch pos {
+ case First:
+ newOrder = append([]string{dirName}, newOrder...)
if d.NextArg() {
return nil, d.ArgErr()
}
+ directiveOrder = newOrder
+ return newOrder, nil
- // insert directive into proper position
- for i, d := range newOrder {
- if d == otherDir {
- if pos == "before" {
- newOrder = append(newOrder[:i], append([]string{dirName}, newOrder[i:]...)...)
- } else if pos == "after" {
- newOrder = append(newOrder[:i+1], append([]string{dirName}, newOrder[i+1:]...)...)
- }
- break
- }
+ case Last:
+ newOrder = append(newOrder, dirName)
+ if d.NextArg() {
+ return nil, d.ArgErr()
}
+ directiveOrder = newOrder
+ return newOrder, nil
+
+ // if it's Before or After, continue
+ case Before:
+ case After:
+
+ default:
+ return nil, d.Errf("unknown positional '%s'", pos)
}
+ // get name of other directive
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ otherDir := d.Val()
+ if d.NextArg() {
+ return nil, d.ArgErr()
+ }
+
+ // get the position of the target directive
+ targetIndex := slices.Index(newOrder, otherDir)
+ if targetIndex == -1 {
+ return nil, d.Errf("directive '%s' not found", otherDir)
+ }
+ // if we're inserting after, we need to increment the index to go after
+ if pos == After {
+ targetIndex++
+ }
+ // insert the directive into the new order
+ newOrder = slices.Insert(newOrder, targetIndex, dirName)
+
directiveOrder = newOrder
return newOrder, nil
}
-func parseOptStorage(d *caddyfile.Dispenser) (caddy.StorageConverter, error) {
- if !d.Next() {
+func parseOptStorage(d *caddyfile.Dispenser, _ any) (any, error) {
+ if !d.Next() { // consume option name
return nil, d.ArgErr()
}
- args := d.RemainingArgs()
- if len(args) != 1 {
+ if !d.Next() { // get storage module name
return nil, d.ArgErr()
}
- modName := args[0]
- mod, err := caddy.GetModule("caddy.storage." + modName)
- if err != nil {
- return nil, d.Errf("getting storage module '%s': %v", modName, err)
- }
- unm, ok := mod.New().(caddyfile.Unmarshaler)
- if !ok {
- return nil, d.Errf("storage module '%s' is not a Caddyfile unmarshaler", mod.ID)
- }
- err = unm.UnmarshalCaddyfile(d.NewFromNextSegment())
+ modID := "caddy.storage." + d.Val()
+ unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return nil, err
}
storage, ok := unm.(caddy.StorageConverter)
if !ok {
- return nil, d.Errf("module %s is not a StorageConverter", mod.ID)
+ return nil, d.Errf("module %s is not a caddy.StorageConverter", modID)
}
return storage, nil
}
-func parseOptSingleString(d *caddyfile.Dispenser) (string, error) {
- d.Next() // consume parameter name
+func parseStorageCheck(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
+ if !d.Next() {
+ return "", d.ArgErr()
+ }
+ val := d.Val()
+ if d.Next() {
+ return "", d.ArgErr()
+ }
+ if val != "off" {
+ return "", d.Errf("storage_check must be 'off'")
+ }
+ return val, nil
+}
+
+func parseStorageCleanInterval(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
+ if !d.Next() {
+ return "", d.ArgErr()
+ }
+ val := d.Val()
+ if d.Next() {
+ return "", d.ArgErr()
+ }
+ if val == "off" {
+ return false, nil
+ }
+ dur, err := caddy.ParseDuration(d.Val())
+ if err != nil {
+		return nil, d.Errf("failed to parse storage_clean_interval, must be a duration or 'off': %w", err)
+ }
+ return caddy.Duration(dur), nil
+}
+
+func parseOptDuration(d *caddyfile.Dispenser, _ any) (any, error) {
+ if !d.Next() { // consume option name
+ return nil, d.ArgErr()
+ }
+ if !d.Next() { // get duration value
+ return nil, d.ArgErr()
+ }
+ dur, err := caddy.ParseDuration(d.Val())
+ if err != nil {
+ return nil, err
+ }
+ return caddy.Duration(dur), nil
+}
+
+func parseOptACMEDNS(d *caddyfile.Dispenser, _ any) (any, error) {
+ if !d.Next() { // consume option name
+ return nil, d.ArgErr()
+ }
+ if !d.Next() { // get DNS module name
+ return nil, d.ArgErr()
+ }
+ modID := "dns.providers." + d.Val()
+ unm, err := caddyfile.UnmarshalModule(d, modID)
+ if err != nil {
+ return nil, err
+ }
+ prov, ok := unm.(certmagic.DNSProvider)
+ if !ok {
+ return nil, d.Errf("module %s (%T) is not a certmagic.DNSProvider", modID, unm)
+ }
+ return prov, nil
+}
+
+func parseOptACMEEAB(d *caddyfile.Dispenser, _ any) (any, error) {
+ eab := new(acme.EAB)
+ d.Next() // consume option name
+ if d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ for d.NextBlock(0) {
+ switch d.Val() {
+ case "key_id":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ eab.KeyID = d.Val()
+
+ case "mac_key":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ eab.MACKey = d.Val()
+
+ default:
+ return nil, d.Errf("unrecognized parameter '%s'", d.Val())
+ }
+ }
+ return eab, nil
+}
+
+func parseOptCertIssuer(d *caddyfile.Dispenser, existing any) (any, error) {
+ d.Next() // consume option name
+
+ var issuers []certmagic.Issuer
+ if existing != nil {
+ issuers = existing.([]certmagic.Issuer)
+ }
+
+ // get issuer module name
+ if !d.Next() {
+ return nil, d.ArgErr()
+ }
+ modID := "tls.issuance." + d.Val()
+ unm, err := caddyfile.UnmarshalModule(d, modID)
+ if err != nil {
+ return nil, err
+ }
+ iss, ok := unm.(certmagic.Issuer)
+ if !ok {
+ return nil, d.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm)
+ }
+ issuers = append(issuers, iss)
+ return issuers, nil
+}
+
+func parseOptSingleString(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
if !d.Next() {
return "", d.ArgErr()
}
@@ -175,72 +321,123 @@ func parseOptSingleString(d *caddyfile.Dispenser) (string, error) {
return val, nil
}
-func parseOptAdmin(d *caddyfile.Dispenser) (string, error) {
- if d.Next() {
- var listenAddress string
- if !d.AllArgs(&listenAddress) {
- return "", d.ArgErr()
- }
- if listenAddress == "" {
- listenAddress = caddy.DefaultAdminListen
- }
- return listenAddress, nil
+func parseOptDefaultBind(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
+
+ var addresses, protocols []string
+ addresses = d.RemainingArgs()
+
+ if len(addresses) == 0 {
+ addresses = append(addresses, "")
}
- return "", nil
+
+ for d.NextBlock(0) {
+ switch d.Val() {
+ case "protocols":
+ protocols = d.RemainingArgs()
+ if len(protocols) == 0 {
+ return nil, d.Errf("protocols requires one or more arguments")
+ }
+ default:
+ return nil, d.Errf("unknown subdirective: %s", d.Val())
+ }
+ }
+
+ return []ConfigValue{{Class: "bind", Value: addressesWithProtocols{
+ addresses: addresses,
+ protocols: protocols,
+ }}}, nil
}
-func parseOptOnDemand(d *caddyfile.Dispenser) (*caddytls.OnDemandConfig, error) {
- var ond *caddytls.OnDemandConfig
- for d.Next() {
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- for nesting := d.Nesting(); d.NextBlock(nesting); {
- switch d.Val() {
- case "ask":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- if ond == nil {
- ond = new(caddytls.OnDemandConfig)
- }
- ond.Ask = d.Val()
+func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
- case "interval":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- dur, err := time.ParseDuration(d.Val())
- if err != nil {
- return nil, err
- }
- if ond == nil {
- ond = new(caddytls.OnDemandConfig)
- }
- if ond.RateLimit == nil {
- ond.RateLimit = new(caddytls.RateLimit)
- }
- ond.RateLimit.Interval = caddy.Duration(dur)
-
- case "burst":
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- burst, err := strconv.Atoi(d.Val())
- if err != nil {
- return nil, err
- }
- if ond == nil {
- ond = new(caddytls.OnDemandConfig)
- }
- if ond.RateLimit == nil {
- ond.RateLimit = new(caddytls.RateLimit)
- }
- ond.RateLimit.Burst = burst
-
- default:
- return nil, d.Errf("unrecognized parameter '%s'", d.Val())
+ adminCfg := new(caddy.AdminConfig)
+ if d.NextArg() {
+ listenAddress := d.Val()
+ if listenAddress == "off" {
+ adminCfg.Disabled = true
+ if d.Next() { // Do not accept any remaining options including block
+ return nil, d.Err("No more option is allowed after turning off admin config")
}
+ } else {
+ adminCfg.Listen = listenAddress
+ if d.NextArg() { // At most 1 arg is allowed
+ return nil, d.ArgErr()
+ }
+ }
+ }
+ for d.NextBlock(0) {
+ switch d.Val() {
+ case "enforce_origin":
+ adminCfg.EnforceOrigin = true
+
+ case "origins":
+ adminCfg.Origins = d.RemainingArgs()
+
+ default:
+ return nil, d.Errf("unrecognized parameter '%s'", d.Val())
+ }
+ }
+ if adminCfg.Listen == "" && !adminCfg.Disabled {
+ adminCfg.Listen = caddy.DefaultAdminListen
+ }
+ return adminCfg, nil
+}
+
+func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
+ if d.NextArg() {
+ return nil, d.ArgErr()
+ }
+
+ var ond *caddytls.OnDemandConfig
+
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ switch d.Val() {
+ case "ask":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ if ond == nil {
+ ond = new(caddytls.OnDemandConfig)
+ }
+ if ond.PermissionRaw != nil {
+ return nil, d.Err("on-demand TLS permission module (or 'ask') already specified")
+ }
+ perm := caddytls.PermissionByHTTP{Endpoint: d.Val()}
+ ond.PermissionRaw = caddyconfig.JSONModuleObject(perm, "module", "http", nil)
+
+ case "permission":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ if ond == nil {
+ ond = new(caddytls.OnDemandConfig)
+ }
+ if ond.PermissionRaw != nil {
+ return nil, d.Err("on-demand TLS permission module (or 'ask') already specified")
+ }
+ modName := d.Val()
+ modID := "tls.permission." + modName
+ unm, err := caddyfile.UnmarshalModule(d, modID)
+ if err != nil {
+ return nil, err
+ }
+ perm, ok := unm.(caddytls.OnDemandPermission)
+ if !ok {
+ return nil, d.Errf("module %s (%T) is not an on-demand TLS permission module", modID, unm)
+ }
+ ond.PermissionRaw = caddyconfig.JSONModuleObject(perm, "module", modName, nil)
+
+ case "interval":
+ return nil, d.Errf("the on_demand_tls 'interval' option is no longer supported, remove it from your config")
+
+ case "burst":
+ return nil, d.Errf("the on_demand_tls 'burst' option is no longer supported, remove it from your config")
+
+ default:
+ return nil, d.Errf("unrecognized parameter '%s'", d.Val())
}
}
if ond == nil {
@@ -248,3 +445,128 @@ func parseOptOnDemand(d *caddyfile.Dispenser) (*caddytls.OnDemandConfig, error)
}
return ond, nil
}
+
+func parseOptPersistConfig(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
+ if !d.Next() {
+ return "", d.ArgErr()
+ }
+ val := d.Val()
+ if d.Next() {
+ return "", d.ArgErr()
+ }
+ if val != "off" {
+ return "", d.Errf("persist_config must be 'off'")
+ }
+ return val, nil
+}
+
+func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
+ val := d.RemainingArgs()
+ if len(val) == 0 {
+ return "", d.ArgErr()
+ }
+ for _, v := range val {
+ switch v {
+ case "off":
+ case "disable_redirects":
+ case "disable_certs":
+ case "ignore_loaded_certs":
+ case "prefer_wildcard":
+ break
+
+ default:
+ return "", d.Errf("auto_https must be one of 'off', 'disable_redirects', 'disable_certs', 'ignore_loaded_certs', or 'prefer_wildcard'")
+ }
+ }
+ return val, nil
+}
+
+func unmarshalCaddyfileMetricsOptions(d *caddyfile.Dispenser) (any, error) {
+ d.Next() // consume option name
+ metrics := new(caddyhttp.Metrics)
+ for d.NextBlock(0) {
+ switch d.Val() {
+ case "per_host":
+ metrics.PerHost = true
+ default:
+ return nil, d.Errf("unrecognized servers option '%s'", d.Val())
+ }
+ }
+ return metrics, nil
+}
+
+func parseMetricsOptions(d *caddyfile.Dispenser, _ any) (any, error) {
+ return unmarshalCaddyfileMetricsOptions(d)
+}
+
+func parseServerOptions(d *caddyfile.Dispenser, _ any) (any, error) {
+ return unmarshalCaddyfileServerOptions(d)
+}
+
+func parseOCSPStaplingOptions(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
+ var val string
+ if !d.AllArgs(&val) {
+ return nil, d.ArgErr()
+ }
+ if val != "off" {
+ return nil, d.Errf("invalid argument '%s'", val)
+ }
+ return certmagic.OCSPConfig{
+ DisableStapling: val == "off",
+ }, nil
+}
+
+// parseLogOptions parses the global log option. Syntax:
+//
+//	log [name] {
+//	    output  <writer_module> ...
+//	    format  <encoder_module> ...
+//	    level   <level>
+//	    include <namespaces...>
+//	    exclude <namespaces...>
+//	}
+//
+// When the name argument is unspecified, this directive modifies the default
+// logger.
+func parseLogOptions(d *caddyfile.Dispenser, existingVal any) (any, error) {
+ currentNames := make(map[string]struct{})
+ if existingVal != nil {
+ innerVals, ok := existingVal.([]ConfigValue)
+ if !ok {
+ return nil, d.Errf("existing log values of unexpected type: %T", existingVal)
+ }
+ for _, rawVal := range innerVals {
+ val, ok := rawVal.Value.(namedCustomLog)
+ if !ok {
+ return nil, d.Errf("existing log value of unexpected type: %T", existingVal)
+ }
+ currentNames[val.name] = struct{}{}
+ }
+ }
+
+ var warnings []caddyconfig.Warning
+ // Call out the same parser that handles server-specific log configuration.
+ configValues, err := parseLogHelper(
+ Helper{
+ Dispenser: d,
+ warnings: &warnings,
+ },
+ currentNames,
+ )
+ if err != nil {
+ return nil, err
+ }
+ if len(warnings) > 0 {
+ return nil, d.Errf("warnings found in parsing global log options: %+v", warnings)
+ }
+
+ return configValues, nil
+}
+
+func parseOptPreferredChains(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next()
+ return caddytls.ParseCaddyfilePreferredChainsOptions(d)
+}
diff --git a/caddyconfig/httpcaddyfile/options_test.go b/caddyconfig/httpcaddyfile/options_test.go
new file mode 100644
index 00000000..bc9e8813
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/options_test.go
@@ -0,0 +1,64 @@
+package httpcaddyfile
+
+import (
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ _ "github.com/caddyserver/caddy/v2/modules/logging"
+)
+
+func TestGlobalLogOptionSyntax(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ output string
+ expectError bool
+ }{
+ // NOTE: Additional test cases of successful Caddyfile parsing
+ // are present in: caddytest/integration/caddyfile_adapt/
+ {
+ input: `{
+ log default
+ }
+ `,
+ output: `{}`,
+ expectError: false,
+ },
+ {
+ input: `{
+ log example {
+ output file foo.log
+ }
+ log example {
+ format json
+ }
+ }
+ `,
+ expectError: true,
+ },
+ {
+ input: `{
+ log example /foo {
+ output file foo.log
+ }
+ }
+ `,
+ expectError: true,
+ },
+ } {
+
+ adapter := caddyfile.Adapter{
+ ServerType: ServerType{},
+ }
+
+ out, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+ if err != nil != tc.expectError {
+			t.Errorf("Test %d error expectation failed. Expected: %v, got %v", i, tc.expectError, err)
+ continue
+ }
+
+ if string(out) != tc.output {
+			t.Errorf("Test %d output mismatch. Expected: %s, got %s", i, tc.output, out)
+ }
+ }
+}
diff --git a/caddyconfig/httpcaddyfile/pkiapp.go b/caddyconfig/httpcaddyfile/pkiapp.go
new file mode 100644
index 00000000..c57263ba
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/pkiapp.go
@@ -0,0 +1,229 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddypki"
+)
+
+func init() {
+ RegisterGlobalOption("pki", parsePKIApp)
+}
+
+// parsePKIApp parses the global pki option. Syntax:
+//
+//	pki {
+//	    ca [<id>] {
+//	        name                  <name>
+//	        root_cn               <name>
+//	        intermediate_cn       <name>
+//	        intermediate_lifetime <duration>
+//	        root {
+//	            cert   <path>
+//	            key    <path>
+//	            format <format>
+//	        }
+//	        intermediate {
+//	            cert   <path>
+//	            key    <path>
+//	            format <format>
+//	        }
+//	    }
+//	}
+//
+// When the CA ID is unspecified, 'local' is assumed.
+func parsePKIApp(d *caddyfile.Dispenser, existingVal any) (any, error) {
+ d.Next() // consume app name
+
+ pki := &caddypki.PKI{
+ CAs: make(map[string]*caddypki.CA),
+ }
+ for d.NextBlock(0) {
+ switch d.Val() {
+ case "ca":
+ pkiCa := new(caddypki.CA)
+ if d.NextArg() {
+ pkiCa.ID = d.Val()
+ if d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ }
+ if pkiCa.ID == "" {
+ pkiCa.ID = caddypki.DefaultCAID
+ }
+
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ switch d.Val() {
+ case "name":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ pkiCa.Name = d.Val()
+
+ case "root_cn":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ pkiCa.RootCommonName = d.Val()
+
+ case "intermediate_cn":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ pkiCa.IntermediateCommonName = d.Val()
+
+ case "intermediate_lifetime":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ dur, err := caddy.ParseDuration(d.Val())
+ if err != nil {
+ return nil, err
+ }
+ pkiCa.IntermediateLifetime = caddy.Duration(dur)
+
+ case "root":
+ if pkiCa.Root == nil {
+ pkiCa.Root = new(caddypki.KeyPair)
+ }
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ switch d.Val() {
+ case "cert":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ pkiCa.Root.Certificate = d.Val()
+
+ case "key":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ pkiCa.Root.PrivateKey = d.Val()
+
+ case "format":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ pkiCa.Root.Format = d.Val()
+
+ default:
+ return nil, d.Errf("unrecognized pki ca root option '%s'", d.Val())
+ }
+ }
+
+ case "intermediate":
+ if pkiCa.Intermediate == nil {
+ pkiCa.Intermediate = new(caddypki.KeyPair)
+ }
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ switch d.Val() {
+ case "cert":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ pkiCa.Intermediate.Certificate = d.Val()
+
+ case "key":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ pkiCa.Intermediate.PrivateKey = d.Val()
+
+ case "format":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ pkiCa.Intermediate.Format = d.Val()
+
+ default:
+ return nil, d.Errf("unrecognized pki ca intermediate option '%s'", d.Val())
+ }
+ }
+
+ default:
+ return nil, d.Errf("unrecognized pki ca option '%s'", d.Val())
+ }
+ }
+
+ pki.CAs[pkiCa.ID] = pkiCa
+
+ default:
+ return nil, d.Errf("unrecognized pki option '%s'", d.Val())
+ }
+ }
+ return pki, nil
+}
+
+func (st ServerType) buildPKIApp(
+ pairings []sbAddrAssociation,
+ options map[string]any,
+ warnings []caddyconfig.Warning,
+) (*caddypki.PKI, []caddyconfig.Warning, error) {
+ skipInstallTrust := false
+ if _, ok := options["skip_install_trust"]; ok {
+ skipInstallTrust = true
+ }
+ falseBool := false
+
+ // Load the PKI app configured via global options
+ var pkiApp *caddypki.PKI
+ unwrappedPki, ok := options["pki"].(*caddypki.PKI)
+ if ok {
+ pkiApp = unwrappedPki
+ } else {
+ pkiApp = &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}
+ }
+ for _, ca := range pkiApp.CAs {
+ if skipInstallTrust {
+ ca.InstallTrust = &falseBool
+ }
+ pkiApp.CAs[ca.ID] = ca
+ }
+
+ // Add in the CAs configured via directives
+ for _, p := range pairings {
+ for _, sblock := range p.serverBlocks {
+ // find all the CAs that were defined and add them to the app config
+ // i.e. from any "acme_server" directives
+ for _, caCfgValue := range sblock.pile["pki.ca"] {
+ ca := caCfgValue.Value.(*caddypki.CA)
+ if skipInstallTrust {
+ ca.InstallTrust = &falseBool
+ }
+
+ // the CA might already exist from global options, so
+ // don't overwrite it in that case
+ if _, ok := pkiApp.CAs[ca.ID]; !ok {
+ pkiApp.CAs[ca.ID] = ca
+ }
+ }
+ }
+ }
+
+ // if there was no CAs defined in any of the servers,
+ // and we were requested to not install trust, then
+ // add one for the default/local CA to do so
+ if len(pkiApp.CAs) == 0 && skipInstallTrust {
+ ca := new(caddypki.CA)
+ ca.ID = caddypki.DefaultCAID
+ ca.InstallTrust = &falseBool
+ pkiApp.CAs[ca.ID] = ca
+ }
+
+ return pkiApp, warnings, nil
+}
diff --git a/caddyconfig/httpcaddyfile/serveroptions.go b/caddyconfig/httpcaddyfile/serveroptions.go
new file mode 100644
index 00000000..40a8af20
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/serveroptions.go
@@ -0,0 +1,344 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "encoding/json"
+ "fmt"
+ "slices"
+
+ "github.com/dustin/go-humanize"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// serverOptions collects server config overrides parsed from Caddyfile global options
+type serverOptions struct {
+ // If set, will only apply these options to servers that contain a
+ // listener address that matches exactly. If empty, will apply to all
+ // servers that were not already matched by another serverOptions.
+ ListenerAddress string
+
+ // These will all map 1:1 to the caddyhttp.Server struct
+ Name string
+ ListenerWrappersRaw []json.RawMessage
+ ReadTimeout caddy.Duration
+ ReadHeaderTimeout caddy.Duration
+ WriteTimeout caddy.Duration
+ IdleTimeout caddy.Duration
+ KeepAliveInterval caddy.Duration
+ MaxHeaderBytes int
+ EnableFullDuplex bool
+ Protocols []string
+ StrictSNIHost *bool
+ TrustedProxiesRaw json.RawMessage
+ TrustedProxiesStrict int
+ ClientIPHeaders []string
+ ShouldLogCredentials bool
+ Metrics *caddyhttp.Metrics
+ Trace bool // TODO: EXPERIMENTAL
+}
+
+func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) {
+ d.Next() // consume option name
+
+ serverOpts := serverOptions{}
+ if d.NextArg() {
+ serverOpts.ListenerAddress = d.Val()
+ if d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ }
+ for d.NextBlock(0) {
+ switch d.Val() {
+ case "name":
+ if serverOpts.ListenerAddress == "" {
+ return nil, d.Errf("cannot set a name for a server without a listener address")
+ }
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ serverOpts.Name = d.Val()
+
+ case "listener_wrappers":
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ modID := "caddy.listeners." + d.Val()
+ unm, err := caddyfile.UnmarshalModule(d, modID)
+ if err != nil {
+ return nil, err
+ }
+ listenerWrapper, ok := unm.(caddy.ListenerWrapper)
+ if !ok {
+ return nil, fmt.Errorf("module %s (%T) is not a listener wrapper", modID, unm)
+ }
+ jsonListenerWrapper := caddyconfig.JSONModuleObject(
+ listenerWrapper,
+ "wrapper",
+ listenerWrapper.(caddy.Module).CaddyModule().ID.Name(),
+ nil,
+ )
+ serverOpts.ListenerWrappersRaw = append(serverOpts.ListenerWrappersRaw, jsonListenerWrapper)
+ }
+
+ case "timeouts":
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ switch d.Val() {
+ case "read_body":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ dur, err := caddy.ParseDuration(d.Val())
+ if err != nil {
+ return nil, d.Errf("parsing read_body timeout duration: %v", err)
+ }
+ serverOpts.ReadTimeout = caddy.Duration(dur)
+
+ case "read_header":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ dur, err := caddy.ParseDuration(d.Val())
+ if err != nil {
+ return nil, d.Errf("parsing read_header timeout duration: %v", err)
+ }
+ serverOpts.ReadHeaderTimeout = caddy.Duration(dur)
+
+ case "write":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ dur, err := caddy.ParseDuration(d.Val())
+ if err != nil {
+ return nil, d.Errf("parsing write timeout duration: %v", err)
+ }
+ serverOpts.WriteTimeout = caddy.Duration(dur)
+
+ case "idle":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ dur, err := caddy.ParseDuration(d.Val())
+ if err != nil {
+ return nil, d.Errf("parsing idle timeout duration: %v", err)
+ }
+ serverOpts.IdleTimeout = caddy.Duration(dur)
+
+ default:
+ return nil, d.Errf("unrecognized timeouts option '%s'", d.Val())
+ }
+ }
+ case "keepalive_interval":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ dur, err := caddy.ParseDuration(d.Val())
+ if err != nil {
+ return nil, d.Errf("parsing keepalive interval duration: %v", err)
+ }
+ serverOpts.KeepAliveInterval = caddy.Duration(dur)
+
+ case "max_header_size":
+ var sizeStr string
+ if !d.AllArgs(&sizeStr) {
+ return nil, d.ArgErr()
+ }
+ size, err := humanize.ParseBytes(sizeStr)
+ if err != nil {
+ return nil, d.Errf("parsing max_header_size: %v", err)
+ }
+ serverOpts.MaxHeaderBytes = int(size)
+
+ case "enable_full_duplex":
+ if d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ serverOpts.EnableFullDuplex = true
+
+ case "log_credentials":
+ if d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ serverOpts.ShouldLogCredentials = true
+
+ case "protocols":
+ protos := d.RemainingArgs()
+ for _, proto := range protos {
+ if proto != "h1" && proto != "h2" && proto != "h2c" && proto != "h3" {
+ return nil, d.Errf("unknown protocol '%s': expected h1, h2, h2c, or h3", proto)
+ }
+ if slices.Contains(serverOpts.Protocols, proto) {
+ return nil, d.Errf("protocol %s specified more than once", proto)
+ }
+ serverOpts.Protocols = append(serverOpts.Protocols, proto)
+ }
+ if nesting := d.Nesting(); d.NextBlock(nesting) {
+ return nil, d.ArgErr()
+ }
+
+ case "strict_sni_host":
+ if d.NextArg() && d.Val() != "insecure_off" && d.Val() != "on" {
+ return nil, d.Errf("strict_sni_host only supports 'on' or 'insecure_off', got '%s'", d.Val())
+ }
+ boolVal := true
+ if d.Val() == "insecure_off" {
+ boolVal = false
+ }
+ serverOpts.StrictSNIHost = &boolVal
+
+ case "trusted_proxies":
+ if !d.NextArg() {
+ return nil, d.Err("trusted_proxies expects an IP range source module name as its first argument")
+ }
+ modID := "http.ip_sources." + d.Val()
+ unm, err := caddyfile.UnmarshalModule(d, modID)
+ if err != nil {
+ return nil, err
+ }
+ source, ok := unm.(caddyhttp.IPRangeSource)
+ if !ok {
+ return nil, fmt.Errorf("module %s (%T) is not an IP range source", modID, unm)
+ }
+ jsonSource := caddyconfig.JSONModuleObject(
+ source,
+ "source",
+ source.(caddy.Module).CaddyModule().ID.Name(),
+ nil,
+ )
+ serverOpts.TrustedProxiesRaw = jsonSource
+
+ case "trusted_proxies_strict":
+ if d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ serverOpts.TrustedProxiesStrict = 1
+
+ case "client_ip_headers":
+ headers := d.RemainingArgs()
+ for _, header := range headers {
+ if slices.Contains(serverOpts.ClientIPHeaders, header) {
+ return nil, d.Errf("client IP header %s specified more than once", header)
+ }
+ serverOpts.ClientIPHeaders = append(serverOpts.ClientIPHeaders, header)
+ }
+ if nesting := d.Nesting(); d.NextBlock(nesting) {
+ return nil, d.ArgErr()
+ }
+
+ case "metrics":
+ caddy.Log().Warn("The nested 'metrics' option inside `servers` is deprecated and will be removed in the next major version. Use the global 'metrics' option instead.")
+ serverOpts.Metrics = new(caddyhttp.Metrics)
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ switch d.Val() {
+ case "per_host":
+ serverOpts.Metrics.PerHost = true
+ }
+ }
+
+ case "trace":
+ if d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ serverOpts.Trace = true
+
+ default:
+ return nil, d.Errf("unrecognized servers option '%s'", d.Val())
+ }
+ }
+ return serverOpts, nil
+}
+
+// applyServerOptions sets the server options on the appropriate servers
+func applyServerOptions(
+ servers map[string]*caddyhttp.Server,
+ options map[string]any,
+ _ *[]caddyconfig.Warning,
+) error {
+ serverOpts, ok := options["servers"].([]serverOptions)
+ if !ok {
+ return nil
+ }
+
+ // check for duplicate names, which would clobber the config
+ existingNames := map[string]bool{}
+ for _, opts := range serverOpts {
+ if opts.Name == "" {
+ continue
+ }
+ if existingNames[opts.Name] {
+ return fmt.Errorf("cannot use duplicate server name '%s'", opts.Name)
+ }
+ existingNames[opts.Name] = true
+ }
+
+ // collect the server name overrides
+ nameReplacements := map[string]string{}
+
+ for key, server := range servers {
+ // find the options that apply to this server
+ optsIndex := slices.IndexFunc(serverOpts, func(s serverOptions) bool {
+ return s.ListenerAddress == "" || slices.Contains(server.Listen, s.ListenerAddress)
+ })
+
+ // if none apply, then move to the next server
+ if optsIndex == -1 {
+ continue
+ }
+ opts := serverOpts[optsIndex]
+
+ // set all the options
+ server.ListenerWrappersRaw = opts.ListenerWrappersRaw
+ server.ReadTimeout = opts.ReadTimeout
+ server.ReadHeaderTimeout = opts.ReadHeaderTimeout
+ server.WriteTimeout = opts.WriteTimeout
+ server.IdleTimeout = opts.IdleTimeout
+ server.KeepAliveInterval = opts.KeepAliveInterval
+ server.MaxHeaderBytes = opts.MaxHeaderBytes
+ server.EnableFullDuplex = opts.EnableFullDuplex
+ server.Protocols = opts.Protocols
+ server.StrictSNIHost = opts.StrictSNIHost
+ server.TrustedProxiesRaw = opts.TrustedProxiesRaw
+ server.ClientIPHeaders = opts.ClientIPHeaders
+ server.TrustedProxiesStrict = opts.TrustedProxiesStrict
+ server.Metrics = opts.Metrics
+ if opts.ShouldLogCredentials {
+ if server.Logs == nil {
+ server.Logs = new(caddyhttp.ServerLogConfig)
+ }
+ server.Logs.ShouldLogCredentials = opts.ShouldLogCredentials
+ }
+ if opts.Trace {
+ // TODO: THIS IS EXPERIMENTAL (MAY 2024)
+ if server.Logs == nil {
+ server.Logs = new(caddyhttp.ServerLogConfig)
+ }
+ server.Logs.Trace = opts.Trace
+ }
+
+ if opts.Name != "" {
+ nameReplacements[key] = opts.Name
+ }
+ }
+
+ // rename the servers if marked to do so
+ for old, new := range nameReplacements {
+ servers[new] = servers[old]
+ delete(servers, old)
+ }
+
+ return nil
+}
diff --git a/caddyconfig/httpcaddyfile/shorthands.go b/caddyconfig/httpcaddyfile/shorthands.go
new file mode 100644
index 00000000..ca6e4f92
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/shorthands.go
@@ -0,0 +1,102 @@
+package httpcaddyfile
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+)
+
+type ComplexShorthandReplacer struct {
+ search *regexp.Regexp
+ replace string
+}
+
+type ShorthandReplacer struct {
+ complex []ComplexShorthandReplacer
+ simple *strings.Replacer
+}
+
+func NewShorthandReplacer() ShorthandReplacer {
+ // replace shorthand placeholders (which are convenient
+ // when writing a Caddyfile) with their actual placeholder
+ // identifiers or variable names
+ replacer := strings.NewReplacer(placeholderShorthands()...)
+
+	// these are placeholders that allow user-defined final
+	// parameters, but we still want to provide a shorthand
+	// for those, so we use a regexp to replace them
+ regexpReplacements := []ComplexShorthandReplacer{
+ {regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"},
+ {regexp.MustCompile(`{cookie\.([\w-]*)}`), "{http.request.cookie.$1}"},
+ {regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"},
+ {regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"},
+ {regexp.MustCompile(`{file\.([\w-]*)}`), "{http.request.uri.path.file.$1}"},
+ {regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"},
+ {regexp.MustCompile(`{re\.([\w-\.]*)}`), "{http.regexp.$1}"},
+ {regexp.MustCompile(`{vars\.([\w-]*)}`), "{http.vars.$1}"},
+ {regexp.MustCompile(`{rp\.([\w-\.]*)}`), "{http.reverse_proxy.$1}"},
+ {regexp.MustCompile(`{resp\.([\w-\.]*)}`), "{http.intercept.$1}"},
+ {regexp.MustCompile(`{err\.([\w-\.]*)}`), "{http.error.$1}"},
+ {regexp.MustCompile(`{file_match\.([\w-]*)}`), "{http.matchers.file.$1}"},
+ }
+
+ return ShorthandReplacer{
+ complex: regexpReplacements,
+ simple: replacer,
+ }
+}
+
+// placeholderShorthands returns a slice of old-new string pairs,
+// where the left of the pair is a placeholder shorthand that may
+// be used in the Caddyfile, and the right is the replacement.
+func placeholderShorthands() []string {
+ return []string{
+ "{host}", "{http.request.host}",
+ "{hostport}", "{http.request.hostport}",
+ "{port}", "{http.request.port}",
+ "{orig_method}", "{http.request.orig_method}",
+ "{orig_uri}", "{http.request.orig_uri}",
+ "{orig_path}", "{http.request.orig_uri.path}",
+ "{orig_dir}", "{http.request.orig_uri.path.dir}",
+ "{orig_file}", "{http.request.orig_uri.path.file}",
+ "{orig_query}", "{http.request.orig_uri.query}",
+ "{orig_?query}", "{http.request.orig_uri.prefixed_query}",
+ "{method}", "{http.request.method}",
+ "{uri}", "{http.request.uri}",
+ "{path}", "{http.request.uri.path}",
+ "{dir}", "{http.request.uri.path.dir}",
+ "{file}", "{http.request.uri.path.file}",
+ "{query}", "{http.request.uri.query}",
+ "{?query}", "{http.request.uri.prefixed_query}",
+ "{remote}", "{http.request.remote}",
+ "{remote_host}", "{http.request.remote.host}",
+ "{remote_port}", "{http.request.remote.port}",
+ "{scheme}", "{http.request.scheme}",
+ "{uuid}", "{http.request.uuid}",
+ "{tls_cipher}", "{http.request.tls.cipher_suite}",
+ "{tls_version}", "{http.request.tls.version}",
+ "{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
+ "{tls_client_issuer}", "{http.request.tls.client.issuer}",
+ "{tls_client_serial}", "{http.request.tls.client.serial}",
+ "{tls_client_subject}", "{http.request.tls.client.subject}",
+ "{tls_client_certificate_pem}", "{http.request.tls.client.certificate_pem}",
+ "{tls_client_certificate_der_base64}", "{http.request.tls.client.certificate_der_base64}",
+ "{upstream_hostport}", "{http.reverse_proxy.upstream.hostport}",
+ "{client_ip}", "{http.vars.client_ip}",
+ }
+}
+
+// ApplyToSegment replaces each shorthand placeholder with its full placeholder, understandable by Caddy.
+func (s ShorthandReplacer) ApplyToSegment(segment *caddyfile.Segment) {
+ if segment != nil {
+ for i := 0; i < len(*segment); i++ {
+ // simple string replacements
+ (*segment)[i].Text = s.simple.Replace((*segment)[i].Text)
+ // complex regexp replacements
+ for _, r := range s.complex {
+ (*segment)[i].Text = r.search.ReplaceAllString((*segment)[i].Text, r.replace)
+ }
+ }
+ }
+}
diff --git a/caddyconfig/httpcaddyfile/testdata/import_variadic.txt b/caddyconfig/httpcaddyfile/testdata/import_variadic.txt
new file mode 100644
index 00000000..f1e50e01
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/testdata/import_variadic.txt
@@ -0,0 +1,9 @@
+(t2) {
+ respond 200 {
+ body {args[:]}
+ }
+}
+
+:8082 {
+ import t2 false
+}
\ No newline at end of file
diff --git a/caddyconfig/httpcaddyfile/testdata/import_variadic_snippet.txt b/caddyconfig/httpcaddyfile/testdata/import_variadic_snippet.txt
new file mode 100644
index 00000000..a02fcf90
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/testdata/import_variadic_snippet.txt
@@ -0,0 +1,9 @@
+(t1) {
+ respond 200 {
+ body {args[:]}
+ }
+}
+
+:8081 {
+ import t1 false
+}
\ No newline at end of file
diff --git a/caddyconfig/httpcaddyfile/testdata/import_variadic_with_import.txt b/caddyconfig/httpcaddyfile/testdata/import_variadic_with_import.txt
new file mode 100644
index 00000000..ab1b32d9
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/testdata/import_variadic_with_import.txt
@@ -0,0 +1,15 @@
+(t1) {
+ respond 200 {
+ body {args[:]}
+ }
+}
+
+:8081 {
+ import t1 false
+}
+
+import import_variadic.txt
+
+:8083 {
+ import t2 true
+}
\ No newline at end of file
diff --git a/caddyconfig/httpcaddyfile/tlsapp.go b/caddyconfig/httpcaddyfile/tlsapp.go
index f66fcae7..ea5ac92c 100644
--- a/caddyconfig/httpcaddyfile/tlsapp.go
+++ b/caddyconfig/httpcaddyfile/tlsapp.go
@@ -19,53 +19,57 @@ import (
"encoding/json"
"fmt"
"reflect"
+ "slices"
"sort"
"strconv"
+ "strings"
+
+ "github.com/caddyserver/certmagic"
+ "github.com/mholt/acmez/v2/acme"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddytls"
- "github.com/caddyserver/certmagic"
)
func (st ServerType) buildTLSApp(
pairings []sbAddrAssociation,
- options map[string]interface{},
+ options map[string]any,
warnings []caddyconfig.Warning,
) (*caddytls.TLS, []caddyconfig.Warning, error) {
-
tlsApp := &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}
var certLoaders []caddytls.CertificateLoader
- httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
- if hsp, ok := options["https_port"].(int); ok {
- httpsPort = strconv.Itoa(hsp)
+ httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
+ if hp, ok := options["http_port"].(int); ok {
+ httpPort = strconv.Itoa(hp)
+ }
+ autoHTTPS := []string{}
+ if ah, ok := options["auto_https"].([]string); ok {
+ autoHTTPS = ah
}
- // count how many server blocks have a TLS-enabled key with
- // no host, and find all hosts that share a server block with
- // a hostless key, so that they don't get forgotten/omitted
- // by auto-HTTPS (since they won't appear in route matchers)
- var serverBlocksWithTLSHostlessKey int
- hostsSharedWithHostlessKey := make(map[string]struct{})
- for _, pair := range pairings {
- for _, sb := range pair.serverBlocks {
- for _, addr := range sb.keys {
- if addr.Host == "" {
- // this address has no hostname, but if it's explicitly set
- // to HTTPS, then we need to count it as being TLS-enabled
- if addr.Scheme == "https" || addr.Port == httpsPort {
- serverBlocksWithTLSHostlessKey++
+ // find all hosts that share a server block with a hostless
+ // key, so that they don't get forgotten/omitted by auto-HTTPS
+ // (since they won't appear in route matchers)
+ httpsHostsSharedWithHostlessKey := make(map[string]struct{})
+ if !slices.Contains(autoHTTPS, "off") {
+ for _, pair := range pairings {
+ for _, sb := range pair.serverBlocks {
+ for _, addr := range sb.parsedKeys {
+ if addr.Host != "" {
+ continue
}
+
// this server block has a hostless key, now
// go through and add all the hosts to the set
- for _, otherAddr := range sb.keys {
+ for _, otherAddr := range sb.parsedKeys {
if otherAddr.Original == addr.Original {
continue
}
- if otherAddr.Host != "" {
- hostsSharedWithHostlessKey[otherAddr.Host] = struct{}{}
+ if otherAddr.Host != "" && otherAddr.Scheme != "http" && otherAddr.Port != httpPort {
+ httpsHostsSharedWithHostlessKey[otherAddr.Host] = struct{}{}
}
}
break
@@ -74,166 +78,234 @@ func (st ServerType) buildTLSApp(
}
}
+ // a catch-all automation policy is used as a "default" for all subjects that
+ // don't have custom configuration explicitly associated with them; this
+	// is only added if the global settings or defaults are non-empty
catchAllAP, err := newBaseAutomationPolicy(options, warnings, false)
if err != nil {
return nil, warnings, err
}
+ if catchAllAP != nil {
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, catchAllAP)
+ }
+
+	// collect all hosts that have a wildcard in them, and aren't HTTP
+ wildcardHosts := []string{}
+ for _, p := range pairings {
+ var addresses []string
+ for _, addressWithProtocols := range p.addressesWithProtocols {
+ addresses = append(addresses, addressWithProtocols.address)
+ }
+ if !listenersUseAnyPortOtherThan(addresses, httpPort) {
+ continue
+ }
+ for _, sblock := range p.serverBlocks {
+ for _, addr := range sblock.parsedKeys {
+ if strings.HasPrefix(addr.Host, "*.") {
+ wildcardHosts = append(wildcardHosts, addr.Host[2:])
+ }
+ }
+ }
+ }
for _, p := range pairings {
+ // avoid setting up TLS automation policies for a server that is HTTP-only
+ var addresses []string
+ for _, addressWithProtocols := range p.addressesWithProtocols {
+ addresses = append(addresses, addressWithProtocols.address)
+ }
+ if !listenersUseAnyPortOtherThan(addresses, httpPort) {
+ continue
+ }
+
for _, sblock := range p.serverBlocks {
+ // check the scheme of all the site addresses,
+ // skip building AP if they all had http://
+ if sblock.isAllHTTP() {
+ continue
+ }
+
// get values that populate an automation policy for this block
- var ap *caddytls.AutomationPolicy
+ ap, err := newBaseAutomationPolicy(options, warnings, true)
+ if err != nil {
+ return nil, warnings, err
+ }
+
+ // make a plain copy so we can compare whether we made any changes
+ apCopy, err := newBaseAutomationPolicy(options, warnings, true)
+ if err != nil {
+ return nil, warnings, err
+ }
sblockHosts := sblock.hostsFromKeys(false)
- if len(sblockHosts) == 0 {
+ if len(sblockHosts) == 0 && catchAllAP != nil {
ap = catchAllAP
}
// on-demand tls
if _, ok := sblock.pile["tls.on_demand"]; ok {
- if ap == nil {
- var err error
- ap, err = newBaseAutomationPolicy(options, warnings, true)
- if err != nil {
- return nil, warnings, err
- }
- }
ap.OnDemand = true
}
+ // reuse private keys tls
+ if _, ok := sblock.pile["tls.reuse_private_keys"]; ok {
+ ap.ReusePrivateKeys = true
+ }
+
+ if keyTypeVals, ok := sblock.pile["tls.key_type"]; ok {
+ ap.KeyType = keyTypeVals[0].Value.(string)
+ }
+
// certificate issuers
if issuerVals, ok := sblock.pile["tls.cert_issuer"]; ok {
+ var issuers []certmagic.Issuer
for _, issuerVal := range issuerVals {
- issuer := issuerVal.Value.(certmagic.Issuer)
- if ap == nil {
- var err error
- ap, err = newBaseAutomationPolicy(options, warnings, true)
- if err != nil {
- return nil, warnings, err
- }
- }
- if ap == catchAllAP && !reflect.DeepEqual(ap.Issuer, issuer) {
- return nil, warnings, fmt.Errorf("automation policy from site block is also default/catch-all policy because of key without hostname, and the two are in conflict: %#v != %#v", ap.Issuer, issuer)
- }
- ap.Issuer = issuer
+ issuers = append(issuers, issuerVal.Value.(certmagic.Issuer))
}
+ if ap == catchAllAP && !reflect.DeepEqual(ap.Issuers, issuers) {
+ // this more correctly implements an error check that was removed
+ // below; try it with this config:
+ //
+ // :443 {
+ // bind 127.0.0.1
+ // }
+ //
+ // :443 {
+ // bind ::1
+ // tls {
+ // issuer acme
+ // }
+ // }
+ return nil, warnings, fmt.Errorf("automation policy from site block is also default/catch-all policy because of key without hostname, and the two are in conflict: %#v != %#v", ap.Issuers, issuers)
+ }
+ ap.Issuers = issuers
}
+ // certificate managers
+ if certManagerVals, ok := sblock.pile["tls.cert_manager"]; ok {
+ for _, certManager := range certManagerVals {
+ certGetterName := certManager.Value.(caddy.Module).CaddyModule().ID.Name()
+ ap.ManagersRaw = append(ap.ManagersRaw, caddyconfig.JSONModuleObject(certManager.Value, "via", certGetterName, &warnings))
+ }
+ }
// custom bind host
for _, cfgVal := range sblock.pile["bind"] {
- // either an existing issuer is already configured (and thus, ap is not
- // nil), or we need to configure an issuer, so we need ap to be non-nil
- if ap == nil {
- ap, err = newBaseAutomationPolicy(options, warnings, true)
- if err != nil {
- return nil, warnings, err
+ for _, iss := range ap.Issuers {
+ // if an issuer was already configured and it is NOT an ACME issuer,
+ // skip, since we intend to adjust only ACME issuers; ensure we
+ // include any issuer that embeds/wraps an underlying ACME issuer
+ var acmeIssuer *caddytls.ACMEIssuer
+ if acmeWrapper, ok := iss.(acmeCapable); ok {
+ acmeIssuer = acmeWrapper.GetACMEIssuer()
+ }
+ if acmeIssuer == nil {
+ continue
}
- }
- // if an issuer was already configured and it is NOT an ACME
- // issuer, skip, since we intend to adjust only ACME issuers
- var acmeIssuer *caddytls.ACMEIssuer
- if ap.Issuer != nil {
- var ok bool
- if acmeIssuer, ok = ap.Issuer.(*caddytls.ACMEIssuer); !ok {
- break
+ // proceed to configure the ACME issuer's bind host, without
+ // overwriting any existing settings
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.BindHost == "" {
+ // only binding to one host is supported
+ var bindHost string
+ if asserted, ok := cfgVal.Value.(addressesWithProtocols); ok && len(asserted.addresses) > 0 {
+ bindHost = asserted.addresses[0]
+ }
+ acmeIssuer.Challenges.BindHost = bindHost
}
}
-
- // proceed to configure the ACME issuer's bind host, without
- // overwriting any existing settings
- if acmeIssuer == nil {
- acmeIssuer = new(caddytls.ACMEIssuer)
- }
- if acmeIssuer.Challenges == nil {
- acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
- }
- if acmeIssuer.Challenges.BindHost == "" {
- // only binding to one host is supported
- var bindHost string
- if bindHosts, ok := cfgVal.Value.([]string); ok && len(bindHosts) > 0 {
- bindHost = bindHosts[0]
- }
- acmeIssuer.Challenges.BindHost = bindHost
- }
- ap.Issuer = acmeIssuer // we'll encode it later
}
- if ap != nil {
- if ap.Issuer != nil {
- // encode issuer now that it's all set up
- issuerName := ap.Issuer.(caddy.Module).CaddyModule().ID.Name()
- ap.IssuerRaw = caddyconfig.JSONModuleObject(ap.Issuer, "module", issuerName, &warnings)
- }
+ // we used to ensure this block is allowed to create an automation policy;
+ // doing so was forbidden if it has a key with no host (i.e. ":443")
+ // and if there is a different server block that also has a key with no
+ // host -- since a key with no host matches any host, we need its
+ // associated automation policy to have an empty Subjects list, i.e. no
+ // host filter, which is indistinguishable between the two server blocks
+ // because automation is not done in the context of a particular server...
+ // this is an example of a poor mapping from Caddyfile to JSON but that's
+ // the least-leaky abstraction I could figure out -- however, this check
+ // was preventing certain listeners, like those provided by plugins, from
+ // being used as desired (see the Tailscale listener plugin), so I removed
+ // the check: and I think since I originally wrote the check I added a new
+ // check above which *properly* detects this ambiguity without breaking the
+ // listener plugin; see the check above with a commented example config
+ if len(sblockHosts) == 0 && catchAllAP == nil {
+ // this server block has a key with no hosts, but there is not yet
+ // a catch-all automation policy (probably because no global options
+ // were set), so this one becomes it
+ catchAllAP = ap
+ }
- // first make sure this block is allowed to create an automation policy;
- // doing so is forbidden if it has a key with no host (i.e. ":443")
- // and if there is a different server block that also has a key with no
- // host -- since a key with no host matches any host, we need its
- // associated automation policy to have an empty Subjects list, i.e. no
- // host filter, which is indistinguishable between the two server blocks
- // because automation is not done in the context of a particular server...
- // this is an example of a poor mapping from Caddyfile to JSON but that's
- // the least-leaky abstraction I could figure out
- if len(sblockHosts) == 0 {
- if serverBlocksWithTLSHostlessKey > 1 {
- // this server block and at least one other has a key with no host,
- // making the two indistinguishable; it is misleading to define such
- // a policy within one server block since it actually will apply to
- // others as well
- return nil, warnings, fmt.Errorf("cannot make a TLS automation policy from a server block that has a host-less address when there are other TLS-enabled server block addresses lacking a host")
+ hostsNotHTTP := sblock.hostsFromKeysNotHTTP(httpPort)
+ sort.Strings(hostsNotHTTP) // solely for deterministic test results
+
+			// if we prefer wildcards and the AP is unchanged,
+ // then we can skip this AP because it should be covered
+ // by an AP with a wildcard
+ if slices.Contains(autoHTTPS, "prefer_wildcard") {
+ if hostsCoveredByWildcard(hostsNotHTTP, wildcardHosts) &&
+ reflect.DeepEqual(ap, apCopy) {
+ continue
+ }
+ }
+
+ // associate our new automation policy with this server block's hosts
+ ap.SubjectsRaw = hostsNotHTTP
+
+ // if a combination of public and internal names were given
+ // for this same server block and no issuer was specified, we
+ // need to separate them out in the automation policies so
+ // that the internal names can use the internal issuer and
+ // the other names can use the default/public/ACME issuer
+ var ap2 *caddytls.AutomationPolicy
+ if len(ap.Issuers) == 0 {
+ var internal, external []string
+ for _, s := range ap.SubjectsRaw {
+ // do not create Issuers for Tailscale domains; they will be given a Manager instead
+ if isTailscaleDomain(s) {
+ continue
}
- if catchAllAP == nil {
- // this server block has a key with no hosts, but there is not yet
- // a catch-all automation policy (probably because no global options
- // were set), so this one becomes it
- catchAllAP = ap
+ if !certmagic.SubjectQualifiesForCert(s) {
+ return nil, warnings, fmt.Errorf("subject does not qualify for certificate: '%s'", s)
+ }
+ // we don't use certmagic.SubjectQualifiesForPublicCert() because of one nuance:
+ // names like *.*.tld that may not qualify for a public certificate are actually
+ // fine when used with OnDemand, since OnDemand (currently) does not obtain
+ // wildcards (if it ever does, there will be a separate config option to enable
+ // it that we would need to check here) since the hostname is known at handshake;
+ // and it is unexpected to switch to internal issuer when the user wants to get
+ // regular certificates on-demand for a class of certs like *.*.tld.
+ if subjectQualifiesForPublicCert(ap, s) {
+ external = append(external, s)
+ } else {
+ internal = append(internal, s)
}
}
-
- // associate our new automation policy with this server block's hosts,
- // unless, of course, the server block has a key with no hosts, in which
- // case its automation policy becomes or blends with the default/global
- // automation policy because, of necessity, it applies to all hostnames
- // (i.e. it has no Subjects filter) -- in that case, we'll append it last
- if ap != catchAllAP {
- ap.Subjects = sblockHosts
-
- // if a combination of public and internal names were given
- // for this same server block and no issuer was specified, we
- // need to separate them out in the automation policies so
- // that the internal names can use the internal issuer and
- // the other names can use the default/public/ACME issuer
- var ap2 *caddytls.AutomationPolicy
- if ap.Issuer == nil {
- var internal, external []string
- for _, s := range ap.Subjects {
- if certmagic.SubjectQualifiesForPublicCert(s) {
- external = append(external, s)
- } else {
- internal = append(internal, s)
- }
- }
- if len(external) > 0 && len(internal) > 0 {
- ap.Subjects = external
- apCopy := *ap
- ap2 = &apCopy
- ap2.Subjects = internal
- ap2.IssuerRaw = caddyconfig.JSONModuleObject(caddytls.InternalIssuer{}, "module", "internal", &warnings)
- }
- }
- if tlsApp.Automation == nil {
- tlsApp.Automation = new(caddytls.AutomationConfig)
- }
- tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap)
- if ap2 != nil {
- tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap2)
- }
+ if len(external) > 0 && len(internal) > 0 {
+ ap.SubjectsRaw = external
+ apCopy := *ap
+ ap2 = &apCopy
+ ap2.SubjectsRaw = internal
+ ap2.IssuersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(caddytls.InternalIssuer{}, "module", "internal", &warnings)}
}
}
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap)
+ if ap2 != nil {
+ tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap2)
+ }
+
// certificate loaders
- if clVals, ok := sblock.pile["tls.certificate_loader"]; ok {
+ if clVals, ok := sblock.pile["tls.cert_loader"]; ok {
for _, clVal := range clVals {
certLoaders = append(certLoaders, clVal.Value.(caddytls.CertificateLoader))
}
@@ -277,6 +349,45 @@ func (st ServerType) buildTLSApp(
tlsApp.Automation.OnDemand = onDemand
}
+	// if the storage check is set to "off", disable the storage availability check
+ if sc, ok := options["storage_check"].(string); ok && sc == "off" {
+ tlsApp.DisableStorageCheck = true
+ }
+
+	// if the storage clean interval is a boolean (false, i.e. "off"), disable storage cleaning
+ if sci, ok := options["storage_clean_interval"].(bool); ok && !sci {
+ tlsApp.DisableStorageClean = true
+ }
+
+ // set the storage clean interval if configured
+ if storageCleanInterval, ok := options["storage_clean_interval"].(caddy.Duration); ok {
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.StorageCleanInterval = storageCleanInterval
+ }
+
+ // set the expired certificates renew interval if configured
+ if renewCheckInterval, ok := options["renew_interval"].(caddy.Duration); ok {
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.RenewCheckInterval = renewCheckInterval
+ }
+
+ // set the OCSP check interval if configured
+ if ocspCheckInterval, ok := options["ocsp_interval"].(caddy.Duration); ok {
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.OCSPCheckInterval = ocspCheckInterval
+ }
+
+ // set whether OCSP stapling should be disabled for manually-managed certificates
+ if ocspConfig, ok := options["ocsp_stapling"].(certmagic.OCSPConfig); ok {
+ tlsApp.DisableOCSPStapling = ocspConfig.DisableStapling
+ }
+
// if any hostnames appear on the same server block as a key with
// no host, they will not be used with route matchers because the
// hostless key matches all hosts, therefore, it wouldn't be
@@ -286,47 +397,83 @@ func (st ServerType) buildTLSApp(
// get internal certificates by default rather than ACME
var al caddytls.AutomateLoader
internalAP := &caddytls.AutomationPolicy{
- IssuerRaw: json.RawMessage(`{"module":"internal"}`),
+ IssuersRaw: []json.RawMessage{json.RawMessage(`{"module":"internal"}`)},
}
- for h := range hostsSharedWithHostlessKey {
- al = append(al, h)
- if !certmagic.SubjectQualifiesForPublicCert(h) {
- internalAP.Subjects = append(internalAP.Subjects, h)
+ if !slices.Contains(autoHTTPS, "off") && !slices.Contains(autoHTTPS, "disable_certs") {
+ for h := range httpsHostsSharedWithHostlessKey {
+ al = append(al, h)
+ if !certmagic.SubjectQualifiesForPublicCert(h) {
+ internalAP.SubjectsRaw = append(internalAP.SubjectsRaw, h)
+ }
}
}
if len(al) > 0 {
tlsApp.CertificatesRaw["automate"] = caddyconfig.JSON(al, &warnings)
}
- if len(internalAP.Subjects) > 0 {
+ if len(internalAP.SubjectsRaw) > 0 {
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)
}
tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, internalAP)
}
- // if there is a global/catch-all automation policy, ensure it goes last
- if catchAllAP != nil {
- // first, encode its issuer, if there is one
- if catchAllAP.Issuer != nil {
- issuerName := catchAllAP.Issuer.(caddy.Module).CaddyModule().ID.Name()
- catchAllAP.IssuerRaw = caddyconfig.JSONModuleObject(catchAllAP.Issuer, "module", issuerName, &warnings)
- }
+ // if there are any global options set for issuers (ACME ones in particular), make sure they
+ // take effect in every automation policy that does not have any issuers
+ if tlsApp.Automation != nil {
+ globalEmail := options["email"]
+ globalACMECA := options["acme_ca"]
+ globalACMECARoot := options["acme_ca_root"]
+ globalACMEDNS := options["acme_dns"]
+ globalACMEEAB := options["acme_eab"]
+ globalPreferredChains := options["preferred_chains"]
+ hasGlobalACMEDefaults := globalEmail != nil || globalACMECA != nil || globalACMECARoot != nil || globalACMEDNS != nil || globalACMEEAB != nil || globalPreferredChains != nil
+ if hasGlobalACMEDefaults {
+ for i := 0; i < len(tlsApp.Automation.Policies); i++ {
+ ap := tlsApp.Automation.Policies[i]
+ if len(ap.Issuers) == 0 && automationPolicyHasAllPublicNames(ap) {
+ // for public names, create default issuers which will later be filled in with configured global defaults
+ // (internal names will implicitly use the internal issuer at auto-https time)
+ emailStr, _ := globalEmail.(string)
+ ap.Issuers = caddytls.DefaultIssuers(emailStr)
- // then append it to the end of the policies list
- if tlsApp.Automation == nil {
- tlsApp.Automation = new(caddytls.AutomationConfig)
+ // if a specific endpoint is configured, can't use multiple default issuers
+ if globalACMECA != nil {
+ ap.Issuers = []certmagic.Issuer{new(caddytls.ACMEIssuer)}
+ }
+ }
+ }
}
- tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, catchAllAP)
}
- // do a little verification & cleanup
+ // finalize and verify policies; do cleanup
if tlsApp.Automation != nil {
+ for i, ap := range tlsApp.Automation.Policies {
+ // ensure all issuers have global defaults filled in
+ for j, issuer := range ap.Issuers {
+ err := fillInGlobalACMEDefaults(issuer, options)
+ if err != nil {
+ return nil, warnings, fmt.Errorf("filling in global issuer defaults for AP %d, issuer %d: %v", i, j, err)
+ }
+ }
+
+ // encode all issuer values we created, so they will be rendered in the output
+ if len(ap.Issuers) > 0 && ap.IssuersRaw == nil {
+ for _, iss := range ap.Issuers {
+ issuerName := iss.(caddy.Module).CaddyModule().ID.Name()
+ ap.IssuersRaw = append(ap.IssuersRaw, caddyconfig.JSONModuleObject(iss, "module", issuerName, &warnings))
+ }
+ }
+ }
+
+ // consolidate automation policies that are the exact same
+ tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies)
+
// ensure automation policies don't overlap subjects (this should be
// an error at provision-time as well, but catch it in the adapt phase
// for convenience)
automationHostSet := make(map[string]struct{})
for _, ap := range tlsApp.Automation.Policies {
- for _, s := range ap.Subjects {
+ for _, s := range ap.SubjectsRaw {
if _, ok := automationHostSet[s]; ok {
return nil, warnings, fmt.Errorf("hostname appears in more than one automation policy, making certificate management ambiguous: %s", s)
}
@@ -334,27 +481,101 @@ func (st ServerType) buildTLSApp(
}
}
- // consolidate automation policies that are the exact same
- tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies)
+ // if nothing remains, remove any excess values to clean up the resulting config
+ if len(tlsApp.Automation.Policies) == 0 {
+ tlsApp.Automation.Policies = nil
+ }
+ if reflect.DeepEqual(tlsApp.Automation, new(caddytls.AutomationConfig)) {
+ tlsApp.Automation = nil
+ }
}
return tlsApp, warnings, nil
}
+type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer }
+
+func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]any) error {
+ acmeWrapper, ok := issuer.(acmeCapable)
+ if !ok {
+ return nil
+ }
+ acmeIssuer := acmeWrapper.GetACMEIssuer()
+ if acmeIssuer == nil {
+ return nil
+ }
+
+ globalEmail := options["email"]
+ globalACMECA := options["acme_ca"]
+ globalACMECARoot := options["acme_ca_root"]
+ globalACMEDNS := options["acme_dns"]
+ globalACMEEAB := options["acme_eab"]
+ globalPreferredChains := options["preferred_chains"]
+ globalCertLifetime := options["cert_lifetime"]
+ globalHTTPPort, globalHTTPSPort := options["http_port"], options["https_port"]
+
+ if globalEmail != nil && acmeIssuer.Email == "" {
+ acmeIssuer.Email = globalEmail.(string)
+ }
+ if globalACMECA != nil && acmeIssuer.CA == "" {
+ acmeIssuer.CA = globalACMECA.(string)
+ }
+ if globalACMECARoot != nil && !slices.Contains(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string)) {
+ acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string))
+ }
+ if globalACMEDNS != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.DNS == nil) {
+ acmeIssuer.Challenges = &caddytls.ChallengesConfig{
+ DNS: &caddytls.DNSChallengeConfig{
+ ProviderRaw: caddyconfig.JSONModuleObject(globalACMEDNS, "name", globalACMEDNS.(caddy.Module).CaddyModule().ID.Name(), nil),
+ },
+ }
+ }
+ if globalACMEEAB != nil && acmeIssuer.ExternalAccount == nil {
+ acmeIssuer.ExternalAccount = globalACMEEAB.(*acme.EAB)
+ }
+ if globalPreferredChains != nil && acmeIssuer.PreferredChains == nil {
+ acmeIssuer.PreferredChains = globalPreferredChains.(*caddytls.ChainPreference)
+ }
+ if globalHTTPPort != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.HTTP == nil || acmeIssuer.Challenges.HTTP.AlternatePort == 0) {
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.HTTP == nil {
+ acmeIssuer.Challenges.HTTP = new(caddytls.HTTPChallengeConfig)
+ }
+ acmeIssuer.Challenges.HTTP.AlternatePort = globalHTTPPort.(int)
+ }
+ if globalHTTPSPort != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.TLSALPN == nil || acmeIssuer.Challenges.TLSALPN.AlternatePort == 0) {
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.TLSALPN == nil {
+ acmeIssuer.Challenges.TLSALPN = new(caddytls.TLSALPNChallengeConfig)
+ }
+ acmeIssuer.Challenges.TLSALPN.AlternatePort = globalHTTPSPort.(int)
+ }
+ if globalCertLifetime != nil && acmeIssuer.CertificateLifetime == 0 {
+ acmeIssuer.CertificateLifetime = globalCertLifetime.(caddy.Duration)
+ }
+ return nil
+}
+
// newBaseAutomationPolicy returns a new TLS automation policy that gets
// its values from the global options map. It should be used as the base
// for any other automation policies. A nil policy (and no error) will be
// returned if there are no default/global options. However, if always is
// true, a non-nil value will always be returned (unless there is an error).
-func newBaseAutomationPolicy(options map[string]interface{}, warnings []caddyconfig.Warning, always bool) (*caddytls.AutomationPolicy, error) {
- acmeCA, hasACMECA := options["acme_ca"]
- acmeDNS, hasACMEDNS := options["acme_dns"]
- acmeCARoot, hasACMECARoot := options["acme_ca_root"]
- email, hasEmail := options["email"]
- localCerts, hasLocalCerts := options["local_certs"]
+func newBaseAutomationPolicy(
+ options map[string]any,
+ _ []caddyconfig.Warning,
+ always bool,
+) (*caddytls.AutomationPolicy, error) {
+ issuers, hasIssuers := options["cert_issuer"]
+ _, hasLocalCerts := options["local_certs"]
keyType, hasKeyType := options["key_type"]
+ ocspStapling, hasOCSPStapling := options["ocsp_stapling"]
- hasGlobalAutomationOpts := hasACMECA || hasACMEDNS || hasACMECARoot || hasEmail || hasLocalCerts || hasKeyType
+ hasGlobalAutomationOpts := hasIssuers || hasLocalCerts || hasKeyType || hasOCSPStapling
// if there are no global options related to automation policies
// set, then we can just return right away
@@ -366,38 +587,24 @@ func newBaseAutomationPolicy(options map[string]interface{}, warnings []caddycon
}
ap := new(caddytls.AutomationPolicy)
+ if hasKeyType {
+ ap.KeyType = keyType.(string)
+ }
- if localCerts != nil {
- // internal issuer enabled trumps any ACME configurations; useful in testing
- ap.Issuer = new(caddytls.InternalIssuer) // we'll encode it later
- } else {
- if acmeCA == nil {
- acmeCA = ""
- }
- if email == nil {
- email = ""
- }
- mgr := &caddytls.ACMEIssuer{
- CA: acmeCA.(string),
- Email: email.(string),
- }
- if acmeDNS != nil {
- provName := acmeDNS.(string)
- dnsProvModule, err := caddy.GetModule("tls.dns." + provName)
- if err != nil {
- return nil, fmt.Errorf("getting DNS provider module named '%s': %v", provName, err)
- }
- mgr.Challenges = &caddytls.ChallengesConfig{
- DNSRaw: caddyconfig.JSONModuleObject(dnsProvModule.New(), "provider", provName, &warnings),
- }
- }
- if acmeCARoot != nil {
- mgr.TrustedRootsPEMFiles = []string{acmeCARoot.(string)}
- }
- if keyType != nil {
- ap.KeyType = keyType.(string)
- }
- ap.Issuer = mgr // we'll encode it later
+ if hasIssuers && hasLocalCerts {
+ return nil, fmt.Errorf("global options are ambiguous: local_certs is confusing when combined with cert_issuer, because local_certs is also a specific kind of issuer")
+ }
+
+ if hasIssuers {
+ ap.Issuers = issuers.([]certmagic.Issuer)
+ } else if hasLocalCerts {
+ ap.Issuers = []certmagic.Issuer{new(caddytls.InternalIssuer)}
+ }
+
+ if hasOCSPStapling {
+ ocspConfig := ocspStapling.(certmagic.OCSPConfig)
+ ap.DisableOCSPStapling = ocspConfig.DisableStapling
+ ap.OCSPOverrides = ocspConfig.ResponderOverrides
}
return ap, nil
@@ -406,17 +613,50 @@ func newBaseAutomationPolicy(options map[string]interface{}, warnings []caddycon
// consolidateAutomationPolicies combines automation policies that are the same,
// for a cleaner overall output.
func consolidateAutomationPolicies(aps []*caddytls.AutomationPolicy) []*caddytls.AutomationPolicy {
- for i := 0; i < len(aps); i++ {
- for j := 0; j < len(aps); j++ {
- if j == i {
- continue
- }
+ // sort from most specific to least specific; we depend on this ordering
+ sort.SliceStable(aps, func(i, j int) bool {
+ if automationPolicyIsSubset(aps[i], aps[j]) {
+ return true
+ }
+ if automationPolicyIsSubset(aps[j], aps[i]) {
+ return false
+ }
+ return len(aps[i].SubjectsRaw) > len(aps[j].SubjectsRaw)
+ })
+ emptyAPCount := 0
+ origLenAPs := len(aps)
+ // compute the number of empty policies (disregarding subjects) - see #4128
+ emptyAP := new(caddytls.AutomationPolicy)
+ for i := 0; i < len(aps); i++ {
+ emptyAP.SubjectsRaw = aps[i].SubjectsRaw
+ if reflect.DeepEqual(aps[i], emptyAP) {
+ emptyAPCount++
+ if !automationPolicyHasAllPublicNames(aps[i]) {
+ // if this automation policy has internal names, we might as well remove it
+ // so auto-https can implicitly use the internal issuer
+ aps = slices.Delete(aps, i, i+1)
+ i--
+ }
+ }
+ }
+ // If all policies are empty, we can return nil, as there is no need to set any policy
+ if emptyAPCount == origLenAPs {
+ return nil
+ }
+
+ // remove or combine duplicate policies
+outer:
+ for i := 0; i < len(aps); i++ {
+ // compare only with next policies; we sorted by specificity so we must not delete earlier policies
+ for j := i + 1; j < len(aps); j++ {
// if they're exactly equal in every way, just keep one of them
if reflect.DeepEqual(aps[i], aps[j]) {
- aps = append(aps[:j], aps[j+1:]...)
+ aps = slices.Delete(aps, j, j+1)
+ // must re-evaluate current i against next j; can't skip it!
+				// even if i decrements to -1, it will be incremented back to 0 immediately
i--
- break
+ continue outer
}
// if the policy is the same, we can keep just one, but we have
@@ -425,30 +665,123 @@ func consolidateAutomationPolicies(aps []*caddytls.AutomationPolicy) []*caddytls
// otherwise the one without any subjects (a catch-all) would be
// eaten up by the one with subjects; and if both have subjects, we
// need to combine their lists
- if bytes.Equal(aps[i].IssuerRaw, aps[j].IssuerRaw) &&
+ if reflect.DeepEqual(aps[i].IssuersRaw, aps[j].IssuersRaw) &&
+ reflect.DeepEqual(aps[i].ManagersRaw, aps[j].ManagersRaw) &&
bytes.Equal(aps[i].StorageRaw, aps[j].StorageRaw) &&
aps[i].MustStaple == aps[j].MustStaple &&
aps[i].KeyType == aps[j].KeyType &&
aps[i].OnDemand == aps[j].OnDemand &&
+ aps[i].ReusePrivateKeys == aps[j].ReusePrivateKeys &&
aps[i].RenewalWindowRatio == aps[j].RenewalWindowRatio {
- if len(aps[i].Subjects) == 0 && len(aps[j].Subjects) > 0 {
- aps = append(aps[:j], aps[j+1:]...)
- } else if len(aps[i].Subjects) > 0 && len(aps[j].Subjects) == 0 {
- aps = append(aps[:i], aps[i+1:]...)
+ if len(aps[i].SubjectsRaw) > 0 && len(aps[j].SubjectsRaw) == 0 {
+ // later policy (at j) has no subjects ("catch-all"), so we can
+ // remove the identical-but-more-specific policy that comes first
+ // AS LONG AS it is not shadowed by another policy before it; e.g.
+ // if policy i is for example.com, policy i+1 is '*.com', and policy
+ // j is catch-all, we cannot remove policy i because that would
+ // cause example.com to be served by the less specific policy for
+ // '*.com', which might be different (yes we've seen this happen)
+ if automationPolicyShadows(i, aps) >= j {
+ aps = slices.Delete(aps, i, i+1)
+ i--
+ continue outer
+ }
} else {
- aps[i].Subjects = append(aps[i].Subjects, aps[j].Subjects...)
- aps = append(aps[:j], aps[j+1:]...)
+ // avoid repeated subjects
+ for _, subj := range aps[j].SubjectsRaw {
+ if !slices.Contains(aps[i].SubjectsRaw, subj) {
+ aps[i].SubjectsRaw = append(aps[i].SubjectsRaw, subj)
+ }
+ }
+ aps = slices.Delete(aps, j, j+1)
+ j--
}
- i--
- break
}
}
}
- // ensure any catch-all policies go last
- sort.SliceStable(aps, func(i, j int) bool {
- return len(aps[i].Subjects) > len(aps[j].Subjects)
- })
-
return aps
}
+
+// automationPolicyIsSubset returns true if a's subjects are a subset
+// of b's subjects.
+func automationPolicyIsSubset(a, b *caddytls.AutomationPolicy) bool {
+ if len(b.SubjectsRaw) == 0 {
+ return true
+ }
+ if len(a.SubjectsRaw) == 0 {
+ return false
+ }
+ for _, aSubj := range a.SubjectsRaw {
+ inSuperset := slices.ContainsFunc(b.SubjectsRaw, func(bSubj string) bool {
+ return certmagic.MatchWildcard(aSubj, bSubj)
+ })
+ if !inSuperset {
+ return false
+ }
+ }
+ return true
+}
+
+// automationPolicyShadows returns the index of a policy that aps[i] shadows;
+// in other words, for all policies after position i, if that policy covers
+// the same subjects but is less specific, that policy's position is returned,
+// or -1 if no shadowing is found. For example, if policy i is for
+// "foo.example.com" and policy i+2 is for "*.example.com", then i+2 will be
+// returned, since that policy is shadowed by i, which is in front.
+func automationPolicyShadows(i int, aps []*caddytls.AutomationPolicy) int {
+ for j := i + 1; j < len(aps); j++ {
+ if automationPolicyIsSubset(aps[i], aps[j]) {
+ return j
+ }
+ }
+ return -1
+}
+
+// subjectQualifiesForPublicCert is like certmagic.SubjectQualifiesForPublicCert() except
+// that this allows domains with multiple wildcard levels like '*.*.example.com' to qualify
+// if the automation policy has OnDemand enabled (i.e. this function is more lenient).
+//
+// IP subjects are considered as non-qualifying for public certs. Technically, there are
+// now public ACME CAs as well as non-ACME CAs that issue IP certificates. But this function
+// is used solely for implicit automation (defaults), where it gets really complicated to
+// keep track of which issuers support IP certificates in which circumstances. Currently,
+// issuers that support IP certificates are very few, and all require some sort of config
+// from the user anyway (such as an account credential). Since we cannot implicitly and
+// automatically get public IP certs without configuration from the user, we treat IPs as
+// not qualifying for public certificates. Users should expressly configure an issuer
+// that supports IP certs for that purpose.
+func subjectQualifiesForPublicCert(ap *caddytls.AutomationPolicy, subj string) bool {
+ return !certmagic.SubjectIsIP(subj) &&
+ !certmagic.SubjectIsInternal(subj) &&
+ (strings.Count(subj, "*.") < 2 || ap.OnDemand)
+}
+
+// automationPolicyHasAllPublicNames returns true if all the names on the policy
+// qualify for public certs and none of them is a Tailscale domain.
+func automationPolicyHasAllPublicNames(ap *caddytls.AutomationPolicy) bool {
+ return !slices.ContainsFunc(ap.SubjectsRaw, func(i string) bool {
+ return !subjectQualifiesForPublicCert(ap, i) || isTailscaleDomain(i)
+ })
+}
+
+func isTailscaleDomain(name string) bool {
+ return strings.HasSuffix(strings.ToLower(name), ".ts.net")
+}
+
+func hostsCoveredByWildcard(hosts []string, wildcards []string) bool {
+ if len(hosts) == 0 || len(wildcards) == 0 {
+ return false
+ }
+ for _, host := range hosts {
+ for _, wildcard := range wildcards {
+ if strings.HasPrefix(host, "*.") {
+ continue
+ }
+ if certmagic.MatchWildcard(host, "*."+wildcard) {
+ return true
+ }
+ }
+ }
+ return false
+}
diff --git a/caddyconfig/httpcaddyfile/tlsapp_test.go b/caddyconfig/httpcaddyfile/tlsapp_test.go
new file mode 100644
index 00000000..d8edbdf9
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/tlsapp_test.go
@@ -0,0 +1,56 @@
+package httpcaddyfile
+
+import (
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
+)
+
+func TestAutomationPolicyIsSubset(t *testing.T) {
+ for i, test := range []struct {
+ a, b []string
+ expect bool
+ }{
+ {
+ a: []string{"example.com"},
+ b: []string{},
+ expect: true,
+ },
+ {
+ a: []string{},
+ b: []string{"example.com"},
+ expect: false,
+ },
+ {
+ a: []string{"foo.example.com"},
+ b: []string{"*.example.com"},
+ expect: true,
+ },
+ {
+ a: []string{"foo.example.com"},
+ b: []string{"foo.example.com"},
+ expect: true,
+ },
+ {
+ a: []string{"foo.example.com"},
+ b: []string{"example.com"},
+ expect: false,
+ },
+ {
+ a: []string{"example.com", "foo.example.com"},
+ b: []string{"*.com", "*.*.com"},
+ expect: true,
+ },
+ {
+ a: []string{"example.com", "foo.example.com"},
+ b: []string{"*.com"},
+ expect: false,
+ },
+ } {
+ apA := &caddytls.AutomationPolicy{SubjectsRaw: test.a}
+ apB := &caddytls.AutomationPolicy{SubjectsRaw: test.b}
+ if actual := automationPolicyIsSubset(apA, apB); actual != test.expect {
+ t.Errorf("Test %d: Expected %t but got %t (A: %v B: %v)", i, test.expect, actual, test.a, test.b)
+ }
+ }
+}
diff --git a/caddyconfig/httploader.go b/caddyconfig/httploader.go
new file mode 100644
index 00000000..a25041a3
--- /dev/null
+++ b/caddyconfig/httploader.go
@@ -0,0 +1,218 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+func init() {
+ caddy.RegisterModule(HTTPLoader{})
+}
+
+// HTTPLoader can load Caddy configs over HTTP(S).
+//
+// If the response is not a JSON config, a config adapter must be specified
+// either in the loader config (`adapter`), or in the Content-Type HTTP header
+// returned in the HTTP response from the server. The Content-Type header is
+// read just like the admin API's `/load` endpoint. If you don't have control
+// over the HTTP server (but can still trust its response), you can override
+// the Content-Type header by setting the `adapter` property in this config.
+type HTTPLoader struct {
+ // The method for the request. Default: GET
+ Method string `json:"method,omitempty"`
+
+ // The URL of the request.
+ URL string `json:"url,omitempty"`
+
+ // HTTP headers to add to the request.
+ Headers http.Header `json:"header,omitempty"`
+
+ // Maximum time allowed for a complete connection and request.
+ Timeout caddy.Duration `json:"timeout,omitempty"`
+
+ // The name of the config adapter to use, if any. Only needed
+ // if the HTTP response is not a JSON config and if the server's
+ // Content-Type header is missing or incorrect.
+ Adapter string `json:"adapter,omitempty"`
+
+ TLS *struct {
+ // Present this instance's managed remote identity credentials to the server.
+ UseServerIdentity bool `json:"use_server_identity,omitempty"`
+
+ // PEM-encoded client certificate filename to present to the server.
+ ClientCertificateFile string `json:"client_certificate_file,omitempty"`
+
+ // PEM-encoded key to use with the client certificate.
+ ClientCertificateKeyFile string `json:"client_certificate_key_file,omitempty"`
+
+		// List of PEM-encoded CA certificate filenames to add to the trust
+		// pool used when verifying the config server's TLS certificate.
+ RootCAPEMFiles []string `json:"root_ca_pem_files,omitempty"`
+ } `json:"tls,omitempty"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (HTTPLoader) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "caddy.config_loaders.http",
+ New: func() caddy.Module { return new(HTTPLoader) },
+ }
+}
+
+// LoadConfig loads a Caddy config.
+func (hl HTTPLoader) LoadConfig(ctx caddy.Context) ([]byte, error) {
+ repl := caddy.NewReplacer()
+
+ client, err := hl.makeClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ method := repl.ReplaceAll(hl.Method, "")
+ if method == "" {
+ method = http.MethodGet
+ }
+
+ url := repl.ReplaceAll(hl.URL, "")
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ for key, vals := range hl.Headers {
+ for _, val := range vals {
+ req.Header.Add(repl.ReplaceAll(key, ""), repl.ReplaceKnown(val, ""))
+ }
+ }
+
+ resp, err := doHttpCallWithRetries(ctx, client, req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode >= 400 {
+ return nil, fmt.Errorf("server responded with HTTP %d", resp.StatusCode)
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ // adapt the config based on either manually-configured adapter or server's response header
+ ct := resp.Header.Get("Content-Type")
+ if hl.Adapter != "" {
+ ct = "text/" + hl.Adapter
+ }
+ result, warnings, err := adaptByContentType(ct, body)
+ if err != nil {
+ return nil, err
+ }
+ for _, warn := range warnings {
+ ctx.Logger().Warn(warn.String())
+ }
+
+ return result, nil
+}
+
+func attemptHttpCall(client *http.Client, request *http.Request) (*http.Response, error) {
+ resp, err := client.Do(request)
+ if err != nil {
+ return nil, fmt.Errorf("problem calling http loader url: %v", err)
+ } else if resp.StatusCode < 200 || resp.StatusCode > 499 {
+ resp.Body.Close()
+ return nil, fmt.Errorf("bad response status code from http loader url: %v", resp.StatusCode)
+ }
+ return resp, nil
+}
+
+func doHttpCallWithRetries(ctx caddy.Context, client *http.Client, request *http.Request) (*http.Response, error) {
+ var resp *http.Response
+ var err error
+ const maxAttempts = 10
+
+ for i := 0; i < maxAttempts; i++ {
+ resp, err = attemptHttpCall(client, request)
+ if err != nil && i < maxAttempts-1 {
+ select {
+ case <-time.After(time.Millisecond * 500):
+ case <-ctx.Done():
+ return resp, ctx.Err()
+ }
+ } else {
+ break
+ }
+ }
+
+ return resp, err
+}
+
+func (hl HTTPLoader) makeClient(ctx caddy.Context) (*http.Client, error) {
+ client := &http.Client{
+ Timeout: time.Duration(hl.Timeout),
+ }
+
+ if hl.TLS != nil {
+ var tlsConfig *tls.Config
+
+ // client authentication
+ if hl.TLS.UseServerIdentity {
+ certs, err := ctx.IdentityCredentials(ctx.Logger())
+ if err != nil {
+ return nil, fmt.Errorf("getting server identity credentials: %v", err)
+ }
+ // See https://github.com/securego/gosec/issues/1054#issuecomment-2072235199
+ //nolint:gosec
+ tlsConfig = &tls.Config{Certificates: certs}
+ } else if hl.TLS.ClientCertificateFile != "" && hl.TLS.ClientCertificateKeyFile != "" {
+ cert, err := tls.LoadX509KeyPair(hl.TLS.ClientCertificateFile, hl.TLS.ClientCertificateKeyFile)
+ if err != nil {
+ return nil, err
+ }
+ //nolint:gosec
+ tlsConfig = &tls.Config{Certificates: []tls.Certificate{cert}}
+ }
+
+ // trusted server certs
+ if len(hl.TLS.RootCAPEMFiles) > 0 {
+ rootPool := x509.NewCertPool()
+ for _, pemFile := range hl.TLS.RootCAPEMFiles {
+ pemData, err := os.ReadFile(pemFile)
+ if err != nil {
+ return nil, fmt.Errorf("failed reading ca cert: %v", err)
+ }
+ rootPool.AppendCertsFromPEM(pemData)
+ }
+ if tlsConfig == nil {
+ tlsConfig = new(tls.Config)
+ }
+ tlsConfig.RootCAs = rootPool
+ }
+
+ client.Transport = &http.Transport{TLSClientConfig: tlsConfig}
+ }
+
+ return client, nil
+}
+
+var _ caddy.ConfigLoader = (*HTTPLoader)(nil)
diff --git a/caddyconfig/load.go b/caddyconfig/load.go
index 4855b46c..9f5cda90 100644
--- a/caddyconfig/load.go
+++ b/caddyconfig/load.go
@@ -58,6 +58,10 @@ func (al adminLoad) Routes() []caddy.AdminRoute {
Pattern: "/load",
Handler: caddy.AdminHandlerFunc(al.handleLoad),
},
+ {
+ Pattern: "/adapt",
+ Handler: caddy.AdminHandlerFunc(al.handleAdapt),
+ },
}
}
@@ -69,8 +73,8 @@ func (al adminLoad) Routes() []caddy.AdminRoute {
func (adminLoad) handleLoad(w http.ResponseWriter, r *http.Request) error {
if r.Method != http.MethodPost {
return caddy.APIError{
- Code: http.StatusMethodNotAllowed,
- Err: fmt.Errorf("method not allowed"),
+ HTTPStatus: http.StatusMethodNotAllowed,
+ Err: fmt.Errorf("method not allowed"),
}
}
@@ -81,8 +85,8 @@ func (adminLoad) handleLoad(w http.ResponseWriter, r *http.Request) error {
_, err := io.Copy(buf, r.Body)
if err != nil {
return caddy.APIError{
- Code: http.StatusBadRequest,
- Err: fmt.Errorf("reading request body: %v", err),
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("reading request body: %v", err),
}
}
body := buf.Bytes()
@@ -90,45 +94,21 @@ func (adminLoad) handleLoad(w http.ResponseWriter, r *http.Request) error {
// if the config is formatted other than Caddy's native
// JSON, we need to adapt it before loading it
if ctHeader := r.Header.Get("Content-Type"); ctHeader != "" {
- ct, _, err := mime.ParseMediaType(ctHeader)
+ result, warnings, err := adaptByContentType(ctHeader, body)
if err != nil {
return caddy.APIError{
- Code: http.StatusBadRequest,
- Err: fmt.Errorf("invalid Content-Type: %v", err),
+ HTTPStatus: http.StatusBadRequest,
+ Err: err,
}
}
- if !strings.HasSuffix(ct, "/json") {
- slashIdx := strings.Index(ct, "/")
- if slashIdx < 0 {
- return caddy.APIError{
- Code: http.StatusBadRequest,
- Err: fmt.Errorf("malformed Content-Type"),
- }
- }
- adapterName := ct[slashIdx+1:]
- cfgAdapter := GetAdapter(adapterName)
- if cfgAdapter == nil {
- return caddy.APIError{
- Code: http.StatusBadRequest,
- Err: fmt.Errorf("unrecognized config adapter '%s'", adapterName),
- }
- }
- result, warnings, err := cfgAdapter.Adapt(body, nil)
+ if len(warnings) > 0 {
+ respBody, err := json.Marshal(warnings)
if err != nil {
- return caddy.APIError{
- Code: http.StatusBadRequest,
- Err: fmt.Errorf("adapting config using %s adapter: %v", adapterName, err),
- }
+ caddy.Log().Named("admin.api.load").Error(err.Error())
}
- if len(warnings) > 0 {
- respBody, err := json.Marshal(warnings)
- if err != nil {
- caddy.Log().Named("admin.api.load").Error(err.Error())
- }
- _, _ = w.Write(respBody)
- }
- body = result
+ _, _ = w.Write(respBody)
}
+ body = result
}
forceReload := r.Header.Get("Cache-Control") == "must-revalidate"
@@ -136,8 +116,8 @@ func (adminLoad) handleLoad(w http.ResponseWriter, r *http.Request) error {
err = caddy.Load(body, forceReload)
if err != nil {
return caddy.APIError{
- Code: http.StatusBadRequest,
- Err: fmt.Errorf("loading config: %v", err),
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("loading config: %v", err),
}
}
@@ -146,8 +126,89 @@ func (adminLoad) handleLoad(w http.ResponseWriter, r *http.Request) error {
return nil
}
+// handleAdapt adapts the given Caddy config to JSON and responds with the result.
+func (adminLoad) handleAdapt(w http.ResponseWriter, r *http.Request) error {
+ if r.Method != http.MethodPost {
+ return caddy.APIError{
+ HTTPStatus: http.StatusMethodNotAllowed,
+ Err: fmt.Errorf("method not allowed"),
+ }
+ }
+
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+
+ _, err := io.Copy(buf, r.Body)
+ if err != nil {
+ return caddy.APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("reading request body: %v", err),
+ }
+ }
+
+ result, warnings, err := adaptByContentType(r.Header.Get("Content-Type"), buf.Bytes())
+ if err != nil {
+ return caddy.APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: err,
+ }
+ }
+
+ out := struct {
+ Warnings []Warning `json:"warnings,omitempty"`
+ Result json.RawMessage `json:"result"`
+ }{
+ Warnings: warnings,
+ Result: result,
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ return json.NewEncoder(w).Encode(out)
+}
+
+// adaptByContentType adapts body to Caddy JSON using the adapter specified by contentType.
+// If contentType is empty or ends with "/json", the input will be returned, as a no-op.
+func adaptByContentType(contentType string, body []byte) ([]byte, []Warning, error) {
+ // assume JSON as the default
+ if contentType == "" {
+ return body, nil, nil
+ }
+
+ ct, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return nil, nil, caddy.APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("invalid Content-Type: %v", err),
+ }
+ }
+
+ // if already JSON, no need to adapt
+ if strings.HasSuffix(ct, "/json") {
+ return body, nil, nil
+ }
+
+ // adapter name should be suffix of MIME type
+ _, adapterName, slashFound := strings.Cut(ct, "/")
+ if !slashFound {
+ return nil, nil, fmt.Errorf("malformed Content-Type")
+ }
+
+ cfgAdapter := GetAdapter(adapterName)
+ if cfgAdapter == nil {
+ return nil, nil, fmt.Errorf("unrecognized config adapter '%s'", adapterName)
+ }
+
+ result, warnings, err := cfgAdapter.Adapt(body, nil)
+ if err != nil {
+ return nil, nil, fmt.Errorf("adapting config using %s adapter: %v", adapterName, err)
+ }
+
+ return result, warnings, nil
+}
+
var bufPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return new(bytes.Buffer)
},
}
diff --git a/caddytest/caddy.ca.cer b/caddytest/caddy.ca.cer
new file mode 100644
index 00000000..00a9a1c1
--- /dev/null
+++ b/caddytest/caddy.ca.cer
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkw
+ODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU
+7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl0
+3WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45t
+wOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNx
+tdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTU
+ApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAd
+BgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS
+2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5u
+NY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkq
+hkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfK
+D66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEO
+fG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnk
+oNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZ
+ks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdle
+Ih6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
+-----END CERTIFICATE-----
\ No newline at end of file
diff --git a/caddytest/caddytest.go b/caddytest/caddytest.go
index b0f23d0e..05aa1e3f 100644
--- a/caddytest/caddytest.go
+++ b/caddytest/caddytest.go
@@ -7,13 +7,15 @@ import (
"encoding/json"
"errors"
"fmt"
- "io/ioutil"
+ "io"
+ "io/fs"
"log"
"net"
"net/http"
"net/http/cookiejar"
"os"
"path"
+ "reflect"
"regexp"
"runtime"
"strings"
@@ -21,9 +23,10 @@ import (
"time"
"github.com/aryann/difflib"
- "github.com/caddyserver/caddy/v2/caddyconfig"
+
caddycmd "github.com/caddyserver/caddy/v2/cmd"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
// plug in Caddy modules here
_ "github.com/caddyserver/caddy/v2/modules/standard"
)
@@ -33,13 +36,19 @@ type Defaults struct {
// Port we expect caddy to listening on
AdminPort int
// Certificates we expect to be loaded before attempting to run the tests
- Certifcates []string
+ Certificates []string
+	// TestRequestTimeout is the time to wait for an HTTP request to complete
+ TestRequestTimeout time.Duration
+ // LoadRequestTimeout is the time to wait for the config to be loaded against the caddy server
+ LoadRequestTimeout time.Duration
}
// Default testing values
var Default = Defaults{
- AdminPort: 2019,
- Certifcates: []string{"/caddy.localhost.crt", "/caddy.localhost.key"},
+ AdminPort: 2999, // different from what a real server also running on a developer's machine might be
+ Certificates: []string{"/caddy.localhost.crt", "/caddy.localhost.key"},
+ TestRequestTimeout: 5 * time.Second,
+ LoadRequestTimeout: 5 * time.Second,
}
var (
@@ -49,13 +58,13 @@ var (
// Tester represents an instance of a test client.
type Tester struct {
- Client *http.Client
- t *testing.T
+ Client *http.Client
+ configLoaded bool
+ t testing.TB
}
// NewTester will create a new testing client with an attached cookie jar
-func NewTester(t *testing.T) *Tester {
-
+func NewTester(t testing.TB) *Tester {
jar, err := cookiejar.New(nil)
if err != nil {
t.Fatalf("failed to create cookiejar: %s", err)
@@ -65,9 +74,10 @@ func NewTester(t *testing.T) *Tester {
Client: &http.Client{
Transport: CreateTestingTransport(),
Jar: jar,
- Timeout: 5 * time.Second,
+ Timeout: Default.TestRequestTimeout,
},
- t: t,
+ configLoaded: false,
+ t: t,
}
}
@@ -85,47 +95,63 @@ func timeElapsed(start time.Time, name string) {
// InitServer this will configure the server with a configurion of a specific
// type. The configType must be either "json" or the adapter type.
func (tc *Tester) InitServer(rawConfig string, configType string) {
-
if err := tc.initServer(rawConfig, configType); err != nil {
tc.t.Logf("failed to load config: %s", err)
tc.t.Fail()
}
+ if err := tc.ensureConfigRunning(rawConfig, configType); err != nil {
+ tc.t.Logf("failed ensuring config is running: %s", err)
+ tc.t.Fail()
+ }
}
// InitServer this will configure the server with a configurion of a specific
// type. The configType must be either "json" or the adapter type.
func (tc *Tester) initServer(rawConfig string, configType string) error {
-
if testing.Short() {
tc.t.SkipNow()
return nil
}
- err := validateTestPrerequisites()
+ err := validateTestPrerequisites(tc.t)
if err != nil {
tc.t.Skipf("skipping tests as failed integration prerequisites. %s", err)
return nil
}
tc.t.Cleanup(func() {
- if tc.t.Failed() {
+ if tc.t.Failed() && tc.configLoaded {
res, err := http.Get(fmt.Sprintf("http://localhost:%d/config/", Default.AdminPort))
if err != nil {
tc.t.Log("unable to read the current config")
return
}
defer res.Body.Close()
- body, err := ioutil.ReadAll(res.Body)
+ body, _ := io.ReadAll(res.Body)
var out bytes.Buffer
- json.Indent(&out, body, "", " ")
+ _ = json.Indent(&out, body, "", " ")
tc.t.Logf("----------- failed with config -----------\n%s", out.String())
}
})
rawConfig = prependCaddyFilePath(rawConfig)
+ // normalize JSON config
+ if configType == "json" {
+ tc.t.Logf("Before: %s", rawConfig)
+ var conf any
+ if err := json.Unmarshal([]byte(rawConfig), &conf); err != nil {
+ return err
+ }
+ c, err := json.Marshal(conf)
+ if err != nil {
+ return err
+ }
+ rawConfig = string(c)
+ tc.t.Logf("After: %s", rawConfig)
+ }
client := &http.Client{
- Timeout: time.Second * 2,
+ Timeout: Default.LoadRequestTimeout,
}
start := time.Now()
req, err := http.NewRequest("POST", fmt.Sprintf("http://localhost:%d/load", Default.AdminPort), strings.NewReader(rawConfig))
@@ -148,7 +174,7 @@ func (tc *Tester) initServer(rawConfig string, configType string) error {
timeElapsed(start, "caddytest: config load time")
defer res.Body.Close()
- body, err := ioutil.ReadAll(res.Body)
+ body, err := io.ReadAll(res.Body)
if err != nil {
tc.t.Errorf("unable to read response. %s", err)
return err
@@ -158,69 +184,117 @@ func (tc *Tester) initServer(rawConfig string, configType string) error {
return configLoadError{Response: string(body)}
}
+ tc.configLoaded = true
return nil
}
-var hasValidated bool
-var arePrerequisitesValid bool
-
-func validateTestPrerequisites() error {
-
- if hasValidated {
- if !arePrerequisitesValid {
- return errors.New("caddy integration prerequisites failed. see first error")
+func (tc *Tester) ensureConfigRunning(rawConfig string, configType string) error {
+ expectedBytes := []byte(prependCaddyFilePath(rawConfig))
+ if configType != "json" {
+ adapter := caddyconfig.GetAdapter(configType)
+ if adapter == nil {
+ return fmt.Errorf("adapter of config type is missing: %s", configType)
}
- return nil
+ expectedBytes, _, _ = adapter.Adapt([]byte(rawConfig), nil)
}
- hasValidated = true
- arePrerequisitesValid = false
+ var expected any
+ err := json.Unmarshal(expectedBytes, &expected)
+ if err != nil {
+ return err
+ }
+ client := &http.Client{
+ Timeout: Default.LoadRequestTimeout,
+ }
+
+ fetchConfig := func(client *http.Client) any {
+ resp, err := client.Get(fmt.Sprintf("http://localhost:%d/config/", Default.AdminPort))
+ if err != nil {
+ return nil
+ }
+ defer resp.Body.Close()
+ actualBytes, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil
+ }
+ var actual any
+ err = json.Unmarshal(actualBytes, &actual)
+ if err != nil {
+ return nil
+ }
+ return actual
+ }
+
+ for retries := 10; retries > 0; retries-- {
+ if reflect.DeepEqual(expected, fetchConfig(client)) {
+ return nil
+ }
+ time.Sleep(1 * time.Second)
+ }
+ tc.t.Errorf("POSTed configuration isn't active")
+ return errors.New("EnsureConfigRunning: POSTed configuration isn't active")
+}
+
+const initConfig = `{
+ admin localhost:2999
+}
+`
+
+// validateTestPrerequisites ensures the certificates are available in the
+// designated path and that the Caddy sub-process is running.
+func validateTestPrerequisites(t testing.TB) error {
// check certificates are found
- for _, certName := range Default.Certifcates {
- if _, err := os.Stat(getIntegrationDir() + certName); os.IsNotExist(err) {
+ for _, certName := range Default.Certificates {
+ if _, err := os.Stat(getIntegrationDir() + certName); errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("caddy integration test certificates (%s) not found", certName)
}
}
if isCaddyAdminRunning() != nil {
+ // setup the init config file, and set the cleanup afterwards
+ f, err := os.CreateTemp("", "")
+ if err != nil {
+ return err
+ }
+ t.Cleanup(func() {
+ os.Remove(f.Name())
+ })
+ if _, err := f.WriteString(initConfig); err != nil {
+ return err
+ }
+
// start inprocess caddy server
- os.Args = []string{"caddy", "run"}
+ os.Args = []string{"caddy", "run", "--config", f.Name(), "--adapter", "caddyfile"}
go func() {
caddycmd.Main()
}()
- // wait for caddy to start
- retries := 4
- for ; retries > 0 && isCaddyAdminRunning() != nil; retries-- {
- time.Sleep(10 * time.Millisecond)
+ // wait for caddy to start serving the initial config
+ for retries := 10; retries > 0 && isCaddyAdminRunning() != nil; retries-- {
+ time.Sleep(1 * time.Second)
}
}
- // assert that caddy is running
- if err := isCaddyAdminRunning(); err != nil {
- return err
- }
-
- arePrerequisitesValid = true
- return nil
+ // one more time to return the error
+ return isCaddyAdminRunning()
}
func isCaddyAdminRunning() error {
// assert that caddy is running
client := &http.Client{
- Timeout: time.Second * 2,
+ Timeout: Default.LoadRequestTimeout,
}
- _, err := client.Get(fmt.Sprintf("http://localhost:%d/config/", Default.AdminPort))
+ resp, err := client.Get(fmt.Sprintf("http://localhost:%d/config/", Default.AdminPort))
if err != nil {
- return errors.New("caddy integration test caddy server not running. Expected to be listening on localhost:2019")
+ return fmt.Errorf("caddy integration test caddy server not running. Expected to be listening on localhost:%d", Default.AdminPort)
}
+ resp.Body.Close()
return nil
}
func getIntegrationDir() string {
-
_, filename, _, ok := runtime.Caller(1)
if !ok {
panic("unable to determine the current file path")
@@ -240,7 +314,6 @@ func prependCaddyFilePath(rawConfig string) string {
// CreateTestingTransport creates a testing transport that forces call dialing connections to happen locally
func CreateTestingTransport() *http.Transport {
-
dialer := net.Dialer{
Timeout: 5 * time.Second,
KeepAlive: 5 * time.Second,
@@ -262,13 +335,12 @@ func CreateTestingTransport() *http.Transport {
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 5 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec
}
}
// AssertLoadError will load a config and expect an error
func AssertLoadError(t *testing.T, rawConfig string, configType string, expectedError string) {
-
tc := NewTester(t)
err := tc.initServer(rawConfig, configType)
@@ -279,7 +351,6 @@ func AssertLoadError(t *testing.T, rawConfig string, configType string, expected
// AssertRedirect makes a request and asserts the redirection happens
func (tc *Tester) AssertRedirect(requestURI string, expectedToLocation string, expectedStatusCode int) *http.Response {
-
redirectPolicyFunc := func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
@@ -303,35 +374,45 @@ func (tc *Tester) AssertRedirect(requestURI string, expectedToLocation string, e
if err != nil {
tc.t.Errorf("requesting \"%s\" expected location: \"%s\" but got error: %s", requestURI, expectedToLocation, err)
}
-
- if expectedToLocation != loc.String() {
- tc.t.Errorf("requesting \"%s\" expected location: \"%s\" but got \"%s\"", requestURI, expectedToLocation, loc.String())
+ if loc == nil && expectedToLocation != "" {
+ tc.t.Errorf("requesting \"%s\" expected a Location header, but didn't get one", requestURI)
+ }
+ if loc != nil {
+ if expectedToLocation != loc.String() {
+ tc.t.Errorf("requesting \"%s\" expected location: \"%s\" but got \"%s\"", requestURI, expectedToLocation, loc.String())
+ }
}
return resp
}
-// AssertAdapt adapts a config and then tests it against an expected result
-func AssertAdapt(t *testing.T, rawConfig string, adapterName string, expectedResponse string) {
-
+// CompareAdapt adapts a config and then compares it against an expected result
+func CompareAdapt(t testing.TB, filename, rawConfig string, adapterName string, expectedResponse string) bool {
cfgAdapter := caddyconfig.GetAdapter(adapterName)
if cfgAdapter == nil {
- t.Errorf("unrecognized config adapter '%s'", adapterName)
- return
+ t.Logf("unrecognized config adapter '%s'", adapterName)
+ return false
}
- options := make(map[string]interface{})
- options["pretty"] = "true"
+ options := make(map[string]any)
result, warnings, err := cfgAdapter.Adapt([]byte(rawConfig), options)
if err != nil {
- t.Errorf("adapting config using %s adapter: %v", adapterName, err)
- return
+ t.Logf("adapting config using %s adapter: %v", adapterName, err)
+ return false
}
+ // prettify results to keep tests human-manageable
+ var prettyBuf bytes.Buffer
+ err = json.Indent(&prettyBuf, result, "", "\t")
+ if err != nil {
+ return false
+ }
+ result = prettyBuf.Bytes()
+
if len(warnings) > 0 {
for _, w := range warnings {
- t.Logf("warning: directive: %s : %s", w.Directive, w.Message)
+ t.Logf("warning: %s:%d: %s: %s", filename, w.Line, w.Directive, w.Message)
}
}
@@ -359,13 +440,22 @@ func AssertAdapt(t *testing.T, rawConfig string, adapterName string, expectedRes
fmt.Printf(" + %s\n", d.Payload)
}
}
+ return false
+ }
+ return true
+}
+
+// AssertAdapt adapts a config and then tests it against an expected result
+func AssertAdapt(t testing.TB, rawConfig string, adapterName string, expectedResponse string) {
+ ok := CompareAdapt(t, "Caddyfile", rawConfig, adapterName, expectedResponse)
+ if !ok {
t.Fail()
}
}
// Generic request functions
-func applyHeaders(t *testing.T, req *http.Request, requestHeaders []string) {
+func applyHeaders(t testing.TB, req *http.Request, requestHeaders []string) {
requestContentType := ""
for _, requestHeader := range requestHeaders {
arr := strings.SplitAfterN(requestHeader, ":", 2)
@@ -385,14 +475,13 @@ func applyHeaders(t *testing.T, req *http.Request, requestHeaders []string) {
// AssertResponseCode will execute the request and verify the status code, returns a response for additional assertions
func (tc *Tester) AssertResponseCode(req *http.Request, expectedStatusCode int) *http.Response {
-
resp, err := tc.Client.Do(req)
if err != nil {
tc.t.Fatalf("failed to call server %s", err)
}
if expectedStatusCode != resp.StatusCode {
- tc.t.Errorf("requesting \"%s\" expected status code: %d but got %d", req.RequestURI, expectedStatusCode, resp.StatusCode)
+ tc.t.Errorf("requesting \"%s\" expected status code: %d but got %d", req.URL.RequestURI(), expectedStatusCode, resp.StatusCode)
}
return resp
@@ -400,18 +489,17 @@ func (tc *Tester) AssertResponseCode(req *http.Request, expectedStatusCode int)
// AssertResponse request a URI and assert the status code and the body contains a string
func (tc *Tester) AssertResponse(req *http.Request, expectedStatusCode int, expectedBody string) (*http.Response, string) {
-
resp := tc.AssertResponseCode(req, expectedStatusCode)
defer resp.Body.Close()
- bytes, err := ioutil.ReadAll(resp.Body)
+ bytes, err := io.ReadAll(resp.Body)
if err != nil {
tc.t.Fatalf("unable to read the response body %s", err)
}
body := string(bytes)
- if !strings.Contains(body, expectedBody) {
+ if body != expectedBody {
tc.t.Errorf("requesting \"%s\" expected response body \"%s\" but got \"%s\"", req.RequestURI, expectedBody, body)
}
@@ -422,7 +510,6 @@ func (tc *Tester) AssertResponse(req *http.Request, expectedStatusCode int, expe
// AssertGetResponse GET a URI and expect a statusCode and body text
func (tc *Tester) AssertGetResponse(requestURI string, expectedStatusCode int, expectedBody string) (*http.Response, string) {
-
req, err := http.NewRequest("GET", requestURI, nil)
if err != nil {
tc.t.Fatalf("unable to create request %s", err)
@@ -433,7 +520,6 @@ func (tc *Tester) AssertGetResponse(requestURI string, expectedStatusCode int, e
// AssertDeleteResponse request a URI and expect a statusCode and body text
func (tc *Tester) AssertDeleteResponse(requestURI string, expectedStatusCode int, expectedBody string) (*http.Response, string) {
-
req, err := http.NewRequest("DELETE", requestURI, nil)
if err != nil {
tc.t.Fatalf("unable to create request %s", err)
@@ -444,7 +530,6 @@ func (tc *Tester) AssertDeleteResponse(requestURI string, expectedStatusCode int
// AssertPostResponseBody POST to a URI and assert the response code and body
func (tc *Tester) AssertPostResponseBody(requestURI string, requestHeaders []string, requestBody *bytes.Buffer, expectedStatusCode int, expectedBody string) (*http.Response, string) {
-
req, err := http.NewRequest("POST", requestURI, requestBody)
if err != nil {
tc.t.Errorf("failed to create request %s", err)
@@ -458,7 +543,6 @@ func (tc *Tester) AssertPostResponseBody(requestURI string, requestHeaders []str
// AssertPutResponseBody PUT to a URI and assert the response code and body
func (tc *Tester) AssertPutResponseBody(requestURI string, requestHeaders []string, requestBody *bytes.Buffer, expectedStatusCode int, expectedBody string) (*http.Response, string) {
-
req, err := http.NewRequest("PUT", requestURI, requestBody)
if err != nil {
tc.t.Errorf("failed to create request %s", err)
@@ -472,7 +556,6 @@ func (tc *Tester) AssertPutResponseBody(requestURI string, requestHeaders []stri
// AssertPatchResponseBody PATCH to a URI and assert the response code and body
func (tc *Tester) AssertPatchResponseBody(requestURI string, requestHeaders []string, requestBody *bytes.Buffer, expectedStatusCode int, expectedBody string) (*http.Response, string) {
-
req, err := http.NewRequest("PATCH", requestURI, requestBody)
if err != nil {
tc.t.Errorf("failed to create request %s", err)
diff --git a/caddytest/caddytest_test.go b/caddytest/caddytest_test.go
index a46867ca..a9d5da93 100644
--- a/caddytest/caddytest_test.go
+++ b/caddytest/caddytest_test.go
@@ -1,6 +1,7 @@
package caddytest
import (
+ "net/http"
"strings"
"testing"
)
@@ -31,3 +32,97 @@ func TestReplaceCertificatePaths(t *testing.T) {
t.Error("expected redirect uri to be unchanged")
}
}
+
+// TestLoadUnorderedJSON verifies that a JSON config whose top-level keys
+// appear in non-canonical order ("logging" before "admin" and "apps") still
+// loads successfully and serves requests.
+func TestLoadUnorderedJSON(t *testing.T) {
+	tester := NewTester(t)
+	tester.InitServer(`
+	{
+		"logging": {
+			"logs": {
+				"default": {
+					"level": "DEBUG",
+					"writer": {
+						"output": "stdout"
+					}
+				},
+				"sStdOutLogs": {
+					"level": "DEBUG",
+					"writer": {
+						"output": "stdout"
+					},
+					"include": [
+						"http.*",
+						"admin.*"
+					]
+				},
+				"sFileLogs": {
+					"level": "DEBUG",
+					"writer": {
+						"output": "stdout"
+					},
+					"include": [
+						"http.*",
+						"admin.*"
+					]
+				}
+			}
+		},
+		"admin": {
+			"listen": "localhost:2999"
+		},
+		"apps": {
+			"pki": {
+				"certificate_authorities" : {
+					"local" : {
+						"install_trust": false
+					}
+				}
+			},
+			"http": {
+				"http_port": 9080,
+				"https_port": 9443,
+				"servers": {
+					"s_server": {
+						"listen": [
+							":9080"
+						],
+						"routes": [
+							{
+								"handle": [
+									{
+										"handler": "static_response",
+										"body": "Hello"
+									}
+								]
+							},
+							{
+								"match": [
+									{
+										"host": [
+											"localhost",
+											"127.0.0.1"
+										]
+									}
+								]
+							}
+						],
+						"logs": {
+							"default_logger_name": "sStdOutLogs",
+							"logger_names": {
+								"localhost": "sStdOutLogs",
+								"127.0.0.1": "sFileLogs"
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	`, "json")
+	req, err := http.NewRequest(http.MethodGet, "http://localhost:9080/", nil)
+	if err != nil {
+		// Fatalf (instead of a silent t.Fail) reports why the request could
+		// not be constructed and stops the test immediately.
+		t.Fatalf("creating request: %v", err)
+	}
+	tester.AssertResponseCode(req, 200)
+}
diff --git a/caddytest/integration/acme_test.go b/caddytest/integration/acme_test.go
new file mode 100644
index 00000000..ceacd1db
--- /dev/null
+++ b/caddytest/integration/acme_test.go
@@ -0,0 +1,206 @@
+package integration
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "fmt"
+ "net"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddytest"
+ "github.com/mholt/acmez/v2"
+ "github.com/mholt/acmez/v2/acme"
+ smallstepacme "github.com/smallstep/certificates/acme"
+ "go.uber.org/zap"
+)
+
+const acmeChallengePort = 9081
+
+// Test the basic functionality of Caddy's ACME server: obtain a certificate
+// for "localhost" from the default local CA using the http-01 challenge.
+func TestACMEServerWithDefaults(t *testing.T) {
+	ctx := context.Background()
+	logger, err := zap.NewDevelopment()
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	tester := caddytest.NewTester(t)
+	tester.InitServer(`
+	{
+		skip_install_trust
+		admin localhost:2999
+		http_port 9080
+		https_port 9443
+		local_certs
+	}
+	acme.localhost {
+		acme_server
+	}
+  `, "caddyfile")
+
+	client := acmez.Client{
+		Client: &acme.Client{
+			Directory:  "https://acme.localhost:9443/acme/local/directory",
+			HTTPClient: tester.Client,
+			Logger:     logger,
+		},
+		ChallengeSolvers: map[string]acmez.Solver{
+			acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
+		},
+	}
+
+	accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		t.Errorf("generating account key: %v", err)
+		// Bail out: continuing would register an account with a nil key.
+		return
+	}
+	account := acme.Account{
+		Contact:              []string{"mailto:you@example.com"},
+		TermsOfServiceAgreed: true,
+		PrivateKey:           accountPrivateKey,
+	}
+	account, err = client.NewAccount(ctx, account)
+	if err != nil {
+		t.Errorf("new account: %v", err)
+		return
+	}
+
+	// Every certificate needs a key.
+	certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		t.Errorf("generating certificate key: %v", err)
+		return
+	}
+
+	certs, err := client.ObtainCertificateForSANs(ctx, account, certPrivateKey, []string{"localhost"})
+	if err != nil {
+		t.Errorf("obtaining certificate: %v", err)
+		return
+	}
+
+	// ACME servers should usually give you the entire certificate chain
+	// in PEM format, and sometimes even alternate chains! It's up to you
+	// which one(s) to store and use, but whatever you do, be sure to
+	// store the certificate and key somewhere safe and secure, i.e. don't
+	// lose them!
+	for _, cert := range certs {
+		t.Logf("Certificate %q:\n%s\n\n", cert.URL, cert.ChainPEM)
+	}
+}
+
+// TestACMEServerWithMismatchedChallenges asserts that issuance fails when the
+// server only offers tls-alpn-01 while the client can only solve http-01.
+func TestACMEServerWithMismatchedChallenges(t *testing.T) {
+	ctx := context.Background()
+	logger := caddy.Log().Named("acmez")
+
+	tester := caddytest.NewTester(t)
+	tester.InitServer(`
+	{
+		skip_install_trust
+		admin localhost:2999
+		http_port 9080
+		https_port 9443
+		local_certs
+	}
+	acme.localhost {
+		acme_server {
+			challenges tls-alpn-01
+		}
+	}
+  `, "caddyfile")
+
+	client := acmez.Client{
+		Client: &acme.Client{
+			Directory:  "https://acme.localhost:9443/acme/local/directory",
+			HTTPClient: tester.Client,
+			Logger:     logger,
+		},
+		ChallengeSolvers: map[string]acmez.Solver{
+			acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
+		},
+	}
+
+	accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		t.Errorf("generating account key: %v", err)
+		// Bail out: continuing would register an account with a nil key.
+		return
+	}
+	account := acme.Account{
+		Contact:              []string{"mailto:you@example.com"},
+		TermsOfServiceAgreed: true,
+		PrivateKey:           accountPrivateKey,
+	}
+	account, err = client.NewAccount(ctx, account)
+	if err != nil {
+		t.Errorf("new account: %v", err)
+		return
+	}
+
+	// Every certificate needs a key.
+	certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		t.Errorf("generating certificate key: %v", err)
+		return
+	}
+
+	certs, err := client.ObtainCertificateForSANs(ctx, account, certPrivateKey, []string{"localhost"})
+	if len(certs) > 0 {
+		t.Errorf("expected '0' certificates, but received '%d'", len(certs))
+	}
+	if err == nil {
+		t.Error("expected errors, but received none")
+	}
+	const expectedErrMsg = "no solvers available for remaining challenges (configured=[http-01] offered=[tls-alpn-01] remaining=[tls-alpn-01])"
+	if !strings.Contains(err.Error(), expectedErrMsg) {
+		t.Errorf(`received error message does not match expectation: expected="%s" received="%s"`, expectedErrMsg, err.Error())
+	}
+}
+
+// naiveHTTPSolver is a no-op acmez.Solver for example purposes only.
+// It answers http-01 challenges with a throwaway HTTP server started by
+// Present and torn down by CleanUp.
+type naiveHTTPSolver struct {
+	srv *http.Server // challenge server; set by Present, closed by CleanUp
+	logger *zap.Logger
+}
+
+// Present solves an http-01 challenge by starting an HTTP server on
+// acmeChallengePort that serves the challenge's key authorization, then
+// returns immediately (it does not wait for validation).
+func (s *naiveHTTPSolver) Present(ctx context.Context, challenge acme.Challenge) error {
+	// Point the smallstep ACME validator at our unprivileged test port.
+	// NOTE(review): this mutates package-level state, so solvers presumably
+	// must not run concurrently — confirm calling tests stay serial.
+	smallstepacme.InsecurePortHTTP01 = acmeChallengePort
+	s.srv = &http.Server{
+		Addr: fmt.Sprintf(":%d", acmeChallengePort),
+		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			// r.Host may or may not carry a port; fall back to the raw value.
+			host, _, err := net.SplitHostPort(r.Host)
+			if err != nil {
+				host = r.Host
+			}
+			s.logger.Info("received request on challenge server", zap.String("path", r.URL.Path))
+			// Only respond to the exact challenge path for the identifier
+			// being validated; everything else gets an empty 200.
+			if r.Method == "GET" && r.URL.Path == challenge.HTTP01ResourcePath() && strings.EqualFold(host, challenge.Identifier.Value) {
+				w.Header().Add("Content-Type", "text/plain")
+				w.Write([]byte(challenge.KeyAuthorization))
+				r.Close = true
+				s.logger.Info("served key authentication",
+					zap.String("identifier", challenge.Identifier.Value),
+					zap.String("challenge", "http-01"),
+					zap.String("remote", r.RemoteAddr),
+				)
+			}
+		}),
+	}
+	l, err := net.Listen("tcp", fmt.Sprintf(":%d", acmeChallengePort))
+	if err != nil {
+		return err
+	}
+	s.logger.Info("present challenge", zap.Any("challenge", challenge))
+	// Serve in the background; CleanUp closes the server.
+	go s.srv.Serve(l)
+	return nil
+}
+
+// CleanUp restores the default http-01 challenge port and shuts down the
+// temporary challenge server started by Present (if any).
+// Pointer receiver for consistency with Present; the solver is always
+// registered as *naiveHTTPSolver.
+func (s *naiveHTTPSolver) CleanUp(ctx context.Context, challenge acme.Challenge) error {
+	smallstepacme.InsecurePortHTTP01 = 0
+	s.logger.Info("cleanup", zap.Any("challenge", challenge))
+	if s.srv != nil {
+		s.srv.Close()
+	}
+	return nil
+}
diff --git a/caddytest/integration/acmeserver_test.go b/caddytest/integration/acmeserver_test.go
new file mode 100644
index 00000000..22b716f8
--- /dev/null
+++ b/caddytest/integration/acmeserver_test.go
@@ -0,0 +1,204 @@
+package integration
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "strings"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddytest"
+ "github.com/mholt/acmez/v2"
+ "github.com/mholt/acmez/v2/acme"
+ "go.uber.org/zap"
+)
+
+// TestACMEServerDirectory asserts that the local CA's ACME directory
+// endpoint returns the expected set of resource URLs, byte for byte.
+func TestACMEServerDirectory(t *testing.T) {
+	tester := caddytest.NewTester(t)
+	tester.InitServer(`
+	{
+		skip_install_trust
+		local_certs
+		admin localhost:2999
+		http_port 9080
+		https_port 9443
+		pki {
+			ca local {
+				name "Caddy Local Authority"
+			}
+		}
+	}
+	acme.localhost:9443 {
+		acme_server
+	}
+  `, "caddyfile")
+	// The directory is a single compact JSON object followed by a newline.
+	tester.AssertGetResponse(
+		"https://acme.localhost:9443/acme/local/directory",
+		200,
+		`{"newNonce":"https://acme.localhost:9443/acme/local/new-nonce","newAccount":"https://acme.localhost:9443/acme/local/new-account","newOrder":"https://acme.localhost:9443/acme/local/new-order","revokeCert":"https://acme.localhost:9443/acme/local/revoke-cert","keyChange":"https://acme.localhost:9443/acme/local/key-change"}
+`)
+}
+
+// TestACMEServerAllowPolicy asserts that an allow policy permits issuance
+// for listed domains and rejects all others with rejectedIdentifier.
+func TestACMEServerAllowPolicy(t *testing.T) {
+	tester := caddytest.NewTester(t)
+	tester.InitServer(`
+	{
+		skip_install_trust
+		local_certs
+		admin localhost:2999
+		http_port 9080
+		https_port 9443
+		pki {
+			ca local {
+				name "Caddy Local Authority"
+			}
+		}
+	}
+	acme.localhost {
+		acme_server {
+			challenges http-01
+			allow {
+				domains localhost
+			}
+		}
+	}
+  `, "caddyfile")
+
+	ctx := context.Background()
+	logger, err := zap.NewDevelopment()
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	client := acmez.Client{
+		Client: &acme.Client{
+			Directory:  "https://acme.localhost:9443/acme/local/directory",
+			HTTPClient: tester.Client,
+			Logger:     logger,
+		},
+		ChallengeSolvers: map[string]acmez.Solver{
+			acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
+		},
+	}
+
+	accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		t.Errorf("generating account key: %v", err)
+		// Bail out: continuing would register an account with a nil key.
+		return
+	}
+	account := acme.Account{
+		Contact:              []string{"mailto:you@example.com"},
+		TermsOfServiceAgreed: true,
+		PrivateKey:           accountPrivateKey,
+	}
+	account, err = client.NewAccount(ctx, account)
+	if err != nil {
+		t.Errorf("new account: %v", err)
+		return
+	}
+
+	// Every certificate needs a key.
+	certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		t.Errorf("generating certificate key: %v", err)
+		return
+	}
+	{
+		// The allowed domain must be issued a certificate.
+		certs, err := client.ObtainCertificateForSANs(ctx, account, certPrivateKey, []string{"localhost"})
+		if err != nil {
+			t.Errorf("obtaining certificate for allowed domain: %v", err)
+			return
+		}
+
+		// ACME servers should usually give you the entire certificate chain
+		// in PEM format, and sometimes even alternate chains! It's up to you
+		// which one(s) to store and use, but whatever you do, be sure to
+		// store the certificate and key somewhere safe and secure, i.e. don't
+		// lose them!
+		for _, cert := range certs {
+			t.Logf("Certificate %q:\n%s\n\n", cert.URL, cert.ChainPEM)
+		}
+	}
+	{
+		// A domain outside the allow list must be rejected.
+		_, err := client.ObtainCertificateForSANs(ctx, account, certPrivateKey, []string{"not-matching.localhost"})
+		if err == nil {
+			t.Errorf("expected issuance for 'not-matching.localhost' to be rejected")
+		} else if !strings.Contains(err.Error(), "urn:ietf:params:acme:error:rejectedIdentifier") {
+			// Errorf (not Logf): a wrong error must fail the test.
+			t.Errorf("unexpected error: %v", err)
+		}
+	}
+}
+
+// TestACMEServerDenyPolicy asserts that a deny policy rejects issuance for
+// listed domains with rejectedIdentifier.
+func TestACMEServerDenyPolicy(t *testing.T) {
+	tester := caddytest.NewTester(t)
+	tester.InitServer(`
+	{
+		skip_install_trust
+		local_certs
+		admin localhost:2999
+		http_port 9080
+		https_port 9443
+		pki {
+			ca local {
+				name "Caddy Local Authority"
+			}
+		}
+	}
+	acme.localhost {
+		acme_server {
+			deny {
+				domains deny.localhost
+			}
+		}
+	}
+  `, "caddyfile")
+
+	ctx := context.Background()
+	logger, err := zap.NewDevelopment()
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	client := acmez.Client{
+		Client: &acme.Client{
+			Directory:  "https://acme.localhost:9443/acme/local/directory",
+			HTTPClient: tester.Client,
+			Logger:     logger,
+		},
+		ChallengeSolvers: map[string]acmez.Solver{
+			acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger},
+		},
+	}
+
+	accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		t.Errorf("generating account key: %v", err)
+		// Bail out: continuing would register an account with a nil key.
+		return
+	}
+	account := acme.Account{
+		Contact:              []string{"mailto:you@example.com"},
+		TermsOfServiceAgreed: true,
+		PrivateKey:           accountPrivateKey,
+	}
+	account, err = client.NewAccount(ctx, account)
+	if err != nil {
+		t.Errorf("new account: %v", err)
+		return
+	}
+
+	// Every certificate needs a key.
+	certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		t.Errorf("generating certificate key: %v", err)
+		return
+	}
+	{
+		// The denied domain must be refused.
+		_, err := client.ObtainCertificateForSANs(ctx, account, certPrivateKey, []string{"deny.localhost"})
+		if err == nil {
+			t.Errorf("expected issuance for 'deny.localhost' to be rejected")
+		} else if !strings.Contains(err.Error(), "urn:ietf:params:acme:error:rejectedIdentifier") {
+			// Errorf (not Logf): a wrong error must fail the test.
+			t.Errorf("unexpected error: %v", err)
+		}
+	}
+}
diff --git a/caddytest/integration/autohttps_test.go b/caddytest/integration/autohttps_test.go
new file mode 100644
index 00000000..1dbdbcee
--- /dev/null
+++ b/caddytest/integration/autohttps_test.go
@@ -0,0 +1,145 @@
+package integration
+
+import (
+ "net/http"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddytest"
+)
+
+// TestAutoHTTPtoHTTPSRedirectsImplicitPort asserts that a site with no
+// explicit port redirects HTTP (9080) to HTTPS with the port omitted from
+// the Location header.
+func TestAutoHTTPtoHTTPSRedirectsImplicitPort(t *testing.T) {
+	tester := caddytest.NewTester(t)
+	tester.InitServer(`
+	{
+		admin localhost:2999
+		skip_install_trust
+		http_port 9080
+		https_port 9443
+	}
+	localhost
+	respond "Yahaha! You found me!"
+	`, "caddyfile")
+
+	tester.AssertRedirect("http://localhost:9080/", "https://localhost/", http.StatusPermanentRedirect)
+}
+
+// TestAutoHTTPtoHTTPSRedirectsExplicitPortSameAsHTTPSPort asserts that when
+// the site's explicit port equals the configured https_port, the redirect
+// Location omits the port.
+func TestAutoHTTPtoHTTPSRedirectsExplicitPortSameAsHTTPSPort(t *testing.T) {
+	tester := caddytest.NewTester(t)
+	tester.InitServer(`
+	{
+		skip_install_trust
+		admin localhost:2999
+		http_port 9080
+		https_port 9443
+	}
+	localhost:9443
+	respond "Yahaha! You found me!"
+	`, "caddyfile")
+
+	tester.AssertRedirect("http://localhost:9080/", "https://localhost/", http.StatusPermanentRedirect)
+}
+
+// TestAutoHTTPtoHTTPSRedirectsExplicitPortDifferentFromHTTPSPort asserts
+// that a site port differing from https_port is preserved in the redirect
+// Location (https://localhost:1234/).
+func TestAutoHTTPtoHTTPSRedirectsExplicitPortDifferentFromHTTPSPort(t *testing.T) {
+	tester := caddytest.NewTester(t)
+	tester.InitServer(`
+	{
+		skip_install_trust
+		admin localhost:2999
+		http_port 9080
+		https_port 9443
+	}
+	localhost:1234
+	respond "Yahaha! You found me!"
+	`, "caddyfile")
+
+	tester.AssertRedirect("http://localhost:9080/", "https://localhost:1234/", http.StatusPermanentRedirect)
+}
+
+// TestAutoHTTPRedirectsWithHTTPListenerFirstInAddresses asserts that auto
+// HTTPS redirects are installed even when the HTTP listener is listed
+// before the HTTPS listener in the server's addresses (JSON config).
+func TestAutoHTTPRedirectsWithHTTPListenerFirstInAddresses(t *testing.T) {
+	tester := caddytest.NewTester(t)
+	tester.InitServer(`
+{
+	"admin": {
+		"listen": "localhost:2999"
+	},
+	"apps": {
+		"http": {
+			"http_port": 9080,
+			"https_port": 9443,
+			"servers": {
+				"ingress_server": {
+					"listen": [
+						":9080",
+						":9443"
+					],
+					"routes": [
+						{
+							"match": [
+								{
+									"host": ["localhost"]
+								}
+							]
+						}
+					]
+				}
+			}
+		},
+		"pki": {
+			"certificate_authorities": {
+				"local": {
+					"install_trust": false
+				}
+			}
+		}
+	}
+}
+`, "json")
+	tester.AssertRedirect("http://localhost:9080/", "https://localhost/", http.StatusPermanentRedirect)
+}
+
+// TestAutoHTTPRedirectsInsertedBeforeUserDefinedCatchAll asserts that the
+// automatic HTTPS redirect for bar.localhost takes precedence over the
+// user-defined catch-all HTTP site, while explicit HTTP sites still match.
+func TestAutoHTTPRedirectsInsertedBeforeUserDefinedCatchAll(t *testing.T) {
+	tester := caddytest.NewTester(t)
+	tester.InitServer(`
+	{
+		skip_install_trust
+		admin localhost:2999
+		http_port 9080
+		https_port 9443
+		local_certs
+	}
+	http://:9080 {
+		respond "Foo"
+	}
+	http://baz.localhost:9080 {
+		respond "Baz"
+	}
+	bar.localhost {
+		respond "Bar"
+	}
+	`, "caddyfile")
+	tester.AssertRedirect("http://bar.localhost:9080/", "https://bar.localhost/", http.StatusPermanentRedirect)
+	tester.AssertGetResponse("http://foo.localhost:9080/", 200, "Foo")
+	tester.AssertGetResponse("http://baz.localhost:9080/", 200, "Baz")
+}
+
+// TestAutoHTTPRedirectsInsertedBeforeUserDefinedCatchAllWithNoExplicitHTTPSite
+// asserts that, with no explicit per-host HTTP site, hosts without an HTTPS
+// site (foo/baz) fall through to the catch-all while bar.localhost is
+// redirected to HTTPS.
+func TestAutoHTTPRedirectsInsertedBeforeUserDefinedCatchAllWithNoExplicitHTTPSite(t *testing.T) {
+	tester := caddytest.NewTester(t)
+	tester.InitServer(`
+	{
+		skip_install_trust
+		admin localhost:2999
+		http_port 9080
+		https_port 9443
+		local_certs
+	}
+	http://:9080 {
+		respond "Foo"
+	}
+	bar.localhost {
+		respond "Bar"
+	}
+	`, "caddyfile")
+	tester.AssertRedirect("http://bar.localhost:9080/", "https://bar.localhost/", http.StatusPermanentRedirect)
+	tester.AssertGetResponse("http://foo.localhost:9080/", 200, "Foo")
+	tester.AssertGetResponse("http://baz.localhost:9080/", 200, "Foo")
+}
diff --git a/caddytest/integration/caddyfile_adapt/acme_server_custom_challenges.caddyfiletest b/caddytest/integration/caddyfile_adapt/acme_server_custom_challenges.caddyfiletest
new file mode 100644
index 00000000..2a7a5149
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/acme_server_custom_challenges.caddyfiletest
@@ -0,0 +1,65 @@
+{
+ pki {
+ ca custom-ca {
+ name "Custom CA"
+ }
+ }
+}
+
+acme.example.com {
+ acme_server {
+ ca custom-ca
+ challenges dns-01
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "acme.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "ca": "custom-ca",
+ "challenges": [
+ "dns-01"
+ ],
+ "handler": "acme_server"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "pki": {
+ "certificate_authorities": {
+ "custom-ca": {
+ "name": "Custom CA"
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/acme_server_default_challenges.caddyfiletest b/caddytest/integration/caddyfile_adapt/acme_server_default_challenges.caddyfiletest
new file mode 100644
index 00000000..26d34504
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/acme_server_default_challenges.caddyfiletest
@@ -0,0 +1,62 @@
+{
+ pki {
+ ca custom-ca {
+ name "Custom CA"
+ }
+ }
+}
+
+acme.example.com {
+ acme_server {
+ ca custom-ca
+ challenges
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "acme.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "ca": "custom-ca",
+ "handler": "acme_server"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "pki": {
+ "certificate_authorities": {
+ "custom-ca": {
+ "name": "Custom CA"
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/acme_server_lifetime.caddyfiletest b/caddytest/integration/caddyfile_adapt/acme_server_lifetime.caddyfiletest
new file mode 100644
index 00000000..6099440a
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/acme_server_lifetime.caddyfiletest
@@ -0,0 +1,108 @@
+{
+ pki {
+ ca internal {
+ name "Internal"
+ root_cn "Internal Root Cert"
+ intermediate_cn "Internal Intermediate Cert"
+ }
+ ca internal-long-lived {
+ name "Long-lived"
+ root_cn "Internal Root Cert 2"
+ intermediate_cn "Internal Intermediate Cert 2"
+ }
+ }
+}
+
+acme-internal.example.com {
+ acme_server {
+ ca internal
+ }
+}
+
+acme-long-lived.example.com {
+ acme_server {
+ ca internal-long-lived
+ lifetime 7d
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "acme-long-lived.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "ca": "internal-long-lived",
+ "handler": "acme_server",
+ "lifetime": 604800000000000
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "acme-internal.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "ca": "internal",
+ "handler": "acme_server"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "pki": {
+ "certificate_authorities": {
+ "internal": {
+ "name": "Internal",
+ "root_common_name": "Internal Root Cert",
+ "intermediate_common_name": "Internal Intermediate Cert"
+ },
+ "internal-long-lived": {
+ "name": "Long-lived",
+ "root_common_name": "Internal Root Cert 2",
+ "intermediate_common_name": "Internal Intermediate Cert 2"
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/acme_server_multi_custom_challenges.caddyfiletest b/caddytest/integration/caddyfile_adapt/acme_server_multi_custom_challenges.caddyfiletest
new file mode 100644
index 00000000..7fe3ca66
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/acme_server_multi_custom_challenges.caddyfiletest
@@ -0,0 +1,66 @@
+{
+ pki {
+ ca custom-ca {
+ name "Custom CA"
+ }
+ }
+}
+
+acme.example.com {
+ acme_server {
+ ca custom-ca
+ challenges dns-01 http-01
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "acme.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "ca": "custom-ca",
+ "challenges": [
+ "dns-01",
+ "http-01"
+ ],
+ "handler": "acme_server"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "pki": {
+ "certificate_authorities": {
+ "custom-ca": {
+ "name": "Custom CA"
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/acme_server_sign_with_root.caddyfiletest b/caddytest/integration/caddyfile_adapt/acme_server_sign_with_root.caddyfiletest
new file mode 100644
index 00000000..5b504010
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/acme_server_sign_with_root.caddyfiletest
@@ -0,0 +1,67 @@
+{
+ pki {
+ ca internal {
+ name "Internal"
+ root_cn "Internal Root Cert"
+ intermediate_cn "Internal Intermediate Cert"
+ }
+ }
+}
+acme.example.com {
+ acme_server {
+ ca internal
+ sign_with_root
+ }
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "acme.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "ca": "internal",
+ "handler": "acme_server",
+ "sign_with_root": true
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "pki": {
+ "certificate_authorities": {
+ "internal": {
+ "name": "Internal",
+ "root_common_name": "Internal Root Cert",
+ "intermediate_common_name": "Internal Intermediate Cert"
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/auto_https_disable_redirects.caddyfiletest b/caddytest/integration/caddyfile_adapt/auto_https_disable_redirects.caddyfiletest
new file mode 100644
index 00000000..61637bba
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/auto_https_disable_redirects.caddyfiletest
@@ -0,0 +1,34 @@
+{
+ auto_https disable_redirects
+}
+
+localhost
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "disable_redirects": true
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/auto_https_ignore_loaded_certs.caddyfiletest b/caddytest/integration/caddyfile_adapt/auto_https_ignore_loaded_certs.caddyfiletest
new file mode 100644
index 00000000..1c654385
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/auto_https_ignore_loaded_certs.caddyfiletest
@@ -0,0 +1,34 @@
+{
+ auto_https ignore_loaded_certs
+}
+
+localhost
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "ignore_loaded_certificates": true
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/auto_https_off.caddyfiletest b/caddytest/integration/caddyfile_adapt/auto_https_off.caddyfiletest
new file mode 100644
index 00000000..d4014d2a
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/auto_https_off.caddyfiletest
@@ -0,0 +1,37 @@
+{
+ auto_https off
+}
+
+localhost
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "tls_connection_policies": [
+ {}
+ ],
+ "automatic_https": {
+ "disable": true
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/auto_https_prefer_wildcard.caddyfiletest b/caddytest/integration/caddyfile_adapt/auto_https_prefer_wildcard.caddyfiletest
new file mode 100644
index 00000000..04f2c466
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/auto_https_prefer_wildcard.caddyfiletest
@@ -0,0 +1,109 @@
+{
+ auto_https prefer_wildcard
+}
+
+*.example.com {
+ tls {
+ dns mock
+ }
+ respond "fallback"
+}
+
+foo.example.com {
+ respond "foo"
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "foo.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "foo",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "*.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "fallback",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip_certificates": [
+ "foo.example.com"
+ ],
+ "prefer_wildcard": true
+ }
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "*.example.com"
+ ],
+ "issuers": [
+ {
+ "challenges": {
+ "dns": {
+ "provider": {
+ "name": "mock"
+ }
+ }
+ },
+ "module": "acme"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/auto_https_prefer_wildcard_multi.caddyfiletest b/caddytest/integration/caddyfile_adapt/auto_https_prefer_wildcard_multi.caddyfiletest
new file mode 100644
index 00000000..4f8c26a5
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/auto_https_prefer_wildcard_multi.caddyfiletest
@@ -0,0 +1,268 @@
+{
+ auto_https prefer_wildcard
+}
+
+# Covers two domains
+*.one.example.com {
+ tls {
+ dns mock
+ }
+ respond "one fallback"
+}
+
+# Is covered, should not get its own AP
+foo.one.example.com {
+ respond "foo one"
+}
+
+# This one has its own tls config so it doesn't get covered (escape hatch)
+bar.one.example.com {
+ respond "bar one"
+ tls bar@bar.com
+}
+
+# Covers nothing but AP gets consolidated with the first
+*.two.example.com {
+ tls {
+ dns mock
+ }
+ respond "two fallback"
+}
+
+# Is HTTP so it should not cover
+http://*.three.example.com {
+ respond "three fallback"
+}
+
+# Has no wildcard coverage so it gets an AP
+foo.three.example.com {
+ respond "foo three"
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "foo.three.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "foo three",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "foo.one.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "foo one",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "bar.one.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "bar one",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "*.one.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "one fallback",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "*.two.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "two fallback",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip_certificates": [
+ "foo.one.example.com",
+ "bar.one.example.com"
+ ],
+ "prefer_wildcard": true
+ }
+ },
+ "srv1": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "*.three.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "three fallback",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "prefer_wildcard": true
+ }
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "foo.three.example.com"
+ ]
+ },
+ {
+ "subjects": [
+ "bar.one.example.com"
+ ],
+ "issuers": [
+ {
+ "email": "bar@bar.com",
+ "module": "acme"
+ },
+ {
+ "ca": "https://acme.zerossl.com/v2/DV90",
+ "email": "bar@bar.com",
+ "module": "acme"
+ }
+ ]
+ },
+ {
+ "subjects": [
+ "*.one.example.com",
+ "*.two.example.com"
+ ],
+ "issuers": [
+ {
+ "challenges": {
+ "dns": {
+ "provider": {
+ "name": "mock"
+ }
+ }
+ },
+ "module": "acme"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/bind_fd_fdgram_h123.caddyfiletest b/caddytest/integration/caddyfile_adapt/bind_fd_fdgram_h123.caddyfiletest
new file mode 100644
index 00000000..08f30d18
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/bind_fd_fdgram_h123.caddyfiletest
@@ -0,0 +1,142 @@
+{
+ auto_https disable_redirects
+ admin off
+}
+
+http://localhost {
+ bind fd/{env.CADDY_HTTP_FD} {
+ protocols h1
+ }
+ log
+ respond "Hello, HTTP!"
+}
+
+https://localhost {
+ bind fd/{env.CADDY_HTTPS_FD} {
+ protocols h1 h2
+ }
+ bind fdgram/{env.CADDY_HTTP3_FD} {
+ protocols h3
+ }
+ log
+ respond "Hello, HTTPS!"
+}
+----------
+{
+ "admin": {
+ "disabled": true
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ "fd/{env.CADDY_HTTPS_FD}",
+ "fdgram/{env.CADDY_HTTP3_FD}"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Hello, HTTPS!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "disable_redirects": true
+ },
+ "logs": {
+ "logger_names": {
+ "localhost": [
+ ""
+ ]
+ }
+ },
+ "listen_protocols": [
+ [
+ "h1",
+ "h2"
+ ],
+ [
+ "h3"
+ ]
+ ]
+ },
+ "srv1": {
+ "automatic_https": {
+ "disable_redirects": true
+ }
+ },
+ "srv2": {
+ "listen": [
+ "fd/{env.CADDY_HTTP_FD}"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Hello, HTTP!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "disable_redirects": true,
+ "skip": [
+ "localhost"
+ ]
+ },
+ "logs": {
+ "logger_names": {
+ "localhost": [
+ ""
+ ]
+ }
+ },
+ "listen_protocols": [
+ [
+ "h1"
+ ]
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/bind_ipv6.caddyfiletest b/caddytest/integration/caddyfile_adapt/bind_ipv6.caddyfiletest
new file mode 100644
index 00000000..d9d9bec6
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/bind_ipv6.caddyfiletest
@@ -0,0 +1,29 @@
+example.com {
+ bind tcp6/[::]
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ "tcp6/[::]:443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/enable_tls_for_catch_all_site.caddyfiletest b/caddytest/integration/caddyfile_adapt/enable_tls_for_catch_all_site.caddyfiletest
new file mode 100644
index 00000000..b37b40c0
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/enable_tls_for_catch_all_site.caddyfiletest
@@ -0,0 +1,37 @@
+:8443 {
+ tls internal {
+ on_demand
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8443"
+ ],
+ "tls_connection_policies": [
+ {}
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "issuers": [
+ {
+ "module": "internal"
+ }
+ ],
+ "on_demand": true
+ }
+ ]
+ }
+ }
+ }
+}
+
diff --git a/caddytest/integration/caddyfile_adapt/encode_options.caddyfiletest b/caddytest/integration/caddyfile_adapt/encode_options.caddyfiletest
new file mode 100644
index 00000000..ea9038ef
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/encode_options.caddyfiletest
@@ -0,0 +1,100 @@
+:80
+
+# All the options
+encode gzip zstd {
+ minimum_length 256
+ match {
+ status 2xx 4xx 500
+ header Content-Type text/*
+ header Content-Type application/json*
+ header Content-Type application/javascript*
+ header Content-Type application/xhtml+xml*
+ header Content-Type application/atom+xml*
+ header Content-Type application/rss+xml*
+ header Content-Type application/wasm*
+ header Content-Type image/svg+xml*
+ }
+}
+
+# Long way with a block for each encoding
+encode {
+ zstd
+ gzip 5
+}
+
+encode
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "encodings": {
+ "gzip": {},
+ "zstd": {}
+ },
+ "handler": "encode",
+ "match": {
+ "headers": {
+ "Content-Type": [
+ "text/*",
+ "application/json*",
+ "application/javascript*",
+ "application/xhtml+xml*",
+ "application/atom+xml*",
+ "application/rss+xml*",
+ "application/wasm*",
+ "image/svg+xml*"
+ ]
+ },
+ "status_code": [
+ 2,
+ 4,
+ 500
+ ]
+ },
+ "minimum_length": 256,
+ "prefer": [
+ "gzip",
+ "zstd"
+ ]
+ },
+ {
+ "encodings": {
+ "gzip": {
+ "level": 5
+ },
+ "zstd": {}
+ },
+ "handler": "encode",
+ "prefer": [
+ "zstd",
+ "gzip"
+ ]
+ },
+ {
+ "encodings": {
+ "gzip": {},
+ "zstd": {}
+ },
+ "handler": "encode",
+ "prefer": [
+ "zstd",
+ "gzip"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/error_example.caddyfiletest b/caddytest/integration/caddyfile_adapt/error_example.caddyfiletest
new file mode 100644
index 00000000..bd42aee5
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/error_example.caddyfiletest
@@ -0,0 +1,138 @@
+example.com {
+ root * /srv
+
+ # Trigger errors for certain paths
+ error /private* "Unauthorized" 403
+ error /hidden* "Not found" 404
+
+ # Handle the error by serving an HTML page
+ handle_errors {
+ rewrite * /{http.error.status_code}.html
+ file_server
+ }
+
+ file_server
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/srv"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Unauthorized",
+ "handler": "error",
+ "status_code": 403
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/private*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Not found",
+ "handler": "error",
+ "status_code": 404
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/hidden*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "errors": {
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group0",
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "/{http.error.status_code}.html"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/error_multi_site_blocks.caddyfiletest b/caddytest/integration/caddyfile_adapt/error_multi_site_blocks.caddyfiletest
new file mode 100644
index 00000000..0e84a13c
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/error_multi_site_blocks.caddyfiletest
@@ -0,0 +1,245 @@
+foo.localhost {
+ root * /srv
+ error /private* "Unauthorized" 410
+ error /fivehundred* "Internal Server Error" 500
+
+ handle_errors 5xx {
+ respond "Error In range [500 .. 599]"
+ }
+ handle_errors 410 {
+ respond "404 or 410 error"
+ }
+}
+
+bar.localhost {
+ root * /srv
+ error /private* "Unauthorized" 410
+ error /fivehundred* "Internal Server Error" 500
+
+ handle_errors 5xx {
+ respond "Error In range [500 .. 599] from second site"
+ }
+ handle_errors 410 {
+ respond "404 or 410 error from second site"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "foo.localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/srv"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Internal Server Error",
+ "handler": "error",
+ "status_code": 500
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/fivehundred*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Unauthorized",
+ "handler": "error",
+ "status_code": 410
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/private*"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "bar.localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/srv"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Internal Server Error",
+ "handler": "error",
+ "status_code": 500
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/fivehundred*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Unauthorized",
+ "handler": "error",
+ "status_code": 410
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/private*"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "errors": {
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "foo.localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "404 or 410 error",
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} in [410]"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "body": "Error In range [500 .. 599]",
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} \u003e= 500 \u0026\u0026 {http.error.status_code} \u003c= 599"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "bar.localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "404 or 410 error from second site",
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} in [410]"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "body": "Error In range [500 .. 599] from second site",
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} \u003e= 500 \u0026\u0026 {http.error.status_code} \u003c= 599"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/error_range_codes.caddyfiletest b/caddytest/integration/caddyfile_adapt/error_range_codes.caddyfiletest
new file mode 100644
index 00000000..46b70c8e
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/error_range_codes.caddyfiletest
@@ -0,0 +1,120 @@
+{
+ http_port 3010
+}
+localhost:3010 {
+ root * /srv
+ error /private* "Unauthorized" 410
+ error /hidden* "Not found" 404
+
+ handle_errors 4xx {
+ respond "Error in the [400 .. 499] range"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "http_port": 3010,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":3010"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/srv"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Unauthorized",
+ "handler": "error",
+ "status_code": 410
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/private*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Not found",
+ "handler": "error",
+ "status_code": 404
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/hidden*"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "errors": {
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Error in the [400 .. 499] range",
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/error_range_simple_codes.caddyfiletest b/caddytest/integration/caddyfile_adapt/error_range_simple_codes.caddyfiletest
new file mode 100644
index 00000000..70158830
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/error_range_simple_codes.caddyfiletest
@@ -0,0 +1,153 @@
+{
+ http_port 2099
+}
+localhost:2099 {
+ root * /srv
+ error /private* "Unauthorized" 410
+ error /threehundred* "Moved Permanently" 301
+ error /internalerr* "Internal Server Error" 500
+
+ handle_errors 500 3xx {
+ respond "Error code is equal to 500 or in the [300..399] range"
+ }
+ handle_errors 4xx {
+ respond "Error in the [400 .. 499] range"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "http_port": 2099,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":2099"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/srv"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Moved Permanently",
+ "handler": "error",
+ "status_code": 301
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/threehundred*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Internal Server Error",
+ "handler": "error",
+ "status_code": 500
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/internalerr*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Unauthorized",
+ "handler": "error",
+ "status_code": 410
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/private*"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "errors": {
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Error in the [400 .. 499] range",
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "body": "Error code is equal to 500 or in the [300..399] range",
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} \u003e= 300 \u0026\u0026 {http.error.status_code} \u003c= 399 || {http.error.status_code} in [500]"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/error_simple_codes.caddyfiletest b/caddytest/integration/caddyfile_adapt/error_simple_codes.caddyfiletest
new file mode 100644
index 00000000..5ac5863e
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/error_simple_codes.caddyfiletest
@@ -0,0 +1,120 @@
+{
+ http_port 3010
+}
+localhost:3010 {
+ root * /srv
+ error /private* "Unauthorized" 410
+ error /hidden* "Not found" 404
+
+ handle_errors 404 410 {
+ respond "404 or 410 error"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "http_port": 3010,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":3010"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/srv"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Unauthorized",
+ "handler": "error",
+ "status_code": 410
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/private*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Not found",
+ "handler": "error",
+ "status_code": 404
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/hidden*"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "errors": {
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "404 or 410 error",
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} in [404, 410]"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/error_sort.caddyfiletest b/caddytest/integration/caddyfile_adapt/error_sort.caddyfiletest
new file mode 100644
index 00000000..63701ccc
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/error_sort.caddyfiletest
@@ -0,0 +1,148 @@
+{
+ http_port 2099
+}
+localhost:2099 {
+ root * /srv
+ error /private* "Unauthorized" 410
+ error /hidden* "Not found" 404
+ error /internalerr* "Internal Server Error" 500
+
+ handle_errors {
+ respond "Fallback route: code outside the [400..499] range"
+ }
+ handle_errors 4xx {
+ respond "Error in the [400 .. 499] range"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "http_port": 2099,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":2099"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/srv"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Internal Server Error",
+ "handler": "error",
+ "status_code": 500
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/internalerr*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Unauthorized",
+ "handler": "error",
+ "status_code": 410
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/private*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "error": "Not found",
+ "handler": "error",
+ "status_code": 404
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/hidden*"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "errors": {
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Error in the [400 .. 499] range",
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "body": "Fallback route: code outside the [400..499] range",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/expression_quotes.caddyfiletest b/caddytest/integration/caddyfile_adapt/expression_quotes.caddyfiletest
new file mode 100644
index 00000000..4bc47a3d
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/expression_quotes.caddyfiletest
@@ -0,0 +1,162 @@
+(snippet) {
+ @g `{http.error.status_code} == 404`
+}
+
+example.com
+
+@a expression {http.error.status_code} == 400
+abort @a
+
+@b expression {http.error.status_code} == "401"
+abort @b
+
+@c expression {http.error.status_code} == `402`
+abort @c
+
+@d expression "{http.error.status_code} == 403"
+abort @d
+
+@e expression `{http.error.status_code} == 404`
+abort @e
+
+@f `{http.error.status_code} == 404`
+abort @f
+
+import snippet
+abort @g
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "abort": true,
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} == 400"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "abort": true,
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} == \"401\""
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "abort": true,
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": "{http.error.status_code} == `402`"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "abort": true,
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": {
+ "expr": "{http.error.status_code} == 403",
+ "name": "d"
+ }
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "abort": true,
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": {
+ "expr": "{http.error.status_code} == 404",
+ "name": "e"
+ }
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "abort": true,
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": {
+ "expr": "{http.error.status_code} == 404",
+ "name": "f"
+ }
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "abort": true,
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "expression": {
+ "expr": "{http.error.status_code} == 404",
+ "name": "g"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/file_server_disable_canonical_uris.caddyfiletest b/caddytest/integration/caddyfile_adapt/file_server_disable_canonical_uris.caddyfiletest
new file mode 100644
index 00000000..c30b9e95
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/file_server_disable_canonical_uris.caddyfiletest
@@ -0,0 +1,32 @@
+:80
+
+file_server {
+ disable_canonical_uris
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "canonical_uris": false,
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/file_server_etag_file_extensions.caddyfiletest b/caddytest/integration/caddyfile_adapt/file_server_etag_file_extensions.caddyfiletest
new file mode 100644
index 00000000..d0dc7921
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/file_server_etag_file_extensions.caddyfiletest
@@ -0,0 +1,40 @@
+:8080 {
+ root * ./
+ file_server {
+ etag_file_extensions .b3sum .sha256
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8080"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "./"
+ },
+ {
+ "etag_file_extensions": [
+ ".b3sum",
+ ".sha256"
+ ],
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/file_server_file_limit.caddyfiletest b/caddytest/integration/caddyfile_adapt/file_server_file_limit.caddyfiletest
new file mode 100644
index 00000000..cd73fbff
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/file_server_file_limit.caddyfiletest
@@ -0,0 +1,36 @@
+:80
+
+file_server {
+ browse {
+ file_limit 4000
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "browse": {
+ "file_limit": 4000
+ },
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/file_server_pass_thru.caddyfiletest b/caddytest/integration/caddyfile_adapt/file_server_pass_thru.caddyfiletest
new file mode 100644
index 00000000..cc7051b2
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/file_server_pass_thru.caddyfiletest
@@ -0,0 +1,32 @@
+:80
+
+file_server {
+ pass_thru
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ],
+ "pass_thru": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/file_server_precompressed.caddyfiletest b/caddytest/integration/caddyfile_adapt/file_server_precompressed.caddyfiletest
new file mode 100644
index 00000000..3154de96
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/file_server_precompressed.caddyfiletest
@@ -0,0 +1,61 @@
+:80
+
+file_server {
+ precompressed zstd br gzip
+}
+
+file_server {
+ precompressed
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ],
+ "precompressed": {
+ "br": {},
+ "gzip": {},
+ "zstd": {}
+ },
+ "precompressed_order": [
+ "zstd",
+ "br",
+ "gzip"
+ ]
+ },
+ {
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ],
+ "precompressed": {
+ "br": {},
+ "gzip": {},
+ "zstd": {}
+ },
+ "precompressed_order": [
+ "br",
+ "zstd",
+ "gzip"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/file_server_sort.caddyfiletest b/caddytest/integration/caddyfile_adapt/file_server_sort.caddyfiletest
new file mode 100644
index 00000000..7f07cba8
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/file_server_sort.caddyfiletest
@@ -0,0 +1,39 @@
+:80
+
+file_server {
+ browse {
+ sort size desc
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "browse": {
+ "sort": [
+ "size",
+ "desc"
+ ]
+ },
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/file_server_status.caddyfiletest b/caddytest/integration/caddyfile_adapt/file_server_status.caddyfiletest
new file mode 100644
index 00000000..ede1f4ad
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/file_server_status.caddyfiletest
@@ -0,0 +1,112 @@
+localhost
+
+root * /srv
+
+handle /nope* {
+ file_server {
+ status 403
+ }
+}
+
+handle /custom-status* {
+ file_server {
+ status {env.CUSTOM_STATUS}
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/srv"
+ }
+ ]
+ },
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ],
+ "status_code": "{env.CUSTOM_STATUS}"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/custom-status*"
+ ]
+ }
+ ]
+ },
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ],
+ "status_code": 403
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/nope*"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/forward_auth_authelia.caddyfiletest b/caddytest/integration/caddyfile_adapt/forward_auth_authelia.caddyfiletest
new file mode 100644
index 00000000..240bdc62
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/forward_auth_authelia.caddyfiletest
@@ -0,0 +1,203 @@
+app.example.com {
+ forward_auth authelia:9091 {
+ uri /api/authz/forward-auth
+ copy_headers Remote-User Remote-Groups Remote-Name Remote-Email
+ }
+
+ reverse_proxy backend:8080
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "app.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handle_response": [
+ {
+ "match": {
+ "status_code": [
+ 2
+ ]
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "set": {
+ "Remote-Email": [
+ "{http.reverse_proxy.header.Remote-Email}"
+ ]
+ }
+ }
+ }
+ ],
+ "match": [
+ {
+ "not": [
+ {
+ "vars": {
+ "{http.reverse_proxy.header.Remote-Email}": [
+ ""
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "set": {
+ "Remote-Groups": [
+ "{http.reverse_proxy.header.Remote-Groups}"
+ ]
+ }
+ }
+ }
+ ],
+ "match": [
+ {
+ "not": [
+ {
+ "vars": {
+ "{http.reverse_proxy.header.Remote-Groups}": [
+ ""
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "set": {
+ "Remote-Name": [
+ "{http.reverse_proxy.header.Remote-Name}"
+ ]
+ }
+ }
+ }
+ ],
+ "match": [
+ {
+ "not": [
+ {
+ "vars": {
+ "{http.reverse_proxy.header.Remote-Name}": [
+ ""
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "set": {
+ "Remote-User": [
+ "{http.reverse_proxy.header.Remote-User}"
+ ]
+ }
+ }
+ }
+ ],
+ "match": [
+ {
+ "not": [
+ {
+ "vars": {
+ "{http.reverse_proxy.header.Remote-User}": [
+ ""
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "handler": "reverse_proxy",
+ "headers": {
+ "request": {
+ "set": {
+ "X-Forwarded-Method": [
+ "{http.request.method}"
+ ],
+ "X-Forwarded-Uri": [
+ "{http.request.uri}"
+ ]
+ }
+ }
+ },
+ "rewrite": {
+ "method": "GET",
+ "uri": "/api/authz/forward-auth"
+ },
+ "upstreams": [
+ {
+ "dial": "authelia:9091"
+ }
+ ]
+ },
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "backend:8080"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/forward_auth_rename_headers.caddyfiletest b/caddytest/integration/caddyfile_adapt/forward_auth_rename_headers.caddyfiletest
new file mode 100644
index 00000000..c2be2ed4
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/forward_auth_rename_headers.caddyfiletest
@@ -0,0 +1,206 @@
+:8881
+
+forward_auth localhost:9000 {
+ uri /auth
+ copy_headers A>1 B C>3 {
+ D
+ E>5
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8881"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handle_response": [
+ {
+ "match": {
+ "status_code": [
+ 2
+ ]
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "set": {
+ "1": [
+ "{http.reverse_proxy.header.A}"
+ ]
+ }
+ }
+ }
+ ],
+ "match": [
+ {
+ "not": [
+ {
+ "vars": {
+ "{http.reverse_proxy.header.A}": [
+ ""
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "set": {
+ "B": [
+ "{http.reverse_proxy.header.B}"
+ ]
+ }
+ }
+ }
+ ],
+ "match": [
+ {
+ "not": [
+ {
+ "vars": {
+ "{http.reverse_proxy.header.B}": [
+ ""
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "set": {
+ "3": [
+ "{http.reverse_proxy.header.C}"
+ ]
+ }
+ }
+ }
+ ],
+ "match": [
+ {
+ "not": [
+ {
+ "vars": {
+ "{http.reverse_proxy.header.C}": [
+ ""
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "set": {
+ "D": [
+ "{http.reverse_proxy.header.D}"
+ ]
+ }
+ }
+ }
+ ],
+ "match": [
+ {
+ "not": [
+ {
+ "vars": {
+ "{http.reverse_proxy.header.D}": [
+ ""
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "set": {
+ "5": [
+ "{http.reverse_proxy.header.E}"
+ ]
+ }
+ }
+ }
+ ],
+ "match": [
+ {
+ "not": [
+ {
+ "vars": {
+ "{http.reverse_proxy.header.E}": [
+ ""
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "handler": "reverse_proxy",
+ "headers": {
+ "request": {
+ "set": {
+ "X-Forwarded-Method": [
+ "{http.request.method}"
+ ],
+ "X-Forwarded-Uri": [
+ "{http.request.uri}"
+ ]
+ }
+ }
+ },
+ "rewrite": {
+ "method": "GET",
+ "uri": "/auth"
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:9000"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/global_options.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options.caddyfiletest
new file mode 100644
index 00000000..99f45cdd
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options.caddyfiletest
@@ -0,0 +1,83 @@
+{
+ debug
+ http_port 8080
+ https_port 8443
+ grace_period 5s
+ shutdown_delay 10s
+ default_sni localhost
+ order root first
+ storage file_system {
+ root /data
+ }
+ storage_check off
+ storage_clean_interval off
+ acme_ca https://example.com
+ acme_ca_root /path/to/ca.crt
+ ocsp_stapling off
+
+ email test@example.com
+ admin off
+ on_demand_tls {
+ ask https://example.com
+ }
+ local_certs
+ key_type ed25519
+}
+
+:80
+----------
+{
+ "admin": {
+ "disabled": true
+ },
+ "logging": {
+ "logs": {
+ "default": {
+ "level": "DEBUG"
+ }
+ }
+ },
+ "storage": {
+ "module": "file_system",
+ "root": "/data"
+ },
+ "apps": {
+ "http": {
+ "http_port": 8080,
+ "https_port": 8443,
+ "grace_period": 5000000000,
+ "shutdown_delay": 10000000000,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "issuers": [
+ {
+ "module": "internal"
+ }
+ ],
+ "key_type": "ed25519",
+ "disable_ocsp_stapling": true
+ }
+ ],
+ "on_demand": {
+ "permission": {
+ "endpoint": "https://example.com",
+ "module": "http"
+ }
+ }
+ },
+ "disable_ocsp_stapling": true,
+ "disable_storage_check": true,
+ "disable_storage_clean": true
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_options_acme.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_acme.caddyfiletest
new file mode 100644
index 00000000..004a3a32
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_acme.caddyfiletest
@@ -0,0 +1,98 @@
+{
+ debug
+ http_port 8080
+ https_port 8443
+ default_sni localhost
+ order root first
+ storage file_system {
+ root /data
+ }
+ acme_ca https://example.com
+ acme_eab {
+ key_id 4K2scIVbBpNd-78scadB2g
+ mac_key abcdefghijklmnopqrstuvwx-abcdefghijklnopqrstuvwxyz12ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefgh
+ }
+ acme_ca_root /path/to/ca.crt
+ email test@example.com
+ admin off
+ on_demand_tls {
+ ask https://example.com
+ }
+ storage_clean_interval 7d
+ renew_interval 1d
+ ocsp_interval 2d
+
+ key_type ed25519
+}
+
+:80
+----------
+{
+ "admin": {
+ "disabled": true
+ },
+ "logging": {
+ "logs": {
+ "default": {
+ "level": "DEBUG"
+ }
+ }
+ },
+ "storage": {
+ "module": "file_system",
+ "root": "/data"
+ },
+ "apps": {
+ "http": {
+ "http_port": 8080,
+ "https_port": 8443,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "issuers": [
+ {
+ "ca": "https://example.com",
+ "challenges": {
+ "http": {
+ "alternate_port": 8080
+ },
+ "tls-alpn": {
+ "alternate_port": 8443
+ }
+ },
+ "email": "test@example.com",
+ "external_account": {
+ "key_id": "4K2scIVbBpNd-78scadB2g",
+ "mac_key": "abcdefghijklmnopqrstuvwx-abcdefghijklnopqrstuvwxyz12ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefgh"
+ },
+ "module": "acme",
+ "trusted_roots_pem_files": [
+ "/path/to/ca.crt"
+ ]
+ }
+ ],
+ "key_type": "ed25519"
+ }
+ ],
+ "on_demand": {
+ "permission": {
+ "endpoint": "https://example.com",
+ "module": "http"
+ }
+ },
+ "ocsp_interval": 172800000000000,
+ "renew_interval": 86400000000000,
+ "storage_clean_interval": 604800000000000
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_options_admin.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_admin.caddyfiletest
new file mode 100644
index 00000000..be309eaa
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_admin.caddyfiletest
@@ -0,0 +1,80 @@
+{
+ debug
+ http_port 8080
+ https_port 8443
+ default_sni localhost
+ order root first
+ storage file_system {
+ root /data
+ }
+ acme_ca https://example.com
+ acme_ca_root /path/to/ca.crt
+
+ email test@example.com
+ admin {
+ origins localhost:2019 [::1]:2019 127.0.0.1:2019 192.168.10.128
+ }
+ on_demand_tls {
+ ask https://example.com
+ }
+ local_certs
+ key_type ed25519
+}
+
+:80
+----------
+{
+ "admin": {
+ "listen": "localhost:2019",
+ "origins": [
+ "localhost:2019",
+ "[::1]:2019",
+ "127.0.0.1:2019",
+ "192.168.10.128"
+ ]
+ },
+ "logging": {
+ "logs": {
+ "default": {
+ "level": "DEBUG"
+ }
+ }
+ },
+ "storage": {
+ "module": "file_system",
+ "root": "/data"
+ },
+ "apps": {
+ "http": {
+ "http_port": 8080,
+ "https_port": 8443,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "issuers": [
+ {
+ "module": "internal"
+ }
+ ],
+ "key_type": "ed25519"
+ }
+ ],
+ "on_demand": {
+ "permission": {
+ "endpoint": "https://example.com",
+ "module": "http"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_options_admin_with_persist_config_off.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_admin_with_persist_config_off.caddyfiletest
new file mode 100644
index 00000000..998fe223
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_admin_with_persist_config_off.caddyfiletest
@@ -0,0 +1,36 @@
+{
+ http_port 8080
+ persist_config off
+ admin {
+ origins localhost:2019 [::1]:2019 127.0.0.1:2019 192.168.10.128
+ }
+}
+
+:80
+----------
+{
+ "admin": {
+ "listen": "localhost:2019",
+ "origins": [
+ "localhost:2019",
+ "[::1]:2019",
+ "127.0.0.1:2019",
+ "192.168.10.128"
+ ],
+ "config": {
+ "persist": false
+ }
+ },
+ "apps": {
+ "http": {
+ "http_port": 8080,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_options_debug_with_access_log.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_debug_with_access_log.caddyfiletest
new file mode 100644
index 00000000..772cd089
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_debug_with_access_log.caddyfiletest
@@ -0,0 +1,45 @@
+{
+ debug
+}
+
+:8881 {
+ log {
+ format console
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "default": {
+ "level": "DEBUG",
+ "exclude": [
+ "http.log.access.log0"
+ ]
+ },
+ "log0": {
+ "encoder": {
+ "format": "console"
+ },
+ "level": "DEBUG",
+ "include": [
+ "http.log.access.log0"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8881"
+ ],
+ "logs": {
+ "default_logger_name": "log0"
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/global_options_default_bind.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_default_bind.caddyfiletest
new file mode 100644
index 00000000..d0b4e269
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_default_bind.caddyfiletest
@@ -0,0 +1,54 @@
+{
+ default_bind tcp4/0.0.0.0 tcp6/[::]
+}
+
+example.com {
+}
+
+example.org:12345 {
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ "tcp4/0.0.0.0:12345",
+ "tcp6/[::]:12345"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.org"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv1": {
+ "listen": [
+ "tcp4/0.0.0.0:443",
+ "tcp6/[::]:443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/global_options_log_and_site.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_log_and_site.caddyfiletest
new file mode 100644
index 00000000..037c8b65
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_log_and_site.caddyfiletest
@@ -0,0 +1,77 @@
+{
+ log {
+ output file caddy.log
+ include some-log-source
+ exclude admin.api admin2.api
+ }
+ log custom-logger {
+ output file caddy.log
+ level WARN
+ include custom-log-source
+ }
+}
+
+:8884 {
+ log {
+ format json
+ output file access.log
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "custom-logger": {
+ "writer": {
+ "filename": "caddy.log",
+ "output": "file"
+ },
+ "level": "WARN",
+ "include": [
+ "custom-log-source"
+ ]
+ },
+ "default": {
+ "writer": {
+ "filename": "caddy.log",
+ "output": "file"
+ },
+ "include": [
+ "some-log-source"
+ ],
+ "exclude": [
+ "admin.api",
+ "admin2.api",
+ "custom-log-source",
+ "http.log.access.log0"
+ ]
+ },
+ "log0": {
+ "writer": {
+ "filename": "access.log",
+ "output": "file"
+ },
+ "encoder": {
+ "format": "json"
+ },
+ "include": [
+ "http.log.access.log0"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "logs": {
+ "default_logger_name": "log0"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_options_log_basic.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_log_basic.caddyfiletest
new file mode 100644
index 00000000..b8d32dc3
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_log_basic.caddyfiletest
@@ -0,0 +1,18 @@
+{
+ log {
+ output file foo.log
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "default": {
+ "writer": {
+ "filename": "foo.log",
+ "output": "file"
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_options_log_custom.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_log_custom.caddyfiletest
new file mode 100644
index 00000000..b39cce9e
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_log_custom.caddyfiletest
@@ -0,0 +1,35 @@
+{
+ log custom-logger {
+ format filter {
+ wrap console
+ fields {
+ request>remote_ip ip_mask {
+ ipv4 24
+ ipv6 32
+ }
+ }
+ }
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "custom-logger": {
+ "encoder": {
+ "fields": {
+ "request\u003eremote_ip": {
+ "filter": "ip_mask",
+ "ipv4_cidr": 24,
+ "ipv6_cidr": 32
+ }
+ },
+ "format": "filter",
+ "wrap": {
+ "format": "console"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_options_log_multi.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_log_multi.caddyfiletest
new file mode 100644
index 00000000..c20251ab
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_log_multi.caddyfiletest
@@ -0,0 +1,26 @@
+{
+ log first {
+ output file foo.log
+ }
+ log second {
+ format json
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "first": {
+ "writer": {
+ "filename": "foo.log",
+ "output": "file"
+ }
+ },
+ "second": {
+ "encoder": {
+ "format": "json"
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_options_log_sampling.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_log_sampling.caddyfiletest
new file mode 100644
index 00000000..12b73b2b
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_log_sampling.caddyfiletest
@@ -0,0 +1,23 @@
+{
+ log {
+ sampling {
+ interval 300
+ first 50
+ thereafter 40
+ }
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "default": {
+ "sampling": {
+ "interval": 300,
+ "first": 50,
+ "thereafter": 40
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/global_options_persist_config.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_persist_config.caddyfiletest
new file mode 100644
index 00000000..c905b476
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_persist_config.caddyfiletest
@@ -0,0 +1,25 @@
+{
+ persist_config off
+}
+
+:8881 {
+}
+----------
+{
+ "admin": {
+ "config": {
+ "persist": false
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8881"
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_options_preferred_chains.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_preferred_chains.caddyfiletest
new file mode 100644
index 00000000..1f5d0093
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_preferred_chains.caddyfiletest
@@ -0,0 +1,50 @@
+{
+ preferred_chains smallest
+}
+
+example.com
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "example.com"
+ ],
+ "issuers": [
+ {
+ "module": "acme",
+ "preferred_chains": {
+ "smallest": true
+ }
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_options_skip_install_trust.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_options_skip_install_trust.caddyfiletest
new file mode 100644
index 00000000..3a175a0d
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_options_skip_install_trust.caddyfiletest
@@ -0,0 +1,168 @@
+{
+ skip_install_trust
+ pki {
+ ca {
+ name "Local"
+ root_cn "Custom Local Root Name"
+ intermediate_cn "Custom Local Intermediate Name"
+ root {
+ cert /path/to/cert.pem
+ key /path/to/key.pem
+ format pem_file
+ }
+ intermediate {
+ cert /path/to/cert.pem
+ key /path/to/key.pem
+ format pem_file
+ }
+ }
+ ca foo {
+ name "Foo"
+ root_cn "Custom Foo Root Name"
+ intermediate_cn "Custom Foo Intermediate Name"
+ }
+ }
+}
+
+a.example.com {
+ tls internal
+}
+
+acme.example.com {
+ acme_server {
+ ca foo
+ }
+}
+
+acme-bar.example.com {
+ acme_server {
+ ca bar
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "acme-bar.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "ca": "bar",
+ "handler": "acme_server"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "acme.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "ca": "foo",
+ "handler": "acme_server"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "a.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "pki": {
+ "certificate_authorities": {
+ "bar": {
+ "install_trust": false
+ },
+ "foo": {
+ "name": "Foo",
+ "root_common_name": "Custom Foo Root Name",
+ "intermediate_common_name": "Custom Foo Intermediate Name",
+ "install_trust": false
+ },
+ "local": {
+ "name": "Local",
+ "root_common_name": "Custom Local Root Name",
+ "intermediate_common_name": "Custom Local Intermediate Name",
+ "install_trust": false,
+ "root": {
+ "certificate": "/path/to/cert.pem",
+ "private_key": "/path/to/key.pem",
+ "format": "pem_file"
+ },
+ "intermediate": {
+ "certificate": "/path/to/cert.pem",
+ "private_key": "/path/to/key.pem",
+ "format": "pem_file"
+ }
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "acme-bar.example.com",
+ "acme.example.com"
+ ]
+ },
+ {
+ "subjects": [
+ "a.example.com"
+ ],
+ "issuers": [
+ {
+ "module": "internal"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_server_options_multi.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_server_options_multi.caddyfiletest
new file mode 100644
index 00000000..ca5306fd
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_server_options_multi.caddyfiletest
@@ -0,0 +1,82 @@
+{
+ servers {
+ timeouts {
+ idle 90s
+ }
+ strict_sni_host insecure_off
+ }
+ servers :80 {
+ timeouts {
+ idle 60s
+ }
+ }
+ servers :443 {
+ timeouts {
+ idle 30s
+ }
+ strict_sni_host
+ }
+}
+
+foo.com {
+}
+
+http://bar.com {
+}
+
+:8080 {
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "idle_timeout": 30000000000,
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "foo.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "strict_sni_host": true
+ },
+ "srv1": {
+ "listen": [
+ ":80"
+ ],
+ "idle_timeout": 60000000000,
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "bar.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv2": {
+ "listen": [
+ ":8080"
+ ],
+ "idle_timeout": 90000000000,
+ "strict_sni_host": false
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/global_server_options_single.caddyfiletest b/caddytest/integration/caddyfile_adapt/global_server_options_single.caddyfiletest
new file mode 100644
index 00000000..2f3306fd
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/global_server_options_single.caddyfiletest
@@ -0,0 +1,92 @@
+{
+ servers {
+ listener_wrappers {
+ http_redirect
+ tls
+ }
+ timeouts {
+ read_body 30s
+ read_header 30s
+ write 30s
+ idle 30s
+ }
+ max_header_size 100MB
+ enable_full_duplex
+ log_credentials
+ protocols h1 h2 h2c h3
+ strict_sni_host
+ trusted_proxies static private_ranges
+ client_ip_headers Custom-Real-Client-IP X-Forwarded-For
+ client_ip_headers A-Third-One
+ }
+}
+
+foo.com {
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "listener_wrappers": [
+ {
+ "wrapper": "http_redirect"
+ },
+ {
+ "wrapper": "tls"
+ }
+ ],
+ "read_timeout": 30000000000,
+ "read_header_timeout": 30000000000,
+ "write_timeout": 30000000000,
+ "idle_timeout": 30000000000,
+ "max_header_bytes": 100000000,
+ "enable_full_duplex": true,
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "foo.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "strict_sni_host": true,
+ "trusted_proxies": {
+ "ranges": [
+ "192.168.0.0/16",
+ "172.16.0.0/12",
+ "10.0.0.0/8",
+ "127.0.0.1/8",
+ "fd00::/8",
+ "::1"
+ ],
+ "source": "static"
+ },
+ "client_ip_headers": [
+ "Custom-Real-Client-IP",
+ "X-Forwarded-For",
+ "A-Third-One"
+ ],
+ "logs": {
+ "should_log_credentials": true
+ },
+ "protocols": [
+ "h1",
+ "h2",
+ "h2c",
+ "h3"
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/handle_nested_in_route.caddyfiletest b/caddytest/integration/caddyfile_adapt/handle_nested_in_route.caddyfiletest
new file mode 100644
index 00000000..1f77d5c4
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/handle_nested_in_route.caddyfiletest
@@ -0,0 +1,78 @@
+:8881 {
+ route {
+ handle /foo/* {
+ respond "Foo"
+ }
+ handle {
+ respond "Bar"
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8881"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Foo",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/foo/*"
+ ]
+ }
+ ]
+ },
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Bar",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/handle_path.caddyfiletest b/caddytest/integration/caddyfile_adapt/handle_path.caddyfiletest
new file mode 100644
index 00000000..f8817433
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/handle_path.caddyfiletest
@@ -0,0 +1,52 @@
+:80
+handle_path /api/v1/* {
+ respond "API v1"
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "path": [
+ "/api/v1/*"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "rewrite",
+ "strip_path_prefix": "/api/v1"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "body": "API v1",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/handle_path_sorting.caddyfiletest b/caddytest/integration/caddyfile_adapt/handle_path_sorting.caddyfiletest
new file mode 100644
index 00000000..0a89f2ae
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/handle_path_sorting.caddyfiletest
@@ -0,0 +1,105 @@
+:80 {
+ handle /api/* {
+ respond "api"
+ }
+
+ handle_path /static/* {
+ respond "static"
+ }
+
+ handle {
+ respond "handle"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "group": "group3",
+ "match": [
+ {
+ "path": [
+ "/static/*"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "rewrite",
+ "strip_path_prefix": "/static"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "body": "static",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "group": "group3",
+ "match": [
+ {
+ "path": [
+ "/api/*"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "api",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "group": "group3",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "handle",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/header.caddyfiletest b/caddytest/integration/caddyfile_adapt/header.caddyfiletest
new file mode 100644
index 00000000..ec2a842a
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/header.caddyfiletest
@@ -0,0 +1,174 @@
+:80 {
+ header Denis "Ritchie"
+ header +Edsger "Dijkstra"
+ header ?John "von Neumann"
+ header -Wolfram
+ header {
+ Grace: "Hopper" # some users habitually suffix field names with a colon
+ +Ray "Solomonoff"
+ ?Tim "Berners-Lee"
+ defer
+ }
+ @images path /images/*
+ header @images {
+ Cache-Control "public, max-age=3600, stale-while-revalidate=86400"
+ }
+ header {
+ +Link "Foo"
+ +Link "Bar"
+ }
+ header >Set Defer
+ header >Replace Deferred Replacement
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "path": [
+ "/images/*"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "headers",
+ "response": {
+ "set": {
+ "Cache-Control": [
+ "public, max-age=3600, stale-while-revalidate=86400"
+ ]
+ }
+ }
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "response": {
+ "set": {
+ "Denis": [
+ "Ritchie"
+ ]
+ }
+ }
+ },
+ {
+ "handler": "headers",
+ "response": {
+ "add": {
+ "Edsger": [
+ "Dijkstra"
+ ]
+ }
+ }
+ },
+ {
+ "handler": "headers",
+ "response": {
+ "require": {
+ "headers": {
+ "John": null
+ }
+ },
+ "set": {
+ "John": [
+ "von Neumann"
+ ]
+ }
+ }
+ },
+ {
+ "handler": "headers",
+ "response": {
+ "deferred": true,
+ "delete": [
+ "Wolfram"
+ ]
+ }
+ },
+ {
+ "handler": "headers",
+ "response": {
+ "add": {
+ "Ray": [
+ "Solomonoff"
+ ]
+ },
+ "deferred": true,
+ "set": {
+ "Grace": [
+ "Hopper"
+ ]
+ }
+ }
+ },
+ {
+ "handler": "headers",
+ "response": {
+ "require": {
+ "headers": {
+ "Tim": null
+ }
+ },
+ "set": {
+ "Tim": [
+ "Berners-Lee"
+ ]
+ }
+ }
+ },
+ {
+ "handler": "headers",
+ "response": {
+ "add": {
+ "Link": [
+ "Foo",
+ "Bar"
+ ]
+ }
+ }
+ },
+ {
+ "handler": "headers",
+ "response": {
+ "deferred": true,
+ "set": {
+ "Set": [
+ "Defer"
+ ]
+ }
+ }
+ },
+ {
+ "handler": "headers",
+ "response": {
+ "deferred": true,
+ "replace": {
+ "Replace": [
+ {
+ "replace": "Replacement",
+ "search_regexp": "Deferred"
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/heredoc.caddyfiletest b/caddytest/integration/caddyfile_adapt/heredoc.caddyfiletest
new file mode 100644
index 00000000..f50d2b7f
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/heredoc.caddyfiletest
@@ -0,0 +1,51 @@
+example.com {
+ respond <
+ Foo
+ Foo
+
+ EOF 200
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "\u003chtml\u003e\n \u003chead\u003e\u003ctitle\u003eFoo\u003c/title\u003e\n \u003cbody\u003eFoo\u003c/body\u003e\n\u003c/html\u003e",
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/http_only_hostnames.caddyfiletest b/caddytest/integration/caddyfile_adapt/http_only_hostnames.caddyfiletest
new file mode 100644
index 00000000..d867e166
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/http_only_hostnames.caddyfiletest
@@ -0,0 +1,45 @@
+# https://github.com/caddyserver/caddy/issues/3977
+http://* {
+ respond "Hello, world!"
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "*"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Hello, world!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/http_only_on_any_address.caddyfiletest b/caddytest/integration/caddyfile_adapt/http_only_on_any_address.caddyfiletest
new file mode 100644
index 00000000..8af2c333
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/http_only_on_any_address.caddyfiletest
@@ -0,0 +1,37 @@
+:80 {
+ respond /version 200 {
+ body "hello from localhost"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "path": [
+ "/version"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/http_only_on_domain.caddyfiletest b/caddytest/integration/caddyfile_adapt/http_only_on_domain.caddyfiletest
new file mode 100644
index 00000000..d2792423
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/http_only_on_domain.caddyfiletest
@@ -0,0 +1,54 @@
+http://a.caddy.localhost {
+ respond /version 200 {
+ body "hello from localhost"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "a.caddy.localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/version"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/http_only_on_hostless_block.caddyfiletest b/caddytest/integration/caddyfile_adapt/http_only_on_hostless_block.caddyfiletest
new file mode 100644
index 00000000..9ccc59eb
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/http_only_on_hostless_block.caddyfiletest
@@ -0,0 +1,28 @@
+# Issue #4113
+:80, http://example.com {
+ respond "foo"
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "foo",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/http_only_on_localhost.caddyfiletest b/caddytest/integration/caddyfile_adapt/http_only_on_localhost.caddyfiletest
new file mode 100644
index 00000000..13326f65
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/http_only_on_localhost.caddyfiletest
@@ -0,0 +1,54 @@
+localhost:80 {
+ respond /version 200 {
+ body "hello from localhost"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/version"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/http_only_on_non_standard_port.caddyfiletest b/caddytest/integration/caddyfile_adapt/http_only_on_non_standard_port.caddyfiletest
new file mode 100644
index 00000000..65d86bbe
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/http_only_on_non_standard_port.caddyfiletest
@@ -0,0 +1,59 @@
+http://a.caddy.localhost:81 {
+ respond /version 200 {
+ body "hello from localhost"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":81"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "a.caddy.localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/version"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip": [
+ "a.caddy.localhost"
+ ]
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/http_valid_directive_like_site_address.caddyfiletest b/caddytest/integration/caddyfile_adapt/http_valid_directive_like_site_address.caddyfiletest
new file mode 100644
index 00000000..675523a5
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/http_valid_directive_like_site_address.caddyfiletest
@@ -0,0 +1,46 @@
+http://handle {
+ file_server
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "handle"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/https_on_domain.caddyfiletest b/caddytest/integration/caddyfile_adapt/https_on_domain.caddyfiletest
new file mode 100644
index 00000000..fc584c53
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/https_on_domain.caddyfiletest
@@ -0,0 +1,54 @@
+a.caddy.localhost {
+ respond /version 200 {
+ body "hello from localhost"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "a.caddy.localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/version"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/import_args_file.caddyfiletest b/caddytest/integration/caddyfile_adapt/import_args_file.caddyfiletest
new file mode 100644
index 00000000..1eb78f19
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/import_args_file.caddyfiletest
@@ -0,0 +1,49 @@
+example.com
+
+import testdata/import_respond.txt Groot Rocket
+import testdata/import_respond.txt you "the confused man"
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "'I am Groot', hears Rocket",
+ "handler": "static_response"
+ },
+ {
+ "body": "'I am you', hears the confused man",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/import_args_snippet.caddyfiletest b/caddytest/integration/caddyfile_adapt/import_args_snippet.caddyfiletest
new file mode 100644
index 00000000..9d5c2535
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/import_args_snippet.caddyfiletest
@@ -0,0 +1,87 @@
+(logging) {
+ log {
+ output file /var/log/caddy/{args[0]}.access.log
+ }
+}
+
+a.example.com {
+ import logging a.example.com
+}
+
+b.example.com {
+ import logging b.example.com
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "default": {
+ "exclude": [
+ "http.log.access.log0",
+ "http.log.access.log1"
+ ]
+ },
+ "log0": {
+ "writer": {
+ "filename": "/var/log/caddy/a.example.com.access.log",
+ "output": "file"
+ },
+ "include": [
+ "http.log.access.log0"
+ ]
+ },
+ "log1": {
+ "writer": {
+ "filename": "/var/log/caddy/b.example.com.access.log",
+ "output": "file"
+ },
+ "include": [
+ "http.log.access.log1"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "a.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "b.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "logs": {
+ "logger_names": {
+ "a.example.com": [
+ "log0"
+ ],
+ "b.example.com": [
+ "log1"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/import_args_snippet_env_placeholder.caddyfiletest b/caddytest/integration/caddyfile_adapt/import_args_snippet_env_placeholder.caddyfiletest
new file mode 100644
index 00000000..1bc907e5
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/import_args_snippet_env_placeholder.caddyfiletest
@@ -0,0 +1,31 @@
+(foo) {
+ respond {env.FOO}
+}
+
+:80 {
+ import foo
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "{env.FOO}",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/import_block_snippet.caddyfiletest b/caddytest/integration/caddyfile_adapt/import_block_snippet.caddyfiletest
new file mode 100644
index 00000000..a60c238c
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/import_block_snippet.caddyfiletest
@@ -0,0 +1,58 @@
+(snippet) {
+ header {
+ {block}
+ }
+}
+
+example.com {
+ import snippet {
+ foo bar
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "response": {
+ "set": {
+ "Foo": [
+ "bar"
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/import_block_snippet_args.caddyfiletest b/caddytest/integration/caddyfile_adapt/import_block_snippet_args.caddyfiletest
new file mode 100644
index 00000000..7f2e68b7
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/import_block_snippet_args.caddyfiletest
@@ -0,0 +1,56 @@
+(snippet) {
+ {block}
+}
+
+example.com {
+ import snippet {
+ header foo bar
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "response": {
+ "set": {
+ "Foo": [
+ "bar"
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/import_blocks_snippet.caddyfiletest b/caddytest/integration/caddyfile_adapt/import_blocks_snippet.caddyfiletest
new file mode 100644
index 00000000..4098f90b
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/import_blocks_snippet.caddyfiletest
@@ -0,0 +1,76 @@
+(snippet) {
+ header {
+ {blocks.foo}
+ }
+ header {
+ {blocks.bar}
+ }
+}
+
+example.com {
+ import snippet {
+ foo {
+ foo a
+ }
+ bar {
+ bar b
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "response": {
+ "set": {
+ "Foo": [
+ "a"
+ ]
+ }
+ }
+ },
+ {
+ "handler": "headers",
+ "response": {
+ "set": {
+ "Bar": [
+ "b"
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/import_blocks_snippet_nested.caddyfiletest b/caddytest/integration/caddyfile_adapt/import_blocks_snippet_nested.caddyfiletest
new file mode 100644
index 00000000..ac1c5226
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/import_blocks_snippet_nested.caddyfiletest
@@ -0,0 +1,82 @@
+(snippet) {
+ header {
+ {blocks.bar}
+ }
+ import sub_snippet {
+ bar {
+ {blocks.foo}
+ }
+ }
+}
+(sub_snippet) {
+ header {
+ {blocks.bar}
+ }
+}
+example.com {
+ import snippet {
+ foo {
+ foo a
+ }
+ bar {
+ bar b
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "response": {
+ "set": {
+ "Bar": [
+ "b"
+ ]
+ }
+ }
+ },
+ {
+ "handler": "headers",
+ "response": {
+ "set": {
+ "Foo": [
+ "a"
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/intercept_response.caddyfiletest b/caddytest/integration/caddyfile_adapt/intercept_response.caddyfiletest
new file mode 100644
index 00000000..c92b76fe
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/intercept_response.caddyfiletest
@@ -0,0 +1,230 @@
+localhost
+
+respond "To intercept"
+
+intercept {
+ @500 status 500
+ replace_status @500 400
+
+ @all status 2xx 3xx 4xx 5xx
+ replace_status @all {http.error.status_code}
+
+ replace_status {http.error.status_code}
+
+ @accel header X-Accel-Redirect *
+ handle_response @accel {
+ respond "Header X-Accel-Redirect!"
+ }
+
+ @another {
+ header X-Another *
+ }
+ handle_response @another {
+ respond "Header X-Another!"
+ }
+
+ @401 status 401
+ handle_response @401 {
+ respond "Status 401!"
+ }
+
+ handle_response {
+ respond "Any! This should be last in the JSON!"
+ }
+
+ @403 {
+ status 403
+ }
+ handle_response @403 {
+ respond "Status 403!"
+ }
+
+ @multi {
+ status 401 403
+ status 404
+ header Foo *
+ header Bar *
+ }
+ handle_response @multi {
+ respond "Headers Foo, Bar AND statuses 401, 403 and 404!"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handle_response": [
+ {
+ "match": {
+ "status_code": [
+ 500
+ ]
+ },
+ "status_code": 400
+ },
+ {
+ "match": {
+ "status_code": [
+ 2,
+ 3,
+ 4,
+ 5
+ ]
+ },
+ "status_code": "{http.error.status_code}"
+ },
+ {
+ "match": {
+ "headers": {
+ "X-Accel-Redirect": [
+ "*"
+ ]
+ }
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Header X-Accel-Redirect!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "match": {
+ "headers": {
+ "X-Another": [
+ "*"
+ ]
+ }
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Header X-Another!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "match": {
+ "status_code": [
+ 401
+ ]
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Status 401!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "match": {
+ "status_code": [
+ 403
+ ]
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Status 403!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "match": {
+ "headers": {
+ "Bar": [
+ "*"
+ ],
+ "Foo": [
+ "*"
+ ]
+ },
+ "status_code": [
+ 401,
+ 403,
+ 404
+ ]
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Headers Foo, Bar AND statuses 401, 403 and 404!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "status_code": "{http.error.status_code}"
+ },
+ {
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Any! This should be last in the JSON!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "handler": "intercept"
+ },
+ {
+ "body": "To intercept",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/invoke_named_routes.caddyfiletest b/caddytest/integration/caddyfile_adapt/invoke_named_routes.caddyfiletest
new file mode 100644
index 00000000..83d9859c
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/invoke_named_routes.caddyfiletest
@@ -0,0 +1,154 @@
+&(first) {
+ @first path /first
+ vars @first first 1
+ respond "first"
+}
+
+&(second) {
+ respond "second"
+}
+
+:8881 {
+ invoke first
+ route {
+ invoke second
+ }
+}
+
+:8882 {
+ handle {
+ invoke second
+ }
+}
+
+:8883 {
+ respond "no invoke"
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8881"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "invoke",
+ "name": "first"
+ },
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "invoke",
+ "name": "second"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "named_routes": {
+ "first": {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "first": 1,
+ "handler": "vars"
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/first"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "body": "first",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "second": {
+ "handle": [
+ {
+ "body": "second",
+ "handler": "static_response"
+ }
+ ]
+ }
+ }
+ },
+ "srv1": {
+ "listen": [
+ ":8882"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "invoke",
+ "name": "second"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "named_routes": {
+ "second": {
+ "handle": [
+ {
+ "body": "second",
+ "handler": "static_response"
+ }
+ ]
+ }
+ }
+ },
+ "srv2": {
+ "listen": [
+ ":8883"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "no invoke",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/log_add.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_add.caddyfiletest
new file mode 100644
index 00000000..4f91e464
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_add.caddyfiletest
@@ -0,0 +1,71 @@
+:80 {
+ log
+
+ vars foo foo
+
+ log_append const bar
+ log_append vars foo
+ log_append placeholder {path}
+
+ log_append /only-for-this-path secret value
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "foo": "foo",
+ "handler": "vars"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "path": [
+ "/only-for-this-path"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "log_append",
+ "key": "secret",
+ "value": "value"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "log_append",
+ "key": "const",
+ "value": "bar"
+ },
+ {
+ "handler": "log_append",
+ "key": "vars",
+ "value": "foo"
+ },
+ {
+ "handler": "log_append",
+ "key": "placeholder",
+ "value": "{http.request.uri.path}"
+ }
+ ]
+ }
+ ],
+ "logs": {}
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/log_append_encoder.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_append_encoder.caddyfiletest
new file mode 100644
index 00000000..88a6cd6b
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_append_encoder.caddyfiletest
@@ -0,0 +1,63 @@
+{
+ log {
+ format append {
+ wrap json
+ fields {
+ wrap "foo"
+ }
+ env {env.EXAMPLE}
+ int 1
+ float 1.1
+ bool true
+ string "string"
+ }
+ }
+}
+
+:80 {
+ respond "Hello, World!"
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "default": {
+ "encoder": {
+ "fields": {
+ "bool": true,
+ "env": "{env.EXAMPLE}",
+ "float": 1.1,
+ "int": 1,
+ "string": "string",
+ "wrap": "foo"
+ },
+ "format": "append",
+ "wrap": {
+ "format": "json"
+ }
+ }
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Hello, World!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/log_except_catchall_blocks.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_except_catchall_blocks.caddyfiletest
new file mode 100644
index 00000000..b2a7f2af
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_except_catchall_blocks.caddyfiletest
@@ -0,0 +1,112 @@
+http://localhost:2020 {
+ log
+ log_skip /first-hidden*
+ log_skip /second-hidden*
+ respond 200
+}
+
+:2020 {
+ respond 418
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":2020"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "log_skip": true
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/second-hidden*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "log_skip": true
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/first-hidden*"
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "status_code": 418
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip": [
+ "localhost"
+ ]
+ },
+ "logs": {
+ "logger_names": {
+ "localhost": [
+ ""
+ ]
+ },
+ "skip_unmapped_hosts": true
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/log_filter_no_wrap.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_filter_no_wrap.caddyfiletest
new file mode 100644
index 00000000..f63a1d92
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_filter_no_wrap.caddyfiletest
@@ -0,0 +1,52 @@
+:80
+
+log {
+ output stdout
+ format filter {
+ fields {
+ request>headers>Server delete
+ }
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "default": {
+ "exclude": [
+ "http.log.access.log0"
+ ]
+ },
+ "log0": {
+ "writer": {
+ "output": "stdout"
+ },
+ "encoder": {
+ "fields": {
+ "request\u003eheaders\u003eServer": {
+ "filter": "delete"
+ }
+ },
+ "format": "filter"
+ },
+ "include": [
+ "http.log.access.log0"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "logs": {
+ "default_logger_name": "log0"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/log_filter_with_header.txt b/caddytest/integration/caddyfile_adapt/log_filter_with_header.txt
new file mode 100644
index 00000000..3ab6d624
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_filter_with_header.txt
@@ -0,0 +1,151 @@
+localhost {
+ log {
+ output file ./caddy.access.log
+ }
+ log health_check_log {
+ output file ./caddy.access.health.log
+ no_hostname
+ }
+ log general_log {
+ output file ./caddy.access.general.log
+ no_hostname
+ }
+ @healthCheck `header_regexp('User-Agent', '^some-regexp$') || path('/healthz*')`
+ handle @healthCheck {
+ log_name health_check_log general_log
+ respond "Healthy"
+ }
+
+ handle {
+ respond "Hello World"
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "default": {
+ "exclude": [
+ "http.log.access.general_log",
+ "http.log.access.health_check_log",
+ "http.log.access.log0"
+ ]
+ },
+ "general_log": {
+ "writer": {
+ "filename": "./caddy.access.general.log",
+ "output": "file"
+ },
+ "include": [
+ "http.log.access.general_log"
+ ]
+ },
+ "health_check_log": {
+ "writer": {
+ "filename": "./caddy.access.health.log",
+ "output": "file"
+ },
+ "include": [
+ "http.log.access.health_check_log"
+ ]
+ },
+ "log0": {
+ "writer": {
+ "filename": "./caddy.access.log",
+ "output": "file"
+ },
+ "include": [
+ "http.log.access.log0"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "access_logger_names": [
+ "health_check_log",
+ "general_log"
+ ],
+ "handler": "vars"
+ },
+ {
+ "body": "Healthy",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "expression": {
+ "expr": "header_regexp('User-Agent', '^some-regexp$') || path('/healthz*')",
+ "name": "healthCheck"
+ }
+ }
+ ]
+ },
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Hello World",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "logs": {
+ "logger_names": {
+ "localhost": [
+ "log0"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/log_filters.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_filters.caddyfiletest
new file mode 100644
index 00000000..1b2fc2e5
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_filters.caddyfiletest
@@ -0,0 +1,136 @@
+:80
+
+log {
+ output stdout
+ format filter {
+ wrap console
+
+ # long form, with "fields" wrapper
+ fields {
+ uri query {
+ replace foo REDACTED
+ delete bar
+ hash baz
+ }
+ }
+
+ # short form, flatter structure
+ request>headers>Authorization replace REDACTED
+ request>headers>Server delete
+ request>headers>Cookie cookie {
+ replace foo REDACTED
+ delete bar
+ hash baz
+ }
+ request>remote_ip ip_mask {
+ ipv4 24
+ ipv6 32
+ }
+ request>client_ip ip_mask 16 32
+ request>headers>Regexp regexp secret REDACTED
+ request>headers>Hash hash
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "default": {
+ "exclude": [
+ "http.log.access.log0"
+ ]
+ },
+ "log0": {
+ "writer": {
+ "output": "stdout"
+ },
+ "encoder": {
+ "fields": {
+ "request\u003eclient_ip": {
+ "filter": "ip_mask",
+ "ipv4_cidr": 16,
+ "ipv6_cidr": 32
+ },
+ "request\u003eheaders\u003eAuthorization": {
+ "filter": "replace",
+ "value": "REDACTED"
+ },
+ "request\u003eheaders\u003eCookie": {
+ "actions": [
+ {
+ "name": "foo",
+ "type": "replace",
+ "value": "REDACTED"
+ },
+ {
+ "name": "bar",
+ "type": "delete"
+ },
+ {
+ "name": "baz",
+ "type": "hash"
+ }
+ ],
+ "filter": "cookie"
+ },
+ "request\u003eheaders\u003eHash": {
+ "filter": "hash"
+ },
+ "request\u003eheaders\u003eRegexp": {
+ "filter": "regexp",
+ "regexp": "secret",
+ "value": "REDACTED"
+ },
+ "request\u003eheaders\u003eServer": {
+ "filter": "delete"
+ },
+ "request\u003eremote_ip": {
+ "filter": "ip_mask",
+ "ipv4_cidr": 24,
+ "ipv6_cidr": 32
+ },
+ "uri": {
+ "actions": [
+ {
+ "parameter": "foo",
+ "type": "replace",
+ "value": "REDACTED"
+ },
+ {
+ "parameter": "bar",
+ "type": "delete"
+ },
+ {
+ "parameter": "baz",
+ "type": "hash"
+ }
+ ],
+ "filter": "query"
+ }
+ },
+ "format": "filter",
+ "wrap": {
+ "format": "console"
+ }
+ },
+ "include": [
+ "http.log.access.log0"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "logs": {
+ "default_logger_name": "log0"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/log_multi_logger_name.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_multi_logger_name.caddyfiletest
new file mode 100644
index 00000000..be9ec188
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_multi_logger_name.caddyfiletest
@@ -0,0 +1,117 @@
+(log-both) {
+ log {args[0]}-json {
+ hostnames {args[0]}
+ output file /var/log/{args[0]}.log
+ format json
+ }
+ log {args[0]}-console {
+ hostnames {args[0]}
+ output file /var/log/{args[0]}.json
+ format console
+ }
+}
+
+*.example.com {
+ # Subdomains log to multiple files at once, with
+ # different output files and formats.
+ import log-both foo.example.com
+ import log-both bar.example.com
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "bar.example.com-console": {
+ "writer": {
+ "filename": "/var/log/bar.example.com.json",
+ "output": "file"
+ },
+ "encoder": {
+ "format": "console"
+ },
+ "include": [
+ "http.log.access.bar.example.com-console"
+ ]
+ },
+ "bar.example.com-json": {
+ "writer": {
+ "filename": "/var/log/bar.example.com.log",
+ "output": "file"
+ },
+ "encoder": {
+ "format": "json"
+ },
+ "include": [
+ "http.log.access.bar.example.com-json"
+ ]
+ },
+ "default": {
+ "exclude": [
+ "http.log.access.bar.example.com-console",
+ "http.log.access.bar.example.com-json",
+ "http.log.access.foo.example.com-console",
+ "http.log.access.foo.example.com-json"
+ ]
+ },
+ "foo.example.com-console": {
+ "writer": {
+ "filename": "/var/log/foo.example.com.json",
+ "output": "file"
+ },
+ "encoder": {
+ "format": "console"
+ },
+ "include": [
+ "http.log.access.foo.example.com-console"
+ ]
+ },
+ "foo.example.com-json": {
+ "writer": {
+ "filename": "/var/log/foo.example.com.log",
+ "output": "file"
+ },
+ "encoder": {
+ "format": "json"
+ },
+ "include": [
+ "http.log.access.foo.example.com-json"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "*.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "logs": {
+ "logger_names": {
+ "bar.example.com": [
+ "bar.example.com-json",
+ "bar.example.com-console"
+ ],
+ "foo.example.com": [
+ "foo.example.com-json",
+ "foo.example.com-console"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/log_override_hostname.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_override_hostname.caddyfiletest
new file mode 100644
index 00000000..b9213e65
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_override_hostname.caddyfiletest
@@ -0,0 +1,117 @@
+*.example.com {
+ log {
+ hostnames foo.example.com bar.example.com
+ output file /foo-bar.txt
+ }
+ log {
+ hostnames baz.example.com
+ output file /baz.txt
+ }
+}
+
+example.com:8443 {
+ log {
+ output file /port.txt
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "default": {
+ "exclude": [
+ "http.log.access.log0",
+ "http.log.access.log1",
+ "http.log.access.log2"
+ ]
+ },
+ "log0": {
+ "writer": {
+ "filename": "/foo-bar.txt",
+ "output": "file"
+ },
+ "include": [
+ "http.log.access.log0"
+ ]
+ },
+ "log1": {
+ "writer": {
+ "filename": "/baz.txt",
+ "output": "file"
+ },
+ "include": [
+ "http.log.access.log1"
+ ]
+ },
+ "log2": {
+ "writer": {
+ "filename": "/port.txt",
+ "output": "file"
+ },
+ "include": [
+ "http.log.access.log2"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "*.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "logs": {
+ "logger_names": {
+ "bar.example.com": [
+ "log0"
+ ],
+ "baz.example.com": [
+ "log1"
+ ],
+ "foo.example.com": [
+ "log0"
+ ]
+ }
+ }
+ },
+ "srv1": {
+ "listen": [
+ ":8443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "logs": {
+ "logger_names": {
+ "example.com": [
+ "log2"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/log_override_name_multiaccess.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_override_name_multiaccess.caddyfiletest
new file mode 100644
index 00000000..2708503e
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_override_name_multiaccess.caddyfiletest
@@ -0,0 +1,88 @@
+{
+ log access-console {
+ include http.log.access.foo
+ output file access-localhost.log
+ format console
+ }
+
+ log access-json {
+ include http.log.access.foo
+ output file access-localhost.json
+ format json
+ }
+}
+
+http://localhost:8881 {
+ log foo
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "access-console": {
+ "writer": {
+ "filename": "access-localhost.log",
+ "output": "file"
+ },
+ "encoder": {
+ "format": "console"
+ },
+ "include": [
+ "http.log.access.foo"
+ ]
+ },
+ "access-json": {
+ "writer": {
+ "filename": "access-localhost.json",
+ "output": "file"
+ },
+ "encoder": {
+ "format": "json"
+ },
+ "include": [
+ "http.log.access.foo"
+ ]
+ },
+ "default": {
+ "exclude": [
+ "http.log.access.foo"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8881"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip": [
+ "localhost"
+ ]
+ },
+ "logs": {
+ "logger_names": {
+ "localhost": [
+ "foo"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/log_override_name_multiaccess_debug.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_override_name_multiaccess_debug.caddyfiletest
new file mode 100644
index 00000000..7ea65978
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_override_name_multiaccess_debug.caddyfiletest
@@ -0,0 +1,93 @@
+{
+ debug
+
+ log access-console {
+ include http.log.access.foo
+ output file access-localhost.log
+ format console
+ }
+
+ log access-json {
+ include http.log.access.foo
+ output file access-localhost.json
+ format json
+ }
+}
+
+http://localhost:8881 {
+ log foo
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "access-console": {
+ "writer": {
+ "filename": "access-localhost.log",
+ "output": "file"
+ },
+ "encoder": {
+ "format": "console"
+ },
+ "level": "DEBUG",
+ "include": [
+ "http.log.access.foo"
+ ]
+ },
+ "access-json": {
+ "writer": {
+ "filename": "access-localhost.json",
+ "output": "file"
+ },
+ "encoder": {
+ "format": "json"
+ },
+ "level": "DEBUG",
+ "include": [
+ "http.log.access.foo"
+ ]
+ },
+ "default": {
+ "level": "DEBUG",
+ "exclude": [
+ "http.log.access.foo"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8881"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip": [
+ "localhost"
+ ]
+ },
+ "logs": {
+ "logger_names": {
+ "localhost": [
+ "foo"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/log_roll_days.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_roll_days.caddyfiletest
new file mode 100644
index 00000000..3ead4ac1
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_roll_days.caddyfiletest
@@ -0,0 +1,51 @@
+:80
+
+log {
+ output file /var/log/access.log {
+ roll_size 1gb
+ roll_uncompressed
+ roll_local_time
+ roll_keep 5
+ roll_keep_for 90d
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "default": {
+ "exclude": [
+ "http.log.access.log0"
+ ]
+ },
+ "log0": {
+ "writer": {
+ "filename": "/var/log/access.log",
+ "output": "file",
+ "roll_gzip": false,
+ "roll_keep": 5,
+ "roll_keep_days": 90,
+ "roll_local_time": true,
+ "roll_size_mb": 954
+ },
+ "include": [
+ "http.log.access.log0"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "logs": {
+ "default_logger_name": "log0"
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/log_sampling.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_sampling.caddyfiletest
new file mode 100644
index 00000000..b5862257
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_sampling.caddyfiletest
@@ -0,0 +1,45 @@
+:80 {
+ log {
+ sampling {
+ interval 300
+ first 50
+ thereafter 40
+ }
+ }
+}
+----------
+{
+ "logging": {
+ "logs": {
+ "default": {
+ "exclude": [
+ "http.log.access.log0"
+ ]
+ },
+ "log0": {
+ "sampling": {
+ "interval": 300,
+ "first": 50,
+ "thereafter": 40
+ },
+ "include": [
+ "http.log.access.log0"
+ ]
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "logs": {
+ "default_logger_name": "log0"
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/log_skip_hosts.caddyfiletest b/caddytest/integration/caddyfile_adapt/log_skip_hosts.caddyfiletest
new file mode 100644
index 00000000..c10610c2
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/log_skip_hosts.caddyfiletest
@@ -0,0 +1,80 @@
+one.example.com {
+ log
+}
+
+two.example.com {
+}
+
+three.example.com {
+}
+
+example.com {
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "three.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "one.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "two.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "logs": {
+ "logger_names": {
+ "one.example.com": [
+ ""
+ ]
+ },
+ "skip_hosts": [
+ "example.com",
+ "three.example.com",
+ "two.example.com"
+ ]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/map_and_vars_with_raw_types.caddyfiletest b/caddytest/integration/caddyfile_adapt/map_and_vars_with_raw_types.caddyfiletest
new file mode 100644
index 00000000..8b872635
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/map_and_vars_with_raw_types.caddyfiletest
@@ -0,0 +1,127 @@
+example.com
+
+map {host} {my_placeholder} {magic_number} {
+ # Should output boolean "true" and an integer
+ example.com true 3
+
+ # Should output a string and null
+ foo.example.com "string value"
+
+ # Should output two strings (quoted int)
+ (.*)\.example.com "${1} subdomain" "5"
+
+ # Should output null and a string (quoted int)
+ ~.*\.net$ - `7`
+
+ # Should output a float and the string "false"
+ ~.*\.xyz$ 123.456 "false"
+
+ # Should output two strings, second being escaped quote
+ default "unknown domain" \"""
+}
+
+vars foo bar
+vars {
+ abc true
+ def 1
+ ghi 2.3
+ jkl "mn op"
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "defaults": [
+ "unknown domain",
+ "\""
+ ],
+ "destinations": [
+ "{my_placeholder}",
+ "{magic_number}"
+ ],
+ "handler": "map",
+ "mappings": [
+ {
+ "input": "example.com",
+ "outputs": [
+ true,
+ 3
+ ]
+ },
+ {
+ "input": "foo.example.com",
+ "outputs": [
+ "string value",
+ null
+ ]
+ },
+ {
+ "input": "(.*)\\.example.com",
+ "outputs": [
+ "${1} subdomain",
+ "5"
+ ]
+ },
+ {
+ "input_regexp": ".*\\.net$",
+ "outputs": [
+ null,
+ "7"
+ ]
+ },
+ {
+ "input_regexp": ".*\\.xyz$",
+ "outputs": [
+ 123.456,
+ "false"
+ ]
+ }
+ ],
+ "source": "{http.request.host}"
+ },
+ {
+ "abc": true,
+ "def": 1,
+ "ghi": 2.3,
+ "handler": "vars",
+ "jkl": "mn op"
+ },
+ {
+ "foo": "bar",
+ "handler": "vars"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/matcher_syntax.caddyfiletest b/caddytest/integration/caddyfile_adapt/matcher_syntax.caddyfiletest
new file mode 100644
index 00000000..efb66cf2
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/matcher_syntax.caddyfiletest
@@ -0,0 +1,336 @@
+:80 {
+ @matcher {
+ method GET
+ }
+ respond @matcher "get"
+
+ @matcher2 method POST
+ respond @matcher2 "post"
+
+ @matcher3 not method PUT
+ respond @matcher3 "not put"
+
+ @matcher4 vars "{http.request.uri}" "/vars-matcher"
+ respond @matcher4 "from vars matcher"
+
+ @matcher5 vars_regexp static "{http.request.uri}" `\.([a-f0-9]{6})\.(css|js)$`
+ respond @matcher5 "from vars_regexp matcher with name"
+
+ @matcher6 vars_regexp "{http.request.uri}" `\.([a-f0-9]{6})\.(css|js)$`
+ respond @matcher6 "from vars_regexp matcher without name"
+
+ @matcher7 `path('/foo*') && method('GET')`
+ respond @matcher7 "inline expression matcher shortcut"
+
+ @matcher8 {
+ header Foo bar
+ header Foo foobar
+ header Bar foo
+ }
+ respond @matcher8 "header matcher merging values of the same field"
+
+ @matcher9 {
+ query foo=bar foo=baz bar=foo
+ query bar=baz
+ }
+ respond @matcher9 "query matcher merging pairs with the same keys"
+
+ @matcher10 {
+ header !Foo
+ header Bar foo
+ }
+ respond @matcher10 "header matcher with null field matcher"
+
+ @matcher11 remote_ip private_ranges
+ respond @matcher11 "remote_ip matcher with private ranges"
+
+ @matcher12 client_ip private_ranges
+ respond @matcher12 "client_ip matcher with private ranges"
+
+ @matcher13 {
+ remote_ip 1.1.1.1
+ remote_ip 2.2.2.2
+ }
+ respond @matcher13 "remote_ip merged"
+
+ @matcher14 {
+ client_ip 1.1.1.1
+ client_ip 2.2.2.2
+ }
+ respond @matcher14 "client_ip merged"
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "method": [
+ "GET"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "body": "get",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "method": [
+ "POST"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "body": "post",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "not": [
+ {
+ "method": [
+ "PUT"
+ ]
+ }
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "body": "not put",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "vars": {
+ "{http.request.uri}": [
+ "/vars-matcher"
+ ]
+ }
+ }
+ ],
+ "handle": [
+ {
+ "body": "from vars matcher",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "vars_regexp": {
+ "{http.request.uri}": {
+ "name": "static",
+ "pattern": "\\.([a-f0-9]{6})\\.(css|js)$"
+ }
+ }
+ }
+ ],
+ "handle": [
+ {
+ "body": "from vars_regexp matcher with name",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "vars_regexp": {
+ "{http.request.uri}": {
+ "name": "matcher6",
+ "pattern": "\\.([a-f0-9]{6})\\.(css|js)$"
+ }
+ }
+ }
+ ],
+ "handle": [
+ {
+ "body": "from vars_regexp matcher without name",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "expression": {
+ "expr": "path('/foo*') \u0026\u0026 method('GET')",
+ "name": "matcher7"
+ }
+ }
+ ],
+ "handle": [
+ {
+ "body": "inline expression matcher shortcut",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "header": {
+ "Bar": [
+ "foo"
+ ],
+ "Foo": [
+ "bar",
+ "foobar"
+ ]
+ }
+ }
+ ],
+ "handle": [
+ {
+ "body": "header matcher merging values of the same field",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "query": {
+ "bar": [
+ "foo",
+ "baz"
+ ],
+ "foo": [
+ "bar",
+ "baz"
+ ]
+ }
+ }
+ ],
+ "handle": [
+ {
+ "body": "query matcher merging pairs with the same keys",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "header": {
+ "Bar": [
+ "foo"
+ ],
+ "Foo": null
+ }
+ }
+ ],
+ "handle": [
+ {
+ "body": "header matcher with null field matcher",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "remote_ip": {
+ "ranges": [
+ "192.168.0.0/16",
+ "172.16.0.0/12",
+ "10.0.0.0/8",
+ "127.0.0.1/8",
+ "fd00::/8",
+ "::1"
+ ]
+ }
+ }
+ ],
+ "handle": [
+ {
+ "body": "remote_ip matcher with private ranges",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "client_ip": {
+ "ranges": [
+ "192.168.0.0/16",
+ "172.16.0.0/12",
+ "10.0.0.0/8",
+ "127.0.0.1/8",
+ "fd00::/8",
+ "::1"
+ ]
+ }
+ }
+ ],
+ "handle": [
+ {
+ "body": "client_ip matcher with private ranges",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "remote_ip": {
+ "ranges": [
+ "1.1.1.1",
+ "2.2.2.2"
+ ]
+ }
+ }
+ ],
+ "handle": [
+ {
+ "body": "remote_ip merged",
+ "handler": "static_response"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "client_ip": {
+ "ranges": [
+ "1.1.1.1",
+ "2.2.2.2"
+ ]
+ }
+ }
+ ],
+ "handle": [
+ {
+ "body": "client_ip merged",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/matchers_in_route.caddyfiletest b/caddytest/integration/caddyfile_adapt/matchers_in_route.caddyfiletest
new file mode 100644
index 00000000..8c587b59
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/matchers_in_route.caddyfiletest
@@ -0,0 +1,31 @@
+:80 {
+ route {
+ # unused matchers should not panic
+ # see https://github.com/caddyserver/caddy/issues/3745
+ @matcher1 path /path1
+ @matcher2 path /path2
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/method_directive.caddyfiletest b/caddytest/integration/caddyfile_adapt/method_directive.caddyfiletest
new file mode 100644
index 00000000..786df90c
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/method_directive.caddyfiletest
@@ -0,0 +1,27 @@
+:8080 {
+ method FOO
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8080"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "rewrite",
+ "method": "FOO"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/metrics_disable_om.caddyfiletest b/caddytest/integration/caddyfile_adapt/metrics_disable_om.caddyfiletest
new file mode 100644
index 00000000..2d7b24f4
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/metrics_disable_om.caddyfiletest
@@ -0,0 +1,36 @@
+:80 {
+ metrics /metrics {
+ disable_openmetrics
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "path": [
+ "/metrics"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "disable_openmetrics": true,
+ "handler": "metrics"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/metrics_merge_options.caddyfiletest b/caddytest/integration/caddyfile_adapt/metrics_merge_options.caddyfiletest
new file mode 100644
index 00000000..946b3d0c
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/metrics_merge_options.caddyfiletest
@@ -0,0 +1,39 @@
+{
+ metrics
+ servers :80 {
+ metrics {
+ per_host
+ }
+ }
+}
+:80 {
+ respond "Hello"
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Hello",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "metrics": {
+ "per_host": true
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/metrics_perhost.caddyfiletest b/caddytest/integration/caddyfile_adapt/metrics_perhost.caddyfiletest
new file mode 100644
index 00000000..e362cecc
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/metrics_perhost.caddyfiletest
@@ -0,0 +1,37 @@
+{
+ servers :80 {
+ metrics {
+ per_host
+ }
+ }
+}
+:80 {
+ respond "Hello"
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Hello",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "metrics": {
+ "per_host": true
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/metrics_syntax.caddyfiletest b/caddytest/integration/caddyfile_adapt/metrics_syntax.caddyfiletest
new file mode 100644
index 00000000..ca08cea2
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/metrics_syntax.caddyfiletest
@@ -0,0 +1,33 @@
+:80 {
+ metrics /metrics
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "path": [
+ "/metrics"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "metrics"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/not_block_merging.caddyfiletest b/caddytest/integration/caddyfile_adapt/not_block_merging.caddyfiletest
new file mode 100644
index 00000000..c41a7496
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/not_block_merging.caddyfiletest
@@ -0,0 +1,49 @@
+:80
+
+@test {
+ not {
+ header Abc "123"
+ header Bcd "123"
+ }
+}
+respond @test 403
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "not": [
+ {
+ "header": {
+ "Abc": [
+ "123"
+ ],
+ "Bcd": [
+ "123"
+ ]
+ }
+ }
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "static_response",
+ "status_code": 403
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/php_fastcgi_expanded_form.caddyfiletest b/caddytest/integration/caddyfile_adapt/php_fastcgi_expanded_form.caddyfiletest
new file mode 100644
index 00000000..df2e2488
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/php_fastcgi_expanded_form.caddyfiletest
@@ -0,0 +1,133 @@
+:8886
+
+route {
+ # Add trailing slash for directory requests
+ @canonicalPath {
+ file {
+ try_files {path}/index.php
+ }
+ not path */
+ }
+ redir @canonicalPath {orig_path}/{orig_?query} 308
+
+ # If the requested file does not exist, try index files
+ @indexFiles {
+ file {
+ try_files {path} {path}/index.php index.php
+ split_path .php
+ }
+ }
+ rewrite @indexFiles {file_match.relative}
+
+ # Proxy PHP files to the FastCGI responder
+ @phpFiles {
+ path *.php
+ }
+ reverse_proxy @phpFiles 127.0.0.1:9000 {
+ transport fastcgi {
+ split .php
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8886"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "{http.request.orig_uri.path}/{http.request.orig_uri.prefixed_query}"
+ ]
+ },
+ "status_code": 308
+ }
+ ],
+ "match": [
+ {
+ "file": {
+ "try_files": [
+ "{http.request.uri.path}/index.php"
+ ]
+ },
+ "not": [
+ {
+ "path": [
+ "*/"
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "group": "group0",
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "{http.matchers.file.relative}"
+ }
+ ],
+ "match": [
+ {
+ "file": {
+ "split_path": [
+ ".php"
+ ],
+ "try_files": [
+ "{http.request.uri.path}",
+ "{http.request.uri.path}/index.php",
+ "index.php"
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "protocol": "fastcgi",
+ "split_path": [
+ ".php"
+ ]
+ },
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:9000"
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "*.php"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/php_fastcgi_handle_response.caddyfiletest b/caddytest/integration/caddyfile_adapt/php_fastcgi_handle_response.caddyfiletest
new file mode 100644
index 00000000..3a857654
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/php_fastcgi_handle_response.caddyfiletest
@@ -0,0 +1,146 @@
+:8881 {
+ php_fastcgi app:9000 {
+ env FOO bar
+
+ @error status 4xx
+ handle_response @error {
+ root * /errors
+ rewrite * /{http.reverse_proxy.status_code}.html
+ file_server
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8881"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "file": {
+ "try_files": [
+ "{http.request.uri.path}/index.php"
+ ]
+ },
+ "not": [
+ {
+ "path": [
+ "*/"
+ ]
+ }
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "{http.request.orig_uri.path}/{http.request.orig_uri.prefixed_query}"
+ ]
+ },
+ "status_code": 308
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "file": {
+ "try_files": [
+ "{http.request.uri.path}",
+ "{http.request.uri.path}/index.php",
+ "index.php"
+ ],
+ "try_policy": "first_exist_fallback",
+ "split_path": [
+ ".php"
+ ]
+ }
+ }
+ ],
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "{http.matchers.file.relative}"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "path": [
+ "*.php"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handle_response": [
+ {
+ "match": {
+ "status_code": [
+ 4
+ ]
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/errors"
+ }
+ ]
+ },
+ {
+ "group": "group0",
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "/{http.reverse_proxy.status_code}.html"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "file_server",
+ "hide": [
+ "./Caddyfile"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "handler": "reverse_proxy",
+ "transport": {
+ "env": {
+ "FOO": "bar"
+ },
+ "protocol": "fastcgi",
+ "split_path": [
+ ".php"
+ ]
+ },
+ "upstreams": [
+ {
+ "dial": "app:9000"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/php_fastcgi_index_off.caddyfiletest b/caddytest/integration/caddyfile_adapt/php_fastcgi_index_off.caddyfiletest
new file mode 100644
index 00000000..5ebdbd2e
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/php_fastcgi_index_off.caddyfiletest
@@ -0,0 +1,72 @@
+:8884
+
+php_fastcgi localhost:9000 {
+ # some php_fastcgi-specific subdirectives
+ split .php .php5
+ env VAR1 value1
+ env VAR2 value2
+ root /var/www
+ index off
+ dial_timeout 3s
+ read_timeout 10s
+ write_timeout 20s
+
+ # passed through to reverse_proxy (directive order doesn't matter!)
+ lb_policy random
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "path": [
+ "*.php",
+ "*.php5"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "load_balancing": {
+ "selection_policy": {
+ "policy": "random"
+ }
+ },
+ "transport": {
+ "dial_timeout": 3000000000,
+ "env": {
+ "VAR1": "value1",
+ "VAR2": "value2"
+ },
+ "protocol": "fastcgi",
+ "read_timeout": 10000000000,
+ "root": "/var/www",
+ "split_path": [
+ ".php",
+ ".php5"
+ ],
+ "write_timeout": 20000000000
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:9000"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/php_fastcgi_matcher.caddyfiletest b/caddytest/integration/caddyfile_adapt/php_fastcgi_matcher.caddyfiletest
new file mode 100644
index 00000000..4d1298fc
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/php_fastcgi_matcher.caddyfiletest
@@ -0,0 +1,128 @@
+:8884
+
+# the use of a host matcher here should cause this
+# site block to be wrapped in a subroute, even though
+# the site block does not have a hostname; this is
+# to prevent auto-HTTPS from picking up on this host
+# matcher because it is not a key on the site block
+@test host example.com
+php_fastcgi @test localhost:9000
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "{http.request.orig_uri.path}/{http.request.orig_uri.prefixed_query}"
+ ]
+ },
+ "status_code": 308
+ }
+ ],
+ "match": [
+ {
+ "file": {
+ "try_files": [
+ "{http.request.uri.path}/index.php"
+ ]
+ },
+ "not": [
+ {
+ "path": [
+ "*/"
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "{http.matchers.file.relative}"
+ }
+ ],
+ "match": [
+ {
+ "file": {
+ "split_path": [
+ ".php"
+ ],
+ "try_files": [
+ "{http.request.uri.path}",
+ "{http.request.uri.path}/index.php",
+ "index.php"
+ ],
+ "try_policy": "first_exist_fallback"
+ }
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "protocol": "fastcgi",
+ "split_path": [
+ ".php"
+ ]
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:9000"
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "*.php"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/php_fastcgi_subdirectives.caddyfiletest b/caddytest/integration/caddyfile_adapt/php_fastcgi_subdirectives.caddyfiletest
new file mode 100644
index 00000000..9a9ab5ab
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/php_fastcgi_subdirectives.caddyfiletest
@@ -0,0 +1,119 @@
+:8884
+
+php_fastcgi localhost:9000 {
+ # some php_fastcgi-specific subdirectives
+ split .php .php5
+ env VAR1 value1
+ env VAR2 value2
+ root /var/www
+ index index.php5
+
+ # passed through to reverse_proxy (directive order doesn't matter!)
+ lb_policy random
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "file": {
+ "try_files": [
+ "{http.request.uri.path}/index.php5"
+ ]
+ },
+ "not": [
+ {
+ "path": [
+ "*/"
+ ]
+ }
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "{http.request.orig_uri.path}/{http.request.orig_uri.prefixed_query}"
+ ]
+ },
+ "status_code": 308
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "file": {
+ "try_files": [
+ "{http.request.uri.path}",
+ "{http.request.uri.path}/index.php5",
+ "index.php5"
+ ],
+ "try_policy": "first_exist_fallback",
+ "split_path": [
+ ".php",
+ ".php5"
+ ]
+ }
+ }
+ ],
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "{http.matchers.file.relative}"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "path": [
+ "*.php",
+ "*.php5"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "load_balancing": {
+ "selection_policy": {
+ "policy": "random"
+ }
+ },
+ "transport": {
+ "env": {
+ "VAR1": "value1",
+ "VAR2": "value2"
+ },
+ "protocol": "fastcgi",
+ "root": "/var/www",
+ "split_path": [
+ ".php",
+ ".php5"
+ ]
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:9000"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/php_fastcgi_try_files_override.caddyfiletest b/caddytest/integration/caddyfile_adapt/php_fastcgi_try_files_override.caddyfiletest
new file mode 100644
index 00000000..75487a93
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/php_fastcgi_try_files_override.caddyfiletest
@@ -0,0 +1,124 @@
+:8884
+
+php_fastcgi localhost:9000 {
+ # some php_fastcgi-specific subdirectives
+ split .php .php5
+ env VAR1 value1
+ env VAR2 value2
+ root /var/www
+ try_files {path} {path}/index.php =404
+ dial_timeout 3s
+ read_timeout 10s
+ write_timeout 20s
+
+ # passed through to reverse_proxy (directive order doesn't matter!)
+ lb_policy random
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "file": {
+ "try_files": [
+ "{http.request.uri.path}/index.php"
+ ]
+ },
+ "not": [
+ {
+ "path": [
+ "*/"
+ ]
+ }
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "{http.request.orig_uri.path}/{http.request.orig_uri.prefixed_query}"
+ ]
+ },
+ "status_code": 308
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "file": {
+ "try_files": [
+ "{http.request.uri.path}",
+ "{http.request.uri.path}/index.php",
+ "=404"
+ ],
+ "split_path": [
+ ".php",
+ ".php5"
+ ]
+ }
+ }
+ ],
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "{http.matchers.file.relative}"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "path": [
+ "*.php",
+ "*.php5"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "load_balancing": {
+ "selection_policy": {
+ "policy": "random"
+ }
+ },
+ "transport": {
+ "dial_timeout": 3000000000,
+ "env": {
+ "VAR1": "value1",
+ "VAR2": "value2"
+ },
+ "protocol": "fastcgi",
+ "read_timeout": 10000000000,
+ "root": "/var/www",
+ "split_path": [
+ ".php",
+ ".php5"
+ ],
+ "write_timeout": 20000000000
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:9000"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/php_fastcgi_try_files_override_no_dir_index.caddyfiletest b/caddytest/integration/caddyfile_adapt/php_fastcgi_try_files_override_no_dir_index.caddyfiletest
new file mode 100644
index 00000000..203ab3b6
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/php_fastcgi_try_files_override_no_dir_index.caddyfiletest
@@ -0,0 +1,95 @@
+:8884
+
+php_fastcgi localhost:9000 {
+ # some php_fastcgi-specific subdirectives
+ split .php .php5
+ env VAR1 value1
+ env VAR2 value2
+ root /var/www
+ try_files {path} index.php
+ dial_timeout 3s
+ read_timeout 10s
+ write_timeout 20s
+
+ # passed through to reverse_proxy (directive order doesn't matter!)
+ lb_policy random
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "file": {
+ "try_files": [
+ "{http.request.uri.path}",
+ "index.php"
+ ],
+ "try_policy": "first_exist_fallback",
+ "split_path": [
+ ".php",
+ ".php5"
+ ]
+ }
+ }
+ ],
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "{http.matchers.file.relative}"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "path": [
+ "*.php",
+ "*.php5"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "load_balancing": {
+ "selection_policy": {
+ "policy": "random"
+ }
+ },
+ "transport": {
+ "dial_timeout": 3000000000,
+ "env": {
+ "VAR1": "value1",
+ "VAR2": "value2"
+ },
+ "protocol": "fastcgi",
+ "read_timeout": 10000000000,
+ "root": "/var/www",
+ "split_path": [
+ ".php",
+ ".php5"
+ ],
+ "write_timeout": 20000000000
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:9000"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/portless_upstream.caddyfiletest b/caddytest/integration/caddyfile_adapt/portless_upstream.caddyfiletest
new file mode 100644
index 00000000..0e060ddf
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/portless_upstream.caddyfiletest
@@ -0,0 +1,113 @@
+whoami.example.com {
+ reverse_proxy whoami
+}
+
+app.example.com {
+ reverse_proxy app:80
+}
+unix.example.com {
+ reverse_proxy unix//path/to/socket
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "whoami.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "whoami:80"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "unix.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "unix//path/to/socket"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "app.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "app:80"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/push.caddyfiletest b/caddytest/integration/caddyfile_adapt/push.caddyfiletest
new file mode 100644
index 00000000..1fe344e0
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/push.caddyfiletest
@@ -0,0 +1,78 @@
+:80
+
+push * /foo.txt
+
+push {
+ GET /foo.txt
+}
+
+push {
+ GET /foo.txt
+ HEAD /foo.txt
+}
+
+push {
+ headers {
+ Foo bar
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "push",
+ "resources": [
+ {
+ "target": "/foo.txt"
+ }
+ ]
+ },
+ {
+ "handler": "push",
+ "resources": [
+ {
+ "method": "GET",
+ "target": "/foo.txt"
+ }
+ ]
+ },
+ {
+ "handler": "push",
+ "resources": [
+ {
+ "method": "GET",
+ "target": "/foo.txt"
+ },
+ {
+ "method": "HEAD",
+ "target": "/foo.txt"
+ }
+ ]
+ },
+ {
+ "handler": "push",
+ "headers": {
+ "set": {
+ "Foo": [
+ "bar"
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/replaceable_upstream.caddyfiletest b/caddytest/integration/caddyfile_adapt/replaceable_upstream.caddyfiletest
new file mode 100644
index 00000000..202e3304
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/replaceable_upstream.caddyfiletest
@@ -0,0 +1,100 @@
+*.sandbox.localhost {
+ @sandboxPort {
+ header_regexp first_label Host ^([0-9]{3})\.sandbox\.
+ }
+ handle @sandboxPort {
+ reverse_proxy {re.first_label.1}
+ }
+ handle {
+ redir {scheme}://application.localhost
+ }
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "*.sandbox.localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "{http.regexp.first_label.1}"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "header_regexp": {
+ "Host": {
+ "name": "first_label",
+ "pattern": "^([0-9]{3})\\.sandbox\\."
+ }
+ }
+ }
+ ]
+ },
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "{http.request.scheme}://application.localhost"
+ ]
+ },
+ "status_code": 302
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/replaceable_upstream_partial_port.caddyfiletest b/caddytest/integration/caddyfile_adapt/replaceable_upstream_partial_port.caddyfiletest
new file mode 100644
index 00000000..7fbcb5c7
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/replaceable_upstream_partial_port.caddyfiletest
@@ -0,0 +1,100 @@
+*.sandbox.localhost {
+ @sandboxPort {
+ header_regexp port Host ^([0-9]{3})\.sandbox\.
+ }
+ handle @sandboxPort {
+ reverse_proxy app:6{re.port.1}
+ }
+ handle {
+ redir {scheme}://application.localhost
+ }
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "*.sandbox.localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "app:6{http.regexp.port.1}"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "header_regexp": {
+ "Host": {
+ "name": "port",
+ "pattern": "^([0-9]{3})\\.sandbox\\."
+ }
+ }
+ }
+ ]
+ },
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "{http.request.scheme}://application.localhost"
+ ]
+ },
+ "status_code": 302
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/replaceable_upstream_port.caddyfiletest b/caddytest/integration/caddyfile_adapt/replaceable_upstream_port.caddyfiletest
new file mode 100644
index 00000000..8f75c5bd
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/replaceable_upstream_port.caddyfiletest
@@ -0,0 +1,100 @@
+*.sandbox.localhost {
+ @sandboxPort {
+ header_regexp port Host ^([0-9]{3})\.sandbox\.
+ }
+ handle @sandboxPort {
+ reverse_proxy app:{re.port.1}
+ }
+ handle {
+ redir {scheme}://application.localhost
+ }
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "*.sandbox.localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "app:{http.regexp.port.1}"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "header_regexp": {
+ "Host": {
+ "name": "port",
+ "pattern": "^([0-9]{3})\\.sandbox\\."
+ }
+ }
+ }
+ ]
+ },
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "{http.request.scheme}://application.localhost"
+ ]
+ },
+ "status_code": 302
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/request_body.caddyfiletest b/caddytest/integration/caddyfile_adapt/request_body.caddyfiletest
new file mode 100644
index 00000000..1e4fd471
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/request_body.caddyfiletest
@@ -0,0 +1,46 @@
+localhost
+
+request_body {
+ max_size 1MB
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "request_body",
+ "max_size": 1000000
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/request_header.caddyfiletest b/caddytest/integration/caddyfile_adapt/request_header.caddyfiletest
new file mode 100644
index 00000000..bab3fcac
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/request_header.caddyfiletest
@@ -0,0 +1,90 @@
+:80
+
+@matcher path /something*
+request_header @matcher Denis "Ritchie"
+
+request_header +Edsger "Dijkstra"
+request_header -Wolfram
+
+@images path /images/*
+request_header @images Cache-Control "public, max-age=3600, stale-while-revalidate=86400"
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "path": [
+ "/something*"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "set": {
+ "Denis": [
+ "Ritchie"
+ ]
+ }
+ }
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "path": [
+ "/images/*"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "set": {
+ "Cache-Control": [
+ "public, max-age=3600, stale-while-revalidate=86400"
+ ]
+ }
+ }
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "request": {
+ "add": {
+ "Edsger": [
+ "Dijkstra"
+ ]
+ }
+ }
+ },
+ {
+ "handler": "headers",
+ "request": {
+ "delete": [
+ "Wolfram"
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_buffers.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_buffers.caddyfiletest
new file mode 100644
index 00000000..31789947
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_buffers.caddyfiletest
@@ -0,0 +1,58 @@
+https://example.com {
+ reverse_proxy https://localhost:54321 {
+ request_buffers unlimited
+ response_buffers unlimited
+ }
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "request_buffers": -1,
+ "response_buffers": -1,
+ "transport": {
+ "protocol": "http",
+ "tls": {}
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:54321"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_dynamic_upstreams.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_dynamic_upstreams.caddyfiletest
new file mode 100644
index 00000000..384cc056
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_dynamic_upstreams.caddyfiletest
@@ -0,0 +1,120 @@
+:8884 {
+ reverse_proxy {
+ dynamic a foo 9000
+ }
+
+ reverse_proxy {
+ dynamic a {
+ name foo
+ port 9000
+ refresh 5m
+ resolvers 8.8.8.8 8.8.4.4
+ dial_timeout 2s
+ dial_fallback_delay 300ms
+ versions ipv6
+ }
+ }
+}
+
+:8885 {
+ reverse_proxy {
+ dynamic srv _api._tcp.example.com
+ }
+
+ reverse_proxy {
+ dynamic srv {
+ service api
+ proto tcp
+ name example.com
+ refresh 5m
+ resolvers 8.8.8.8 8.8.4.4
+ dial_timeout 1s
+ dial_fallback_delay -1s
+ }
+ }
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "dynamic_upstreams": {
+ "name": "foo",
+ "port": "9000",
+ "source": "a"
+ },
+ "handler": "reverse_proxy"
+ },
+ {
+ "dynamic_upstreams": {
+ "dial_fallback_delay": 300000000,
+ "dial_timeout": 2000000000,
+ "name": "foo",
+ "port": "9000",
+ "refresh": 300000000000,
+ "resolver": {
+ "addresses": [
+ "8.8.8.8",
+ "8.8.4.4"
+ ]
+ },
+ "source": "a",
+ "versions": {
+ "ipv6": true
+ }
+ },
+ "handler": "reverse_proxy"
+ }
+ ]
+ }
+ ]
+ },
+ "srv1": {
+ "listen": [
+ ":8885"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "dynamic_upstreams": {
+ "name": "_api._tcp.example.com",
+ "source": "srv"
+ },
+ "handler": "reverse_proxy"
+ },
+ {
+ "dynamic_upstreams": {
+ "dial_fallback_delay": -1000000000,
+ "dial_timeout": 1000000000,
+ "name": "example.com",
+ "proto": "tcp",
+ "refresh": 300000000000,
+ "resolver": {
+ "addresses": [
+ "8.8.8.8",
+ "8.8.4.4"
+ ]
+ },
+ "service": "api",
+ "source": "srv"
+ },
+ "handler": "reverse_proxy"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_dynamic_upstreams_grace_period.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_dynamic_upstreams_grace_period.caddyfiletest
new file mode 100644
index 00000000..0389b2f1
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_dynamic_upstreams_grace_period.caddyfiletest
@@ -0,0 +1,38 @@
+:8884 {
+ reverse_proxy {
+ dynamic srv {
+ name foo
+ refresh 5m
+ grace_period 5s
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "dynamic_upstreams": {
+ "grace_period": 5000000000,
+ "name": "foo",
+ "refresh": 300000000000,
+ "source": "srv"
+ },
+ "handler": "reverse_proxy"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_empty_non_http_transport.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_empty_non_http_transport.caddyfiletest
new file mode 100644
index 00000000..bcbe29b6
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_empty_non_http_transport.caddyfiletest
@@ -0,0 +1,36 @@
+:8884
+
+reverse_proxy 127.0.0.1:65535 {
+ transport fastcgi
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "protocol": "fastcgi"
+ },
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:65535"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_h2c_shorthand.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_h2c_shorthand.caddyfiletest
new file mode 100644
index 00000000..59394673
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_h2c_shorthand.caddyfiletest
@@ -0,0 +1,55 @@
+:8884
+
+reverse_proxy h2c://localhost:8080
+
+reverse_proxy unix+h2c//run/app.sock
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "protocol": "http",
+ "versions": [
+ "h2c",
+ "2"
+ ]
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:8080"
+ }
+ ]
+ },
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "protocol": "http",
+ "versions": [
+ "h2c",
+ "2"
+ ]
+ },
+ "upstreams": [
+ {
+ "dial": "unix//run/app.sock"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_handle_response.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_handle_response.caddyfiletest
new file mode 100644
index 00000000..f6a26090
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_handle_response.caddyfiletest
@@ -0,0 +1,277 @@
+:8884
+
+reverse_proxy 127.0.0.1:65535 {
+ @500 status 500
+ replace_status @500 400
+
+ @all status 2xx 3xx 4xx 5xx
+ replace_status @all {http.error.status_code}
+
+ replace_status {http.error.status_code}
+
+ @accel header X-Accel-Redirect *
+ handle_response @accel {
+ respond "Header X-Accel-Redirect!"
+ }
+
+ @another {
+ header X-Another *
+ }
+ handle_response @another {
+ respond "Header X-Another!"
+ }
+
+ @401 status 401
+ handle_response @401 {
+ respond "Status 401!"
+ }
+
+ handle_response {
+ respond "Any! This should be last in the JSON!"
+ }
+
+ @403 {
+ status 403
+ }
+ handle_response @403 {
+ respond "Status 403!"
+ }
+
+ @multi {
+ status 401 403
+ status 404
+ header Foo *
+ header Bar *
+ }
+ handle_response @multi {
+ respond "Headers Foo, Bar AND statuses 401, 403 and 404!"
+ }
+
+ @200 status 200
+ handle_response @200 {
+ copy_response_headers {
+ include Foo Bar
+ }
+ respond "Copied headers from the response"
+ }
+
+ @201 status 201
+ handle_response @201 {
+ header Foo "Copying the response"
+ copy_response 404
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handle_response": [
+ {
+ "match": {
+ "status_code": [
+ 500
+ ]
+ },
+ "status_code": 400
+ },
+ {
+ "match": {
+ "status_code": [
+ 2,
+ 3,
+ 4,
+ 5
+ ]
+ },
+ "status_code": "{http.error.status_code}"
+ },
+ {
+ "match": {
+ "headers": {
+ "X-Accel-Redirect": [
+ "*"
+ ]
+ }
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Header X-Accel-Redirect!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "match": {
+ "headers": {
+ "X-Another": [
+ "*"
+ ]
+ }
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Header X-Another!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "match": {
+ "status_code": [
+ 401
+ ]
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Status 401!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "match": {
+ "status_code": [
+ 403
+ ]
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Status 403!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "match": {
+ "headers": {
+ "Bar": [
+ "*"
+ ],
+ "Foo": [
+ "*"
+ ]
+ },
+ "status_code": [
+ 401,
+ 403,
+ 404
+ ]
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Headers Foo, Bar AND statuses 401, 403 and 404!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "match": {
+ "status_code": [
+ 200
+ ]
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "copy_response_headers",
+ "include": [
+ "Foo",
+ "Bar"
+ ]
+ },
+ {
+ "body": "Copied headers from the response",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "match": {
+ "status_code": [
+ 201
+ ]
+ },
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "headers",
+ "response": {
+ "set": {
+ "Foo": [
+ "Copying the response"
+ ]
+ }
+ }
+ },
+ {
+ "handler": "copy_response",
+ "status_code": 404
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "status_code": "{http.error.status_code}"
+ },
+ {
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Any! This should be last in the JSON!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:65535"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_health_headers.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_health_headers.caddyfiletest
new file mode 100644
index 00000000..800c11f1
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_health_headers.caddyfiletest
@@ -0,0 +1,69 @@
+:8884
+
+reverse_proxy 127.0.0.1:65535 {
+ health_headers {
+ Host example.com
+ X-Header-Key 95ca39e3cbe7
+ X-Header-Keys VbG4NZwWnipo 335Q9/MhqcNU3s2TO
+ X-Empty-Value
+ Same-Key 1
+ Same-Key 2
+ X-System-Hostname {system.hostname}
+ }
+ health_uri /health
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "health_checks": {
+ "active": {
+ "headers": {
+ "Host": [
+ "example.com"
+ ],
+ "Same-Key": [
+ "1",
+ "2"
+ ],
+ "X-Empty-Value": [
+ ""
+ ],
+ "X-Header-Key": [
+ "95ca39e3cbe7"
+ ],
+ "X-Header-Keys": [
+ "VbG4NZwWnipo",
+ "335Q9/MhqcNU3s2TO"
+ ],
+ "X-System-Hostname": [
+ "{system.hostname}"
+ ]
+ },
+ "uri": "/health"
+ }
+ },
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:65535"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_health_method.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_health_method.caddyfiletest
new file mode 100644
index 00000000..920702c1
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_health_method.caddyfiletest
@@ -0,0 +1,40 @@
+:8884
+
+reverse_proxy 127.0.0.1:65535 {
+ health_uri /health
+ health_method HEAD
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "health_checks": {
+ "active": {
+ "method": "HEAD",
+ "uri": "/health"
+ }
+ },
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:65535"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_health_path_query.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_health_path_query.caddyfiletest
new file mode 100644
index 00000000..80ac2de5
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_health_path_query.caddyfiletest
@@ -0,0 +1,75 @@
+# Health with query in the uri
+:8443 {
+ reverse_proxy localhost:54321 {
+ health_uri /health?ready=1
+ health_status 2xx
+ }
+}
+
+# Health without query in the uri
+:8444 {
+ reverse_proxy localhost:54321 {
+ health_uri /health
+ health_status 200
+ }
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8443"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "health_checks": {
+ "active": {
+ "expect_status": 2,
+ "uri": "/health?ready=1"
+ }
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:54321"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "srv1": {
+ "listen": [
+ ":8444"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "health_checks": {
+ "active": {
+ "expect_status": 200,
+ "uri": "/health"
+ }
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:54321"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_health_reqbody.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_health_reqbody.caddyfiletest
new file mode 100644
index 00000000..ae5a6791
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_health_reqbody.caddyfiletest
@@ -0,0 +1,40 @@
+:8884
+
+reverse_proxy 127.0.0.1:65535 {
+ health_uri /health
+ health_request_body "test body"
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "health_checks": {
+ "active": {
+ "body": "test body",
+ "uri": "/health"
+ }
+ },
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:65535"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_http_transport_tls_file_cert.txt b/caddytest/integration/caddyfile_adapt/reverse_proxy_http_transport_tls_file_cert.txt
new file mode 100644
index 00000000..d43aa117
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_http_transport_tls_file_cert.txt
@@ -0,0 +1,47 @@
+:8884
+reverse_proxy 127.0.0.1:65535 {
+ transport http {
+ tls_trust_pool file {
+ pem_file ../caddy.ca.cer
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "protocol": "http",
+ "tls": {
+ "ca": {
+ "pem_files": [
+ "../caddy.ca.cer"
+ ],
+ "provider": "file"
+ }
+ }
+ },
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:65535"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_http_transport_tls_inline_cert.txt b/caddytest/integration/caddyfile_adapt/reverse_proxy_http_transport_tls_inline_cert.txt
new file mode 100644
index 00000000..ef9e8243
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_http_transport_tls_inline_cert.txt
@@ -0,0 +1,47 @@
+:8884
+reverse_proxy 127.0.0.1:65535 {
+ transport http {
+ tls_trust_pool inline {
+ trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "protocol": "http",
+ "tls": {
+ "ca": {
+ "provider": "inline",
+ "trusted_ca_certs": [
+ "MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
+ ]
+ }
+ }
+ },
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:65535"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_load_balance.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_load_balance.caddyfiletest
new file mode 100644
index 00000000..5885eec1
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_load_balance.caddyfiletest
@@ -0,0 +1,64 @@
+:8884
+
+reverse_proxy 127.0.0.1:65535 {
+ lb_policy first
+ lb_retries 5
+ lb_try_duration 10s
+ lb_try_interval 500ms
+ lb_retry_match {
+ path /foo*
+ method POST
+ }
+ lb_retry_match path /bar*
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "load_balancing": {
+ "retries": 5,
+ "retry_match": [
+ {
+ "method": [
+ "POST"
+ ],
+ "path": [
+ "/foo*"
+ ]
+ },
+ {
+ "path": [
+ "/bar*"
+ ]
+ }
+ ],
+ "selection_policy": {
+ "policy": "first"
+ },
+ "try_duration": 10000000000,
+ "try_interval": 500000000
+ },
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:65535"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_load_balance_wrr.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_load_balance_wrr.caddyfiletest
new file mode 100644
index 00000000..d41c4b8b
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_load_balance_wrr.caddyfiletest
@@ -0,0 +1,71 @@
+:8884
+
+reverse_proxy 127.0.0.1:65535 127.0.0.1:35535 {
+ lb_policy weighted_round_robin 10 1
+ lb_retries 5
+ lb_try_duration 10s
+ lb_try_interval 500ms
+ lb_retry_match {
+ path /foo*
+ method POST
+ }
+ lb_retry_match path /bar*
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "load_balancing": {
+ "retries": 5,
+ "retry_match": [
+ {
+ "method": [
+ "POST"
+ ],
+ "path": [
+ "/foo*"
+ ]
+ },
+ {
+ "path": [
+ "/bar*"
+ ]
+ }
+ ],
+ "selection_policy": {
+ "policy": "weighted_round_robin",
+ "weights": [
+ 10,
+ 1
+ ]
+ },
+ "try_duration": 10000000000,
+ "try_interval": 500000000
+ },
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:65535"
+ },
+ {
+ "dial": "127.0.0.1:35535"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_localaddr.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_localaddr.caddyfiletest
new file mode 100644
index 00000000..d734c9ce
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_localaddr.caddyfiletest
@@ -0,0 +1,57 @@
+https://example.com {
+ reverse_proxy http://localhost:54321 {
+ transport http {
+ local_address 192.168.0.1
+ }
+ }
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "local_address": "192.168.0.1",
+ "protocol": "http"
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:54321"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_options.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_options.caddyfiletest
new file mode 100644
index 00000000..f6420ca0
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_options.caddyfiletest
@@ -0,0 +1,133 @@
+https://example.com {
+ reverse_proxy /path https://localhost:54321 {
+ header_up Host {upstream_hostport}
+ header_up Foo bar
+
+ method GET
+ rewrite /rewritten?uri={uri}
+
+ request_buffers 4KB
+
+ transport http {
+ read_buffer 10MB
+ write_buffer 20MB
+ max_response_header 30MB
+ dial_timeout 3s
+ dial_fallback_delay 5s
+ response_header_timeout 8s
+ expect_continue_timeout 9s
+ resolvers 8.8.8.8 8.8.4.4
+
+ versions h2c 2
+ compression off
+ max_conns_per_host 5
+ keepalive_idle_conns_per_host 2
+ keepalive_interval 30s
+
+ tls_renegotiation freely
+ tls_except_ports 8181 8182
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "headers": {
+ "request": {
+ "set": {
+ "Foo": [
+ "bar"
+ ],
+ "Host": [
+ "{http.reverse_proxy.upstream.hostport}"
+ ]
+ }
+ }
+ },
+ "request_buffers": 4000,
+ "rewrite": {
+ "method": "GET",
+ "uri": "/rewritten?uri={http.request.uri}"
+ },
+ "transport": {
+ "compression": false,
+ "dial_fallback_delay": 5000000000,
+ "dial_timeout": 3000000000,
+ "expect_continue_timeout": 9000000000,
+ "keep_alive": {
+ "max_idle_conns_per_host": 2,
+ "probe_interval": 30000000000
+ },
+ "max_conns_per_host": 5,
+ "max_response_header_size": 30000000,
+ "protocol": "http",
+ "read_buffer_size": 10000000,
+ "resolver": {
+ "addresses": [
+ "8.8.8.8",
+ "8.8.4.4"
+ ]
+ },
+ "response_header_timeout": 8000000000,
+ "tls": {
+ "except_ports": [
+ "8181",
+ "8182"
+ ],
+ "renegotiation": "freely"
+ },
+ "versions": [
+ "h2c",
+ "2"
+ ],
+ "write_buffer_size": 20000000
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:54321"
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/path"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_port_range.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_port_range.caddyfiletest
new file mode 100644
index 00000000..978d8c96
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_port_range.caddyfiletest
@@ -0,0 +1,67 @@
+:8884 {
+ # Port range
+ reverse_proxy localhost:8001-8002
+
+ # Port range with placeholder
+ reverse_proxy {host}:8001-8002
+
+ # Port range with scheme
+ reverse_proxy https://localhost:8001-8002
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "localhost:8001"
+ },
+ {
+ "dial": "localhost:8002"
+ }
+ ]
+ },
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "{http.request.host}:8001"
+ },
+ {
+ "dial": "{http.request.host}:8002"
+ }
+ ]
+ },
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "protocol": "http",
+ "tls": {}
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:8001"
+ },
+ {
+ "dial": "localhost:8002"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_trusted_proxies.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_trusted_proxies.caddyfiletest
new file mode 100644
index 00000000..f1e685c0
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_trusted_proxies.caddyfiletest
@@ -0,0 +1,56 @@
+:8884
+
+reverse_proxy 127.0.0.1:65535 {
+ trusted_proxies 127.0.0.1
+}
+
+reverse_proxy 127.0.0.1:65535 {
+ trusted_proxies private_ranges
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "trusted_proxies": [
+ "127.0.0.1"
+ ],
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:65535"
+ }
+ ]
+ },
+ {
+ "handler": "reverse_proxy",
+ "trusted_proxies": [
+ "192.168.0.0/16",
+ "172.16.0.0/12",
+ "10.0.0.0/8",
+ "127.0.0.1/8",
+ "fd00::/8",
+ "::1"
+ ],
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:65535"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/reverse_proxy_upstream_placeholder.caddyfiletest b/caddytest/integration/caddyfile_adapt/reverse_proxy_upstream_placeholder.caddyfiletest
new file mode 100644
index 00000000..91c8f307
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/reverse_proxy_upstream_placeholder.caddyfiletest
@@ -0,0 +1,102 @@
+:8884 {
+ map {host} {upstream} {
+ foo.example.com 1.2.3.4
+ default 2.3.4.5
+ }
+
+ # Upstream placeholder with a port should retain the port
+ reverse_proxy {upstream}:80
+}
+
+:8885 {
+ map {host} {upstream} {
+ foo.example.com 1.2.3.4:8080
+ default 2.3.4.5:8080
+ }
+
+ # Upstream placeholder with no port should not have a port joined
+ reverse_proxy {upstream}
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8884"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "defaults": [
+ "2.3.4.5"
+ ],
+ "destinations": [
+ "{upstream}"
+ ],
+ "handler": "map",
+ "mappings": [
+ {
+ "input": "foo.example.com",
+ "outputs": [
+ "1.2.3.4"
+ ]
+ }
+ ],
+ "source": "{http.request.host}"
+ },
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "{upstream}:80"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "srv1": {
+ "listen": [
+ ":8885"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "defaults": [
+ "2.3.4.5:8080"
+ ],
+ "destinations": [
+ "{upstream}"
+ ],
+ "handler": "map",
+ "mappings": [
+ {
+ "input": "foo.example.com",
+ "outputs": [
+ "1.2.3.4:8080"
+ ]
+ }
+ ],
+ "source": "{http.request.host}"
+ },
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "{upstream}"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/rewrite_directive_permutations.caddyfiletest b/caddytest/integration/caddyfile_adapt/rewrite_directive_permutations.caddyfiletest
new file mode 100644
index 00000000..870e82af
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/rewrite_directive_permutations.caddyfiletest
@@ -0,0 +1,112 @@
+:8080
+
+# With explicit wildcard matcher
+route {
+ rewrite * /a
+}
+
+# With path matcher
+route {
+ rewrite /path /b
+}
+
+# With named matcher
+route {
+ @named method GET
+ rewrite @named /c
+}
+
+# With no matcher, assumed to be wildcard
+route {
+ rewrite /d
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8080"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group0",
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "/a"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group1",
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "/b"
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/path"
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group2",
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "/c"
+ }
+ ],
+ "match": [
+ {
+ "method": [
+ "GET"
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group3",
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri": "/d"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/root_directive_permutations.caddyfiletest b/caddytest/integration/caddyfile_adapt/root_directive_permutations.caddyfiletest
new file mode 100644
index 00000000..b2ef86c4
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/root_directive_permutations.caddyfiletest
@@ -0,0 +1,108 @@
+:8080
+
+# With explicit wildcard matcher
+route {
+ root * /a
+}
+
+# With path matcher
+route {
+ root /path /b
+}
+
+# With named matcher
+route {
+ @named method GET
+ root @named /c
+}
+
+# With no matcher, assumed to be wildcard
+route {
+ root /d
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8080"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/a"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/b"
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/path"
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/c"
+ }
+ ],
+ "match": [
+ {
+ "method": [
+ "GET"
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "vars",
+ "root": "/d"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/server_names.caddyfiletest b/caddytest/integration/caddyfile_adapt/server_names.caddyfiletest
new file mode 100644
index 00000000..e43eb8c0
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/server_names.caddyfiletest
@@ -0,0 +1,77 @@
+{
+ servers :443 {
+ name https
+ }
+
+ servers :8000 {
+ name app1
+ }
+
+ servers :8001 {
+ name app2
+ }
+
+ servers 123.123.123.123:8002 {
+ name bind-server
+ }
+}
+
+example.com {
+}
+
+:8000 {
+}
+
+:8001, :8002 {
+}
+
+:8002 {
+ bind 123.123.123.123 222.222.222.222
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "app1": {
+ "listen": [
+ ":8000"
+ ]
+ },
+ "app2": {
+ "listen": [
+ ":8001"
+ ]
+ },
+ "bind-server": {
+ "listen": [
+ "123.123.123.123:8002",
+ "222.222.222.222:8002"
+ ]
+ },
+ "https": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv4": {
+ "listen": [
+ ":8002"
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/shorthand_parameterized_placeholders.caddyfiletest b/caddytest/integration/caddyfile_adapt/shorthand_parameterized_placeholders.caddyfiletest
new file mode 100644
index 00000000..ef8d2330
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/shorthand_parameterized_placeholders.caddyfiletest
@@ -0,0 +1,63 @@
+localhost:80
+
+respond * "{header.content-type} {labels.0} {query.p} {path.0} {re.name.0}"
+
+@match path_regexp ^/foo(.*)$
+respond @match "{re.1}"
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "{http.regexp.1}",
+ "handler": "static_response"
+ }
+ ],
+ "match": [
+ {
+ "path_regexp": {
+ "name": "match",
+ "pattern": "^/foo(.*)$"
+ }
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "body": "{http.request.header.content-type} {http.request.host.labels.0} {http.request.uri.query.p} {http.request.uri.path.0} {http.regexp.name.0}",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/site_block_sorting.caddyfiletest b/caddytest/integration/caddyfile_adapt/site_block_sorting.caddyfiletest
new file mode 100644
index 00000000..b2c6a6d3
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/site_block_sorting.caddyfiletest
@@ -0,0 +1,193 @@
+# https://caddy.community/t/caddy-suddenly-directs-my-site-to-the-wrong-directive/11597/2
+abcdef {
+ respond "abcdef"
+}
+
+abcdefg {
+ respond "abcdefg"
+}
+
+abc {
+ respond "abc"
+}
+
+abcde, http://abcde {
+ respond "abcde"
+}
+
+:443, ab {
+ respond "443 or ab"
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "abcdefg"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "abcdefg",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "abcdef"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "abcdef",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "abcde"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "abcde",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "abc"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "abc",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "443 or ab",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv1": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "abcde"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "abcde",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "certificates": {
+ "automate": [
+ "ab"
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/sort_directives_with_any_matcher_first.caddyfiletest b/caddytest/integration/caddyfile_adapt/sort_directives_with_any_matcher_first.caddyfiletest
new file mode 100644
index 00000000..3859a7e5
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/sort_directives_with_any_matcher_first.caddyfiletest
@@ -0,0 +1,51 @@
+:80
+
+respond 200
+
+@untrusted not remote_ip 10.1.1.0/24
+respond @untrusted 401
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "not": [
+ {
+ "remote_ip": {
+ "ranges": [
+ "10.1.1.0/24"
+ ]
+ }
+ }
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "static_response",
+ "status_code": 401
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/sort_directives_within_handle.caddyfiletest b/caddytest/integration/caddyfile_adapt/sort_directives_within_handle.caddyfiletest
new file mode 100644
index 00000000..ac0d53cc
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/sort_directives_within_handle.caddyfiletest
@@ -0,0 +1,169 @@
+*.example.com {
+ @foo host foo.example.com
+ handle @foo {
+ handle_path /strip {
+ respond "this should be first"
+ }
+ handle_path /strip* {
+ respond "this should be second"
+ }
+ handle {
+ respond "this should be last"
+ }
+ }
+ handle {
+ respond "this should be last"
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "*.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group6",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group3",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "rewrite",
+ "strip_path_prefix": "/strip"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "body": "this should be first",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/strip"
+ ]
+ }
+ ]
+ },
+ {
+ "group": "group3",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "rewrite",
+ "strip_path_prefix": "/strip"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "body": "this should be second",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/strip*"
+ ]
+ }
+ ]
+ },
+ {
+ "group": "group3",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "this should be last",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "host": [
+ "foo.example.com"
+ ]
+ }
+ ]
+ },
+ {
+ "group": "group6",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "this should be last",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/sort_vars_in_reverse.caddyfiletest b/caddytest/integration/caddyfile_adapt/sort_vars_in_reverse.caddyfiletest
new file mode 100644
index 00000000..38a912f9
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/sort_vars_in_reverse.caddyfiletest
@@ -0,0 +1,75 @@
+:80
+
+vars /foobar foo last
+vars /foo foo middle-last
+vars /foo* foo middle-first
+vars * foo first
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "foo": "first",
+ "handler": "vars"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "path": [
+ "/foo*"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "foo": "middle-first",
+ "handler": "vars"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "path": [
+ "/foo"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "foo": "middle-last",
+ "handler": "vars"
+ }
+ ]
+ },
+ {
+ "match": [
+ {
+ "path": [
+ "/foobar"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "foo": "last",
+ "handler": "vars"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_acme_preferred_chains.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_acme_preferred_chains.caddyfiletest
new file mode 100644
index 00000000..d6242d7d
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_acme_preferred_chains.caddyfiletest
@@ -0,0 +1,57 @@
+localhost
+
+tls {
+ issuer acme {
+ preferred_chains {
+ any_common_name "Generic CA 1" "Generic CA 2"
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "localhost"
+ ],
+ "issuers": [
+ {
+ "module": "acme",
+ "preferred_chains": {
+ "any_common_name": [
+ "Generic CA 1",
+ "Generic CA 2"
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_1.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_1.caddyfiletest
new file mode 100644
index 00000000..c3fd4898
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_1.caddyfiletest
@@ -0,0 +1,86 @@
+{
+ local_certs
+}
+
+*.tld, *.*.tld {
+ tls {
+ on_demand
+ }
+}
+
+foo.tld, www.foo.tld {
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "foo.tld",
+ "www.foo.tld"
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "*.tld",
+ "*.*.tld"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "foo.tld",
+ "www.foo.tld"
+ ],
+ "issuers": [
+ {
+ "module": "internal"
+ }
+ ]
+ },
+ {
+ "subjects": [
+ "*.*.tld",
+ "*.tld"
+ ],
+ "issuers": [
+ {
+ "module": "internal"
+ }
+ ],
+ "on_demand": true
+ },
+ {
+ "issuers": [
+ {
+ "module": "internal"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_10.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_10.caddyfiletest
new file mode 100644
index 00000000..b6832ad1
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_10.caddyfiletest
@@ -0,0 +1,58 @@
+# example from issue #4667
+{
+ auto_https off
+}
+
+https://, example.com {
+ tls test.crt test.key
+ respond "Hello World"
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Hello World",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "certificate_selection": {
+ "any_tag": [
+ "cert0"
+ ]
+ }
+ }
+ ],
+ "automatic_https": {
+ "disable": true
+ }
+ }
+ }
+ },
+ "tls": {
+ "certificates": {
+ "load_files": [
+ {
+ "certificate": "test.crt",
+ "key": "test.key",
+ "tags": [
+ "cert0"
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_11.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_11.caddyfiletest
new file mode 100644
index 00000000..9cdfd120
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_11.caddyfiletest
@@ -0,0 +1,67 @@
+# example from https://caddy.community/t/21415
+a.com {
+ tls {
+ get_certificate http http://foo.com/get
+ }
+}
+
+b.com {
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "a.com"
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "b.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "a.com"
+ ],
+ "get_certificate": [
+ {
+ "url": "http://foo.com/get",
+ "via": "http"
+ }
+ ]
+ },
+ {
+ "subjects": [
+ "b.com"
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_2.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_2.caddyfiletest
new file mode 100644
index 00000000..17196ec0
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_2.caddyfiletest
@@ -0,0 +1,92 @@
+# issue #3953
+{
+ cert_issuer zerossl api_key
+}
+
+example.com {
+ tls {
+ on_demand
+ key_type rsa2048
+ }
+}
+
+http://example.net {
+}
+
+:1234 {
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":1234"
+ ]
+ },
+ "srv1": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv2": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.net"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "example.com"
+ ],
+ "issuers": [
+ {
+ "api_key": "api_key",
+ "module": "zerossl"
+ }
+ ],
+ "key_type": "rsa2048",
+ "on_demand": true
+ },
+ {
+ "issuers": [
+ {
+ "api_key": "api_key",
+ "module": "zerossl"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_3.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_3.caddyfiletest
new file mode 100644
index 00000000..9daaf436
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_3.caddyfiletest
@@ -0,0 +1,90 @@
+# https://caddy.community/t/caddyfile-having-individual-sites-differ-from-global-options/11297
+{
+ local_certs
+}
+
+a.example.com {
+ tls internal
+}
+
+b.example.com {
+ tls abc@example.com
+}
+
+c.example.com {
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "a.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "b.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "c.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "b.example.com"
+ ],
+ "issuers": [
+ {
+ "email": "abc@example.com",
+ "module": "acme"
+ },
+ {
+ "ca": "https://acme.zerossl.com/v2/DV90",
+ "email": "abc@example.com",
+ "module": "acme"
+ }
+ ]
+ },
+ {
+ "issuers": [
+ {
+ "module": "internal"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_4.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_4.caddyfiletest
new file mode 100644
index 00000000..a4385a8f
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_4.caddyfiletest
@@ -0,0 +1,144 @@
+{
+ email my.email@example.com
+}
+
+:82 {
+ redir https://example.com{uri}
+}
+
+:83 {
+ redir https://example.com{uri}
+}
+
+:84 {
+ redir https://example.com{uri}
+}
+
+abc.de {
+ redir https://example.com{uri}
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "abc.de"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "https://example.com{http.request.uri}"
+ ]
+ },
+ "status_code": 302
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv1": {
+ "listen": [
+ ":82"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "https://example.com{http.request.uri}"
+ ]
+ },
+ "status_code": 302
+ }
+ ]
+ }
+ ]
+ },
+ "srv2": {
+ "listen": [
+ ":83"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "https://example.com{http.request.uri}"
+ ]
+ },
+ "status_code": 302
+ }
+ ]
+ }
+ ]
+ },
+ "srv3": {
+ "listen": [
+ ":84"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "headers": {
+ "Location": [
+ "https://example.com{http.request.uri}"
+ ]
+ },
+ "status_code": 302
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "issuers": [
+ {
+ "email": "my.email@example.com",
+ "module": "acme"
+ },
+ {
+ "ca": "https://acme.zerossl.com/v2/DV90",
+ "email": "my.email@example.com",
+ "module": "acme"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_5.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_5.caddyfiletest
new file mode 100644
index 00000000..87d278db
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_5.caddyfiletest
@@ -0,0 +1,62 @@
+a.example.com {
+}
+
+b.example.com {
+}
+
+:443 {
+ tls {
+ on_demand
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "a.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "b.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "a.example.com",
+ "b.example.com"
+ ]
+ },
+ {
+ "on_demand": true
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_6.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_6.caddyfiletest
new file mode 100644
index 00000000..b3ad7ff2
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_6.caddyfiletest
@@ -0,0 +1,120 @@
+# (this Caddyfile is contrived, but based on issue #4161)
+
+example.com {
+ tls {
+ ca https://foobar
+ }
+}
+
+example.com:8443 {
+ tls {
+ ca https://foobar
+ }
+}
+
+example.com:8444 {
+ tls {
+ ca https://foobar
+ }
+}
+
+example.com:8445 {
+ tls {
+ ca https://foobar
+ }
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv1": {
+ "listen": [
+ ":8443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv2": {
+ "listen": [
+ ":8444"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv3": {
+ "listen": [
+ ":8445"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "example.com"
+ ],
+ "issuers": [
+ {
+ "ca": "https://foobar",
+ "module": "acme"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_7.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_7.caddyfiletest
new file mode 100644
index 00000000..4b17bf3d
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_7.caddyfiletest
@@ -0,0 +1,68 @@
+# (this Caddyfile is contrived, but based on issues #4176 and #4198)
+
+http://example.com {
+}
+
+https://example.com {
+ tls internal
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv1": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "example.com"
+ ],
+ "issuers": [
+ {
+ "module": "internal"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_8.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_8.caddyfiletest
new file mode 100644
index 00000000..bd1bbf22
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_8.caddyfiletest
@@ -0,0 +1,99 @@
+# (this Caddyfile is contrived, but based on issues #4176 and #4198)
+
+http://example.com {
+}
+
+https://example.com {
+ tls abc@example.com
+}
+
+http://localhost:8081 {
+}
+
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv1": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv2": {
+ "listen": [
+ ":8081"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip": [
+ "localhost"
+ ]
+ }
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "example.com"
+ ],
+ "issuers": [
+ {
+ "email": "abc@example.com",
+ "module": "acme"
+ },
+ {
+ "ca": "https://acme.zerossl.com/v2/DV90",
+ "email": "abc@example.com",
+ "module": "acme"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_9.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_9.caddyfiletest
new file mode 100644
index 00000000..bd82e96c
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_9.caddyfiletest
@@ -0,0 +1,56 @@
+# example from issue #4640
+http://foo:8447, http://127.0.0.1:8447 {
+ reverse_proxy 127.0.0.1:8080
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":8447"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "foo",
+ "127.0.0.1"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "127.0.0.1:8080"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip": [
+ "foo",
+ "127.0.0.1"
+ ]
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_automation_policies_global_email_localhost.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_automation_policies_global_email_localhost.caddyfiletest
new file mode 100644
index 00000000..50fbf51a
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_automation_policies_global_email_localhost.caddyfiletest
@@ -0,0 +1,67 @@
+{
+ email foo@bar
+}
+
+localhost {
+}
+
+example.com {
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ },
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "example.com"
+ ],
+ "issuers": [
+ {
+ "email": "foo@bar",
+ "module": "acme"
+ },
+ {
+ "ca": "https://acme.zerossl.com/v2/DV90",
+ "email": "foo@bar",
+ "module": "acme"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file-legacy-with-verifier.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file-legacy-with-verifier.caddyfiletest
new file mode 100644
index 00000000..302d8fd1
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file-legacy-with-verifier.caddyfiletest
@@ -0,0 +1,75 @@
+localhost
+
+respond "hello from localhost"
+tls {
+ client_auth {
+ mode request
+ trusted_ca_cert_file ../caddy.ca.cer
+ verifier dummy
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "match": {
+ "sni": [
+ "localhost"
+ ]
+ },
+ "client_authentication": {
+ "ca": {
+ "provider": "inline",
+ "trusted_ca_certs": [
+ "MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
+ ]
+ },
+ "verifiers": [
+ {
+ "verifier": "dummy"
+ }
+ ],
+ "mode": "request"
+ }
+ },
+ {}
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file-legacy.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file-legacy.caddyfiletest
new file mode 100644
index 00000000..36fd978e
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file-legacy.caddyfiletest
@@ -0,0 +1,69 @@
+localhost
+
+respond "hello from localhost"
+tls {
+ client_auth {
+ mode request
+ trusted_ca_cert_file ../caddy.ca.cer
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "match": {
+ "sni": [
+ "localhost"
+ ]
+ },
+ "client_authentication": {
+ "ca": {
+ "provider": "inline",
+ "trusted_ca_certs": [
+ "MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
+ ]
+ },
+ "mode": "request"
+ }
+ },
+ {}
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file.caddyfiletest
new file mode 100644
index 00000000..dbf408fa
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file.caddyfiletest
@@ -0,0 +1,71 @@
+localhost
+
+respond "hello from localhost"
+tls {
+ client_auth {
+ mode request
+ trust_pool file {
+ pem_file ../caddy.ca.cer
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "match": {
+ "sni": [
+ "localhost"
+ ]
+ },
+ "client_authentication": {
+ "ca": {
+ "pem_files": [
+ "../caddy.ca.cer"
+ ],
+ "provider": "file"
+ },
+ "mode": "request"
+ }
+ },
+ {}
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert-legacy.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert-legacy.caddyfiletest
new file mode 100644
index 00000000..3a91e832
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert-legacy.caddyfiletest
@@ -0,0 +1,69 @@
+localhost
+
+respond "hello from localhost"
+tls {
+ client_auth {
+ mode request
+ trusted_ca_cert MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "match": {
+ "sni": [
+ "localhost"
+ ]
+ },
+ "client_authentication": {
+ "ca": {
+ "provider": "inline",
+ "trusted_ca_certs": [
+ "MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
+ ]
+ },
+ "mode": "request"
+ }
+ },
+ {}
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert.caddyfiletest
new file mode 100644
index 00000000..7b8e5a20
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert.caddyfiletest
@@ -0,0 +1,71 @@
+localhost
+
+respond "hello from localhost"
+tls {
+ client_auth {
+ mode request
+ trust_pool inline {
+ trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "match": {
+ "sni": [
+ "localhost"
+ ]
+ },
+ "client_authentication": {
+ "ca": {
+ "provider": "inline",
+ "trusted_ca_certs": [
+ "MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
+ ]
+ },
+ "mode": "request"
+ }
+ },
+ {}
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert_with_leaf_trust.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert_with_leaf_trust.caddyfiletest
new file mode 100644
index 00000000..66c3a3c3
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert_with_leaf_trust.caddyfiletest
@@ -0,0 +1,75 @@
+localhost
+
+respond "hello from localhost"
+tls {
+ client_auth {
+ mode request
+ trust_pool inline {
+ trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
+ }
+ trusted_leaf_cert_file ../caddy.ca.cer
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "match": {
+ "sni": [
+ "localhost"
+ ]
+ },
+ "client_authentication": {
+ "ca": {
+ "provider": "inline",
+ "trusted_ca_certs": [
+ "MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
+ ]
+ },
+ "trusted_leaf_certs": [
+ "MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
+ ],
+ "mode": "request"
+ }
+ },
+ {}
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_conn_policy_consolidate.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_conn_policy_consolidate.caddyfiletest
new file mode 100644
index 00000000..68e89b0d
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_conn_policy_consolidate.caddyfiletest
@@ -0,0 +1,132 @@
+# https://github.com/caddyserver/caddy/issues/3906
+a.a {
+ tls internal
+ respond 403
+}
+
+http://b.b https://b.b:8443 {
+ tls internal
+ respond 404
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "a.a"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "status_code": 403
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv1": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "b.b"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "status_code": 404
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ },
+ "srv2": {
+ "listen": [
+ ":8443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "b.b"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "static_response",
+ "status_code": 404
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "a.a",
+ "b.b"
+ ],
+ "issuers": [
+ {
+ "module": "internal"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_dns_ttl.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_dns_ttl.caddyfiletest
new file mode 100644
index 00000000..c452bf79
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_dns_ttl.caddyfiletest
@@ -0,0 +1,68 @@
+localhost
+
+respond "hello from localhost"
+tls {
+ dns_ttl 5m10s
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "localhost"
+ ],
+ "issuers": [
+ {
+ "challenges": {
+ "dns": {
+ "ttl": 310000000000
+ }
+ },
+ "module": "acme"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_explicit_issuer_dns_ttl.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_explicit_issuer_dns_ttl.caddyfiletest
new file mode 100644
index 00000000..d552599f
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_explicit_issuer_dns_ttl.caddyfiletest
@@ -0,0 +1,80 @@
+localhost
+
+respond "hello from localhost"
+tls {
+ issuer acme {
+ dns_ttl 5m10s
+ }
+ issuer zerossl api_key {
+ dns_ttl 10m20s
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "localhost"
+ ],
+ "issuers": [
+ {
+ "challenges": {
+ "dns": {
+ "ttl": 310000000000
+ }
+ },
+ "module": "acme"
+ },
+ {
+ "api_key": "api_key",
+ "cname_validation": {
+ "ttl": 620000000000
+ },
+ "module": "zerossl"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_explicit_issuer_propagation_options.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_explicit_issuer_propagation_options.caddyfiletest
new file mode 100644
index 00000000..206d59ca
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_explicit_issuer_propagation_options.caddyfiletest
@@ -0,0 +1,84 @@
+localhost
+
+respond "hello from localhost"
+tls {
+ issuer acme {
+ propagation_delay 5m10s
+ propagation_timeout 10m20s
+ }
+ issuer zerossl api_key {
+ propagation_delay 5m30s
+ propagation_timeout -1
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "localhost"
+ ],
+ "issuers": [
+ {
+ "challenges": {
+ "dns": {
+ "propagation_delay": 310000000000,
+ "propagation_timeout": 620000000000
+ }
+ },
+ "module": "acme"
+ },
+ {
+ "api_key": "api_key",
+ "cname_validation": {
+ "propagation_delay": 330000000000,
+ "propagation_timeout": -1
+ },
+ "module": "zerossl"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_internal_options.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_internal_options.caddyfiletest
new file mode 100644
index 00000000..50bbfec2
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_internal_options.caddyfiletest
@@ -0,0 +1,54 @@
+a.example.com {
+ tls {
+ issuer internal {
+ ca foo
+ lifetime 24h
+ sign_with_root
+ }
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "a.example.com"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "a.example.com"
+ ],
+ "issuers": [
+ {
+ "ca": "foo",
+ "lifetime": 86400000000000,
+ "module": "internal",
+ "sign_with_root": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tls_propagation_options.caddyfiletest b/caddytest/integration/caddyfile_adapt/tls_propagation_options.caddyfiletest
new file mode 100644
index 00000000..43ec9774
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tls_propagation_options.caddyfiletest
@@ -0,0 +1,70 @@
+localhost
+
+respond "hello from localhost"
+tls {
+ propagation_delay 5m10s
+ propagation_timeout 10m20s
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from localhost",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "localhost"
+ ],
+ "issuers": [
+ {
+ "challenges": {
+ "dns": {
+ "propagation_delay": 310000000000,
+ "propagation_timeout": 620000000000
+ }
+ },
+ "module": "acme"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/tracing.caddyfiletest b/caddytest/integration/caddyfile_adapt/tracing.caddyfiletest
new file mode 100644
index 00000000..32286600
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/tracing.caddyfiletest
@@ -0,0 +1,36 @@
+:80 {
+ tracing /myhandler {
+ span my-span
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "path": [
+ "/myhandler"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "tracing",
+ "span": "my-span"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/uri_query_operations.caddyfiletest b/caddytest/integration/caddyfile_adapt/uri_query_operations.caddyfiletest
new file mode 100644
index 00000000..a5346248
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/uri_query_operations.caddyfiletest
@@ -0,0 +1,106 @@
+:9080
+uri query +foo bar
+uri query -baz
+uri query taz test
+uri query key=value example
+uri query changethis>changed
+uri query {
+ findme value replacement
+ +foo1 baz
+}
+
+respond "{query}"
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":9080"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "rewrite",
+ "query": {
+ "add": [
+ {
+ "key": "foo",
+ "val": "bar"
+ }
+ ]
+ }
+ },
+ {
+ "handler": "rewrite",
+ "query": {
+ "delete": [
+ "baz"
+ ]
+ }
+ },
+ {
+ "handler": "rewrite",
+ "query": {
+ "set": [
+ {
+ "key": "taz",
+ "val": "test"
+ }
+ ]
+ }
+ },
+ {
+ "handler": "rewrite",
+ "query": {
+ "set": [
+ {
+ "key": "key=value",
+ "val": "example"
+ }
+ ]
+ }
+ },
+ {
+ "handler": "rewrite",
+ "query": {
+ "rename": [
+ {
+ "key": "changethis",
+ "val": "changed"
+ }
+ ]
+ }
+ },
+ {
+ "handler": "rewrite",
+ "query": {
+ "add": [
+ {
+ "key": "foo1",
+ "val": "baz"
+ }
+ ],
+ "replace": [
+ {
+ "key": "findme",
+ "replace": "replacement",
+ "search_regexp": "value"
+ }
+ ]
+ }
+ },
+ {
+ "body": "{http.request.uri.query}",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/uri_replace_brace_escape.caddyfiletest b/caddytest/integration/caddyfile_adapt/uri_replace_brace_escape.caddyfiletest
new file mode 100644
index 00000000..860b8a8d
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/uri_replace_brace_escape.caddyfiletest
@@ -0,0 +1,47 @@
+:9080
+uri replace "\}" %7D
+uri replace "\{" %7B
+
+respond "{query}"
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":9080"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "rewrite",
+ "uri_substring": [
+ {
+ "find": "\\}",
+ "replace": "%7D"
+ }
+ ]
+ },
+ {
+ "handler": "rewrite",
+ "uri_substring": [
+ {
+ "find": "\\{",
+ "replace": "%7B"
+ }
+ ]
+ },
+ {
+ "body": "{http.request.uri.query}",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt/wildcard_pattern.caddyfiletest b/caddytest/integration/caddyfile_adapt/wildcard_pattern.caddyfiletest
new file mode 100644
index 00000000..1a9ccea7
--- /dev/null
+++ b/caddytest/integration/caddyfile_adapt/wildcard_pattern.caddyfiletest
@@ -0,0 +1,157 @@
+*.example.com {
+ tls foo@example.com {
+ dns mock
+ }
+
+ @foo host foo.example.com
+ handle @foo {
+ respond "Foo!"
+ }
+
+ @bar host bar.example.com
+ handle @bar {
+ respond "Bar!"
+ }
+
+ # Fallback for otherwise unhandled domains
+ handle {
+ abort
+ }
+}
+----------
+{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "*.example.com"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "group": "group3",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Foo!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "host": [
+ "foo.example.com"
+ ]
+ }
+ ]
+ },
+ {
+ "group": "group3",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "Bar!",
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "host": [
+ "bar.example.com"
+ ]
+ }
+ ]
+ },
+ {
+ "group": "group3",
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "abort": true,
+ "handler": "static_response"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "automation": {
+ "policies": [
+ {
+ "subjects": [
+ "*.example.com"
+ ],
+ "issuers": [
+ {
+ "challenges": {
+ "dns": {
+ "provider": {
+ "name": "mock"
+ }
+ }
+ },
+ "email": "foo@example.com",
+ "module": "acme"
+ },
+ {
+ "ca": "https://acme.zerossl.com/v2/DV90",
+ "challenges": {
+ "dns": {
+ "provider": {
+ "name": "mock"
+ }
+ }
+ },
+ "email": "foo@example.com",
+ "module": "acme"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/caddytest/integration/caddyfile_adapt_test.go b/caddytest/integration/caddyfile_adapt_test.go
index bff325be..0d9f0fa4 100644
--- a/caddytest/integration/caddyfile_adapt_test.go
+++ b/caddytest/integration/caddyfile_adapt_test.go
@@ -1,285 +1,57 @@
package integration
import (
+ jsonMod "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
"testing"
"github.com/caddyserver/caddy/v2/caddytest"
+
+ _ "github.com/caddyserver/caddy/v2/internal/testmocks"
)
-func TestHttpOnlyOnLocalhost(t *testing.T) {
- caddytest.AssertAdapt(t, `
- localhost:80 {
- respond /version 200 {
- body "hello from localhost"
- }
+func TestCaddyfileAdaptToJSON(t *testing.T) {
+ // load the list of test files from the dir
+ files, err := os.ReadDir("./caddyfile_adapt")
+ if err != nil {
+ t.Errorf("failed to read caddyfile_adapt dir: %s", err)
}
- `, "caddyfile", `{
- "apps": {
- "http": {
- "servers": {
- "srv0": {
- "listen": [
- ":80"
- ],
- "routes": [
- {
- "match": [
- {
- "host": [
- "localhost"
- ]
- }
- ],
- "handle": [
- {
- "handler": "subroute",
- "routes": [
- {
- "handle": [
- {
- "body": "hello from localhost",
- "handler": "static_response",
- "status_code": 200
- }
- ],
- "match": [
- {
- "path": [
- "/version"
- ]
- }
- ]
- }
- ]
- }
- ],
- "terminal": true
- }
- ]
- }
- }
- }
- }
-}`)
-}
-func TestHttpOnlyOnAnyAddress(t *testing.T) {
- caddytest.AssertAdapt(t, `
- :80 {
- respond /version 200 {
- body "hello from localhost"
- }
- }
- `, "caddyfile", `{
- "apps": {
- "http": {
- "servers": {
- "srv0": {
- "listen": [
- ":80"
- ],
- "routes": [
- {
- "match": [
- {
- "path": [
- "/version"
- ]
- }
- ],
- "handle": [
- {
- "body": "hello from localhost",
- "handler": "static_response",
- "status_code": 200
- }
- ]
- }
- ]
- }
- }
- }
- }
-}`)
-}
+ // prep a regexp to fix strings on windows
+ winNewlines := regexp.MustCompile(`\r?\n`)
-func TestHttpsOnDomain(t *testing.T) {
- caddytest.AssertAdapt(t, `
- a.caddy.localhost {
- respond /version 200 {
- body "hello from localhost"
+ for _, f := range files {
+ if f.IsDir() {
+ continue
}
- }
- `, "caddyfile", `{
- "apps": {
- "http": {
- "servers": {
- "srv0": {
- "listen": [
- ":443"
- ],
- "routes": [
- {
- "match": [
- {
- "host": [
- "a.caddy.localhost"
- ]
- }
- ],
- "handle": [
- {
- "handler": "subroute",
- "routes": [
- {
- "handle": [
- {
- "body": "hello from localhost",
- "handler": "static_response",
- "status_code": 200
- }
- ],
- "match": [
- {
- "path": [
- "/version"
- ]
- }
- ]
- }
- ]
- }
- ],
- "terminal": true
- }
- ]
- }
- }
- }
- }
-}`)
-}
-func TestHttpOnlyOnDomain(t *testing.T) {
- caddytest.AssertAdapt(t, `
- http://a.caddy.localhost {
- respond /version 200 {
- body "hello from localhost"
+ // read the test file
+ filename := f.Name()
+ data, err := os.ReadFile("./caddyfile_adapt/" + filename)
+ if err != nil {
+ t.Errorf("failed to read %s dir: %s", filename, err)
}
- }
- `, "caddyfile", `{
- "apps": {
- "http": {
- "servers": {
- "srv0": {
- "listen": [
- ":80"
- ],
- "routes": [
- {
- "match": [
- {
- "host": [
- "a.caddy.localhost"
- ]
- }
- ],
- "handle": [
- {
- "handler": "subroute",
- "routes": [
- {
- "handle": [
- {
- "body": "hello from localhost",
- "handler": "static_response",
- "status_code": 200
- }
- ],
- "match": [
- {
- "path": [
- "/version"
- ]
- }
- ]
- }
- ]
- }
- ],
- "terminal": true
- }
- ],
- "automatic_https": {
- "skip": [
- "a.caddy.localhost"
- ]
- }
- }
- }
- }
- }
-}`)
-}
-func TestHttpOnlyOnNonStandardPort(t *testing.T) {
- caddytest.AssertAdapt(t, `
- http://a.caddy.localhost:81 {
- respond /version 200 {
- body "hello from localhost"
+ // split the Caddyfile (first) and JSON (second) parts
+ // (append newline to Caddyfile to match formatter expectations)
+ parts := strings.Split(string(data), "----------")
+ caddyfile, json := strings.TrimSpace(parts[0])+"\n", strings.TrimSpace(parts[1])
+
+ // replace windows newlines in the json with unix newlines
+ json = winNewlines.ReplaceAllString(json, "\n")
+
+ // replace os-specific default path for file_server's hide field
+ replacePath, _ := jsonMod.Marshal(fmt.Sprint(".", string(filepath.Separator), "Caddyfile"))
+ json = strings.ReplaceAll(json, `"./Caddyfile"`, string(replacePath))
+
+ // run the test
+ ok := caddytest.CompareAdapt(t, filename, caddyfile, "caddyfile", json)
+ if !ok {
+ t.Errorf("failed to adapt %s", filename)
}
}
- `, "caddyfile", `{
- "apps": {
- "http": {
- "servers": {
- "srv0": {
- "listen": [
- ":81"
- ],
- "routes": [
- {
- "match": [
- {
- "host": [
- "a.caddy.localhost"
- ]
- }
- ],
- "handle": [
- {
- "handler": "subroute",
- "routes": [
- {
- "handle": [
- {
- "body": "hello from localhost",
- "handler": "static_response",
- "status_code": 200
- }
- ],
- "match": [
- {
- "path": [
- "/version"
- ]
- }
- ]
- }
- ]
- }
- ],
- "terminal": true
- }
- ],
- "automatic_https": {
- "skip": [
- "a.caddy.localhost"
- ]
- }
- }
- }
- }
- }
-}`)
}
diff --git a/caddytest/integration/caddyfile_test.go b/caddytest/integration/caddyfile_test.go
index 4e9bdd98..11ffc08a 100644
--- a/caddytest/integration/caddyfile_test.go
+++ b/caddytest/integration/caddyfile_test.go
@@ -9,13 +9,14 @@ import (
)
func TestRespond(t *testing.T) {
-
// arrange
tester := caddytest.NewTester(t)
tester.InitServer(`
{
+ admin localhost:2999
http_port 9080
https_port 9443
+ grace_period 1ns
}
localhost:9080 {
@@ -30,13 +31,14 @@ func TestRespond(t *testing.T) {
}
func TestRedirect(t *testing.T) {
-
// arrange
tester := caddytest.NewTester(t)
tester.InitServer(`
{
+ admin localhost:2999
http_port 9080
https_port 9443
+ grace_period 1ns
}
localhost:9080 {
@@ -57,7 +59,6 @@ func TestRedirect(t *testing.T) {
}
func TestDuplicateHosts(t *testing.T) {
-
// act and assert
caddytest.AssertLoadError(t,
`
@@ -68,11 +69,10 @@ func TestDuplicateHosts(t *testing.T) {
}
`,
"caddyfile",
- "duplicate site address not allowed")
+ "ambiguous site definition")
}
func TestReadCookie(t *testing.T) {
-
localhost, _ := url.Parse("http://localhost")
cookie := http.Cookie{
Name: "clientname",
@@ -84,8 +84,11 @@ func TestReadCookie(t *testing.T) {
tester.Client.Jar.SetCookies(localhost, []*http.Cookie{&cookie})
tester.InitServer(`
{
+ skip_install_trust
+ admin localhost:2999
http_port 9080
https_port 9443
+ grace_period 1ns
}
localhost:9080 {
@@ -101,3 +104,708 @@ func TestReadCookie(t *testing.T) {
// act and assert
tester.AssertGetResponse("http://localhost:9080/cookie.html", 200, "Cookie.ClientName caddytest ")
}
+
+func TestReplIndex(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ skip_install_trust
+ admin localhost:2999
+ http_port 9080
+ https_port 9443
+ grace_period 1ns
+ }
+
+ localhost:9080 {
+ templates {
+ root testdata
+ }
+ file_server {
+ root testdata
+ index "index.{host}.html"
+ }
+ }
+ `, "caddyfile")
+
+ // act and assert
+ tester.AssertGetResponse("http://localhost:9080/", 200, "")
+}
+
+func TestInvalidPrefix(t *testing.T) {
+ type testCase struct {
+ config, expectedError string
+ }
+
+ failureCases := []testCase{
+ {
+ config: `wss://localhost`,
+ expectedError: `the scheme wss:// is only supported in browsers; use https:// instead`,
+ },
+ {
+ config: `ws://localhost`,
+ expectedError: `the scheme ws:// is only supported in browsers; use http:// instead`,
+ },
+ {
+ config: `someInvalidPrefix://localhost`,
+ expectedError: "unsupported URL scheme someinvalidprefix://",
+ },
+ {
+ config: `h2c://localhost`,
+ expectedError: `unsupported URL scheme h2c://`,
+ },
+ {
+ config: `localhost, wss://localhost`,
+ expectedError: `the scheme wss:// is only supported in browsers; use https:// instead`,
+ },
+ {
+ config: `localhost {
+ reverse_proxy ws://localhost"
+ }`,
+ expectedError: `the scheme ws:// is only supported in browsers; use http:// instead`,
+ },
+ {
+ config: `localhost {
+ reverse_proxy someInvalidPrefix://localhost"
+ }`,
+ expectedError: `unsupported URL scheme someinvalidprefix://`,
+ },
+ }
+
+ for _, failureCase := range failureCases {
+ caddytest.AssertLoadError(t, failureCase.config, "caddyfile", failureCase.expectedError)
+ }
+}
+
+func TestValidPrefix(t *testing.T) {
+ type testCase struct {
+ rawConfig, expectedResponse string
+ }
+
+ successCases := []testCase{
+ {
+ "localhost",
+ `{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}`,
+ },
+ {
+ "https://localhost",
+ `{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}`,
+ },
+ {
+ "http://localhost",
+ `{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":80"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}`,
+ },
+ {
+ `localhost {
+ reverse_proxy http://localhost:3000
+ }`,
+ `{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "localhost:3000"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}`,
+ },
+ {
+ `localhost {
+ reverse_proxy https://localhost:3000
+ }`,
+ `{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "protocol": "http",
+ "tls": {}
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:3000"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}`,
+ },
+ {
+ `localhost {
+ reverse_proxy h2c://localhost:3000
+ }`,
+ `{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "protocol": "http",
+ "versions": [
+ "h2c",
+ "2"
+ ]
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:3000"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}`,
+ },
+ {
+ `localhost {
+ reverse_proxy localhost:3000
+ }`,
+ `{
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "localhost:3000"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+}`,
+ },
+ }
+
+ for _, successCase := range successCases {
+ caddytest.AssertAdapt(t, successCase.rawConfig, "caddyfile", successCase.expectedResponse)
+ }
+}
+
+func TestUriReplace(t *testing.T) {
+ tester := caddytest.NewTester(t)
+
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri replace "\}" %7D
+ uri replace "\{" %7B
+
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint?test={%20content%20}", 200, "test=%7B%20content%20%7D")
+}
+
+func TestUriOps(t *testing.T) {
+ tester := caddytest.NewTester(t)
+
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri query +foo bar
+ uri query -baz
+ uri query taz test
+ uri query key=value example
+ uri query changethis>changed
+
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar0&baz=buz&taz=nottest&changethis=val", 200, "changed=val&foo=bar0&foo=bar&key%3Dvalue=example&taz=test")
+}
+
+// Tests the `http.request.local.port` placeholder.
+// We don't test the very similar `http.request.local.host` placeholder,
+// because depending on the host the test is running on, localhost might
+// refer to 127.0.0.1 or ::1.
+// TODO: Test each http version separately (especially http/3)
+func TestHttpRequestLocalPortPlaceholder(t *testing.T) {
+ tester := caddytest.NewTester(t)
+
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ respond "{http.request.local.port}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/", 200, "9080")
+}
+
+func TestSetThenAddQueryParams(t *testing.T) {
+ tester := caddytest.NewTester(t)
+
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri query foo bar
+ uri query +foo baz
+
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint", 200, "foo=bar&foo=baz")
+}
+
+func TestSetThenDeleteParams(t *testing.T) {
+ tester := caddytest.NewTester(t)
+
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri query bar foo{query.foo}
+ uri query -foo
+
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar", 200, "bar=foobar")
+}
+
+func TestRenameAndOtherOps(t *testing.T) {
+ tester := caddytest.NewTester(t)
+
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri query foo>bar
+ uri query bar taz
+ uri query +bar baz
+
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar", 200, "bar=taz&bar=baz")
+}
+
+func TestReplaceOps(t *testing.T) {
+ tester := caddytest.NewTester(t)
+
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri query foo bar baz
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar", 200, "foo=baz")
+}
+
+func TestReplaceWithReplacementPlaceholder(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri query foo bar {query.placeholder}
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint?placeholder=baz&foo=bar", 200, "foo=baz&placeholder=baz")
+
+}
+
+func TestReplaceWithKeyPlaceholder(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri query {query.placeholder} bar baz
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint?placeholder=foo&foo=bar", 200, "foo=baz&placeholder=foo")
+}
+
+func TestPartialReplacement(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri query foo ar az
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar", 200, "foo=baz")
+}
+
+func TestNonExistingSearch(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri query foo var baz
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar", 200, "foo=bar")
+}
+
+func TestReplaceAllOps(t *testing.T) {
+ tester := caddytest.NewTester(t)
+
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri query * bar baz
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar&baz=bar", 200, "baz=baz&foo=baz")
+}
+
+func TestUriOpsBlock(t *testing.T) {
+ tester := caddytest.NewTester(t)
+
+ tester.InitServer(`
+ {
+ admin localhost:2999
+ http_port 9080
+ }
+ :9080
+ uri query {
+ +foo bar
+ -baz
+ taz test
+ }
+ respond "{query}"`, "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar0&baz=buz&taz=nottest", 200, "foo=bar0&foo=bar&taz=test")
+}
+
+func TestHandleErrorSimpleCodes(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`{
+ admin localhost:2999
+ http_port 9080
+ }
+ localhost:9080 {
+ root * /srv
+ error /private* "Unauthorized" 410
+ error /hidden* "Not found" 404
+
+ handle_errors 404 410 {
+ respond "404 or 410 error"
+ }
+ }`, "caddyfile")
+ // act and assert
+ tester.AssertGetResponse("http://localhost:9080/private", 410, "404 or 410 error")
+ tester.AssertGetResponse("http://localhost:9080/hidden", 404, "404 or 410 error")
+}
+
+func TestHandleErrorRange(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`{
+ admin localhost:2999
+ http_port 9080
+ }
+ localhost:9080 {
+ root * /srv
+ error /private* "Unauthorized" 410
+ error /hidden* "Not found" 404
+
+ handle_errors 4xx {
+ respond "Error in the [400 .. 499] range"
+ }
+ }`, "caddyfile")
+ // act and assert
+ tester.AssertGetResponse("http://localhost:9080/private", 410, "Error in the [400 .. 499] range")
+ tester.AssertGetResponse("http://localhost:9080/hidden", 404, "Error in the [400 .. 499] range")
+}
+
+func TestHandleErrorSort(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`{
+ admin localhost:2999
+ http_port 9080
+ }
+ localhost:9080 {
+ root * /srv
+ error /private* "Unauthorized" 410
+ error /hidden* "Not found" 404
+ error /internalerr* "Internal Server Error" 500
+
+ handle_errors {
+ respond "Fallback route: code outside the [400..499] range"
+ }
+ handle_errors 4xx {
+ respond "Error in the [400 .. 499] range"
+ }
+ }`, "caddyfile")
+ // act and assert
+ tester.AssertGetResponse("http://localhost:9080/internalerr", 500, "Fallback route: code outside the [400..499] range")
+ tester.AssertGetResponse("http://localhost:9080/hidden", 404, "Error in the [400 .. 499] range")
+}
+
+func TestHandleErrorRangeAndCodes(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`{
+ admin localhost:2999
+ http_port 9080
+ }
+ localhost:9080 {
+ root * /srv
+ error /private* "Unauthorized" 410
+ error /threehundred* "Moved Permanently" 301
+ error /internalerr* "Internal Server Error" 500
+
+ handle_errors 500 3xx {
+ respond "Error code is equal to 500 or in the [300..399] range"
+ }
+ handle_errors 4xx {
+ respond "Error in the [400 .. 499] range"
+ }
+ }`, "caddyfile")
+ // act and assert
+ tester.AssertGetResponse("http://localhost:9080/internalerr", 500, "Error code is equal to 500 or in the [300..399] range")
+ tester.AssertGetResponse("http://localhost:9080/threehundred", 301, "Error code is equal to 500 or in the [300..399] range")
+ tester.AssertGetResponse("http://localhost:9080/private", 410, "Error in the [400 .. 499] range")
+}
+
+func TestInvalidSiteAddressesAsDirectives(t *testing.T) {
+ type testCase struct {
+ config, expectedError string
+ }
+
+ failureCases := []testCase{
+ {
+ config: `
+ handle {
+ file_server
+ }`,
+ expectedError: `Caddyfile:2: parsed 'handle' as a site address, but it is a known directive; directives must appear in a site block`,
+ },
+ {
+ config: `
+ reverse_proxy localhost:9000 localhost:9001 {
+ file_server
+ }`,
+ expectedError: `Caddyfile:2: parsed 'reverse_proxy' as a site address, but it is a known directive; directives must appear in a site block`,
+ },
+ }
+
+ for _, failureCase := range failureCases {
+ caddytest.AssertLoadError(t, failureCase.config, "caddyfile", failureCase.expectedError)
+ }
+}
diff --git a/caddytest/integration/handler_test.go b/caddytest/integration/handler_test.go
new file mode 100644
index 00000000..afc700b0
--- /dev/null
+++ b/caddytest/integration/handler_test.go
@@ -0,0 +1,59 @@
+package integration
+
+import (
+ "bytes"
+ "net/http"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddytest"
+)
+
+func TestBrowse(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ skip_install_trust
+ admin localhost:2999
+ http_port 9080
+ https_port 9443
+ grace_period 1ns
+ }
+ http://localhost:9080 {
+ file_server browse
+ }
+ `, "caddyfile")
+
+ req, err := http.NewRequest(http.MethodGet, "http://localhost:9080/", nil)
+ if err != nil {
+ t.Fail()
+ return
+ }
+ tester.AssertResponseCode(req, 200)
+}
+
+func TestRespondWithJSON(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ skip_install_trust
+ admin localhost:2999
+ http_port 9080
+ https_port 9443
+ grace_period 1ns
+ }
+ localhost {
+ respond {http.request.body}
+ }
+ `, "caddyfile")
+
+ res, _ := tester.AssertPostResponseBody("https://localhost:9443/",
+ nil,
+ bytes.NewBufferString(`{
+ "greeting": "Hello, world!"
+ }`), 200, `{
+ "greeting": "Hello, world!"
+ }`)
+ if res.Header.Get("Content-Type") != "application/json" {
+ t.Errorf("expected Content-Type to be application/json, but was %s", res.Header.Get("Content-Type"))
+ }
+}
diff --git a/caddytest/integration/intercept_test.go b/caddytest/integration/intercept_test.go
new file mode 100644
index 00000000..6f8ffc92
--- /dev/null
+++ b/caddytest/integration/intercept_test.go
@@ -0,0 +1,40 @@
+package integration
+
+import (
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddytest"
+)
+
+func TestIntercept(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`{
+ skip_install_trust
+ admin localhost:2999
+ http_port 9080
+ https_port 9443
+ grace_period 1ns
+ }
+
+ localhost:9080 {
+ respond /intercept "I'm a teapot" 408
+ header /intercept To-Intercept ok
+ respond /no-intercept "I'm not a teapot"
+
+ intercept {
+ @teapot status 408
+ handle_response @teapot {
+ header /intercept intercepted {resp.header.To-Intercept}
+ respond /intercept "I'm a combined coffee/tea pot that is temporarily out of coffee" 503
+ }
+ }
+ }
+ `, "caddyfile")
+
+ r, _ := tester.AssertGetResponse("http://localhost:9080/intercept", 503, "I'm a combined coffee/tea pot that is temporarily out of coffee")
+ if r.Header.Get("intercepted") != "ok" {
+ t.Fatalf(`header "intercepted" value is not "ok": %s`, r.Header.Get("intercepted"))
+ }
+
+ tester.AssertGetResponse("http://localhost:9080/no-intercept", 200, "I'm not a teapot")
+}
diff --git a/caddytest/integration/leafcertloaders_test.go b/caddytest/integration/leafcertloaders_test.go
new file mode 100644
index 00000000..4399902e
--- /dev/null
+++ b/caddytest/integration/leafcertloaders_test.go
@@ -0,0 +1,70 @@
+package integration
+
+import (
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddytest"
+)
+
+func TestLeafCertLoaders(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ "admin": {
+ "listen": "localhost:2999"
+ },
+ "apps": {
+ "http": {
+ "http_port": 9080,
+ "https_port": 9443,
+ "grace_period": 1,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":9443"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "client_authentication": {
+ "verifiers": [
+ {
+ "verifier": "leaf",
+ "leaf_certs_loaders": [
+ {
+ "loader": "file",
+ "files": ["../leafcert.pem"]
+ },
+ {
+ "loader": "folder",
+ "folders": ["../"]
+ },
+ {
+ "loader": "storage"
+ },
+ {
+ "loader": "pem"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ }`, "json")
+}
diff --git a/caddytest/integration/listener_test.go b/caddytest/integration/listener_test.go
new file mode 100644
index 00000000..30642b1a
--- /dev/null
+++ b/caddytest/integration/listener_test.go
@@ -0,0 +1,94 @@
+package integration
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "net"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddytest"
+)
+
+func setupListenerWrapperTest(t *testing.T, handlerFunc http.HandlerFunc) *caddytest.Tester {
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("failed to listen: %s", err)
+ }
+
+ mux := http.NewServeMux()
+ mux.Handle("/", handlerFunc)
+ srv := &http.Server{
+ Handler: mux,
+ }
+ go srv.Serve(l)
+ t.Cleanup(func() {
+ _ = srv.Close()
+ _ = l.Close()
+ })
+ tester := caddytest.NewTester(t)
+ tester.InitServer(fmt.Sprintf(`
+ {
+ skip_install_trust
+ admin localhost:2999
+ http_port 9080
+ https_port 9443
+ local_certs
+ servers :9443 {
+ listener_wrappers {
+ http_redirect
+ tls
+ }
+ }
+ }
+ localhost {
+ reverse_proxy %s
+ }
+ `, l.Addr().String()), "caddyfile")
+ return tester
+}
+
+func TestHTTPRedirectWrapperWithLargeUpload(t *testing.T) {
+ const uploadSize = (1024 * 1024) + 1 // 1 MB + 1 byte
+	// body is filled below with deterministic pseudo-random bytes (fixed seed 0)
+ body := make([]byte, uploadSize)
+ rand.New(rand.NewSource(0)).Read(body)
+
+ tester := setupListenerWrapperTest(t, func(writer http.ResponseWriter, request *http.Request) {
+ buf := new(bytes.Buffer)
+ _, err := buf.ReadFrom(request.Body)
+ if err != nil {
+ t.Fatalf("failed to read body: %s", err)
+ }
+
+ if !bytes.Equal(buf.Bytes(), body) {
+ t.Fatalf("body not the same")
+ }
+
+ writer.WriteHeader(http.StatusNoContent)
+ })
+ resp, err := tester.Client.Post("https://localhost:9443", "application/octet-stream", bytes.NewReader(body))
+ if err != nil {
+ t.Fatalf("failed to post: %s", err)
+ }
+
+ if resp.StatusCode != http.StatusNoContent {
+ t.Fatalf("unexpected status: %d != %d", resp.StatusCode, http.StatusNoContent)
+ }
+}
+
+func TestLargeHttpRequest(t *testing.T) {
+ tester := setupListenerWrapperTest(t, func(writer http.ResponseWriter, request *http.Request) {
+ t.Fatal("not supposed to handle a request")
+ })
+
+ // We never read the body in any way, set an extra long header instead.
+ req, _ := http.NewRequest("POST", "http://localhost:9443", nil)
+ req.Header.Set("Long-Header", strings.Repeat("X", 1024*1024))
+ _, err := tester.Client.Do(req)
+ if err == nil {
+ t.Fatal("not supposed to succeed")
+ }
+}
diff --git a/caddytest/integration/map_test.go b/caddytest/integration/map_test.go
new file mode 100644
index 00000000..eb338656
--- /dev/null
+++ b/caddytest/integration/map_test.go
@@ -0,0 +1,151 @@
+package integration
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddytest"
+)
+
+func TestMap(t *testing.T) {
+ // arrange
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`{
+ skip_install_trust
+ admin localhost:2999
+ http_port 9080
+ https_port 9443
+ grace_period 1ns
+ }
+
+ localhost:9080 {
+
+ map {http.request.method} {dest-1} {dest-2} {
+ default unknown1 unknown2
+ ~G(.)(.) G${1}${2}-called
+ POST post-called foobar
+ }
+
+ respond /version 200 {
+ body "hello from localhost {dest-1} {dest-2}"
+ }
+ }
+ `, "caddyfile")
+
+ // act and assert
+ tester.AssertGetResponse("http://localhost:9080/version", 200, "hello from localhost GET-called unknown2")
+ tester.AssertPostResponseBody("http://localhost:9080/version", []string{}, bytes.NewBuffer([]byte{}), 200, "hello from localhost post-called foobar")
+}
+
+func TestMapRespondWithDefault(t *testing.T) {
+ // arrange
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`{
+ skip_install_trust
+ admin localhost:2999
+ http_port 9080
+ https_port 9443
+ }
+
+ localhost:9080 {
+
+ map {http.request.method} {dest-name} {
+ default unknown
+ GET get-called
+ }
+
+ respond /version 200 {
+ body "hello from localhost {dest-name}"
+ }
+ }
+ `, "caddyfile")
+
+ // act and assert
+ tester.AssertGetResponse("http://localhost:9080/version", 200, "hello from localhost get-called")
+ tester.AssertPostResponseBody("http://localhost:9080/version", []string{}, bytes.NewBuffer([]byte{}), 200, "hello from localhost unknown")
+}
+
+func TestMapAsJSON(t *testing.T) {
+ // arrange
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ "admin": {
+ "listen": "localhost:2999"
+ },
+ "apps": {
+ "pki": {
+ "certificate_authorities" : {
+ "local" : {
+ "install_trust": false
+ }
+ }
+ },
+ "http": {
+ "http_port": 9080,
+ "https_port": 9443,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":9080"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "map",
+ "source": "{http.request.method}",
+ "destinations": ["{dest-name}"],
+ "defaults": ["unknown"],
+ "mappings": [
+ {
+ "input": "GET",
+ "outputs": ["get-called"]
+ },
+ {
+ "input": "POST",
+ "outputs": ["post-called"]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "body": "hello from localhost {dest-name}",
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ],
+ "match": [
+ {
+ "path": ["/version"]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "host": ["localhost"]
+ }
+ ],
+ "terminal": true
+ }
+ ]
+ }
+ }
+ }
+ }
+ }`, "json")
+
+ tester.AssertGetResponse("http://localhost:9080/version", 200, "hello from localhost get-called")
+ tester.AssertPostResponseBody("http://localhost:9080/version", []string{}, bytes.NewBuffer([]byte{}), 200, "hello from localhost post-called")
+}
diff --git a/caddytest/integration/mockdns_test.go b/caddytest/integration/mockdns_test.go
new file mode 100644
index 00000000..615116a3
--- /dev/null
+++ b/caddytest/integration/mockdns_test.go
@@ -0,0 +1,61 @@
+package integration
+
+import (
+ "context"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/certmagic"
+ "github.com/libdns/libdns"
+)
+
+func init() {
+ caddy.RegisterModule(MockDNSProvider{})
+}
+
+// MockDNSProvider is a mock DNS provider, for testing config with DNS modules.
+type MockDNSProvider struct{}
+
+// CaddyModule returns the Caddy module information.
+func (MockDNSProvider) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "dns.providers.mock",
+ New: func() caddy.Module { return new(MockDNSProvider) },
+ }
+}
+
+// Provision sets up the module.
+func (MockDNSProvider) Provision(ctx caddy.Context) error {
+ return nil
+}
+
+// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
+func (MockDNSProvider) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ return nil
+}
+
+// AppendRecords appends DNS records to the zone.
+func (MockDNSProvider) AppendRecords(ctx context.Context, zone string, recs []libdns.Record) ([]libdns.Record, error) {
+ return nil, nil
+}
+
+// DeleteRecords deletes DNS records from the zone.
+func (MockDNSProvider) DeleteRecords(ctx context.Context, zone string, recs []libdns.Record) ([]libdns.Record, error) {
+ return nil, nil
+}
+
+// GetRecords gets DNS records from the zone.
+func (MockDNSProvider) GetRecords(ctx context.Context, zone string) ([]libdns.Record, error) {
+ return nil, nil
+}
+
+// SetRecords sets DNS records in the zone.
+func (MockDNSProvider) SetRecords(ctx context.Context, zone string, recs []libdns.Record) ([]libdns.Record, error) {
+ return nil, nil
+}
+
+// Interface guard
+var _ caddyfile.Unmarshaler = (*MockDNSProvider)(nil)
+var _ certmagic.DNSProvider = (*MockDNSProvider)(nil)
+var _ caddy.Provisioner = (*MockDNSProvider)(nil)
+var _ caddy.Module = (*MockDNSProvider)(nil)
diff --git a/caddytest/integration/pki_test.go b/caddytest/integration/pki_test.go
new file mode 100644
index 00000000..84679820
--- /dev/null
+++ b/caddytest/integration/pki_test.go
@@ -0,0 +1,107 @@
+package integration
+
+import (
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddytest"
+)
+
+func TestLeafCertLifetimeLessThanIntermediate(t *testing.T) {
+ caddytest.AssertLoadError(t, `
+ {
+ "admin": {
+ "disabled": true
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "ca": "internal",
+ "handler": "acme_server",
+ "lifetime": 604800000000000
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "pki": {
+ "certificate_authorities": {
+ "internal": {
+ "install_trust": false,
+ "intermediate_lifetime": 604800000000000,
+ "name": "Internal CA"
+ }
+ }
+ }
+ }
+ }
+ `, "json", "certificate lifetime (168h0m0s) should be less than intermediate certificate lifetime (168h0m0s)")
+}
+
+func TestIntermediateLifetimeLessThanRoot(t *testing.T) {
+ caddytest.AssertLoadError(t, `
+ {
+ "admin": {
+ "disabled": true
+ },
+ "apps": {
+ "http": {
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":443"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "ca": "internal",
+ "handler": "acme_server",
+ "lifetime": 2592000000000000
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "pki": {
+ "certificate_authorities": {
+ "internal": {
+ "install_trust": false,
+ "intermediate_lifetime": 311040000000000000,
+ "name": "Internal CA"
+ }
+ }
+ }
+ }
+ }
+ `, "json", "intermediate certificate lifetime must be less than root certificate lifetime (86400h0m0s)")
+}
diff --git a/caddytest/integration/reverseproxy_test.go b/caddytest/integration/reverseproxy_test.go
new file mode 100644
index 00000000..cbfe8433
--- /dev/null
+++ b/caddytest/integration/reverseproxy_test.go
@@ -0,0 +1,476 @@
+package integration
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/caddyserver/caddy/v2/caddytest"
+)
+
+func TestSRVReverseProxy(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ "admin": {
+ "listen": "localhost:2999"
+ },
+ "apps": {
+ "pki": {
+ "certificate_authorities": {
+ "local": {
+ "install_trust": false
+ }
+ }
+ },
+ "http": {
+ "grace_period": 1,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":18080"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "dynamic_upstreams": {
+ "source": "srv",
+ "name": "srv.host.service.consul"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ `, "json")
+}
+
+func TestDialWithPlaceholderUnix(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.SkipNow()
+ }
+
+ f, err := os.CreateTemp("", "*.sock")
+ if err != nil {
+ t.Errorf("failed to create TempFile: %s", err)
+ return
+ }
+ // a hack to get a file name within a valid path to use as socket
+ socketName := f.Name()
+ os.Remove(f.Name())
+
+ server := http.Server{
+ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.Write([]byte("Hello, World!"))
+ }),
+ }
+
+ unixListener, err := net.Listen("unix", socketName)
+ if err != nil {
+ t.Errorf("failed to listen on the socket: %s", err)
+ return
+ }
+ go server.Serve(unixListener)
+ t.Cleanup(func() {
+ server.Close()
+ })
+ runtime.Gosched() // Allow other goroutines to run
+
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ "admin": {
+ "listen": "localhost:2999"
+ },
+ "apps": {
+ "pki": {
+ "certificate_authorities": {
+ "local": {
+ "install_trust": false
+ }
+ }
+ },
+ "http": {
+ "grace_period": 1,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":18080"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "unix/{http.request.header.X-Caddy-Upstream-Dial}"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ `, "json")
+
+ req, err := http.NewRequest(http.MethodGet, "http://localhost:18080", nil)
+ if err != nil {
+ t.Fail()
+ return
+ }
+ req.Header.Set("X-Caddy-Upstream-Dial", socketName)
+ tester.AssertResponse(req, 200, "Hello, World!")
+}
+
+func TestReverseProxyWithPlaceholderDialAddress(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ "admin": {
+ "listen": "localhost:2999"
+ },
+ "apps": {
+ "pki": {
+ "certificate_authorities": {
+ "local": {
+ "install_trust": false
+ }
+ }
+ },
+ "http": {
+ "grace_period": 1,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":18080"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "static_response",
+ "body": "Hello, World!"
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip": [
+ "localhost"
+ ]
+ }
+ },
+ "srv1": {
+ "listen": [
+ ":9080"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "{http.request.header.X-Caddy-Upstream-Dial}"
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip": [
+ "localhost"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ `, "json")
+
+ req, err := http.NewRequest(http.MethodGet, "http://localhost:9080", nil)
+ if err != nil {
+ t.Fail()
+ return
+ }
+ req.Header.Set("X-Caddy-Upstream-Dial", "localhost:18080")
+ tester.AssertResponse(req, 200, "Hello, World!")
+}
+
+func TestReverseProxyWithPlaceholderTCPDialAddress(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ "admin": {
+ "listen": "localhost:2999"
+ },
+ "apps": {
+ "pki": {
+ "certificate_authorities": {
+ "local": {
+ "install_trust": false
+ }
+ }
+ },
+ "http": {
+ "grace_period": 1,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":18080"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+ "handler": "static_response",
+ "body": "Hello, World!"
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip": [
+ "localhost"
+ ]
+ }
+ },
+ "srv1": {
+ "listen": [
+ ":9080"
+ ],
+ "routes": [
+ {
+ "match": [
+ {
+ "host": [
+ "localhost"
+ ]
+ }
+ ],
+ "handle": [
+ {
+
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "tcp/{http.request.header.X-Caddy-Upstream-Dial}:18080"
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "automatic_https": {
+ "skip": [
+ "localhost"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ `, "json")
+
+ req, err := http.NewRequest(http.MethodGet, "http://localhost:9080", nil)
+ if err != nil {
+ t.Fail()
+ return
+ }
+ req.Header.Set("X-Caddy-Upstream-Dial", "localhost")
+ tester.AssertResponse(req, 200, "Hello, World!")
+}
+
+func TestReverseProxyHealthCheck(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ skip_install_trust
+ admin localhost:2999
+ http_port 9080
+ https_port 9443
+ grace_period 1ns
+ }
+ http://localhost:2020 {
+ respond "Hello, World!"
+ }
+ http://localhost:2021 {
+ respond "ok"
+ }
+ http://localhost:9080 {
+ reverse_proxy {
+ to localhost:2020
+
+ health_uri /health
+ health_port 2021
+ health_interval 10ms
+ health_timeout 100ms
+ health_passes 1
+ health_fails 1
+ }
+ }
+ `, "caddyfile")
+
+ time.Sleep(100 * time.Millisecond) // TODO: for some reason this test seems particularly flaky, getting 503 when it should be 200, unless we wait
+ tester.AssertGetResponse("http://localhost:9080/", 200, "Hello, World!")
+}
+
+func TestReverseProxyHealthCheckUnixSocket(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.SkipNow()
+ }
+ tester := caddytest.NewTester(t)
+ f, err := os.CreateTemp("", "*.sock")
+ if err != nil {
+ t.Errorf("failed to create TempFile: %s", err)
+ return
+ }
+ // a hack to get a file name within a valid path to use as socket
+ socketName := f.Name()
+ os.Remove(f.Name())
+
+ server := http.Server{
+ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if strings.HasPrefix(req.URL.Path, "/health") {
+ w.Write([]byte("ok"))
+ return
+ }
+ w.Write([]byte("Hello, World!"))
+ }),
+ }
+
+ unixListener, err := net.Listen("unix", socketName)
+ if err != nil {
+ t.Errorf("failed to listen on the socket: %s", err)
+ return
+ }
+ go server.Serve(unixListener)
+ t.Cleanup(func() {
+ server.Close()
+ })
+ runtime.Gosched() // Allow other goroutines to run
+
+ tester.InitServer(fmt.Sprintf(`
+ {
+ skip_install_trust
+ admin localhost:2999
+ http_port 9080
+ https_port 9443
+ grace_period 1ns
+ }
+ http://localhost:9080 {
+ reverse_proxy {
+ to unix/%s
+
+ health_uri /health
+ health_port 2021
+ health_interval 2s
+ health_timeout 5s
+ }
+ }
+ `, socketName), "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/", 200, "Hello, World!")
+}
+
+func TestReverseProxyHealthCheckUnixSocketWithoutPort(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.SkipNow()
+ }
+ tester := caddytest.NewTester(t)
+ f, err := os.CreateTemp("", "*.sock")
+ if err != nil {
+ t.Errorf("failed to create TempFile: %s", err)
+ return
+ }
+ // a hack to get a file name within a valid path to use as socket
+ socketName := f.Name()
+ os.Remove(f.Name())
+
+ server := http.Server{
+ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if strings.HasPrefix(req.URL.Path, "/health") {
+ w.Write([]byte("ok"))
+ return
+ }
+ w.Write([]byte("Hello, World!"))
+ }),
+ }
+
+ unixListener, err := net.Listen("unix", socketName)
+ if err != nil {
+ t.Errorf("failed to listen on the socket: %s", err)
+ return
+ }
+ go server.Serve(unixListener)
+ t.Cleanup(func() {
+ server.Close()
+ })
+ runtime.Gosched() // Allow other goroutines to run
+
+ tester.InitServer(fmt.Sprintf(`
+ {
+ skip_install_trust
+ admin localhost:2999
+ http_port 9080
+ https_port 9443
+ grace_period 1ns
+ }
+ http://localhost:9080 {
+ reverse_proxy {
+ to unix/%s
+
+ health_uri /health
+ health_interval 2s
+ health_timeout 5s
+ }
+ }
+ `, socketName), "caddyfile")
+
+ tester.AssertGetResponse("http://localhost:9080/", 200, "Hello, World!")
+}
diff --git a/caddytest/integration/sni_test.go b/caddytest/integration/sni_test.go
index 46f7c83e..188f9354 100644
--- a/caddytest/integration/sni_test.go
+++ b/caddytest/integration/sni_test.go
@@ -7,196 +7,202 @@ import (
)
func TestDefaultSNI(t *testing.T) {
-
// arrange
tester := caddytest.NewTester(t)
tester.InitServer(`{
- "apps": {
- "http": {
- "http_port": 9080,
- "https_port": 9443,
- "servers": {
- "srv0": {
- "listen": [
- ":9443"
- ],
- "routes": [
- {
- "handle": [
- {
- "handler": "subroute",
- "routes": [
- {
- "handle": [
- {
- "body": "hello from a.caddy.localhost",
- "handler": "static_response",
- "status_code": 200
- }
- ],
- "match": [
- {
- "path": [
- "/version"
- ]
- }
- ]
- }
- ]
- }
- ],
- "match": [
- {
- "host": [
- "127.0.0.1"
- ]
- }
- ],
- "terminal": true
- }
- ],
- "tls_connection_policies": [
- {
- "certificate_selection": {
- "any_tag": ["cert0"]
- },
- "match": {
- "sni": [
- "127.0.0.1"
- ]
- }
- },
- {
- "default_sni": "*.caddy.localhost"
- }
- ]
- }
- }
- },
- "tls": {
- "certificates": {
- "load_files": [
- {
- "certificate": "/caddy.localhost.crt",
- "key": "/caddy.localhost.key",
- "tags": [
- "cert0"
- ]
- }
- ]
- }
- },
- "pki": {
- "certificate_authorities" : {
- "local" : {
- "install_trust": false
- }
- }
- }
- }
- }
- `, "json")
+ "admin": {
+ "listen": "localhost:2999"
+ },
+ "apps": {
+ "http": {
+ "http_port": 9080,
+ "https_port": 9443,
+ "grace_period": 1,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":9443"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from a.caddy.localhost",
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/version"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "host": [
+ "127.0.0.1"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "certificate_selection": {
+ "any_tag": ["cert0"]
+ },
+ "match": {
+ "sni": [
+ "127.0.0.1"
+ ]
+ }
+ },
+ {
+ "default_sni": "*.caddy.localhost"
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "certificates": {
+ "load_files": [
+ {
+ "certificate": "/caddy.localhost.crt",
+ "key": "/caddy.localhost.key",
+ "tags": [
+ "cert0"
+ ]
+ }
+ ]
+ }
+ },
+ "pki": {
+ "certificate_authorities" : {
+ "local" : {
+ "install_trust": false
+ }
+ }
+ }
+ }
+ }
+ `, "json")
// act and assert
// makes a request with no sni
- tester.AssertGetResponse("https://127.0.0.1:9443/version", 200, "hello from a")
+ tester.AssertGetResponse("https://127.0.0.1:9443/version", 200, "hello from a.caddy.localhost")
}
func TestDefaultSNIWithNamedHostAndExplicitIP(t *testing.T) {
-
// arrange
tester := caddytest.NewTester(t)
tester.InitServer(`
- {
- "apps": {
- "http": {
- "http_port": 9080,
- "https_port": 9443,
- "servers": {
- "srv0": {
- "listen": [
- ":9443"
- ],
- "routes": [
- {
- "handle": [
- {
- "handler": "subroute",
- "routes": [
- {
- "handle": [
- {
- "body": "hello from a",
- "handler": "static_response",
- "status_code": 200
- }
- ],
- "match": [
- {
- "path": [
- "/version"
- ]
- }
- ]
- }
- ]
- }
- ],
- "match": [
- {
- "host": [
- "a.caddy.localhost",
- "127.0.0.1"
- ]
- }
- ],
- "terminal": true
- }
- ],
- "tls_connection_policies": [
- {
- "certificate_selection": {
- "any_tag": ["cert0"]
- },
- "default_sni": "a.caddy.localhost",
- "match": {
- "sni": [
- "a.caddy.localhost",
- "127.0.0.1",
- ""
- ]
- }
- },
- {
- "default_sni": "a.caddy.localhost"
- }
- ]
- }
- }
- },
- "tls": {
- "certificates": {
- "load_files": [
- {
- "certificate": "/a.caddy.localhost.crt",
- "key": "/a.caddy.localhost.key",
- "tags": [
- "cert0"
- ]
- }
- ]
- }
- },
- "pki": {
- "certificate_authorities" : {
- "local" : {
- "install_trust": false
- }
- }
- }
- }
- }
- `, "json")
+ {
+ "admin": {
+ "listen": "localhost:2999"
+ },
+ "apps": {
+ "http": {
+ "http_port": 9080,
+ "https_port": 9443,
+ "grace_period": 1,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":9443"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from a",
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/version"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "host": [
+ "a.caddy.localhost",
+ "127.0.0.1"
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "certificate_selection": {
+ "any_tag": ["cert0"]
+ },
+ "default_sni": "a.caddy.localhost",
+ "match": {
+ "sni": [
+ "a.caddy.localhost",
+ "127.0.0.1",
+ ""
+ ]
+ }
+ },
+ {
+ "default_sni": "a.caddy.localhost"
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "certificates": {
+ "load_files": [
+ {
+ "certificate": "/a.caddy.localhost.crt",
+ "key": "/a.caddy.localhost.key",
+ "tags": [
+ "cert0"
+ ]
+ }
+ ]
+ }
+ },
+ "pki": {
+ "certificate_authorities" : {
+ "local" : {
+ "install_trust": false
+ }
+ }
+ }
+ }
+ }
+ `, "json")
// act and assert
// makes a request with no sni
@@ -204,81 +210,85 @@ func TestDefaultSNIWithNamedHostAndExplicitIP(t *testing.T) {
}
func TestDefaultSNIWithPortMappingOnly(t *testing.T) {
-
// arrange
tester := caddytest.NewTester(t)
tester.InitServer(`
- {
- "apps": {
- "http": {
- "http_port": 9080,
- "https_port": 9443,
- "servers": {
- "srv0": {
- "listen": [
- ":9443"
- ],
- "routes": [
- {
- "handle": [
- {
- "body": "hello from a.caddy.localhost",
- "handler": "static_response",
- "status_code": 200
- }
- ],
- "match": [
- {
- "path": [
- "/version"
- ]
- }
- ]
- }
- ],
- "tls_connection_policies": [
- {
- "certificate_selection": {
- "any_tag": ["cert0"]
- },
- "default_sni": "a.caddy.localhost"
- }
- ]
- }
- }
- },
- "tls": {
- "certificates": {
- "load_files": [
- {
- "certificate": "/a.caddy.localhost.crt",
- "key": "/a.caddy.localhost.key",
- "tags": [
- "cert0"
- ]
- }
- ]
- }
- },
- "pki": {
- "certificate_authorities" : {
- "local" : {
- "install_trust": false
- }
- }
- }
- }
- }
- `, "json")
+ {
+ "admin": {
+ "listen": "localhost:2999"
+ },
+ "apps": {
+ "http": {
+ "http_port": 9080,
+ "https_port": 9443,
+ "grace_period": 1,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":9443"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "body": "hello from a.caddy.localhost",
+ "handler": "static_response",
+ "status_code": 200
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/version"
+ ]
+ }
+ ]
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "certificate_selection": {
+ "any_tag": ["cert0"]
+ },
+ "default_sni": "a.caddy.localhost"
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "certificates": {
+ "load_files": [
+ {
+ "certificate": "/a.caddy.localhost.crt",
+ "key": "/a.caddy.localhost.key",
+ "tags": [
+ "cert0"
+ ]
+ }
+ ]
+ }
+ },
+ "pki": {
+ "certificate_authorities" : {
+ "local" : {
+ "install_trust": false
+ }
+ }
+ }
+ }
+ }
+ `, "json")
// act and assert
// makes a request with no sni
- tester.AssertGetResponse("https://127.0.0.1:9443/version", 200, "hello from a")
+ tester.AssertGetResponse("https://127.0.0.1:9443/version", 200, "hello from a.caddy.localhost")
}
func TestHttpOnlyOnDomainWithSNI(t *testing.T) {
caddytest.AssertAdapt(t, `
{
+ skip_install_trust
default_sni a.caddy.localhost
}
:80 {
@@ -314,6 +324,13 @@ func TestHttpOnlyOnDomainWithSNI(t *testing.T) {
]
}
}
+ },
+ "pki": {
+ "certificate_authorities": {
+ "local": {
+ "install_trust": false
+ }
+ }
}
}
}`)
diff --git a/caddytest/integration/stream_test.go b/caddytest/integration/stream_test.go
new file mode 100644
index 00000000..d2f2fd79
--- /dev/null
+++ b/caddytest/integration/stream_test.go
@@ -0,0 +1,439 @@
+package integration
+
+import (
+ "compress/gzip"
+ "context"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/caddyserver/caddy/v2/caddytest"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/h2c"
+)
+
+// (see https://github.com/caddyserver/caddy/issues/3556 for use case)
+func TestH2ToH2CStream(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+ {
+ "admin": {
+ "listen": "localhost:2999"
+ },
+ "apps": {
+ "http": {
+ "http_port": 9080,
+ "https_port": 9443,
+ "grace_period": 1,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":9443"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "transport": {
+ "protocol": "http",
+ "compression": false,
+ "versions": [
+ "h2c",
+ "2"
+ ]
+ },
+ "upstreams": [
+ {
+ "dial": "localhost:54321"
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/tov2ray"
+ ]
+ }
+ ]
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "certificate_selection": {
+ "any_tag": ["cert0"]
+ },
+ "default_sni": "a.caddy.localhost"
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "certificates": {
+ "load_files": [
+ {
+ "certificate": "/a.caddy.localhost.crt",
+ "key": "/a.caddy.localhost.key",
+ "tags": [
+ "cert0"
+ ]
+ }
+ ]
+ }
+ },
+ "pki": {
+ "certificate_authorities" : {
+ "local" : {
+ "install_trust": false
+ }
+ }
+ }
+ }
+ }
+ `, "json")
+
+ expectedBody := "some data to be echoed"
+ // start the server
+ server := testH2ToH2CStreamServeH2C(t)
+ go server.ListenAndServe()
+ defer func() {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
+ defer cancel()
+ server.Shutdown(ctx)
+ }()
+
+ r, w := io.Pipe()
+ req := &http.Request{
+ Method: "PUT",
+ Body: io.NopCloser(r),
+ URL: &url.URL{
+ Scheme: "https",
+ Host: "127.0.0.1:9443",
+ Path: "/tov2ray",
+ },
+ Proto: "HTTP/2",
+ ProtoMajor: 2,
+ ProtoMinor: 0,
+ Header: make(http.Header),
+ }
+ // Disable any compression method from server.
+ req.Header.Set("Accept-Encoding", "identity")
+
+ resp := tester.AssertResponseCode(req, http.StatusOK)
+ if resp.StatusCode != http.StatusOK {
+ return
+ }
+ go func() {
+ fmt.Fprint(w, expectedBody)
+ w.Close()
+ }()
+
+ defer resp.Body.Close()
+ bytes, err := io.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatalf("unable to read the response body %s", err)
+ }
+
+ body := string(bytes)
+
+ if !strings.Contains(body, expectedBody) {
+ t.Errorf("requesting \"%s\" expected response body \"%s\" but got \"%s\"", req.RequestURI, expectedBody, body)
+ }
+}
+
+func testH2ToH2CStreamServeH2C(t *testing.T) *http.Server {
+ h2s := &http2.Server{}
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ rstring, err := httputil.DumpRequest(r, false)
+ if err == nil {
+ t.Logf("h2c server received req: %s", rstring)
+ }
+ // We only accept HTTP/2!
+ if r.ProtoMajor != 2 {
+ t.Error("Not a HTTP/2 request, rejected!")
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ if r.Host != "127.0.0.1:9443" {
+ t.Errorf("r.Host doesn't match, %v!", r.Host)
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+
+ if !strings.HasPrefix(r.URL.Path, "/tov2ray") {
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+
+ w.Header().Set("Cache-Control", "no-store")
+ w.WriteHeader(200)
+ http.NewResponseController(w).Flush()
+
+ buf := make([]byte, 4*1024)
+
+ for {
+ n, err := r.Body.Read(buf)
+ if n > 0 {
+ w.Write(buf[:n])
+ }
+
+ if err != nil {
+ if err == io.EOF {
+ r.Body.Close()
+ }
+ break
+ }
+ }
+ })
+
+ server := &http.Server{
+ Addr: "127.0.0.1:54321",
+ Handler: h2c.NewHandler(handler, h2s),
+ }
+ return server
+}
+
+// (see https://github.com/caddyserver/caddy/issues/3606 for use case)
+func TestH2ToH1ChunkedResponse(t *testing.T) {
+ tester := caddytest.NewTester(t)
+ tester.InitServer(`
+{
+ "admin": {
+ "listen": "localhost:2999"
+ },
+ "logging": {
+ "logs": {
+ "default": {
+ "level": "DEBUG"
+ }
+ }
+ },
+ "apps": {
+ "http": {
+ "http_port": 9080,
+ "https_port": 9443,
+ "grace_period": 1,
+ "servers": {
+ "srv0": {
+ "listen": [
+ ":9443"
+ ],
+ "routes": [
+ {
+ "handle": [
+ {
+ "handler": "subroute",
+ "routes": [
+ {
+ "handle": [
+ {
+ "encodings": {
+ "gzip": {}
+ },
+ "handler": "encode"
+ }
+ ]
+ },
+ {
+ "handle": [
+ {
+ "handler": "reverse_proxy",
+ "upstreams": [
+ {
+ "dial": "localhost:54321"
+ }
+ ]
+ }
+ ],
+ "match": [
+ {
+ "path": [
+ "/tov2ray"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "terminal": true
+ }
+ ],
+ "tls_connection_policies": [
+ {
+ "certificate_selection": {
+ "any_tag": [
+ "cert0"
+ ]
+ },
+ "default_sni": "a.caddy.localhost"
+ }
+ ]
+ }
+ }
+ },
+ "tls": {
+ "certificates": {
+ "load_files": [
+ {
+ "certificate": "/a.caddy.localhost.crt",
+ "key": "/a.caddy.localhost.key",
+ "tags": [
+ "cert0"
+ ]
+ }
+ ]
+ }
+ },
+ "pki": {
+ "certificate_authorities": {
+ "local": {
+ "install_trust": false
+ }
+ }
+ }
+ }
+}
+ `, "json")
+
+	// need a body large enough to trigger Caddy's gzip compression, i.e. larger than the encoder's minimum length (gzip.miniLength)
+ expectedBody, err := GenerateRandomString(1024)
+ if err != nil {
+ t.Fatalf("generate expected body failed, err: %s", err)
+ }
+
+ // start the server
+ server := testH2ToH1ChunkedResponseServeH1(t)
+ go server.ListenAndServe()
+ defer func() {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
+ defer cancel()
+ server.Shutdown(ctx)
+ }()
+
+ r, w := io.Pipe()
+ req := &http.Request{
+ Method: "PUT",
+ Body: io.NopCloser(r),
+ URL: &url.URL{
+ Scheme: "https",
+ Host: "127.0.0.1:9443",
+ Path: "/tov2ray",
+ },
+ Proto: "HTTP/2",
+ ProtoMajor: 2,
+ ProtoMinor: 0,
+ Header: make(http.Header),
+ }
+ // underlying transport will automatically add gzip
+ // req.Header.Set("Accept-Encoding", "gzip")
+ go func() {
+ fmt.Fprint(w, expectedBody)
+ w.Close()
+ }()
+ resp := tester.AssertResponseCode(req, http.StatusOK)
+ if resp.StatusCode != http.StatusOK {
+ return
+ }
+
+ defer resp.Body.Close()
+ bytes, err := io.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatalf("unable to read the response body %s", err)
+ }
+
+ body := string(bytes)
+
+ if body != expectedBody {
+ t.Errorf("requesting \"%s\" expected response body \"%s\" but got \"%s\"", req.RequestURI, expectedBody, body)
+ }
+}
+
+func testH2ToH1ChunkedResponseServeH1(t *testing.T) *http.Server {
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Host != "127.0.0.1:9443" {
+ t.Errorf("r.Host doesn't match, %v!", r.Host)
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+
+ if !strings.HasPrefix(r.URL.Path, "/tov2ray") {
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+
+ defer r.Body.Close()
+ bytes, err := io.ReadAll(r.Body)
+ if err != nil {
+ t.Fatalf("unable to read the response body %s", err)
+ }
+
+ n := len(bytes)
+
+ var writer io.Writer
+ if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
+ gw, err := gzip.NewWriterLevel(w, 5)
+ if err != nil {
+ t.Error("can't return gzip data")
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ defer gw.Close()
+ writer = gw
+ w.Header().Set("Content-Encoding", "gzip")
+ w.Header().Del("Content-Length")
+ w.WriteHeader(200)
+ } else {
+ writer = w
+ }
+ if n > 0 {
+ writer.Write(bytes[:])
+ }
+ })
+
+ server := &http.Server{
+ Addr: "127.0.0.1:54321",
+ Handler: handler,
+ }
+ return server
+}
+
+// GenerateRandomBytes returns securely generated random bytes.
+// It will return an error if the system's secure random
+// number generator fails to function correctly, in which
+// case the caller should not continue.
+func GenerateRandomBytes(n int) ([]byte, error) {
+ b := make([]byte, n)
+ _, err := rand.Read(b)
+ // Note that err == nil only if we read len(b) bytes.
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+// GenerateRandomString returns a securely generated random string.
+// It will return an error if the system's secure random
+// number generator fails to function correctly, in which
+// case the caller should not continue.
+func GenerateRandomString(n int) (string, error) {
+ const letters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
+ bytes, err := GenerateRandomBytes(n)
+ if err != nil {
+ return "", err
+ }
+ for i, b := range bytes {
+ bytes[i] = letters[b%byte(len(letters))]
+ }
+ return string(bytes), nil
+}
diff --git a/caddytest/integration/testdata/foo.txt b/caddytest/integration/testdata/foo.txt
new file mode 100644
index 00000000..19102815
--- /dev/null
+++ b/caddytest/integration/testdata/foo.txt
@@ -0,0 +1 @@
+foo
\ No newline at end of file
diff --git a/caddytest/integration/testdata/foo_with_multiple_trailing_newlines.txt b/caddytest/integration/testdata/foo_with_multiple_trailing_newlines.txt
new file mode 100644
index 00000000..75d7bfb8
--- /dev/null
+++ b/caddytest/integration/testdata/foo_with_multiple_trailing_newlines.txt
@@ -0,0 +1,2 @@
+foo
+
diff --git a/caddytest/integration/testdata/foo_with_trailing_newline.txt b/caddytest/integration/testdata/foo_with_trailing_newline.txt
new file mode 100644
index 00000000..257cc564
--- /dev/null
+++ b/caddytest/integration/testdata/foo_with_trailing_newline.txt
@@ -0,0 +1 @@
+foo
diff --git a/caddytest/integration/testdata/import_respond.txt b/caddytest/integration/testdata/import_respond.txt
new file mode 100644
index 00000000..45130885
--- /dev/null
+++ b/caddytest/integration/testdata/import_respond.txt
@@ -0,0 +1 @@
+respond "'I am {args[0]}', hears {args[1]}"
\ No newline at end of file
diff --git a/caddytest/integration/testdata/index.localhost.html b/caddytest/integration/testdata/index.localhost.html
new file mode 100644
index 00000000..e69de29b
diff --git a/caddytest/leafcert.pem b/caddytest/leafcert.pem
new file mode 100644
index 00000000..03febfd3
--- /dev/null
+++ b/caddytest/leafcert.pem
@@ -0,0 +1,15 @@
+-----BEGIN CERTIFICATE-----
+MIICUTCCAfugAwIBAgIBADANBgkqhkiG9w0BAQQFADBXMQswCQYDVQQGEwJDTjEL
+MAkGA1UECBMCUE4xCzAJBgNVBAcTAkNOMQswCQYDVQQKEwJPTjELMAkGA1UECxMC
+VU4xFDASBgNVBAMTC0hlcm9uZyBZYW5nMB4XDTA1MDcxNTIxMTk0N1oXDTA1MDgx
+NDIxMTk0N1owVzELMAkGA1UEBhMCQ04xCzAJBgNVBAgTAlBOMQswCQYDVQQHEwJD
+TjELMAkGA1UEChMCT04xCzAJBgNVBAsTAlVOMRQwEgYDVQQDEwtIZXJvbmcgWWFu
+ZzBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQCp5hnG7ogBhtlynpOS21cBewKE/B7j
+V14qeyslnr26xZUsSVko36ZnhiaO/zbMOoRcKK9vEcgMtcLFuQTWDl3RAgMBAAGj
+gbEwga4wHQYDVR0OBBYEFFXI70krXeQDxZgbaCQoR4jUDncEMH8GA1UdIwR4MHaA
+FFXI70krXeQDxZgbaCQoR4jUDncEoVukWTBXMQswCQYDVQQGEwJDTjELMAkGA1UE
+CBMCUE4xCzAJBgNVBAcTAkNOMQswCQYDVQQKEwJPTjELMAkGA1UECxMCVU4xFDAS
+BgNVBAMTC0hlcm9uZyBZYW5nggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEE
+BQADQQA/ugzBrjjK9jcWnDVfGHlk3icNRq0oV7Ri32z/+HQX67aRfgZu7KWdI+Ju
+Wm7DCfrPNGVwFWUQOmsPue9rZBgO
+-----END CERTIFICATE-----
diff --git a/cmd/caddy/main.go b/cmd/caddy/main.go
index 2383546a..f1aeda0a 100644
--- a/cmd/caddy/main.go
+++ b/cmd/caddy/main.go
@@ -1,3 +1,8 @@
+// The below line is required to enable post-quantum key agreement in Go 1.23
+// by default without insisting on setting a minimum version of 1.23 in go.mod.
+// See https://github.com/caddyserver/caddy/issues/6540#issuecomment-2313094905
+//go:debug tlskyber=1
+
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,11 +24,13 @@
// There is no need to modify the Caddy source code to customize your
// builds. You can easily build a custom Caddy with these simple steps:
//
-// 1. Copy this file (main.go) into a new folder
-// 2. Edit the imports below to include the modules you want plugged in
-// 3. Run `go mod init caddy`
-// 4. Run `go install` or `go build` - you now have a custom binary!
+// 1. Copy this file (main.go) into a new folder
+// 2. Edit the imports below to include the modules you want plugged in
+// 3. Run `go mod init caddy`
+// 4. Run `go install` or `go build` - you now have a custom binary!
//
+// Or you can use xcaddy which does it all for you as a command:
+// https://github.com/caddyserver/xcaddy
package main
import (
diff --git a/cmd/caddy/setcap.sh b/cmd/caddy/setcap.sh
new file mode 100755
index 00000000..39aea2d6
--- /dev/null
+++ b/cmd/caddy/setcap.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# USAGE:
+# go run -exec ./setcap.sh main.go
+#
+# (Example: `go run -exec ./setcap.sh main.go run --config caddy.json`)
+#
+# For some reason this does not work on my Arch system, so if you find that's
+# the case, you can instead do:
+#
+# go build && ./setcap.sh ./caddy
+#
+# but this will leave the ./caddy binary laying around.
+#
+
+sudo setcap cap_net_bind_service=+ep "$1"
+"$@"
diff --git a/cmd/cobra.go b/cmd/cobra.go
new file mode 100644
index 00000000..9ecb389e
--- /dev/null
+++ b/cmd/cobra.go
@@ -0,0 +1,161 @@
+package caddycmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+var defaultFactory = newRootCommandFactory(func() *cobra.Command {
+ return &cobra.Command{
+ Use: "caddy",
+ Long: `Caddy is an extensible server platform written in Go.
+
+At its core, Caddy merely manages configuration. Modules are plugged
+in statically at compile-time to provide useful functionality. Caddy's
+standard distribution includes common modules to serve HTTP, TLS,
+and PKI applications, including the automation of certificates.
+
+To run Caddy, use:
+
+ - 'caddy run' to run Caddy in the foreground (recommended).
+ - 'caddy start' to start Caddy in the background; only do this
+ if you will be keeping the terminal window open until you run
+ 'caddy stop' to close the server.
+
+When Caddy is started, it opens a locally-bound administrative socket
+to which configuration can be POSTed via a restful HTTP API (see
+https://caddyserver.com/docs/api).
+
+Caddy's native configuration format is JSON. However, config adapters
+can be used to convert other config formats to JSON when Caddy receives
+its configuration. The Caddyfile is a built-in config adapter that is
+popular for hand-written configurations due to its straightforward
+syntax (see https://caddyserver.com/docs/caddyfile). Many third-party
+adapters are available (see https://caddyserver.com/docs/config-adapters).
+Use 'caddy adapt' to see how a config translates to JSON.
+
+For convenience, the CLI can act as an HTTP client to give Caddy its
+initial configuration for you. If a file named Caddyfile is in the
+current working directory, it will do this automatically. Otherwise,
+you can use the --config flag to specify the path to a config file.
+
+Some special-purpose subcommands build and load a configuration file
+for you directly from command line input; for example:
+
+ - caddy file-server
+ - caddy reverse-proxy
+ - caddy respond
+
+These commands disable the administration endpoint because their
+configuration is specified solely on the command line.
+
+In general, the most common way to run Caddy is simply:
+
+ $ caddy run
+
+Or, with a configuration file:
+
+ $ caddy run --config caddy.json
+
+If running interactively in a terminal, running Caddy in the
+background may be more convenient:
+
+ $ caddy start
+ ...
+ $ caddy stop
+
+This allows you to run other commands while Caddy stays running.
+Be sure to stop Caddy before you close the terminal!
+
+Depending on the system, Caddy may need permission to bind to low
+ports. One way to do this on Linux is to use setcap:
+
+ $ sudo setcap cap_net_bind_service=+ep $(which caddy)
+
+Remember to run that command again after replacing the binary.
+
+See the Caddy website for tutorials, configuration structure,
+syntax, and module documentation: https://caddyserver.com/docs/
+
+Custom Caddy builds are available on the Caddy download page at:
+https://caddyserver.com/download
+
+The xcaddy command can be used to build Caddy from source with or
+without additional plugins: https://github.com/caddyserver/xcaddy
+
+Where possible, Caddy should be installed using officially-supported
+package installers: https://caddyserver.com/docs/install
+
+Instructions for running Caddy in production are also available:
+https://caddyserver.com/docs/running
+`,
+ Example: ` $ caddy run
+ $ caddy run --config caddy.json
+ $ caddy reload --config caddy.json
+ $ caddy stop`,
+
+ // kind of annoying to have all the help text printed out if
+ // caddy has an error provisioning its modules, for instance...
+ SilenceUsage: true,
+ Version: onlyVersionText(),
+ }
+})
+
+const fullDocsFooter = `Full documentation is available at:
+https://caddyserver.com/docs/command-line`
+
+func init() {
+ defaultFactory.Use(func(rootCmd *cobra.Command) {
+ rootCmd.SetVersionTemplate("{{.Version}}\n")
+ rootCmd.SetHelpTemplate(rootCmd.HelpTemplate() + "\n" + fullDocsFooter + "\n")
+ })
+}
+
+func onlyVersionText() string {
+ _, f := caddy.Version()
+ return f
+}
+
+func caddyCmdToCobra(caddyCmd Command) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: caddyCmd.Name + " " + caddyCmd.Usage,
+ Short: caddyCmd.Short,
+ Long: caddyCmd.Long,
+ }
+ if caddyCmd.CobraFunc != nil {
+ caddyCmd.CobraFunc(cmd)
+ } else {
+ cmd.RunE = WrapCommandFuncForCobra(caddyCmd.Func)
+ cmd.Flags().AddGoFlagSet(caddyCmd.Flags)
+ }
+ return cmd
+}
+
+// WrapCommandFuncForCobra wraps a Caddy CommandFunc for use
+// in a cobra command's RunE field.
+func WrapCommandFuncForCobra(f CommandFunc) func(cmd *cobra.Command, _ []string) error {
+ return func(cmd *cobra.Command, _ []string) error {
+ status, err := f(Flags{cmd.Flags()})
+ if status > 1 {
+ cmd.SilenceErrors = true
+ return &exitError{ExitCode: status, Err: err}
+ }
+ return err
+ }
+}
+
+// exitError carries the exit code from CommandFunc to Main()
+type exitError struct {
+ ExitCode int
+ Err error
+}
+
+func (e *exitError) Error() string {
+ if e.Err == nil {
+ return fmt.Sprintf("exiting with status %d", e.ExitCode)
+ }
+ return e.Err.Error()
+}
diff --git a/cmd/commandfactory.go b/cmd/commandfactory.go
new file mode 100644
index 00000000..ac571a21
--- /dev/null
+++ b/cmd/commandfactory.go
@@ -0,0 +1,28 @@
+package caddycmd
+
+import (
+ "github.com/spf13/cobra"
+)
+
+type rootCommandFactory struct {
+ constructor func() *cobra.Command
+ options []func(*cobra.Command)
+}
+
+func newRootCommandFactory(fn func() *cobra.Command) *rootCommandFactory {
+ return &rootCommandFactory{
+ constructor: fn,
+ }
+}
+
+func (f *rootCommandFactory) Use(fn func(cmd *cobra.Command)) {
+ f.options = append(f.options, fn)
+}
+
+func (f *rootCommandFactory) Build() *cobra.Command {
+ o := f.constructor()
+ for _, v := range f.options {
+ v(o)
+ }
+ return o
+}
diff --git a/cmd/commandfuncs.go b/cmd/commandfuncs.go
index efdcfdca..2adf95bb 100644
--- a/cmd/commandfuncs.go
+++ b/cmd/commandfuncs.go
@@ -16,32 +16,44 @@ package caddycmd
import (
"bytes"
+ "context"
"crypto/rand"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"log"
"net"
"net/http"
"os"
"os/exec"
- "reflect"
"runtime"
"runtime/debug"
- "sort"
"strings"
+ "github.com/aryann/difflib"
+ "go.uber.org/zap"
+
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "go.uber.org/zap"
+ "github.com/caddyserver/caddy/v2/internal"
)
func cmdStart(fl Flags) (int, error) {
- startCmdConfigFlag := fl.String("config")
- startCmdConfigAdapterFlag := fl.String("adapter")
- startCmdWatchFlag := fl.Bool("watch")
+ configFlag := fl.String("config")
+ configAdapterFlag := fl.String("adapter")
+ pidfileFlag := fl.String("pidfile")
+ watchFlag := fl.Bool("watch")
+
+ var err error
+ var envfileFlag []string
+ envfileFlag, err = fl.GetStringSlice("envfile")
+ if err != nil {
+ return caddy.ExitCodeFailedStartup,
+ fmt.Errorf("reading envfile flag: %v", err)
+ }
// open a listener to which the child process will connect when
// it is ready to confirm that it has successfully started
@@ -62,16 +74,27 @@ func cmdStart(fl Flags) (int, error) {
// sure by giving it some random bytes and having it echo
// them back to us)
cmd := exec.Command(os.Args[0], "run", "--pingback", ln.Addr().String())
- if startCmdConfigFlag != "" {
- cmd.Args = append(cmd.Args, "--config", startCmdConfigFlag)
+ // we should be able to run caddy in relative paths
+ if errors.Is(cmd.Err, exec.ErrDot) {
+ cmd.Err = nil
}
- if startCmdConfigAdapterFlag != "" {
- cmd.Args = append(cmd.Args, "--adapter", startCmdConfigAdapterFlag)
+ if configFlag != "" {
+ cmd.Args = append(cmd.Args, "--config", configFlag)
}
- if startCmdWatchFlag {
+
+ for _, envfile := range envfileFlag {
+ cmd.Args = append(cmd.Args, "--envfile", envfile)
+ }
+ if configAdapterFlag != "" {
+ cmd.Args = append(cmd.Args, "--adapter", configAdapterFlag)
+ }
+ if watchFlag {
cmd.Args = append(cmd.Args, "--watch")
}
- stdinpipe, err := cmd.StdinPipe()
+ if pidfileFlag != "" {
+ cmd.Args = append(cmd.Args, "--pidfile", pidfileFlag)
+ }
+ stdinPipe, err := cmd.StdinPipe()
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("creating stdin pipe: %v", err)
@@ -83,7 +106,8 @@ func cmdStart(fl Flags) (int, error) {
expect := make([]byte, 32)
_, err = rand.Read(expect)
if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("generating random confirmation bytes: %v", err)
+ return caddy.ExitCodeFailedStartup,
+ fmt.Errorf("generating random confirmation bytes: %v", err)
}
// begin writing the confirmation bytes to the child's
@@ -91,14 +115,15 @@ func cmdStart(fl Flags) (int, error) {
// started yet, and writing synchronously would result
// in a deadlock
go func() {
- stdinpipe.Write(expect)
- stdinpipe.Close()
+ _, _ = stdinPipe.Write(expect)
+ stdinPipe.Close()
}()
// start the process
err = cmd.Start()
if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("starting caddy process: %v", err)
+ return caddy.ExitCodeFailedStartup,
+ fmt.Errorf("starting caddy process: %v", err)
}
// there are two ways we know we're done: either
@@ -111,7 +136,7 @@ func cmdStart(fl Flags) (int, error) {
for {
conn, err := ln.Accept()
if err != nil {
- if !strings.Contains(err.Error(), "use of closed network connection") {
+ if !errors.Is(err, net.ErrClosed) {
log.Println(err)
}
break
@@ -144,34 +169,39 @@ func cmdStart(fl Flags) (int, error) {
}
func cmdRun(fl Flags) (int, error) {
- runCmdConfigFlag := fl.String("config")
- runCmdConfigAdapterFlag := fl.String("adapter")
- runCmdResumeFlag := fl.Bool("resume")
- runCmdPrintEnvFlag := fl.Bool("environ")
- runCmdWatchFlag := fl.Bool("watch")
- runCmdPingbackFlag := fl.String("pingback")
+ caddy.TrapSignals()
+
+ configFlag := fl.String("config")
+ configAdapterFlag := fl.String("adapter")
+ resumeFlag := fl.Bool("resume")
+ printEnvFlag := fl.Bool("environ")
+ watchFlag := fl.Bool("watch")
+ pidfileFlag := fl.String("pidfile")
+ pingbackFlag := fl.String("pingback")
+
+ // load all additional envs as soon as possible
+ err := handleEnvFileFlag(fl)
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
+ }
// if we are supposed to print the environment, do that first
- if runCmdPrintEnvFlag {
+ if printEnvFlag {
printEnvironment()
}
- // TODO: This is TEMPORARY, until the RCs
- moveStorage()
-
// load the config, depending on flags
var config []byte
- var err error
- if runCmdResumeFlag {
- config, err = ioutil.ReadFile(caddy.ConfigAutosavePath)
- if os.IsNotExist(err) {
+ if resumeFlag {
+ config, err = os.ReadFile(caddy.ConfigAutosavePath)
+ if errors.Is(err, fs.ErrNotExist) {
// not a bad error; just can't resume if autosave file doesn't exist
caddy.Log().Info("no autosave file exists", zap.String("autosave_file", caddy.ConfigAutosavePath))
- runCmdResumeFlag = false
+ resumeFlag = false
} else if err != nil {
return caddy.ExitCodeFailedStartup, err
} else {
- if runCmdConfigFlag == "" {
+ if configFlag == "" {
caddy.Log().Info("resuming from last configuration",
zap.String("autosave_file", caddy.ConfigAutosavePath))
} else {
@@ -184,13 +214,23 @@ func cmdRun(fl Flags) (int, error) {
}
// we don't use 'else' here since this value might have been changed in 'if' block; i.e. not mutually exclusive
var configFile string
- if !runCmdResumeFlag {
- config, configFile, err = loadConfig(runCmdConfigFlag, runCmdConfigAdapterFlag)
+ if !resumeFlag {
+ config, configFile, err = LoadConfig(configFlag, configAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
}
+ // create pidfile now, in case loading config takes a while (issue #5477)
+ if pidfileFlag != "" {
+ err := caddy.PIDFile(pidfileFlag)
+ if err != nil {
+ caddy.Log().Error("unable to write PID file",
+ zap.String("pidfile", pidfileFlag),
+ zap.Error(err))
+ }
+ }
+
// run the initial config
err = caddy.Load(config, true)
if err != nil {
@@ -200,13 +240,13 @@ func cmdRun(fl Flags) (int, error) {
// if we are to report to another process the successful start
// of the server, do so now by echoing back contents of stdin
- if runCmdPingbackFlag != "" {
- confirmationBytes, err := ioutil.ReadAll(os.Stdin)
+ if pingbackFlag != "" {
+ confirmationBytes, err := io.ReadAll(os.Stdin)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading confirmation bytes from stdin: %v", err)
}
- conn, err := net.Dial("tcp", runCmdPingbackFlag)
+ conn, err := net.Dial("tcp", pingbackFlag)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("dialing confirmation address: %v", err)
@@ -215,14 +255,14 @@ func cmdRun(fl Flags) (int, error) {
_, err = conn.Write(confirmationBytes)
if err != nil {
return caddy.ExitCodeFailedStartup,
- fmt.Errorf("writing confirmation bytes to %s: %v", runCmdPingbackFlag, err)
+ fmt.Errorf("writing confirmation bytes to %s: %v", pingbackFlag, err)
}
}
// if enabled, reload config file automatically on changes
// (this better only be used in dev!)
- if runCmdWatchFlag {
- go watchConfigFile(configFile, runCmdConfigAdapterFlag)
+ if watchFlag {
+ go watchConfigFile(configFile, configAdapterFlag)
}
// warn if the environment does not provide enough information about the disk
@@ -248,39 +288,33 @@ func cmdRun(fl Flags) (int, error) {
}
func cmdStop(fl Flags) (int, error) {
- stopCmdAddrFlag := fl.String("address")
+ addressFlag := fl.String("address")
+ configFlag := fl.String("config")
+ configAdapterFlag := fl.String("adapter")
- adminAddr := caddy.DefaultAdminListen
- if stopCmdAddrFlag != "" {
- adminAddr = stopCmdAddrFlag
- }
- stopEndpoint := fmt.Sprintf("http://%s/stop", adminAddr)
-
- req, err := http.NewRequest(http.MethodPost, stopEndpoint, nil)
+ adminAddr, err := DetermineAdminAPIAddress(addressFlag, nil, configFlag, configAdapterFlag)
if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("making request: %v", err)
+ return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
}
- req.Header.Set("Origin", adminAddr)
- err = apiRequest(req)
+ resp, err := AdminAPIRequest(adminAddr, http.MethodPost, "/stop", nil, nil)
if err != nil {
- caddy.Log().Warn("failed using API to stop instance",
- zap.String("endpoint", stopEndpoint),
- zap.Error(err),
- )
+ caddy.Log().Warn("failed using API to stop instance", zap.Error(err))
return caddy.ExitCodeFailedStartup, err
}
+ defer resp.Body.Close()
return caddy.ExitCodeSuccess, nil
}
func cmdReload(fl Flags) (int, error) {
- reloadCmdConfigFlag := fl.String("config")
- reloadCmdConfigAdapterFlag := fl.String("adapter")
- reloadCmdAddrFlag := fl.String("address")
+ configFlag := fl.String("config")
+ configAdapterFlag := fl.String("adapter")
+ addressFlag := fl.String("address")
+ forceFlag := fl.Bool("force")
// get the config in caddy's native format
- config, configFile, err := loadConfig(reloadCmdConfigFlag, reloadCmdConfigAdapterFlag)
+ config, configFile, err := LoadConfig(configFlag, configAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
@@ -288,208 +322,188 @@ func cmdReload(fl Flags) (int, error) {
return caddy.ExitCodeFailedStartup, fmt.Errorf("no config file to load")
}
- // get the address of the admin listener and craft endpoint URL
- adminAddr := reloadCmdAddrFlag
- if adminAddr == "" && len(config) > 0 {
- var tmpStruct struct {
- Admin caddy.AdminConfig `json:"admin"`
- }
- err = json.Unmarshal(config, &tmpStruct)
- if err != nil {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("unmarshaling admin listener address from config: %v", err)
- }
- adminAddr = tmpStruct.Admin.Listen
- }
- if adminAddr == "" {
- adminAddr = caddy.DefaultAdminListen
- }
- loadEndpoint := fmt.Sprintf("http://%s/load", adminAddr)
-
- // prepare the request to update the configuration
- req, err := http.NewRequest(http.MethodPost, loadEndpoint, bytes.NewReader(config))
+ adminAddr, err := DetermineAdminAPIAddress(addressFlag, config, configFlag, configAdapterFlag)
if err != nil {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("making request: %v", err)
+ return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
}
- req.Header.Set("Content-Type", "application/json")
- req.Header.Set("Origin", adminAddr)
- err = apiRequest(req)
+ // optionally force a config reload
+ headers := make(http.Header)
+ if forceFlag {
+ headers.Set("Cache-Control", "must-revalidate")
+ }
+
+ resp, err := AdminAPIRequest(adminAddr, http.MethodPost, "/load", headers, bytes.NewReader(config))
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("sending configuration to instance: %v", err)
}
+ defer resp.Body.Close()
return caddy.ExitCodeSuccess, nil
}
func cmdVersion(_ Flags) (int, error) {
- goModule := caddy.GoModule()
- fmt.Print(goModule.Version)
- if goModule.Sum != "" {
- // a build with a known version will also have a checksum
- fmt.Printf(" %s", goModule.Sum)
- }
- if goModule.Replace != nil {
- fmt.Printf(" => %s", goModule.Replace.Path)
- if goModule.Replace.Version != "" {
- fmt.Printf(" %s", goModule.Replace.Version)
- }
- }
- fmt.Println()
+ _, full := caddy.Version()
+ fmt.Println(full)
return caddy.ExitCodeSuccess, nil
}
-func cmdBuildInfo(fl Flags) (int, error) {
+func cmdBuildInfo(_ Flags) (int, error) {
bi, ok := debug.ReadBuildInfo()
if !ok {
return caddy.ExitCodeFailedStartup, fmt.Errorf("no build information")
}
-
- fmt.Printf("path: %s\n", bi.Path)
- fmt.Printf("main: %s %s %s\n", bi.Main.Path, bi.Main.Version, bi.Main.Sum)
- fmt.Println("dependencies:")
-
- for _, goMod := range bi.Deps {
- fmt.Printf("%s %s %s", goMod.Path, goMod.Version, goMod.Sum)
- if goMod.Replace != nil {
- fmt.Printf(" => %s %s %s", goMod.Replace.Path, goMod.Replace.Version, goMod.Replace.Sum)
- }
- fmt.Println()
- }
+ fmt.Println(bi)
return caddy.ExitCodeSuccess, nil
}
func cmdListModules(fl Flags) (int, error) {
+ packages := fl.Bool("packages")
versions := fl.Bool("versions")
+ skipStandard := fl.Bool("skip-standard")
- bi, ok := debug.ReadBuildInfo()
- if !ok || !versions {
- // if there's no build information,
- // just print out the modules
+ printModuleInfo := func(mi moduleInfo) {
+ fmt.Print(mi.caddyModuleID)
+ if versions && mi.goModule != nil {
+ fmt.Print(" " + mi.goModule.Version)
+ }
+ if packages && mi.goModule != nil {
+ fmt.Print(" " + mi.goModule.Path)
+ if mi.goModule.Replace != nil {
+ fmt.Print(" => " + mi.goModule.Replace.Path)
+ }
+ }
+ if mi.err != nil {
+ fmt.Printf(" [%v]", mi.err)
+ }
+ fmt.Println()
+ }
+
+ // organize modules by whether they come with the standard distribution
+ standard, nonstandard, unknown, err := getModules()
+ if err != nil {
+ // oh well, just print the module IDs and exit
for _, m := range caddy.Modules() {
fmt.Println(m)
}
return caddy.ExitCodeSuccess, nil
}
- for _, modID := range caddy.Modules() {
- modInfo, err := caddy.GetModule(modID)
- if err != nil {
- // that's weird
- fmt.Println(modID)
- continue
- }
-
- // to get the Caddy plugin's version info, we need to know
- // the package that the Caddy module's value comes from; we
- // can use reflection but we need a non-pointer value (I'm
- // not sure why), and since New() should return a pointer
- // value, we need to dereference it first
- iface := interface{}(modInfo.New())
- if rv := reflect.ValueOf(iface); rv.Kind() == reflect.Ptr {
- iface = reflect.New(reflect.TypeOf(iface).Elem()).Elem().Interface()
- }
- modPkgPath := reflect.TypeOf(iface).PkgPath()
-
- // now we find the Go module that the Caddy module's package
- // belongs to; we assume the Caddy module package path will
- // be prefixed by its Go module path, and we will choose the
- // longest matching prefix in case there are nested modules
- var matched *debug.Module
- for _, dep := range bi.Deps {
- if strings.HasPrefix(modPkgPath, dep.Path) {
- if matched == nil || len(dep.Path) > len(matched.Path) {
- matched = dep
- }
+ // Standard modules (always shipped with Caddy)
+ if !skipStandard {
+ if len(standard) > 0 {
+ for _, mod := range standard {
+ printModuleInfo(mod)
}
}
-
- // if we could find no matching module, just print out
- // the module ID instead
- if matched == nil {
- fmt.Println(modID)
- continue
- }
-
- fmt.Printf("%s %s\n", modID, matched.Version)
+ fmt.Printf("\n Standard modules: %d\n", len(standard))
}
+ // Non-standard modules (third party plugins)
+ if len(nonstandard) > 0 {
+ if len(standard) > 0 && !skipStandard {
+ fmt.Println()
+ }
+ for _, mod := range nonstandard {
+ printModuleInfo(mod)
+ }
+ }
+ fmt.Printf("\n Non-standard modules: %d\n", len(nonstandard))
+
+ // Unknown modules (couldn't get Caddy module info)
+ if len(unknown) > 0 {
+ if (len(standard) > 0 && !skipStandard) || len(nonstandard) > 0 {
+ fmt.Println()
+ }
+ for _, mod := range unknown {
+ printModuleInfo(mod)
+ }
+ }
+ fmt.Printf("\n Unknown modules: %d\n", len(unknown))
+
return caddy.ExitCodeSuccess, nil
}
-func cmdEnviron(_ Flags) (int, error) {
+func cmdEnviron(fl Flags) (int, error) {
+ // load all additional envs as soon as possible
+ err := handleEnvFileFlag(fl)
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
+ }
+
printEnvironment()
return caddy.ExitCodeSuccess, nil
}
func cmdAdaptConfig(fl Flags) (int, error) {
- adaptCmdInputFlag := fl.String("config")
- adaptCmdAdapterFlag := fl.String("adapter")
- adaptCmdPrettyFlag := fl.Bool("pretty")
- adaptCmdValidateFlag := fl.Bool("validate")
+ inputFlag := fl.String("config")
+ adapterFlag := fl.String("adapter")
+ prettyFlag := fl.Bool("pretty")
+ validateFlag := fl.Bool("validate")
- // if no input file was specified, try a default
- // Caddyfile if the Caddyfile adapter is plugged in
- if adaptCmdInputFlag == "" && caddyconfig.GetAdapter("caddyfile") != nil {
- _, err := os.Stat("Caddyfile")
- if err == nil {
- // default Caddyfile exists
- adaptCmdInputFlag = "Caddyfile"
- caddy.Log().Info("using adjacent Caddyfile")
- } else if !os.IsNotExist(err) {
- // default Caddyfile exists, but error accessing it
- return caddy.ExitCodeFailedStartup, fmt.Errorf("accessing default Caddyfile: %v", err)
- }
+ var err error
+ inputFlag, err = configFileWithRespectToDefault(caddy.Log(), inputFlag)
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
}
- if adaptCmdInputFlag == "" {
- return caddy.ExitCodeFailedStartup,
- fmt.Errorf("input file required when there is no Caddyfile in current directory (use --config flag)")
+ // load all additional envs as soon as possible
+ err = handleEnvFileFlag(fl)
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
}
- if adaptCmdAdapterFlag == "" {
+
+ if adapterFlag == "" {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("adapter name is required (use --adapt flag or leave unspecified for default)")
}
- cfgAdapter := caddyconfig.GetAdapter(adaptCmdAdapterFlag)
+ cfgAdapter := caddyconfig.GetAdapter(adapterFlag)
if cfgAdapter == nil {
return caddy.ExitCodeFailedStartup,
- fmt.Errorf("unrecognized config adapter: %s", adaptCmdAdapterFlag)
+ fmt.Errorf("unrecognized config adapter: %s", adapterFlag)
}
- input, err := ioutil.ReadFile(adaptCmdInputFlag)
+ input, err := os.ReadFile(inputFlag)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading input file: %v", err)
}
- opts := make(map[string]interface{})
- if adaptCmdPrettyFlag {
- opts["pretty"] = "true"
- }
- opts["filename"] = adaptCmdInputFlag
+ opts := map[string]any{"filename": inputFlag}
adaptedConfig, warnings, err := cfgAdapter.Adapt(input, opts)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
+ if prettyFlag {
+ var prettyBuf bytes.Buffer
+ err = json.Indent(&prettyBuf, adaptedConfig, "", "\t")
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
+ }
+ adaptedConfig = prettyBuf.Bytes()
+ }
+
+ // print result to stdout
+ fmt.Println(string(adaptedConfig))
+
// print warnings to stderr
for _, warn := range warnings {
msg := warn.Message
if warn.Directive != "" {
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
}
- fmt.Fprintf(os.Stderr, "[WARNING][%s] %s:%d: %s\n", adaptCmdAdapterFlag, warn.File, warn.Line, msg)
+ caddy.Log().Named(adapterFlag).Warn(msg,
+ zap.String("file", warn.File),
+ zap.Int("line", warn.Line))
}
- // print result to stdout
- fmt.Println(string(adaptedConfig))
-
// validate output if requested
- if adaptCmdValidateFlag {
+ if validateFlag {
var cfg *caddy.Config
- err = json.Unmarshal(adaptedConfig, &cfg)
+ err = caddy.StrictUnmarshalJSON(adaptedConfig, &cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err)
}
@@ -503,17 +517,33 @@ func cmdAdaptConfig(fl Flags) (int, error) {
}
func cmdValidateConfig(fl Flags) (int, error) {
- validateCmdConfigFlag := fl.String("config")
- validateCmdAdapterFlag := fl.String("adapter")
+ configFlag := fl.String("config")
+ adapterFlag := fl.String("adapter")
- input, _, err := loadConfig(validateCmdConfigFlag, validateCmdAdapterFlag)
+ // load all additional envs as soon as possible
+ err := handleEnvFileFlag(fl)
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
+ }
+
+ // use default config and ensure a config file is specified
+ configFlag, err = configFileWithRespectToDefault(caddy.Log(), configFlag)
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
+ }
+ if configFlag == "" {
+ return caddy.ExitCodeFailedStartup,
+ fmt.Errorf("input file required when there is no Caddyfile in current directory (use --config flag)")
+ }
+
+ input, _, err := LoadConfig(configFlag, adapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
input = caddy.RemoveMetaFields(input)
var cfg *caddy.Config
- err = json.Unmarshal(input, &cfg)
+ err = caddy.StrictUnmarshalJSON(input, &cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err)
}
@@ -529,13 +559,28 @@ func cmdValidateConfig(fl Flags) (int, error) {
}
func cmdFmt(fl Flags) (int, error) {
- formatCmdConfigFile := fl.Arg(0)
- if formatCmdConfigFile == "" {
- formatCmdConfigFile = "Caddyfile"
+ configFile := fl.Arg(0)
+ configFlag := fl.String("config")
+ if (len(fl.Args()) > 1) || (configFlag != "" && configFile != "") {
+ return caddy.ExitCodeFailedStartup, fmt.Errorf("fmt does not support multiple files %s %s", configFlag, strings.Join(fl.Args(), " "))
+ }
+ if configFile == "" && configFlag == "" {
+ configFile = "Caddyfile"
+ } else if configFile == "" {
+ configFile = configFlag
+ }
+ // as a special case, read from stdin if the file name is "-"
+ if configFile == "-" {
+ input, err := io.ReadAll(os.Stdin)
+ if err != nil {
+ return caddy.ExitCodeFailedStartup,
+ fmt.Errorf("reading stdin: %v", err)
+ }
+ fmt.Print(string(caddyfile.Format(input)))
+ return caddy.ExitCodeSuccess, nil
}
- overwrite := fl.Bool("overwrite")
- input, err := ioutil.ReadFile(formatCmdConfigFile)
+ input, err := os.ReadFile(configFile)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading input file: %v", err)
@@ -543,97 +588,226 @@ func cmdFmt(fl Flags) (int, error) {
output := caddyfile.Format(input)
- if overwrite {
- err = ioutil.WriteFile(formatCmdConfigFile, output, 0644)
- if err != nil {
- return caddy.ExitCodeFailedStartup, nil
+ if fl.Bool("overwrite") {
+ if err := os.WriteFile(configFile, output, 0o600); err != nil {
+ return caddy.ExitCodeFailedStartup, fmt.Errorf("overwriting formatted file: %v", err)
+ }
+ return caddy.ExitCodeSuccess, nil
+ }
+
+ if fl.Bool("diff") {
+ diff := difflib.Diff(
+ strings.Split(string(input), "\n"),
+ strings.Split(string(output), "\n"))
+ for _, d := range diff {
+ switch d.Delta {
+ case difflib.Common:
+ fmt.Printf(" %s\n", d.Payload)
+ case difflib.LeftOnly:
+ fmt.Printf("- %s\n", d.Payload)
+ case difflib.RightOnly:
+ fmt.Printf("+ %s\n", d.Payload)
+ }
}
} else {
fmt.Print(string(output))
}
- return caddy.ExitCodeSuccess, nil
-}
-
-func cmdHelp(fl Flags) (int, error) {
- const fullDocs = `Full documentation is available at:
-https://caddyserver.com/docs/command-line`
-
- args := fl.Args()
- if len(args) == 0 {
- s := `Caddy is an extensible server platform.
-
-usage:
- caddy []
-
-commands:
-`
- keys := make([]string, 0, len(commands))
- for k := range commands {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- cmd := commands[k]
- short := strings.TrimSuffix(cmd.Short, ".")
- s += fmt.Sprintf(" %-15s %s\n", cmd.Name, short)
- }
-
- s += "\nUse 'caddy help ' for more information about a command.\n"
- s += "\n" + fullDocs + "\n"
-
- fmt.Print(s)
-
- return caddy.ExitCodeSuccess, nil
- } else if len(args) > 1 {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("can only give help with one command")
+ if warning, diff := caddyfile.FormattingDifference(configFile, input); diff {
+ return caddy.ExitCodeFailedStartup, fmt.Errorf(`%s:%d: Caddyfile input is not formatted; Tip: use '--overwrite' to update your Caddyfile in-place instead of previewing it. Consult '--help' for more options`,
+ warning.File,
+ warning.Line,
+ )
}
- subcommand, ok := commands[args[0]]
- if !ok {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("unknown command: %s", args[0])
- }
-
- helpText := strings.TrimSpace(subcommand.Long)
- if helpText == "" {
- helpText = subcommand.Short
- if !strings.HasSuffix(helpText, ".") {
- helpText += "."
- }
- }
-
- result := fmt.Sprintf("%s\n\nusage:\n caddy %s %s\n",
- helpText,
- subcommand.Name,
- strings.TrimSpace(subcommand.Usage),
- )
-
- if help := flagHelp(subcommand.Flags); help != "" {
- result += fmt.Sprintf("\nflags:\n%s", help)
- }
-
- result += "\n" + fullDocs + "\n"
-
- fmt.Print(result)
-
return caddy.ExitCodeSuccess, nil
}
-func apiRequest(req *http.Request) error {
- resp, err := http.DefaultClient.Do(req)
+// handleEnvFileFlag loads the environment variables from the given --envfile
+// flag if specified. This should be called as early in the command function.
+func handleEnvFileFlag(fl Flags) error {
+ var err error
+ var envfileFlag []string
+ envfileFlag, err = fl.GetStringSlice("envfile")
if err != nil {
- return fmt.Errorf("performing request: %v", err)
+ return fmt.Errorf("reading envfile flag: %v", err)
}
- defer resp.Body.Close()
- // if it didn't work, let the user know
- if resp.StatusCode >= 400 {
- respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024*10))
- if err != nil {
- return fmt.Errorf("HTTP %d: reading error message: %v", resp.StatusCode, err)
+ for _, envfile := range envfileFlag {
+ if err := loadEnvFromFile(envfile); err != nil {
+ return fmt.Errorf("loading additional environment variables: %v", err)
}
- return fmt.Errorf("caddy responded with error: HTTP %d: %s", resp.StatusCode, respBody)
}
return nil
}
+
+// AdminAPIRequest makes an API request according to the CLI flags given,
+// with the given HTTP method and request URI. If body is non-nil, it will
+// be assumed to be Content-Type application/json. The caller should close
+// the response body. Should only be used by Caddy CLI commands which
+// need to interact with a running instance of Caddy via the admin API.
+func AdminAPIRequest(adminAddr, method, uri string, headers http.Header, body io.Reader) (*http.Response, error) {
+ parsedAddr, err := caddy.ParseNetworkAddress(adminAddr)
+ if err != nil || parsedAddr.PortRangeSize() > 1 {
+ return nil, fmt.Errorf("invalid admin address %s: %v", adminAddr, err)
+ }
+ origin := "http://" + parsedAddr.JoinHostPort(0)
+ if parsedAddr.IsUnixNetwork() {
+ origin = "http://127.0.0.1" // bogus host is a hack so that http.NewRequest() is happy
+
+ // the unix address at this point might still contain the optional
+ // unix socket permissions, which are part of the address/host.
+ // those need to be removed first, as they aren't part of the
+ // resulting unix file path
+ addr, _, err := internal.SplitUnixSocketPermissionsBits(parsedAddr.Host)
+ if err != nil {
+ return nil, err
+ }
+ parsedAddr.Host = addr
+ } else if parsedAddr.IsFdNetwork() {
+ origin = "http://127.0.0.1"
+ }
+
+ // form the request
+ req, err := http.NewRequest(method, origin+uri, body)
+ if err != nil {
+ return nil, fmt.Errorf("making request: %v", err)
+ }
+ if parsedAddr.IsUnixNetwork() || parsedAddr.IsFdNetwork() {
+ // We used to conform to RFC 2616 Section 14.26 which requires
+ // an empty host header when there is no host, as is the case
+ // with unix sockets and socket fds. However, Go required a
+ // Host value so we used a hack of a space character as the host
+ // (it would see the Host was non-empty, then trim the space later).
+ // As of Go 1.20.6 (July 2023), this hack no longer works. See:
+ // https://github.com/golang/go/issues/60374
+ // See also the discussion here:
+ // https://github.com/golang/go/issues/61431
+ //
+ // After that, we now require a Host value of either 127.0.0.1
+ // or ::1 if one is set. Above I choose to use 127.0.0.1. Even
+ // though the value should be completely irrelevant (it could be
+ // "srldkjfsd"), if for some reason the Host *is* used, at least
+ // we can have some reasonable assurance it will stay on the local
+ // machine and that browsers, if they ever allow access to unix
+ // sockets, can still enforce CORS, ensuring it is still coming
+ // from the local machine.
+ } else {
+ req.Header.Set("Origin", origin)
+ }
+ if body != nil {
+ req.Header.Set("Content-Type", "application/json")
+ }
+ for k, v := range headers {
+ req.Header[k] = v
+ }
+
+ // make an HTTP client that dials our network type, since admin
+ // endpoints aren't always TCP, which is what the default transport
+ // expects; reuse is not of particular concern here
+ client := http.Client{
+ Transport: &http.Transport{
+ DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
+ return net.Dial(parsedAddr.Network, parsedAddr.JoinHostPort(0))
+ },
+ },
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("performing request: %v", err)
+ }
+
+ // if it didn't work, let the user know
+ if resp.StatusCode >= 400 {
+ respBody, err := io.ReadAll(io.LimitReader(resp.Body, 1024*1024*2))
+ if err != nil {
+ return nil, fmt.Errorf("HTTP %d: reading error message: %v", resp.StatusCode, err)
+ }
+ return nil, fmt.Errorf("caddy responded with error: HTTP %d: %s", resp.StatusCode, respBody)
+ }
+
+ return resp, nil
+}
+
+// DetermineAdminAPIAddress determines which admin API endpoint address should
+// be used based on the inputs. By priority: if `address` is specified, then
+// it is returned; if `config` is specified, then that config will be used for
+// finding the admin address; if `configFile` (and `configAdapter`) are specified,
+// then that config will be loaded to find the admin address; otherwise, the
+// default admin listen address will be returned.
+func DetermineAdminAPIAddress(address string, config []byte, configFile, configAdapter string) (string, error) {
+ // Prefer the address if specified and non-empty
+ if address != "" {
+ return address, nil
+ }
+
+ // Try to load the config from file if specified, with the given adapter name
+ if configFile != "" {
+ var loadedConfigFile string
+ var err error
+
+ // use the provided loaded config if non-empty
+ // otherwise, load it from the specified file/adapter
+ loadedConfig := config
+ if len(loadedConfig) == 0 {
+ // get the config in caddy's native format
+ loadedConfig, loadedConfigFile, err = LoadConfig(configFile, configAdapter)
+ if err != nil {
+ return "", err
+ }
+ if loadedConfigFile == "" {
+ return "", fmt.Errorf("no config file to load; either use --config flag or ensure Caddyfile exists in current directory")
+ }
+ }
+
+ // get the address of the admin listener from the config
+ if len(loadedConfig) > 0 {
+ var tmpStruct struct {
+ Admin caddy.AdminConfig `json:"admin"`
+ }
+ err := json.Unmarshal(loadedConfig, &tmpStruct)
+ if err != nil {
+ return "", fmt.Errorf("unmarshaling admin listener address from config: %v", err)
+ }
+ if tmpStruct.Admin.Listen != "" {
+ return tmpStruct.Admin.Listen, nil
+ }
+ }
+ }
+
+ // Fallback to the default listen address otherwise
+ return caddy.DefaultAdminListen, nil
+}
+
+// configFileWithRespectToDefault returns the filename to use for loading the config, based
+// on whether a config file is already specified and a supported default config file exists.
+func configFileWithRespectToDefault(logger *zap.Logger, configFile string) (string, error) {
+ const defaultCaddyfile = "Caddyfile"
+
+ // if no input file was specified, try a default Caddyfile if the Caddyfile adapter is plugged in
+ if configFile == "" && caddyconfig.GetAdapter("caddyfile") != nil {
+ _, err := os.Stat(defaultCaddyfile)
+ if err == nil {
+ // default Caddyfile exists
+ if logger != nil {
+ logger.Info("using adjacent Caddyfile")
+ }
+ return defaultCaddyfile, nil
+ }
+ if !errors.Is(err, fs.ErrNotExist) {
+ // problem checking
+ return configFile, fmt.Errorf("checking if default Caddyfile exists: %v", err)
+ }
+ }
+
+ // default config file does not exist or is irrelevant
+ return configFile, nil
+}
+
+type moduleInfo struct {
+ caddyModuleID string
+ goModule *debug.Module
+ err error
+}
diff --git a/cmd/commands.go b/cmd/commands.go
index 43aba01b..259dd358 100644
--- a/cmd/commands.go
+++ b/cmd/commands.go
@@ -16,7 +16,15 @@ package caddycmd
import (
"flag"
+ "fmt"
+ "os"
"regexp"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/cobra/doc"
+
+ "github.com/caddyserver/caddy/v2"
)
// Command represents a subcommand. Name, Func,
@@ -27,12 +35,6 @@ type Command struct {
// Required.
Name string
- // Run is a function that executes a subcommand using
- // the parsed flags. It returns an exit code and any
- // associated error.
- // Required.
- Func CommandFunc
-
// Usage is a brief message describing the syntax of
// the subcommand's flags and args. Use [] to indicate
// optional parameters and <> to enclose literal values
@@ -53,7 +55,21 @@ type Command struct {
Long string
// Flags is the flagset for command.
+ // This is ignored if CobraFunc is set.
Flags *flag.FlagSet
+
+ // Func is a function that executes a subcommand using
+ // the parsed flags. It returns an exit code and any
+ // associated error.
+ // Required if CobraFunc is not set.
+ Func CommandFunc
+
+ // CobraFunc allows further configuration of the command
+ // via cobra's APIs. If this is set, then Func and Flags
+ // are ignored, with the assumption that they are set in
+ // this function. A caddycmd.WrapCommandFuncForCobra helper
+ // exists to simplify porting CommandFunc to Cobra's RunE.
+ CobraFunc func(*cobra.Command)
}
// CommandFunc is a command's function. It runs the
@@ -61,41 +77,43 @@ type Command struct {
// any error that occurred.
type CommandFunc func(Flags) (int, error)
+// Commands returns a list of commands initialised by
+// RegisterCommand
+func Commands() map[string]Command {
+ return commands
+}
+
var commands = make(map[string]Command)
func init() {
- RegisterCommand(Command{
- Name: "help",
- Func: cmdHelp,
- Usage: "",
- Short: "Shows help for a Caddy subcommand",
- })
-
RegisterCommand(Command{
Name: "start",
- Func: cmdStart,
- Usage: "[--config [--adapter ]] [--watch]",
+ Usage: "[--config [--adapter ]] [--envfile ] [--watch] [--pidfile ]",
Short: "Starts the Caddy process in the background and then returns",
Long: `
Starts the Caddy process, optionally bootstrapped with an initial config file.
This command unblocks after the server starts running or fails to run.
+If --envfile is specified, an environment file with environment variables
+in the KEY=VALUE format will be loaded into the Caddy process.
+
On Windows, the spawned child process will remain attached to the terminal, so
closing the window will forcefully stop Caddy; to avoid forgetting this, try
-using 'caddy run' instead to keep it in the foreground.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("start", flag.ExitOnError)
- fs.String("config", "", "Configuration file")
- fs.String("adapter", "", "Name of config adapter to apply")
- fs.Bool("watch", false, "Reload changed config file automatically")
- return fs
- }(),
+using 'caddy run' instead to keep it in the foreground.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().StringP("config", "c", "", "Configuration file")
+ cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply")
+ cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load")
+ cmd.Flags().BoolP("watch", "w", false, "Reload changed config file automatically")
+ cmd.Flags().StringP("pidfile", "", "", "Path of file to which to write process ID")
+ cmd.RunE = WrapCommandFuncForCobra(cmdStart)
+ },
})
RegisterCommand(Command{
Name: "run",
- Func: cmdRun,
- Usage: "[--config [--adapter ]] [--environ] [--watch]",
+ Usage: "[--config [--adapter ]] [--envfile ] [--environ] [--resume] [--watch] [--pidfile ]",
Short: `Starts the Caddy process and blocks indefinitely`,
Long: `
Starts the Caddy process, optionally bootstrapped with an initial config file,
@@ -115,6 +133,9 @@ As a special case, if the current working directory has a file called
that file will be loaded and used to configure Caddy, even without any command
line flags.
+If --envfile is specified, an environment file with environment variables
+in the KEY=VALUE format will be loaded into the Caddy process.
+
If --environ is specified, the environment as seen by the Caddy process will
be printed before starting. This is the same as the environ command but does
not quit after printing, and can be useful for troubleshooting.
@@ -123,40 +144,43 @@ The --resume flag will override the --config flag if there is a config auto-
save file. It is not an error if --resume is used and no autosave file exists.
If --watch is specified, the config file will be loaded automatically after
-changes. ⚠️ This is dangerous in production! Only use this option in a local
-development environment.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("run", flag.ExitOnError)
- fs.String("config", "", "Configuration file")
- fs.String("adapter", "", "Name of config adapter to apply")
- fs.Bool("environ", false, "Print environment")
- fs.Bool("resume", false, "Use saved config, if any (and prefer over --config file)")
- fs.Bool("watch", false, "Watch config file for changes and reload it automatically")
- fs.String("pingback", "", "Echo confirmation bytes to this address on success")
- return fs
- }(),
+changes. ⚠️ This can make unintentional config changes easier; only use this
+option in a local development environment.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().StringP("config", "c", "", "Configuration file")
+ cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply")
+ cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load")
+ cmd.Flags().BoolP("environ", "e", false, "Print environment")
+ cmd.Flags().BoolP("resume", "r", false, "Use saved config, if any (and prefer over --config file)")
+ cmd.Flags().BoolP("watch", "w", false, "Watch config file for changes and reload it automatically")
+ cmd.Flags().StringP("pidfile", "", "", "Path of file to which to write process ID")
+ cmd.Flags().StringP("pingback", "", "", "Echo confirmation bytes to this address on success")
+ cmd.RunE = WrapCommandFuncForCobra(cmdRun)
+ },
})
RegisterCommand(Command{
Name: "stop",
- Func: cmdStop,
+ Usage: "[--config [--adapter ]] [--address ]",
Short: "Gracefully stops a started Caddy process",
Long: `
Stops the background Caddy process as gracefully as possible.
It requires that the admin API is enabled and accessible, since it will
-use the API's /stop endpoint. The address of this request can be
-customized using the --address flag if it is not the default.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("stop", flag.ExitOnError)
- fs.String("address", "", "The address to use to reach the admin API endpoint, if not the default")
- return fs
- }(),
+use the API's /stop endpoint. The address of this request can be customized
+using the --address flag, or from the given --config, if not the default.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().StringP("config", "c", "", "Configuration file to use to parse the admin address, if --address is not used")
+ cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply (when --config is used)")
+ cmd.Flags().StringP("address", "", "", "The address to use to reach the admin API endpoint, if not the default")
+ cmd.RunE = WrapCommandFuncForCobra(cmdStop)
+ },
})
RegisterCommand(Command{
Name: "reload",
- Func: cmdReload,
Usage: "--config [--adapter ] [--address ]",
Short: "Changes the config of the running Caddy instance",
Long: `
@@ -166,50 +190,88 @@ workflows revolving around config files.
Since the admin endpoint is configurable, the endpoint configuration is loaded
from the --address flag if specified; otherwise it is loaded from the given
-config file; otherwise the default is assumed.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("reload", flag.ExitOnError)
- fs.String("config", "", "Configuration file (required)")
- fs.String("adapter", "", "Name of config adapter to apply")
- fs.String("address", "", "Address of the administration listener, if different from config")
- return fs
- }(),
+config file; otherwise the default is assumed.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().StringP("config", "c", "", "Configuration file (required)")
+ cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply")
+ cmd.Flags().StringP("address", "", "", "Address of the administration listener, if different from config")
+ cmd.Flags().BoolP("force", "f", false, "Force config reload, even if it is the same")
+ cmd.RunE = WrapCommandFuncForCobra(cmdReload)
+ },
})
RegisterCommand(Command{
Name: "version",
- Func: cmdVersion,
Short: "Prints the version",
+ Long: `
+Prints the version of this Caddy binary.
+
+Version information must be embedded into the binary at compile-time in
+order for Caddy to display anything useful with this command. If Caddy
+is built from within a version control repository, the Go command will
+embed the revision hash if available. However, if Caddy is built in the
+way specified by our online documentation (or by using xcaddy), more
+detailed version information is printed as given by Go modules.
+
+For more details about the full version string, see the Go module
+documentation: https://go.dev/doc/modules/version-numbers
+`,
+ Func: cmdVersion,
})
RegisterCommand(Command{
Name: "list-modules",
- Func: cmdListModules,
- Usage: "[--versions]",
+ Usage: "[--packages] [--versions] [--skip-standard]",
Short: "Lists the installed Caddy modules",
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("list-modules", flag.ExitOnError)
- fs.Bool("versions", false, "Print version information")
- return fs
- }(),
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().BoolP("packages", "", false, "Print package paths")
+ cmd.Flags().BoolP("versions", "", false, "Print version information")
+ cmd.Flags().BoolP("skip-standard", "s", false, "Skip printing standard modules")
+ cmd.RunE = WrapCommandFuncForCobra(cmdListModules)
+ },
})
RegisterCommand(Command{
Name: "build-info",
- Func: cmdBuildInfo,
Short: "Prints information about this build",
+ Func: cmdBuildInfo,
})
RegisterCommand(Command{
Name: "environ",
- Func: cmdEnviron,
+ Usage: "[--envfile ]",
Short: "Prints the environment",
+ Long: `
+Prints the environment as seen by this Caddy process.
+
+The environment includes variables set in the system. If your Caddy
+configuration uses environment variables (e.g. "{env.VARIABLE}") then
+this command can be useful for verifying that the variables will have
+the values you expect in your config.
+
+If --envfile is specified, an environment file with environment variables
+in the KEY=VALUE format will be loaded into the Caddy process.
+
+Note that environments may be different depending on how you run Caddy.
+Environments for Caddy instances started by service managers such as
+systemd are often different than the environment inherited from your
+shell or terminal.
+
+You can also print the environment the same time you use "caddy run"
+by adding the "--environ" flag.
+
+Environments may contain sensitive data.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load")
+ cmd.RunE = WrapCommandFuncForCobra(cmdEnviron)
+ },
})
RegisterCommand(Command{
Name: "adapt",
- Func: cmdAdaptConfig,
- Usage: "--config <path> [--adapter <name>] [--pretty] [--validate]",
+ Usage: "--config <path> [--adapter <name>] [--pretty] [--validate] [--envfile <path>]",
Short: "Adapts a configuration to Caddy's native JSON",
Long: `
Adapts a configuration to Caddy's native JSON format and writes the
@@ -220,62 +282,269 @@ for human readability.
If --validate is used, the adapted config will be checked for validity.
If the config is invalid, an error will be printed to stderr and a non-
-zero exit status will be returned.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("adapt", flag.ExitOnError)
- fs.String("config", "", "Configuration file to adapt (required)")
- fs.String("adapter", "caddyfile", "Name of config adapter")
- fs.Bool("pretty", false, "Format the output for human readability")
- fs.Bool("validate", false, "Validate the output")
- return fs
- }(),
+zero exit status will be returned.
+
+If --envfile is specified, an environment file with environment variables
+in the KEY=VALUE format will be loaded into the Caddy process.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().StringP("config", "c", "", "Configuration file to adapt (required)")
+ cmd.Flags().StringP("adapter", "a", "caddyfile", "Name of config adapter")
+ cmd.Flags().BoolP("pretty", "p", false, "Format the output for human readability")
+ cmd.Flags().BoolP("validate", "", false, "Validate the output")
+ cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load")
+ cmd.RunE = WrapCommandFuncForCobra(cmdAdaptConfig)
+ },
})
RegisterCommand(Command{
Name: "validate",
- Func: cmdValidateConfig,
- Usage: "--config <path> [--adapter <name>]",
+ Usage: "--config <path> [--adapter <name>] [--envfile <path>]",
Short: "Tests whether a configuration file is valid",
Long: `
Loads and provisions the provided config, but does not start running it.
This reveals any errors with the configuration through the loading and
-provisioning stages.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("load", flag.ExitOnError)
- fs.String("config", "", "Input configuration file")
- fs.String("adapter", "", "Name of config adapter")
- return fs
- }(),
+provisioning stages.
+
+If --envfile is specified, an environment file with environment variables
+in the KEY=VALUE format will be loaded into the Caddy process.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().StringP("config", "c", "", "Input configuration file")
+ cmd.Flags().StringP("adapter", "a", "", "Name of config adapter")
+ cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load")
+ cmd.RunE = WrapCommandFuncForCobra(cmdValidateConfig)
+ },
+ })
+
+ RegisterCommand(Command{
+ Name: "storage",
+ Short: "Commands for working with Caddy's storage (EXPERIMENTAL)",
+ Long: `
+Allows exporting and importing Caddy's storage contents. The two commands can be
+combined in a pipeline to transfer directly from one storage to another:
+
+$ caddy storage export --config Caddyfile.old --output - |
+> caddy storage import --config Caddyfile.new --input -
+
+The - argument refers to stdout and stdin, respectively.
+
+NOTE: When importing to or exporting from file_system storage (the default), the command
+should be run as the user that owns the associated root path.
+
+EXPERIMENTAL: May be changed or removed.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ exportCmd := &cobra.Command{
+ Use: "export --config <path> --output <path>",
+ Short: "Exports storage assets as a tarball",
+ Long: `
+The contents of the configured storage module (TLS certificates, etc)
+are exported via a tarball.
+
+--output is required, - can be given for stdout.
+`,
+ RunE: WrapCommandFuncForCobra(cmdExportStorage),
+ }
+ exportCmd.Flags().StringP("config", "c", "", "Input configuration file (required)")
+ exportCmd.Flags().StringP("output", "o", "", "Output path")
+ cmd.AddCommand(exportCmd)
+
+ importCmd := &cobra.Command{
+ Use: "import --config <path> --input <path>",
+ Short: "Imports storage assets from a tarball.",
+ Long: `
+Imports storage assets to the configured storage module. The import file must be
+a tar archive.
+
+--input is required, - can be given for stdin.
+`,
+ RunE: WrapCommandFuncForCobra(cmdImportStorage),
+ }
+ importCmd.Flags().StringP("config", "c", "", "Configuration file to load (required)")
+ importCmd.Flags().StringP("input", "i", "", "Tar of assets to load (required)")
+ cmd.AddCommand(importCmd)
+ },
})
RegisterCommand(Command{
Name: "fmt",
- Func: cmdFmt,
- Usage: "[--overwrite] [<path>]",
+ Usage: "[--overwrite] [--diff] [<path>]",
Short: "Formats a Caddyfile",
Long: `
Formats the Caddyfile by adding proper indentation and spaces to improve
human readability. It prints the result to stdout.
-If --write is specified, the output will be written to the config file
-directly instead of printing it.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("format", flag.ExitOnError)
- fs.Bool("overwrite", false, "Overwrite the input file with the results")
- return fs
- }(),
+If --overwrite is specified, the output will be written to the config file
+directly instead of printing it.
+
+If --diff is specified, the output will be compared against the input, and
+lines will be prefixed with '-' and '+' where they differ. Note that
+unchanged lines are prefixed with two spaces for alignment, and that this
+is not a valid patch format.
+
+If you wish to use stdin instead of a regular file, use - as the path.
+When reading from stdin, the --overwrite flag has no effect: the result
+is always printed to stdout.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().StringP("config", "c", "", "Configuration file")
+ cmd.Flags().BoolP("overwrite", "w", false, "Overwrite the input file with the results")
+ cmd.Flags().BoolP("diff", "d", false, "Print the differences between the input file and the formatted output")
+ cmd.RunE = WrapCommandFuncForCobra(cmdFmt)
+ },
})
+ RegisterCommand(Command{
+ Name: "upgrade",
+ Short: "Upgrade Caddy (EXPERIMENTAL)",
+ Long: `
+Downloads an updated Caddy binary with the same modules/plugins at the
+latest versions. EXPERIMENTAL: May be changed or removed.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().BoolP("keep-backup", "k", false, "Keep the backed up binary, instead of deleting it")
+ cmd.RunE = WrapCommandFuncForCobra(cmdUpgrade)
+ },
+ })
+
+ RegisterCommand(Command{
+ Name: "add-package",
+ Usage: "<packages...>",
+ Short: "Adds Caddy packages (EXPERIMENTAL)",
+ Long: `
+Downloads an updated Caddy binary with the specified packages (module/plugin)
+added, with an optional version specified (e.g., "package@version"). Retains
+existing packages. Returns an error if any of the specified packages are already
+included. EXPERIMENTAL: May be changed or removed.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().BoolP("keep-backup", "k", false, "Keep the backed up binary, instead of deleting it")
+ cmd.RunE = WrapCommandFuncForCobra(cmdAddPackage)
+ },
+ })
+
+ RegisterCommand(Command{
+ Name: "remove-package",
+ Func: cmdRemovePackage,
+ Usage: "<packages...>",
+ Short: "Removes Caddy packages (EXPERIMENTAL)",
+ Long: `
+Downloads an updated Caddy binary without the specified packages (module/plugin).
+Returns an error if any of the packages are not included.
+EXPERIMENTAL: May be changed or removed.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().BoolP("keep-backup", "k", false, "Keep the backed up binary, instead of deleting it")
+ cmd.RunE = WrapCommandFuncForCobra(cmdRemovePackage)
+ },
+ })
+
+ defaultFactory.Use(func(rootCmd *cobra.Command) {
+ rootCmd.AddCommand(caddyCmdToCobra(Command{
+ Name: "manpage",
+ Usage: "--directory <path>",
+ Short: "Generates the manual pages for Caddy commands",
+ Long: `
+Generates the manual pages for Caddy commands into the designated directory
+tagged into section 8 (System Administration).
+
+The manual page files are generated into the directory specified by the
+argument of --directory. If the directory does not exist, it will be created.
+`,
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().StringP("directory", "o", "", "The output directory where the manpages are generated")
+ cmd.RunE = WrapCommandFuncForCobra(func(fl Flags) (int, error) {
+ dir := strings.TrimSpace(fl.String("directory"))
+ if dir == "" {
+ return caddy.ExitCodeFailedQuit, fmt.Errorf("designated output directory and specified section are required")
+ }
+ if err := os.MkdirAll(dir, 0o755); err != nil {
+ return caddy.ExitCodeFailedQuit, err
+ }
+ if err := doc.GenManTree(rootCmd, &doc.GenManHeader{
+ Title: "Caddy",
+ Section: "8", // https://en.wikipedia.org/wiki/Man_page#Manual_sections
+ }, dir); err != nil {
+ return caddy.ExitCodeFailedQuit, err
+ }
+ return caddy.ExitCodeSuccess, nil
+ })
+ },
+ }))
+
+ // source: https://github.com/spf13/cobra/blob/main/shell_completions.md
+ rootCmd.AddCommand(&cobra.Command{
+ Use: "completion [bash|zsh|fish|powershell]",
+ Short: "Generate completion script",
+ Long: fmt.Sprintf(`To load completions:
+
+ Bash:
+
+ $ source <(%[1]s completion bash)
+
+ # To load completions for each session, execute once:
+ # Linux:
+ $ %[1]s completion bash > /etc/bash_completion.d/%[1]s
+ # macOS:
+ $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
+
+ Zsh:
+
+ # If shell completion is not already enabled in your environment,
+ # you will need to enable it. You can execute the following once:
+
+ $ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+ # To load completions for each session, execute once:
+ $ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
+
+ # You will need to start a new shell for this setup to take effect.
+
+ fish:
+
+ $ %[1]s completion fish | source
+
+ # To load completions for each session, execute once:
+ $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
+
+ PowerShell:
+
+ PS> %[1]s completion powershell | Out-String | Invoke-Expression
+
+ # To load completions for every new session, run:
+ PS> %[1]s completion powershell > %[1]s.ps1
+ # and source this file from your PowerShell profile.
+ `, rootCmd.Root().Name()),
+ DisableFlagsInUseLine: true,
+ ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
+ Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ switch args[0] {
+ case "bash":
+ return cmd.Root().GenBashCompletion(os.Stdout)
+ case "zsh":
+ return cmd.Root().GenZshCompletion(os.Stdout)
+ case "fish":
+ return cmd.Root().GenFishCompletion(os.Stdout, true)
+ case "powershell":
+ return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
+ default:
+ return fmt.Errorf("unrecognized shell: %s", args[0])
+ }
+ },
+ })
+ })
}
// RegisterCommand registers the command cmd.
// cmd.Name must be unique and conform to the
// following format:
//
-// - lowercase
-// - alphanumeric and hyphen characters only
-// - cannot start or end with a hyphen
-// - hyphen cannot be adjacent to another hyphen
+// - lowercase
+// - alphanumeric and hyphen characters only
+// - cannot start or end with a hyphen
+// - hyphen cannot be adjacent to another hyphen
//
// This function panics if the name is already registered,
// if the name does not meet the described format, or if
@@ -286,7 +555,7 @@ func RegisterCommand(cmd Command) {
if cmd.Name == "" {
panic("command name is required")
}
- if cmd.Func == nil {
+ if cmd.Func == nil && cmd.CobraFunc == nil {
panic("command function missing")
}
if cmd.Short == "" {
@@ -298,7 +567,9 @@ func RegisterCommand(cmd Command) {
if !commandNameRegex.MatchString(cmd.Name) {
panic("invalid command name")
}
- commands[cmd.Name] = cmd
+ defaultFactory.Use(func(rootCmd *cobra.Command) {
+ rootCmd.AddCommand(caddyCmdToCobra(cmd))
+ })
}
var commandNameRegex = regexp.MustCompile(`^[a-z0-9]$|^([a-z0-9]+-?[a-z0-9]*)+[a-z0-9]$`)
diff --git a/cmd/main.go b/cmd/main.go
index bdc95a45..655c0084 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -15,85 +15,77 @@
package caddycmd
import (
+ "bufio"
"bytes"
+ "encoding/json"
+ "errors"
"flag"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
+ "log"
"net"
"os"
"path/filepath"
"runtime"
+ "runtime/debug"
"strconv"
"strings"
"time"
+ "github.com/caddyserver/certmagic"
+ "github.com/spf13/pflag"
+ "go.uber.org/automaxprocs/maxprocs"
+ "go.uber.org/zap"
+
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/certmagic"
- "go.uber.org/zap"
)
func init() {
// set a fitting User-Agent for ACME requests
- goModule := caddy.GoModule()
- cleanModVersion := strings.TrimPrefix(goModule.Version, "v")
- certmagic.UserAgent = "Caddy/" + cleanModVersion
+ version, _ := caddy.Version()
+ cleanModVersion := strings.TrimPrefix(version, "v")
+ ua := "Caddy/" + cleanModVersion
+ if uaEnv, ok := os.LookupEnv("USERAGENT"); ok {
+ ua = uaEnv + " " + ua
+ }
+ certmagic.UserAgent = ua
// by using Caddy, user indicates agreement to CA terms
- // (very important, or ACME account creation will fail!)
+ // (very important, as Caddy is often non-interactive
+ // and thus ACME account creation will fail!)
certmagic.DefaultACME.Agreed = true
}
// Main implements the main function of the caddy command.
-// Call this if Caddy is to be the main() if your program.
+// Call this if Caddy is to be the main() of your program.
func Main() {
- caddy.TrapSignals()
-
- switch len(os.Args) {
- case 0:
+ if len(os.Args) == 0 {
fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n")
os.Exit(caddy.ExitCodeFailedStartup)
- case 1:
- os.Args = append(os.Args, "help")
}
- subcommandName := os.Args[1]
- subcommand, ok := commands[subcommandName]
- if !ok {
- if strings.HasPrefix(os.Args[1], "-") {
- // user probably forgot to type the subcommand
- fmt.Println("[ERROR] first argument must be a subcommand; see 'caddy help'")
- } else {
- fmt.Printf("[ERROR] '%s' is not a recognized subcommand; see 'caddy help'\n", os.Args[1])
+ undo, err := maxprocs.Set()
+ defer undo()
+ if err != nil {
+ caddy.Log().Warn("failed to set GOMAXPROCS", zap.Error(err))
+ }
+
+ if err := defaultFactory.Build().Execute(); err != nil {
+ var exitError *exitError
+ if errors.As(err, &exitError) {
+ os.Exit(exitError.ExitCode)
}
- os.Exit(caddy.ExitCodeFailedStartup)
+ os.Exit(1)
}
-
- fs := subcommand.Flags
- if fs == nil {
- fs = flag.NewFlagSet(subcommand.Name, flag.ExitOnError)
- }
-
- err := fs.Parse(os.Args[2:])
- if err != nil {
- fmt.Println(err)
- os.Exit(caddy.ExitCodeFailedStartup)
- }
-
- exitCode, err := subcommand.Func(Flags{fs})
- if err != nil {
- fmt.Fprintf(os.Stderr, "%s: %v\n", subcommand.Name, err)
- }
-
- os.Exit(exitCode)
}
// handlePingbackConn reads from conn and ensures it matches
// the bytes in expect, or returns an error if it doesn't.
func handlePingbackConn(conn net.Conn, expect []byte) error {
defer conn.Close()
- confirmationBytes, err := ioutil.ReadAll(io.LimitReader(conn, 32))
+ confirmationBytes, err := io.ReadAll(io.LimitReader(conn, 32))
if err != nil {
return err
}
@@ -103,15 +95,59 @@ func handlePingbackConn(conn net.Conn, expect []byte) error {
return nil
}
-// loadConfig loads the config from configFile and adapts it
+// LoadConfig loads the config from configFile and adapts it
// using adapterName. If adapterName is specified, configFile
// must be also. If no configFile is specified, it tries
// loading a default config file. The lack of a config file is
// not treated as an error, but false will be returned if
// there is no config available. It prints any warnings to stderr,
// and returns the resulting JSON config bytes along with
-// whether a config file was loaded or not.
-func loadConfig(configFile, adapterName string) ([]byte, string, error) {
+// the name of the loaded config file (if any).
+func LoadConfig(configFile, adapterName string) ([]byte, string, error) {
+ return loadConfigWithLogger(caddy.Log(), configFile, adapterName)
+}
+
+func isCaddyfile(configFile, adapterName string) (bool, error) {
+ if adapterName == "caddyfile" {
+ return true, nil
+ }
+
+ // as a special case, if a config file starts with "caddyfile" or
+ // has a ".caddyfile" extension, and no adapter is specified, and
+ // no adapter module name matches the extension, assume
+ // caddyfile adapter for convenience
+ baseConfig := strings.ToLower(filepath.Base(configFile))
+ baseConfigExt := filepath.Ext(baseConfig)
+ startsOrEndsInCaddyfile := strings.HasPrefix(baseConfig, "caddyfile") || strings.HasSuffix(baseConfig, ".caddyfile")
+
+ if baseConfigExt == ".json" {
+ return false, nil
+ }
+
+ // If the adapter is not specified,
+ // the config file starts with "caddyfile",
+ // the config file has an extension,
+ // and isn't a JSON file (e.g. Caddyfile.yaml),
+ // then we don't know what the config format is.
+ if adapterName == "" && startsOrEndsInCaddyfile {
+ return true, nil
+ }
+
+ // adapter is not empty,
+ // adapter is not "caddyfile",
+ // extension is not ".json",
+ // extension is not ".caddyfile"
+ // file does not start with "Caddyfile"
+ return false, nil
+}
+
+func loadConfigWithLogger(logger *zap.Logger, configFile, adapterName string) ([]byte, string, error) {
+ // if no logger is provided, use a nop logger
+ // just so we don't have to check for nil
+ if logger == nil {
+ logger = zap.NewNop()
+ }
+
// specifying an adapter without a config file is ambiguous
if adapterName != "" && configFile == "" {
return nil, "", fmt.Errorf("cannot adapt config without config file (use --config)")
@@ -122,21 +158,26 @@ func loadConfig(configFile, adapterName string) ([]byte, string, error) {
var cfgAdapter caddyconfig.Adapter
var err error
if configFile != "" {
- config, err = ioutil.ReadFile(configFile)
- if err != nil {
- return nil, "", fmt.Errorf("reading config file: %v", err)
+ if configFile == "-" {
+ config, err = io.ReadAll(os.Stdin)
+ if err != nil {
+ return nil, "", fmt.Errorf("reading config from stdin: %v", err)
+ }
+ logger.Info("using config from stdin")
+ } else {
+ config, err = os.ReadFile(configFile)
+ if err != nil {
+ return nil, "", fmt.Errorf("reading config from file: %v", err)
+ }
+ logger.Info("using config from file", zap.String("file", configFile))
}
- caddy.Log().Info("using provided configuration",
- zap.String("config_file", configFile),
- zap.String("config_adapter", adapterName))
} else if adapterName == "" {
- // as a special case when no config file or adapter
- // is specified, see if the Caddyfile adapter is
- // plugged in, and if so, try using a default Caddyfile
+ // if the Caddyfile adapter is plugged in, we can try using an
+ // adjacent Caddyfile by default
cfgAdapter = caddyconfig.GetAdapter("caddyfile")
if cfgAdapter != nil {
- config, err = ioutil.ReadFile("Caddyfile")
- if os.IsNotExist(err) {
+ config, err = os.ReadFile("Caddyfile")
+ if errors.Is(err, fs.ErrNotExist) {
// okay, no default Caddyfile; pretend like this never happened
cfgAdapter = nil
} else if err != nil {
@@ -145,18 +186,15 @@ func loadConfig(configFile, adapterName string) ([]byte, string, error) {
} else {
// success reading default Caddyfile
configFile = "Caddyfile"
- caddy.Log().Info("using adjacent Caddyfile")
+ logger.Info("using adjacent Caddyfile")
}
}
}
- // as a special case, if a config file called "Caddyfile" was
- // specified, and no adapter is specified, assume caddyfile adapter
- // for convenience
- if strings.HasPrefix(filepath.Base(configFile), "Caddyfile") &&
- filepath.Ext(configFile) != ".json" &&
- adapterName == "" {
+ if yes, err := isCaddyfile(configFile, adapterName); yes {
adapterName = "caddyfile"
+ } else if err != nil {
+ return nil, "", err
}
// load config adapter
@@ -169,20 +207,30 @@ func loadConfig(configFile, adapterName string) ([]byte, string, error) {
// adapt config
if cfgAdapter != nil {
- adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]interface{}{
+ adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]any{
"filename": configFile,
})
if err != nil {
return nil, "", fmt.Errorf("adapting config using %s: %v", adapterName, err)
}
+ logger.Info("adapted config to JSON", zap.String("adapter", adapterName))
for _, warn := range warnings {
msg := warn.Message
if warn.Directive != "" {
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
}
- fmt.Printf("[WARNING][%s] %s:%d: %s\n", adapterName, warn.File, warn.Line, msg)
+ logger.Warn(msg,
+ zap.String("adapter", adapterName),
+ zap.String("file", warn.File),
+ zap.Int("line", warn.Line))
}
config = adaptedConfig
+ } else if len(config) != 0 {
+ // validate that the config is at least valid JSON
+ err = json.Unmarshal(config, new(any))
+ if err != nil {
+ return nil, "", fmt.Errorf("config is not valid JSON: %v; did you mean to use a config adapter (the --adapter flag)?", err)
+ }
}
return config, configFile, nil
@@ -193,7 +241,15 @@ func loadConfig(configFile, adapterName string) ([]byte, string, error) {
// blocks indefinitely; it only quits if the poller has errors for
// long enough time. The filename passed in must be the actual
// config file used, not one to be discovered.
+// Each second the config files is loaded and parsed into an object
+// and is compared to the last config object that was loaded
func watchConfigFile(filename, adapterName string) {
+ defer func() {
+ if err := recover(); err != nil {
+ log.Printf("[PANIC] watching config file: %v\n%s", err, debug.Stack())
+ }
+ }()
+
// make our logger; since config reloads can change the
// default logger, we need to get it dynamically each time
logger := func() *zap.Logger {
@@ -202,63 +258,36 @@ func watchConfigFile(filename, adapterName string) {
With(zap.String("config_file", filename))
}
- // get the initial timestamp on the config file
- info, err := os.Stat(filename)
+ // get current config
+ lastCfg, _, err := loadConfigWithLogger(nil, filename, adapterName)
if err != nil {
- logger().Error("cannot watch config file", zap.Error(err))
+ logger().Error("unable to load latest config", zap.Error(err))
return
}
- lastModified := info.ModTime()
logger().Info("watching config file for changes")
- // if the file disappears or something, we can
- // stop polling if the error lasts long enough
- var lastErr time.Time
- finalError := func(err error) bool {
- if lastErr.IsZero() {
- lastErr = time.Now()
- return false
- }
- if time.Since(lastErr) > 30*time.Second {
- logger().Error("giving up watching config file; too many errors",
- zap.Error(err))
- return true
- }
- return false
- }
-
// begin poller
+ //nolint:staticcheck
for range time.Tick(1 * time.Second) {
- // get the file info
- info, err := os.Stat(filename)
- if err != nil {
- if finalError(err) {
- return
- }
- continue
- }
- lastErr = time.Time{} // no error, so clear any memory of one
-
- // if it hasn't changed, nothing to do
- if !info.ModTime().After(lastModified) {
- continue
- }
-
- logger().Info("config file changed; reloading")
-
- // remember this timestamp
- lastModified = info.ModTime()
-
- // load the contents of the file
- config, _, err := loadConfig(filename, adapterName)
+ // get current config
+ newCfg, _, err := loadConfigWithLogger(nil, filename, adapterName)
if err != nil {
logger().Error("unable to load latest config", zap.Error(err))
- continue
+ return
}
+ // if it hasn't changed, nothing to do
+ if bytes.Equal(lastCfg, newCfg) {
+ continue
+ }
+ logger().Info("config file changed; reloading")
+
+ // remember the current config
+ lastCfg = newCfg
+
// apply the updated config
- err = caddy.Load(config, false)
+ err = caddy.Load(lastCfg, false)
if err != nil {
logger().Error("applying latest config", zap.Error(err))
continue
@@ -269,7 +298,7 @@ func watchConfigFile(filename, adapterName string) {
// Flags wraps a FlagSet so that typed values
// from flags can be easily retrieved.
type Flags struct {
- *flag.FlagSet
+ *pflag.FlagSet
}
// String returns the string representation of the
@@ -299,7 +328,7 @@ func (f Flags) Int(name string) int {
// Float64 returns the float64 representation of the
// flag given by name. It returns false if the flag
-// is not a float63 type. It panics if the flag is
+// is not a float64 type. It panics if the flag is
// not in the flag set.
func (f Flags) Float64(name string) float64 {
val, _ := strconv.ParseFloat(f.String(name), 64)
@@ -311,31 +340,117 @@ func (f Flags) Float64(name string) float64 {
// is not a duration type. It panics if the flag is
// not in the flag set.
func (f Flags) Duration(name string) time.Duration {
- val, _ := time.ParseDuration(f.String(name))
+ val, _ := caddy.ParseDuration(f.String(name))
return val
}
-// flagHelp returns the help text for fs.
-func flagHelp(fs *flag.FlagSet) string {
- if fs == nil {
- return ""
+func loadEnvFromFile(envFile string) error {
+ file, err := os.Open(envFile)
+ if err != nil {
+ return fmt.Errorf("reading environment file: %v", err)
+ }
+ defer file.Close()
+
+ envMap, err := parseEnvFile(file)
+ if err != nil {
+ return fmt.Errorf("parsing environment file: %v", err)
}
- // temporarily redirect output
- out := fs.Output()
- defer fs.SetOutput(out)
+ for k, v := range envMap {
+ // do not overwrite existing environment variables
+ _, exists := os.LookupEnv(k)
+ if !exists {
+ if err := os.Setenv(k, v); err != nil {
+ return fmt.Errorf("setting environment variables: %v", err)
+ }
+ }
+ }
- buf := new(bytes.Buffer)
- fs.SetOutput(buf)
- fs.PrintDefaults()
- return buf.String()
+ // Update the storage paths to ensure they have the proper
+ // value after loading a specified env file.
+ caddy.ConfigAutosavePath = filepath.Join(caddy.AppConfigDir(), "autosave.json")
+ caddy.DefaultStorage = &certmagic.FileStorage{Path: caddy.AppDataDir()}
+
+ return nil
+}
+
+// parseEnvFile parses an env file from KEY=VALUE format.
+// It's pretty naive. Limited value quotation is supported,
+// but variable and command expansions are not supported.
+func parseEnvFile(envInput io.Reader) (map[string]string, error) {
+ envMap := make(map[string]string)
+
+ scanner := bufio.NewScanner(envInput)
+ var lineNumber int
+
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ lineNumber++
+
+ // skip empty lines and lines starting with comment
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+
+ // split line into key and value
+ before, after, isCut := strings.Cut(line, "=")
+ if !isCut {
+ return nil, fmt.Errorf("can't parse line %d; line should be in KEY=VALUE format", lineNumber)
+ }
+ key, val := before, after
+
+ // sometimes keys are prefixed by "export " so file can be sourced in bash; ignore it here
+ key = strings.TrimPrefix(key, "export ")
+
+ // validate key and value
+ if key == "" {
+ return nil, fmt.Errorf("missing or empty key on line %d", lineNumber)
+ }
+ if strings.Contains(key, " ") {
+ return nil, fmt.Errorf("invalid key on line %d: contains whitespace: %s", lineNumber, key)
+ }
+ if strings.HasPrefix(val, " ") || strings.HasPrefix(val, "\t") {
+ return nil, fmt.Errorf("invalid value on line %d: whitespace before value: '%s'", lineNumber, val)
+ }
+
+ // remove any trailing comment after value
+ if commentStart, _, found := strings.Cut(val, "#"); found {
+ val = strings.TrimRight(commentStart, " \t")
+ }
+
+ // quoted value: support newlines
+ if strings.HasPrefix(val, `"`) || strings.HasPrefix(val, "'") {
+ quote := string(val[0])
+ for !(strings.HasSuffix(line, quote) && !strings.HasSuffix(line, `\`+quote)) {
+ val = strings.ReplaceAll(val, `\`+quote, quote)
+ if !scanner.Scan() {
+ break
+ }
+ lineNumber++
+ line = strings.ReplaceAll(scanner.Text(), `\`+quote, quote)
+ val += "\n" + line
+ }
+ val = strings.TrimPrefix(val, quote)
+ val = strings.TrimSuffix(val, quote)
+ }
+
+ envMap[key] = val
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return envMap, nil
}
func printEnvironment() {
+ _, version := caddy.Version()
fmt.Printf("caddy.HomeDir=%s\n", caddy.HomeDir())
fmt.Printf("caddy.AppDataDir=%s\n", caddy.AppDataDir())
fmt.Printf("caddy.AppConfigDir=%s\n", caddy.AppConfigDir())
fmt.Printf("caddy.ConfigAutosavePath=%s\n", caddy.ConfigAutosavePath)
+ fmt.Printf("caddy.Version=%s\n", version)
fmt.Printf("runtime.GOOS=%s\n", runtime.GOOS)
fmt.Printf("runtime.GOARCH=%s\n", runtime.GOARCH)
fmt.Printf("runtime.Compiler=%s\n", runtime.Compiler)
@@ -352,70 +467,15 @@ func printEnvironment() {
}
}
-// moveStorage moves the old default dataDir to the new default dataDir.
-// TODO: This is TEMPORARY until the release candidates.
-func moveStorage() {
- // get the home directory (the old way)
- oldHome := os.Getenv("HOME")
- if oldHome == "" && runtime.GOOS == "windows" {
- drive := os.Getenv("HOMEDRIVE")
- path := os.Getenv("HOMEPATH")
- oldHome = drive + path
- if drive == "" || path == "" {
- oldHome = os.Getenv("USERPROFILE")
- }
- }
- if oldHome == "" {
- oldHome = "."
- }
- oldDataDir := filepath.Join(oldHome, ".local", "share", "caddy")
+// StringSlice is a flag.Value that enables repeated use of a string flag.
+type StringSlice []string
-	// nothing to do if old data dir doesn't exist
-	_, err := os.Stat(oldDataDir)
-	if os.IsNotExist(err) {
-		return
-	}
+// String returns the collected values rendered as a bracketed,
+// comma-separated list, e.g. "[a, b]".
+func (ss StringSlice) String() string { return "[" + strings.Join(ss, ", ") + "]" }
- // nothing to do if the new data dir is the same as the old one
- newDataDir := caddy.AppDataDir()
- if oldDataDir == newDataDir {
- return
- }
-
- logger := caddy.Log().Named("automigrate").With(
- zap.String("old_dir", oldDataDir),
- zap.String("new_dir", newDataDir))
-
- logger.Info("beginning one-time data directory migration",
- zap.String("details", "https://github.com/caddyserver/caddy/issues/2955"))
-
- // if new data directory exists, avoid auto-migration as a conservative safety measure
- _, err = os.Stat(newDataDir)
- if !os.IsNotExist(err) {
- logger.Error("new data directory already exists; skipping auto-migration as conservative safety measure",
- zap.Error(err),
- zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333"))
- return
- }
-
- // construct the new data directory's parent folder
- err = os.MkdirAll(filepath.Dir(newDataDir), 0700)
- if err != nil {
- logger.Error("unable to make new datadirectory - follow link for instructions",
- zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333"),
- zap.Error(err))
- return
- }
-
- // folder structure is same, so just try to rename (move) it;
- // this fails if the new path is on a separate device
- err = os.Rename(oldDataDir, newDataDir)
- if err != nil {
- logger.Error("new data directory already exists; skipping auto-migration as conservative safety measure - follow link for instructions",
- zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333"),
- zap.Error(err))
- }
-
- logger.Info("successfully completed one-time migration of data directory",
- zap.String("details", "https://github.com/caddyserver/caddy/issues/2955"))
+// Set appends value to the slice; together with String it makes
+// StringSlice satisfy flag.Value, so the flag may be repeated on
+// the command line.
+func (ss *StringSlice) Set(value string) error {
+	*ss = append(*ss, value)
+	return nil
 }
+
+// Interface guard
+var _ flag.Value = (*StringSlice)(nil)
diff --git a/cmd/main_test.go b/cmd/main_test.go
new file mode 100644
index 00000000..3b2412c5
--- /dev/null
+++ b/cmd/main_test.go
@@ -0,0 +1,280 @@
+package caddycmd
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestParseEnvFile(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ expect map[string]string
+ shouldErr bool
+ }{
+ {
+ input: `KEY=value`,
+ expect: map[string]string{
+ "KEY": "value",
+ },
+ },
+ {
+ input: `
+ KEY=value
+ OTHER_KEY=Some Value
+ `,
+ expect: map[string]string{
+ "KEY": "value",
+ "OTHER_KEY": "Some Value",
+ },
+ },
+ {
+ input: `
+ KEY=value
+ INVALID KEY=asdf
+ OTHER_KEY=Some Value
+ `,
+ shouldErr: true,
+ },
+ {
+ input: `
+ KEY=value
+ SIMPLE_QUOTED="quoted value"
+ OTHER_KEY=Some Value
+ `,
+ expect: map[string]string{
+ "KEY": "value",
+ "SIMPLE_QUOTED": "quoted value",
+ "OTHER_KEY": "Some Value",
+ },
+ },
+ {
+ input: `
+ KEY=value
+ NEWLINES="foo
+ bar"
+ OTHER_KEY=Some Value
+ `,
+ expect: map[string]string{
+ "KEY": "value",
+ "NEWLINES": "foo\n\tbar",
+ "OTHER_KEY": "Some Value",
+ },
+ },
+ {
+ input: `
+ KEY=value
+ ESCAPED="\"escaped quotes\"
+here"
+ OTHER_KEY=Some Value
+ `,
+ expect: map[string]string{
+ "KEY": "value",
+ "ESCAPED": "\"escaped quotes\"\nhere",
+ "OTHER_KEY": "Some Value",
+ },
+ },
+ {
+ input: `
+ export KEY=value
+ OTHER_KEY=Some Value
+ `,
+ expect: map[string]string{
+ "KEY": "value",
+ "OTHER_KEY": "Some Value",
+ },
+ },
+ {
+ input: `
+ =value
+ OTHER_KEY=Some Value
+ `,
+ shouldErr: true,
+ },
+ {
+ input: `
+ EMPTY=
+ OTHER_KEY=Some Value
+ `,
+ expect: map[string]string{
+ "EMPTY": "",
+ "OTHER_KEY": "Some Value",
+ },
+ },
+ {
+ input: `
+ EMPTY=""
+ OTHER_KEY=Some Value
+ `,
+ expect: map[string]string{
+ "EMPTY": "",
+ "OTHER_KEY": "Some Value",
+ },
+ },
+ {
+ input: `
+ KEY=value
+ #OTHER_KEY=Some Value
+ `,
+ expect: map[string]string{
+ "KEY": "value",
+ },
+ },
+ {
+ input: `
+ KEY=value
+ COMMENT=foo bar # some comment here
+ OTHER_KEY=Some Value
+ `,
+ expect: map[string]string{
+ "KEY": "value",
+ "COMMENT": "foo bar",
+ "OTHER_KEY": "Some Value",
+ },
+ },
+ {
+ input: `
+ KEY=value
+ WHITESPACE= foo
+ OTHER_KEY=Some Value
+ `,
+ shouldErr: true,
+ },
+ {
+ input: `
+ KEY=value
+ WHITESPACE=" foo bar "
+ OTHER_KEY=Some Value
+ `,
+ expect: map[string]string{
+ "KEY": "value",
+ "WHITESPACE": " foo bar ",
+ "OTHER_KEY": "Some Value",
+ },
+ },
+ } {
+ actual, err := parseEnvFile(strings.NewReader(tc.input))
+ if err != nil && !tc.shouldErr {
+ t.Errorf("Test %d: Got error but shouldn't have: %v", i, err)
+ }
+ if err == nil && tc.shouldErr {
+ t.Errorf("Test %d: Did not get error but should have", i)
+ }
+ if tc.shouldErr {
+ continue
+ }
+ if !reflect.DeepEqual(tc.expect, actual) {
+ t.Errorf("Test %d: Expected %v but got %v", i, tc.expect, actual)
+ }
+ }
+}
+
+// Test_isCaddyfile checks the filename/adapter heuristics that decide
+// whether a given config file should be treated as a Caddyfile.
+func Test_isCaddyfile(t *testing.T) {
+	type args struct {
+		configFile  string
+		adapterName string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    bool
+		wantErr bool
+	}{
+		{
+			name: "bare Caddyfile without adapter",
+			args: args{
+				configFile:  "Caddyfile",
+				adapterName: "",
+			},
+			want:    true,
+			wantErr: false,
+		},
+		{
+			name: "local Caddyfile without adapter",
+			args: args{
+				configFile:  "./Caddyfile",
+				adapterName: "",
+			},
+			want:    true,
+			wantErr: false,
+		},
+		{
+			name: "local caddyfile with adapter",
+			args: args{
+				configFile:  "./Caddyfile",
+				adapterName: "caddyfile",
+			},
+			want:    true,
+			wantErr: false,
+		},
+		{
+			name: "ends with .caddyfile with adapter",
+			args: args{
+				configFile:  "./conf.caddyfile",
+				adapterName: "caddyfile",
+			},
+			want:    true,
+			wantErr: false,
+		},
+		{
+			name: "ends with .caddyfile without adapter",
+			args: args{
+				configFile:  "./conf.caddyfile",
+				adapterName: "",
+			},
+			want:    true,
+			wantErr: false,
+		},
+		{
+			name: "config is Caddyfile.yaml with adapter",
+			args: args{
+				configFile:  "./Caddyfile.yaml",
+				adapterName: "yaml",
+			},
+			want:    false,
+			wantErr: false,
+		},
+		{
+
+			name: "json is not caddyfile but not error",
+			args: args{
+				configFile:  "./Caddyfile.json",
+				adapterName: "",
+			},
+			want:    false,
+			wantErr: false,
+		},
+		{
+
+			name: "prefix of Caddyfile and ./ with any extension is Caddyfile",
+			args: args{
+				configFile:  "./Caddyfile.prd",
+				adapterName: "",
+			},
+			want:    true,
+			wantErr: false,
+		},
+		{
+
+			name: "prefix of Caddyfile without ./ with any extension is Caddyfile",
+			args: args{
+				configFile:  "Caddyfile.prd",
+				adapterName: "",
+			},
+			want:    true,
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := isCaddyfile(tt.args.configFile, tt.args.adapterName)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("isCaddyfile() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("isCaddyfile() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/cmd/packagesfuncs.go b/cmd/packagesfuncs.go
new file mode 100644
index 00000000..69523200
--- /dev/null
+++ b/cmd/packagesfuncs.go
@@ -0,0 +1,354 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddycmd
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "runtime/debug"
+ "strings"
+
+ "go.uber.org/zap"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+// cmdUpgrade implements "caddy upgrade": it requests a new build containing
+// the same set of non-standard (plugin) modules currently compiled into this
+// binary, then replaces the running executable via upgradeBuild.
+func cmdUpgrade(fl Flags) (int, error) {
+	_, nonstandard, _, err := getModules()
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
+	}
+	pluginPkgs, err := getPluginPackages(nonstandard)
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, err
+	}
+
+	return upgradeBuild(pluginPkgs, fl)
+}
+
+// splitModule splits a CLI argument of the form "module[@version]" into its
+// module path and optional version, using the LAST "@" as the separator.
+// An empty module path is an error; an absent version yields version == "".
+func splitModule(arg string) (module, version string, err error) {
+	const versionSplit = "@"
+
+	// accommodate module paths that have @ in them, but we can only tolerate that if there's also
+	// a version, otherwise we don't know if it's a version separator or part of the file path
+	lastVersionSplit := strings.LastIndex(arg, versionSplit)
+	if lastVersionSplit < 0 {
+		module = arg
+	} else {
+		module, version = arg[:lastVersionSplit], arg[lastVersionSplit+1:]
+	}
+
+	if module == "" {
+		err = fmt.Errorf("module name is required")
+	}
+
+	return
+}
+
+// cmdAddPackage implements "caddy add-package": it adds each given module
+// (optionally pinned to a version via "module@version") to the current
+// plugin set, then rebuilds and replaces the running executable.
+func cmdAddPackage(fl Flags) (int, error) {
+	if len(fl.Args()) == 0 {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("at least one package name must be specified")
+	}
+	_, nonstandard, _, err := getModules()
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
+	}
+	pluginPkgs, err := getPluginPackages(nonstandard)
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, err
+	}
+
+	for _, arg := range fl.Args() {
+		module, version, err := splitModule(arg)
+		if err != nil {
+			return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid module name: %v", err)
+		}
+		// only allow a version to be specified if it's different from the existing version
+		// (i.e. re-adding an already-installed package is an error unless it
+		// changes the pinned version)
+		if _, ok := pluginPkgs[module]; ok && !(version != "" && pluginPkgs[module].Version != version) {
+			return caddy.ExitCodeFailedStartup, fmt.Errorf("package is already added")
+		}
+		pluginPkgs[module] = pluginPackage{Version: version, Path: module}
+	}
+
+	return upgradeBuild(pluginPkgs, fl)
+}
+
+// cmdRemovePackage implements "caddy remove-package": it removes each given
+// module from the current plugin set, then rebuilds and replaces the running
+// executable via upgradeBuild. Arguments may include a version suffix
+// ("module@version"), which is ignored for removal purposes.
+func cmdRemovePackage(fl Flags) (int, error) {
+	if len(fl.Args()) == 0 {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("at least one package name must be specified")
+	}
+	_, nonstandard, _, err := getModules()
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
+	}
+	pluginPkgs, err := getPluginPackages(nonstandard)
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, err
+	}
+
+	for _, arg := range fl.Args() {
+		module, _, err := splitModule(arg)
+		if err != nil {
+			return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid module name: %v", err)
+		}
+		if _, ok := pluginPkgs[module]; !ok {
+			// package does not exist
+			return caddy.ExitCodeFailedStartup, fmt.Errorf("package is not added")
+		}
+		// delete by the parsed module path, not the raw argument: the map is
+		// keyed by module path, so deleting by "module@version" would be a
+		// silent no-op and the package would remain in the rebuilt binary
+		delete(pluginPkgs, module)
+	}
+
+	return upgradeBuild(pluginPkgs, fl)
+}
+
+// upgradeBuild downloads a custom Caddy build containing exactly the given
+// plugin packages and swaps it in for the currently-running executable.
+// The current binary is renamed to a ".tmp" backup first; if any later step
+// fails, a deferred function moves the backup back into place. On success,
+// the backup is deleted unless the --keep-backup flag is set.
+func upgradeBuild(pluginPkgs map[string]pluginPackage, fl Flags) (int, error) {
+	l := caddy.Log()
+
+	thisExecPath, err := os.Executable()
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("determining current executable path: %v", err)
+	}
+	thisExecStat, err := os.Stat(thisExecPath)
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("retrieving current executable permission bits: %v", err)
+	}
+	if thisExecStat.Mode()&os.ModeSymlink == os.ModeSymlink {
+		symSource := thisExecPath
+		// we are a symlink; resolve it
+		thisExecPath, err = filepath.EvalSymlinks(thisExecPath)
+		if err != nil {
+			return caddy.ExitCodeFailedStartup, fmt.Errorf("resolving current executable symlink: %v", err)
+		}
+		l.Info("this executable is a symlink", zap.String("source", symSource), zap.String("target", thisExecPath))
+	}
+	l.Info("this executable will be replaced", zap.String("path", thisExecPath))
+
+	// build the request URL to download this custom build
+	qs := url.Values{
+		"os":   {runtime.GOOS},
+		"arch": {runtime.GOARCH},
+	}
+	for _, pkgInfo := range pluginPkgs {
+		qs.Add("p", pkgInfo.String())
+	}
+
+	// initiate the build
+	resp, err := downloadBuild(qs)
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("download failed: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// back up the current binary, in case something goes wrong we can replace it
+	backupExecPath := thisExecPath + ".tmp"
+	l.Info("build acquired; backing up current executable",
+		zap.String("current_path", thisExecPath),
+		zap.String("backup_path", backupExecPath))
+	err = os.Rename(thisExecPath, backupExecPath)
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("backing up current binary: %v", err)
+	}
+	defer func() {
+		// the closure reads the enclosing err at return time, so this only
+		// restores the backup when a later step has failed
+		if err != nil {
+			err2 := os.Rename(backupExecPath, thisExecPath)
+			if err2 != nil {
+				l.Error("restoring original executable failed; will need to be restored manually",
+					zap.String("backup_path", backupExecPath),
+					zap.String("original_path", thisExecPath),
+					zap.Error(err2))
+			}
+		}
+	}()
+
+	// download the file; do this in a closure to close reliably before we execute it
+	err = writeCaddyBinary(thisExecPath, &resp.Body, thisExecStat)
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, err
+	}
+
+	l.Info("download successful; displaying new binary details", zap.String("location", thisExecPath))
+
+	// use the new binary to print out version and module info
+	fmt.Print("\nModule versions:\n\n")
+	if err = listModules(thisExecPath); err != nil {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to execute 'caddy list-modules': %v", err)
+	}
+	fmt.Println("\nVersion:")
+	if err = showVersion(thisExecPath); err != nil {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to execute 'caddy version': %v", err)
+	}
+	fmt.Println()
+
+	// clean up the backup file
+	if !fl.Bool("keep-backup") {
+		if err = removeCaddyBinary(backupExecPath); err != nil {
+			return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to clean up backup binary: %v", err)
+		}
+	} else {
+		l.Info("skipped cleaning up the backup file", zap.String("backup_path", backupExecPath))
+	}
+
+	l.Info("upgrade successful; please restart any running Caddy instances", zap.String("executable", thisExecPath))
+
+	return caddy.ExitCodeSuccess, nil
+}
+
+// getModules enumerates every registered Caddy module and classifies it as
+// standard (its package path is under caddy.ImportPath), nonstandard
+// (third-party plugin), or unknown (the module info could not be loaded).
+// The Go module for each Caddy module is found by matching the module's
+// package path against the dependency list in the binary's build info.
+func getModules() (standard, nonstandard, unknown []moduleInfo, err error) {
+	bi, ok := debug.ReadBuildInfo()
+	if !ok {
+		err = fmt.Errorf("no build info")
+		return
+	}
+
+	for _, modID := range caddy.Modules() {
+		modInfo, err := caddy.GetModule(modID)
+		if err != nil {
+			// that's weird, shouldn't happen
+			unknown = append(unknown, moduleInfo{caddyModuleID: modID, err: err})
+			continue
+		}
+
+		// to get the Caddy plugin's version info, we need to know
+		// the package that the Caddy module's value comes from; we
+		// can use reflection but we need a non-pointer value (I'm
+		// not sure why), and since New() should return a pointer
+		// value, we need to dereference it first
+		iface := any(modInfo.New())
+		if rv := reflect.ValueOf(iface); rv.Kind() == reflect.Ptr {
+			iface = reflect.New(reflect.TypeOf(iface).Elem()).Elem().Interface()
+		}
+		modPkgPath := reflect.TypeOf(iface).PkgPath()
+
+		// now we find the Go module that the Caddy module's package
+		// belongs to; we assume the Caddy module package path will
+		// be prefixed by its Go module path, and we will choose the
+		// longest matching prefix in case there are nested modules
+		var matched *debug.Module
+		for _, dep := range bi.Deps {
+			if strings.HasPrefix(modPkgPath, dep.Path) {
+				if matched == nil || len(dep.Path) > len(matched.Path) {
+					matched = dep
+				}
+			}
+		}
+
+		// NOTE(review): matched (and thus goModule) may remain nil when no
+		// dependency prefix matches — downstream consumers must tolerate that
+		caddyModGoMod := moduleInfo{caddyModuleID: modID, goModule: matched}
+
+		if strings.HasPrefix(modPkgPath, caddy.ImportPath) {
+			standard = append(standard, caddyModGoMod)
+		} else {
+			nonstandard = append(nonstandard, caddyModGoMod)
+		}
+	}
+	return
+}
+
+// listModules runs the binary at path with "list-modules --versions
+// --skip-standard", forwarding its output to this process's stdout/stderr.
+func listModules(path string) error {
+	cmd := exec.Command(path, "list-modules", "--versions", "--skip-standard")
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	return cmd.Run()
+}
+
+// showVersion runs the binary at path with "version", forwarding its
+// output to this process's stdout/stderr.
+func showVersion(path string) error {
+	cmd := exec.Command(path, "version")
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	return cmd.Run()
+}
+
+// downloadBuild requests a custom Caddy build from the build server using
+// the given query values (os, arch, and "p" plugin packages). On success the
+// caller owns the response and must close its body; on error the body (if
+// any) is closed here before returning.
+func downloadBuild(qs url.Values) (*http.Response, error) {
+	l := caddy.Log()
+	l.Info("requesting build",
+		zap.String("os", qs.Get("os")),
+		zap.String("arch", qs.Get("arch")),
+		zap.Strings("packages", qs["p"]))
+	resp, err := http.Get(fmt.Sprintf("%s?%s", downloadPath, qs.Encode()))
+	if err != nil {
+		return nil, fmt.Errorf("secure request failed: %v", err)
+	}
+	if resp.StatusCode >= 400 {
+		// we are not returning the response, so we must close its body on
+		// every error path or the underlying connection is leaked
+		defer resp.Body.Close()
+		var details struct {
+			StatusCode int `json:"status_code"`
+			Error      struct {
+				Message string `json:"message"`
+				ID      string `json:"id"`
+			} `json:"error"`
+		}
+		err2 := json.NewDecoder(resp.Body).Decode(&details)
+		if err2 != nil {
+			return nil, fmt.Errorf("download and error decoding failed: HTTP %d: %v", resp.StatusCode, err2)
+		}
+		return nil, fmt.Errorf("download failed: HTTP %d: %s (id=%s)", resp.StatusCode, details.Error.Message, details.Error.ID)
+	}
+	return resp, nil
+}
+
+// getPluginPackages maps each non-standard module's Go module path to a
+// pluginPackage (path + version) suitable for the build server. It refuses
+// to proceed when any Go module has been replaced, since a replacement
+// cannot be fetched by path@version.
+// NOTE(review): mod.goModule is dereferenced without a nil check; getModules
+// can leave goModule nil when no build-info dependency matched — confirm
+// that cannot occur for non-standard modules.
+func getPluginPackages(modules []moduleInfo) (map[string]pluginPackage, error) {
+	pluginPkgs := make(map[string]pluginPackage)
+	for _, mod := range modules {
+		if mod.goModule.Replace != nil {
+			return nil, fmt.Errorf("cannot auto-upgrade when Go module has been replaced: %s => %s",
+				mod.goModule.Path, mod.goModule.Replace.Path)
+		}
+		pluginPkgs[mod.goModule.Path] = pluginPackage{Version: mod.goModule.Version, Path: mod.goModule.Path}
+	}
+	return pluginPkgs, nil
+}
+
+// writeCaddyBinary streams the downloaded build from body into the file at
+// path, creating or truncating it with the same permission bits as the
+// binary being replaced (fileInfo), and syncs it to disk before returning
+// so the bytes are durable before the new binary is executed.
+func writeCaddyBinary(path string, body *io.ReadCloser, fileInfo os.FileInfo) error {
+	l := caddy.Log()
+	destFile, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileInfo.Mode())
+	if err != nil {
+		return fmt.Errorf("unable to open destination file: %v", err)
+	}
+	defer destFile.Close()
+
+	l.Info("downloading binary", zap.String("destination", path))
+
+	_, err = io.Copy(destFile, *body)
+	if err != nil {
+		return fmt.Errorf("unable to download file: %v", err)
+	}
+
+	err = destFile.Sync()
+	if err != nil {
+		return fmt.Errorf("syncing downloaded file to device: %v", err)
+	}
+
+	return nil
+}
+
+// downloadPath is the Caddy build server endpoint used by the upgrade and
+// add/remove-package commands.
+const downloadPath = "https://caddyserver.com/api/download"
+
+// pluginPackage identifies a plugin by its Go module path and an optional
+// pinned version.
+type pluginPackage struct {
+	Version string
+	Path    string
+}
+
+// String renders the package as "path" or "path@version" — the format the
+// build server expects in its "p" query parameter.
+func (p pluginPackage) String() string {
+	if p.Version == "" {
+		return p.Path
+	}
+	return p.Path + "@" + p.Version
+}
diff --git a/cmd/proc_posix.go b/cmd/removebinary.go
similarity index 67%
rename from cmd/proc_posix.go
rename to cmd/removebinary.go
index 9ca589f7..c74d2b2e 100644
--- a/cmd/proc_posix.go
+++ b/cmd/removebinary.go
@@ -12,26 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build !windows
+//go:build !windows
package caddycmd
import (
- "fmt"
"os"
- "path/filepath"
- "syscall"
)
-func gracefullyStopProcess(pid int) error {
- fmt.Print("Graceful stop... ")
- err := syscall.Kill(pid, syscall.SIGINT)
- if err != nil {
- return fmt.Errorf("kill: %v", err)
- }
- return nil
-}
-
-func getProcessName() string {
- return filepath.Base(os.Args[0])
+// removeCaddyBinary removes the Caddy binary at the given path.
+//
+// On any non-Windows OS, this simply calls os.Remove, since they should
+// probably not exhibit any issue with processes deleting themselves.
+// (On POSIX systems, unlinking only removes the directory entry; a running
+// process keeps its executable image mapped, so self-deletion is safe.)
+func removeCaddyBinary(path string) error {
+	return os.Remove(path)
+}
diff --git a/cmd/removebinary_windows.go b/cmd/removebinary_windows.go
new file mode 100644
index 00000000..8cc271ad
--- /dev/null
+++ b/cmd/removebinary_windows.go
@@ -0,0 +1,39 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddycmd
+
+import (
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// removeCaddyBinary removes the Caddy binary at the given path.
+//
+// On Windows, this uses a syscall to indirectly remove the file,
+// because otherwise we get an "Access is denied." error when trying
+// to delete the binary while Caddy is still running and performing
+// the upgrade. "cmd.exe /C" executes a command specified by the
+// following arguments, i.e. "del" which will run as a separate process,
+// which avoids the "Access is denied." error.
+func removeCaddyBinary(path string) error {
+	var sI syscall.StartupInfo
+	var pI syscall.ProcessInformation
+	// NOTE(review): path is interpolated into the command line unquoted; a
+	// backup path containing spaces would be split into multiple del
+	// arguments — confirm upgrade paths cannot contain spaces, or quote it
+	argv, err := syscall.UTF16PtrFromString(filepath.Join(os.Getenv("windir"), "system32", "cmd.exe") + " /C del " + path)
+	if err != nil {
+		return err
+	}
+	return syscall.CreateProcess(nil, argv, nil, nil, true, 0, nil, nil, &sI, &pI)
+}
diff --git a/cmd/storagefuncs.go b/cmd/storagefuncs.go
new file mode 100644
index 00000000..3c421971
--- /dev/null
+++ b/cmd/storagefuncs.go
@@ -0,0 +1,231 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddycmd
+
+import (
+ "archive/tar"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+
+ "github.com/caddyserver/certmagic"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+// storVal extracts just the top-level "storage" module configuration
+// from a full Caddy JSON config; all other fields are ignored.
+type storVal struct {
+	StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
+}
+
+// determineStorage returns the top-level storage module from the given config.
+// It may return nil even if no error.
+func determineStorage(configFile string, configAdapter string) (*storVal, error) {
+	cfg, _, err := LoadConfig(configFile, configAdapter)
+	if err != nil {
+		return nil, err
+	}
+
+	// storage defaults to FileStorage if not explicitly
+	// defined in the config, so the config can be valid
+	// json but unmarshaling will fail.
+	if !json.Valid(cfg) {
+		// NOTE(review): a zero-value SyntaxError carries no offset or
+		// message — confirm callers only check for the error's presence
+		return nil, &json.SyntaxError{}
+	}
+	var tmpStruct storVal
+	err = json.Unmarshal(cfg, &tmpStruct)
+	if err != nil {
+		// default case, ignore the error
+		var jsonError *json.SyntaxError
+		if errors.As(err, &jsonError) {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	return &tmpStruct, nil
+}
+
+// cmdImportStorage reads a tar archive from --input ("-" for stdin) and
+// writes each entry into the storage module configured in --config, falling
+// back to the default storage when the config declares none. It is the
+// inverse of cmdExportStorage.
+func cmdImportStorage(fl Flags) (int, error) {
+	importStorageCmdConfigFlag := fl.String("config")
+	importStorageCmdImportFile := fl.String("input")
+
+	if importStorageCmdConfigFlag == "" {
+		return caddy.ExitCodeFailedStartup, errors.New("--config is required")
+	}
+	if importStorageCmdImportFile == "" {
+		return caddy.ExitCodeFailedStartup, errors.New("--input is required")
+	}
+
+	// extract storage from config if possible
+	storageCfg, err := determineStorage(importStorageCmdConfigFlag, "")
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, err
+	}
+
+	// load specified storage or fallback to default
+	var stor certmagic.Storage
+	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
+	defer cancel()
+	if storageCfg != nil && storageCfg.StorageRaw != nil {
+		val, err := ctx.LoadModule(storageCfg, "StorageRaw")
+		if err != nil {
+			return caddy.ExitCodeFailedStartup, err
+		}
+		stor, err = val.(caddy.StorageConverter).CertMagicStorage()
+		if err != nil {
+			return caddy.ExitCodeFailedStartup, err
+		}
+	} else {
+		stor = caddy.DefaultStorage
+	}
+
+	// setup input
+	var f *os.File
+	if importStorageCmdImportFile == "-" {
+		f = os.Stdin
+	} else {
+		f, err = os.Open(importStorageCmdImportFile)
+		if err != nil {
+			return caddy.ExitCodeFailedStartup, fmt.Errorf("opening input file: %v", err)
+		}
+		defer f.Close()
+	}
+
+	// store each archive element
+	tr := tar.NewReader(f)
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return caddy.ExitCodeFailedQuit, fmt.Errorf("reading archive: %v", err)
+		}
+
+		b, err := io.ReadAll(tr)
+		if err != nil {
+			return caddy.ExitCodeFailedQuit, fmt.Errorf("reading archive: %v", err)
+		}
+
+		err = stor.Store(ctx, hdr.Name, b)
+		if err != nil {
+			// this failure is a storage write, not an archive read;
+			// report it as such (and identify the offending key)
+			return caddy.ExitCodeFailedQuit, fmt.Errorf("importing key %s: %v", hdr.Name, err)
+		}
+	}
+
+	fmt.Println("Successfully imported storage")
+	return caddy.ExitCodeSuccess, nil
+}
+
+// cmdExportStorage writes every terminal key held by the storage module
+// configured in --config (falling back to the default storage) into a tar
+// archive at --output ("-" for stdout). Keys that disappear while the
+// export runs are skipped with a warning rather than aborting.
+func cmdExportStorage(fl Flags) (int, error) {
+	exportStorageCmdConfigFlag := fl.String("config")
+	exportStorageCmdOutputFlag := fl.String("output")
+
+	if exportStorageCmdConfigFlag == "" {
+		return caddy.ExitCodeFailedStartup, errors.New("--config is required")
+	}
+	if exportStorageCmdOutputFlag == "" {
+		return caddy.ExitCodeFailedStartup, errors.New("--output is required")
+	}
+
+	// extract storage from config if possible
+	storageCfg, err := determineStorage(exportStorageCmdConfigFlag, "")
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, err
+	}
+
+	// load specified storage or fallback to default
+	var stor certmagic.Storage
+	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
+	defer cancel()
+	if storageCfg != nil && storageCfg.StorageRaw != nil {
+		val, err := ctx.LoadModule(storageCfg, "StorageRaw")
+		if err != nil {
+			return caddy.ExitCodeFailedStartup, err
+		}
+		stor, err = val.(caddy.StorageConverter).CertMagicStorage()
+		if err != nil {
+			return caddy.ExitCodeFailedStartup, err
+		}
+	} else {
+		stor = caddy.DefaultStorage
+	}
+
+	// enumerate all keys
+	keys, err := stor.List(ctx, "", true)
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, err
+	}
+
+	// setup output
+	var f *os.File
+	if exportStorageCmdOutputFlag == "-" {
+		f = os.Stdout
+	} else {
+		f, err = os.Create(exportStorageCmdOutputFlag)
+		if err != nil {
+			return caddy.ExitCodeFailedStartup, fmt.Errorf("opening output file: %v", err)
+		}
+		defer f.Close()
+	}
+
+	// `IsTerminal: true` keys hold the values we
+	// care about, write them out
+	tw := tar.NewWriter(f)
+	for _, k := range keys {
+		info, err := stor.Stat(ctx, k)
+		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				caddy.Log().Warn(fmt.Sprintf("key: %s removed while export is in-progress", k))
+				continue
+			}
+			return caddy.ExitCodeFailedQuit, err
+		}
+
+		if info.IsTerminal {
+			v, err := stor.Load(ctx, k)
+			if err != nil {
+				if errors.Is(err, fs.ErrNotExist) {
+					caddy.Log().Warn(fmt.Sprintf("key: %s removed while export is in-progress", k))
+					continue
+				}
+				return caddy.ExitCodeFailedQuit, err
+			}
+
+			hdr := &tar.Header{
+				Name:    k,
+				Mode:    0o600,
+				Size:    int64(len(v)),
+				ModTime: info.Modified,
+			}
+
+			if err = tw.WriteHeader(hdr); err != nil {
+				return caddy.ExitCodeFailedQuit, fmt.Errorf("writing archive: %v", err)
+			}
+			if _, err = tw.Write(v); err != nil {
+				return caddy.ExitCodeFailedQuit, fmt.Errorf("writing archive: %v", err)
+			}
+		}
+	}
+	// Close flushes buffered data and writes the archive footer
+	if err = tw.Close(); err != nil {
+		return caddy.ExitCodeFailedQuit, fmt.Errorf("writing archive: %v", err)
+	}
+
+	return caddy.ExitCodeSuccess, nil
+}
diff --git a/cmd/x509rootsfallback.go b/cmd/x509rootsfallback.go
new file mode 100644
index 00000000..935a48ec
--- /dev/null
+++ b/cmd/x509rootsfallback.go
@@ -0,0 +1,33 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddycmd
+
+import (
+ // For running in minimal environments, this can ease
+ // headaches related to establishing TLS connections.
+ // "Package fallback embeds a set of fallback X.509 trusted
+ // roots in the application by automatically invoking
+ // x509.SetFallbackRoots. This allows the application to
+ // work correctly even if the operating system does not
+ // provide a verifier or system roots pool. ... It's
+ // recommended that only binaries, and not libraries,
+ // import this package. This package must be kept up to
+ // date for security and compatibility reasons."
+ //
+ // This is in its own file only because of conflicts
+ // between gci and goimports when in main.go.
+ // See https://github.com/daixiang0/gci/issues/76
+ _ "golang.org/x/crypto/x509roots/fallback"
+)
diff --git a/context.go b/context.go
index 5c8df51b..d4d7afac 100644
--- a/context.go
+++ b/context.go
@@ -19,10 +19,16 @@ import (
"encoding/json"
"fmt"
"log"
+ "log/slog"
"reflect"
"github.com/caddyserver/certmagic"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/collectors"
"go.uber.org/zap"
+ "go.uber.org/zap/exp/zapslog"
+
+ "github.com/caddyserver/caddy/v2/internal/filesystems"
)
// Context is a type which defines the lifetime of modules that
@@ -37,9 +43,13 @@ import (
// not actually need to do this).
type Context struct {
context.Context
- moduleInstances map[string][]interface{}
+
+ moduleInstances map[string][]Module
cfg *Config
- cleanupFuncs []func()
+ ancestry []Module
+ cleanupFuncs []func() // invoked at every config unload
+ exitFuncs []func(context.Context) // invoked at config unload ONLY IF the process is exiting (EXPERIMENTAL)
+ metricsRegistry *prometheus.Registry
}
// NewContext provides a new context derived from the given
@@ -51,7 +61,7 @@ type Context struct {
// modules which are loaded will be properly unloaded.
// See standard library context package's documentation.
func NewContext(ctx Context) (Context, context.CancelFunc) {
- newCtx := Context{moduleInstances: make(map[string][]interface{}), cfg: ctx.cfg}
+ newCtx := Context{moduleInstances: make(map[string][]Module), cfg: ctx.cfg, metricsRegistry: prometheus.NewPedanticRegistry()}
c, cancel := context.WithCancel(ctx.Context)
wrappedCancel := func() {
cancel()
@@ -72,6 +82,7 @@ func NewContext(ctx Context) (Context, context.CancelFunc) {
}
}
newCtx.Context = c
+ newCtx.initMetrics()
return newCtx, wrappedCancel
}
@@ -80,6 +91,43 @@ func (ctx *Context) OnCancel(f func()) {
ctx.cleanupFuncs = append(ctx.cleanupFuncs, f)
}
+// Filesystems returns a ref to the FilesystemMap.
+// EXPERIMENTAL: This API is subject to change.
+func (ctx *Context) Filesystems() FileSystems {
+	// if no config is loaded, we use a default FilesystemMap, which includes the osfs
+ if ctx.cfg == nil {
+ return &filesystems.FilesystemMap{}
+ }
+ return ctx.cfg.filesystems
+}
+
+// GetMetricsRegistry returns the active metrics registry for the context.
+// EXPERIMENTAL: This API is subject to change.
+func (ctx *Context) GetMetricsRegistry() *prometheus.Registry {
+ return ctx.metricsRegistry
+}
+
+func (ctx *Context) initMetrics() {
+ ctx.metricsRegistry.MustRegister(
+ collectors.NewBuildInfoCollector(),
+ collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
+ collectors.NewGoCollector(),
+ adminMetrics.requestCount,
+ adminMetrics.requestErrors,
+ globalMetrics.configSuccess,
+ globalMetrics.configSuccessTime,
+ )
+}
+
+// OnExit executes f when the process exits gracefully.
+// The function is only executed if the process is gracefully
+// shut down while this context is active.
+//
+// EXPERIMENTAL API: subject to change or removal.
+func (ctx *Context) OnExit(f func(context.Context)) {
+ ctx.exitFuncs = append(ctx.exitFuncs, f)
+}
+
// LoadModule loads the Caddy module(s) from the specified field of the parent struct
// pointer and returns the loaded module(s). The struct pointer and its field name as
// a string are necessary so that reflection can be used to read the struct tag on the
@@ -90,14 +138,15 @@ func (ctx *Context) OnCancel(f func()) {
// ModuleMap may be used in place of map[string]json.RawMessage. The return value's
// underlying type mirrors the input field's type:
//
-// json.RawMessage => interface{}
-// []json.RawMessage => []interface{}
-// map[string]json.RawMessage => map[string]interface{}
-// []map[string]json.RawMessage => []map[string]interface{}
+// json.RawMessage => any
+// []json.RawMessage => []any
+// [][]json.RawMessage => [][]any
+// map[string]json.RawMessage => map[string]any
+// []map[string]json.RawMessage => []map[string]any
//
// The field must have a "caddy" struct tag in this format:
//
-// caddy:"key1=val1 key2=val2"
+// caddy:"key1=val1 key2=val2"
//
// To load modules, a "namespace" key is required. For example, to load modules
// in the "http.handlers" namespace, you'd put: `namespace=http.handlers` in the
@@ -114,20 +163,20 @@ func (ctx *Context) OnCancel(f func()) {
// meaning the key containing the module's name that is defined inline with the module
// itself. You must specify the inline key in a struct tag, along with the namespace:
//
-// caddy:"namespace=http.handlers inline_key=handler"
+// caddy:"namespace=http.handlers inline_key=handler"
//
// This will look for a key/value pair like `"handler": "..."` in the json.RawMessage
// in order to know the module name.
//
// To make use of the loaded module(s) (the return value), you will probably want
-// to type-assert each interface{} value(s) to the types that are useful to you
+// to type-assert each 'any' value to the types that are useful to you
// and store them on the same struct. Storing them on the same struct makes for
// easy garbage collection when your host module is no longer needed.
//
// Loaded modules have already been provisioned and validated. Upon returning
// successfully, this method clears the json.RawMessage(s) in the field since
// the raw JSON is no longer needed, and this allows the GC to free up memory.
-func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (interface{}, error) {
+func (ctx Context) LoadModule(structPointer any, fieldName string) (any, error) {
val := reflect.ValueOf(structPointer).Elem().FieldByName(fieldName)
typ := val.Type()
@@ -147,7 +196,7 @@ func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (inte
}
inlineModuleKey := opts["inline_key"]
- var result interface{}
+ var result any
switch val.Kind() {
case reflect.Slice:
@@ -162,14 +211,13 @@ func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (inte
return nil, err
}
result = val
-
} else if isJSONRawMessage(typ.Elem()) {
// val is `[]json.RawMessage`
if inlineModuleKey == "" {
panic("unable to determine module name without inline_key because type is not a ModuleMap")
}
- var all []interface{}
+ var all []any
for i := 0; i < val.Len(); i++ {
val, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, val.Index(i).Interface().(json.RawMessage))
if err != nil {
@@ -178,11 +226,30 @@ func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (inte
all = append(all, val)
}
result = all
+ } else if typ.Elem().Kind() == reflect.Slice && isJSONRawMessage(typ.Elem().Elem()) {
+ // val is `[][]json.RawMessage`
+ if inlineModuleKey == "" {
+ panic("unable to determine module name without inline_key because type is not a ModuleMap")
+ }
+ var all [][]any
+ for i := 0; i < val.Len(); i++ {
+ innerVal := val.Index(i)
+ var allInner []any
+ for j := 0; j < innerVal.Len(); j++ {
+ innerInnerVal, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, innerVal.Index(j).Interface().(json.RawMessage))
+ if err != nil {
+ return nil, fmt.Errorf("position %d: %v", j, err)
+ }
+ allInner = append(allInner, innerInnerVal)
+ }
+ all = append(all, allInner)
+ }
+ result = all
} else if isModuleMapType(typ.Elem()) {
// val is `[]map[string]json.RawMessage`
- var all []map[string]interface{}
+ var all []map[string]any
for i := 0; i < val.Len(); i++ {
thisSet, err := ctx.loadModulesFromSomeMap(moduleNamespace, inlineModuleKey, val.Index(i))
if err != nil {
@@ -210,10 +277,10 @@ func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (inte
return result, nil
}
-// loadModulesFromSomeMap loads modules from val, which must be a type of map[string]interface{}.
+// loadModulesFromSomeMap loads modules from val, which must be a type of map[string]any.
// Depending on inlineModuleKey, it will be interpreted as either a ModuleMap (key is the module
// name) or as a regular map (key is not the module name, and module name is defined inline).
-func (ctx Context) loadModulesFromSomeMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]interface{}, error) {
+func (ctx Context) loadModulesFromSomeMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]any, error) {
// if no inline_key is specified, then val must be a ModuleMap,
// where the key is the module name
if inlineModuleKey == "" {
@@ -231,8 +298,8 @@ func (ctx Context) loadModulesFromSomeMap(namespace, inlineModuleKey string, val
// loadModulesFromRegularMap loads modules from val, where val is a map[string]json.RawMessage.
// Map keys are NOT interpreted as module names, so module names are still expected to appear
// inline with the objects.
-func (ctx Context) loadModulesFromRegularMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]interface{}, error) {
- mods := make(map[string]interface{})
+func (ctx Context) loadModulesFromRegularMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]any, error) {
+ mods := make(map[string]any)
iter := val.MapRange()
for iter.Next() {
k := iter.Key()
@@ -246,10 +313,10 @@ func (ctx Context) loadModulesFromRegularMap(namespace, inlineModuleKey string,
return mods, nil
}
-// loadModuleMap loads modules from a ModuleMap, i.e. map[string]interface{}, where the key is the
+// loadModuleMap loads modules from a ModuleMap, i.e. map[string]any, where the key is the
// module name. With a module map, module names do not need to be defined inline with their values.
-func (ctx Context) loadModuleMap(namespace string, val reflect.Value) (map[string]interface{}, error) {
- all := make(map[string]interface{})
+func (ctx Context) loadModuleMap(namespace string, val reflect.Value) (map[string]any, error) {
+ all := make(map[string]any)
iter := val.MapRange()
for iter.Next() {
k := iter.Key().Interface().(string)
@@ -277,19 +344,19 @@ func (ctx Context) loadModuleMap(namespace string, val reflect.Value) (map[strin
// directly by most modules. However, this method is useful when
// dynamically loading/unloading modules in their own context,
// like from embedded scripts, etc.
-func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{}, error) {
+func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (any, error) {
modulesMu.RLock()
- mod, ok := modules[id]
+ modInfo, ok := modules[id]
modulesMu.RUnlock()
if !ok {
return nil, fmt.Errorf("unknown module: %s", id)
}
- if mod.New == nil {
- return nil, fmt.Errorf("module '%s' has no constructor", mod.ID)
+ if modInfo.New == nil {
+ return nil, fmt.Errorf("module '%s' has no constructor", modInfo.ID)
}
- val := mod.New().(interface{})
+ val := modInfo.New()
// value must be a pointer for unmarshaling into concrete type, even if
// the module's concrete type is a slice or map; New() *should* return
@@ -303,9 +370,9 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
// fill in its config only if there is a config to fill in
if len(rawMsg) > 0 {
- err := strictUnmarshalJSON(rawMsg, &val)
+ err := StrictUnmarshalJSON(rawMsg, &val)
if err != nil {
- return nil, fmt.Errorf("decoding module config: %s: %v", mod, err)
+ return nil, fmt.Errorf("decoding module config: %s: %v", modInfo, err)
}
}
@@ -318,6 +385,8 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
return nil, fmt.Errorf("module value cannot be null")
}
+ ctx.ancestry = append(ctx.ancestry, val)
+
if prov, ok := val.(Provisioner); ok {
err := prov.Provision(ctx)
if err != nil {
@@ -329,7 +398,7 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2)
}
}
- return nil, fmt.Errorf("provision %s: %v", mod, err)
+ return nil, fmt.Errorf("provision %s: %v", modInfo, err)
}
}
@@ -343,7 +412,7 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2)
}
}
- return nil, fmt.Errorf("%s: invalid configuration: %v", mod, err)
+ return nil, fmt.Errorf("%s: invalid configuration: %v", modInfo, err)
}
}
@@ -353,7 +422,7 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
}
// loadModuleInline loads a module from a JSON raw message which decodes to
-// a map[string]interface{}, where one of the object keys is moduleNameKey
+// a map[string]any, where one of the object keys is moduleNameKey
// and the corresponding value is the module name (as a string) which can
// be found in the given scope. In other words, the module name is declared
// in-line with the module itself.
@@ -363,7 +432,7 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{
// multiple instances in the map or it appears in an array (where there are
// no custom keys). In other words, the key containing the module name is
// treated special/separate from all the other keys in the object.
-func (ctx Context) loadModuleInline(moduleNameKey, moduleScope string, raw json.RawMessage) (interface{}, error) {
+func (ctx Context) loadModuleInline(moduleNameKey, moduleScope string, raw json.RawMessage) (any, error) {
moduleName, raw, err := getModuleNameInline(moduleNameKey, raw)
if err != nil {
return nil, err
@@ -385,7 +454,12 @@ func (ctx Context) loadModuleInline(moduleNameKey, moduleScope string, raw json.
// called during the Provision/Validate phase to reference a
// module's own host app (since the parent app module is still
// in the process of being provisioned, it is not yet ready).
-func (ctx Context) App(name string) (interface{}, error) {
+//
+// We return any type instead of the App type because it is NOT
+// intended for the caller of this method to be the one to start
+// or stop App modules. The caller is expected to assert to the
+// concrete type.
+func (ctx Context) App(name string) (any, error) {
if app, ok := ctx.cfg.apps[name]; ok {
return app, nil
}
@@ -401,12 +475,118 @@ func (ctx Context) App(name string) (interface{}, error) {
return modVal, nil
}
+// AppIfConfigured is like App, but it returns an error if the
+// app has not been configured. This is useful when the app is
+// required and its absence is a configuration error; or when
+// the app is optional and you don't want to instantiate a
+// new one that hasn't been explicitly configured. If the app
+// is not in the configuration, the error wraps ErrNotConfigured.
+func (ctx Context) AppIfConfigured(name string) (any, error) {
+ if ctx.cfg == nil {
+ return nil, fmt.Errorf("app module %s: %w", name, ErrNotConfigured)
+ }
+ if app, ok := ctx.cfg.apps[name]; ok {
+ return app, nil
+ }
+ appRaw := ctx.cfg.AppsRaw[name]
+ if appRaw == nil {
+ return nil, fmt.Errorf("app module %s: %w", name, ErrNotConfigured)
+ }
+ return ctx.App(name)
+}
+
+// ErrNotConfigured indicates a module is not configured.
+var ErrNotConfigured = fmt.Errorf("module not configured")
+
// Storage returns the configured Caddy storage implementation.
func (ctx Context) Storage() certmagic.Storage {
return ctx.cfg.storage
}
-// Logger returns a logger that can be used by mod.
-func (ctx Context) Logger(mod Module) *zap.Logger {
+// Logger returns a logger that is intended for use by the most
+// recent module associated with the context. Callers should not
+// pass in any arguments unless they want to associate with a
+// different module; it panics if more than 1 value is passed in.
+//
+// Originally, this method's signature was `Logger(mod Module)`,
+// requiring that an instance of a Caddy module be passed in.
+// However, that is no longer necessary, as the closest module
+// most recently associated with the context will be automatically
+// assumed. To prevent a sudden breaking change, this method's
+// signature has been changed to be variadic, but we may remove
+// the parameter altogether in the future. Callers should not
+// pass in any argument. If there is valid need to specify a
+// different module, please open an issue to discuss.
+//
+// PARTIALLY DEPRECATED: The Logger(module) form is deprecated and
+// may be removed in the future. Do not pass in any arguments.
+func (ctx Context) Logger(module ...Module) *zap.Logger {
+ if len(module) > 1 {
+ panic("more than 1 module passed in")
+ }
+ if ctx.cfg == nil {
+ // often the case in tests; just use a dev logger
+ l, err := zap.NewDevelopment()
+ if err != nil {
+ panic("config missing, unable to create dev logger: " + err.Error())
+ }
+ return l
+ }
+ mod := ctx.Module()
+ if len(module) > 0 {
+ mod = module[0]
+ }
+ if mod == nil {
+ return Log()
+ }
return ctx.cfg.Logging.Logger(mod)
}
+
+// Slogger returns a slog logger that is intended for use by
+// the most recent module associated with the context.
+func (ctx Context) Slogger() *slog.Logger {
+ if ctx.cfg == nil {
+ // often the case in tests; just use a dev logger
+ l, err := zap.NewDevelopment()
+ if err != nil {
+ panic("config missing, unable to create dev logger: " + err.Error())
+ }
+ return slog.New(zapslog.NewHandler(l.Core(), nil))
+ }
+ mod := ctx.Module()
+ if mod == nil {
+ return slog.New(zapslog.NewHandler(Log().Core(), nil))
+ }
+ return slog.New(zapslog.NewHandler(ctx.cfg.Logging.Logger(mod).Core(),
+ zapslog.WithName(string(mod.CaddyModule().ID)),
+ ))
+}
+
+// Modules returns the lineage of modules that this context provisioned,
+// with the most recent/current module being last in the list.
+func (ctx Context) Modules() []Module {
+ mods := make([]Module, len(ctx.ancestry))
+ copy(mods, ctx.ancestry)
+ return mods
+}
+
+// Module returns the current module, or the most recent one
+// provisioned by the context.
+func (ctx Context) Module() Module {
+ if len(ctx.ancestry) == 0 {
+ return nil
+ }
+ return ctx.ancestry[len(ctx.ancestry)-1]
+}
+
+// WithValue returns a new context with the given key-value pair.
+func (ctx *Context) WithValue(key, value any) Context {
+ return Context{
+ Context: context.WithValue(ctx.Context, key, value),
+ moduleInstances: ctx.moduleInstances,
+ cfg: ctx.cfg,
+ ancestry: ctx.ancestry,
+ cleanupFuncs: ctx.cleanupFuncs,
+ exitFuncs: ctx.exitFuncs,
+ }
+}
diff --git a/context_test.go b/context_test.go
index afa10dbd..27395612 100644
--- a/context_test.go
+++ b/context_test.go
@@ -71,13 +71,13 @@ func ExampleContext_LoadModule_array() {
},
}
- // since our input is []json.RawMessage, the output will be []interface{}
+ // since our input is []json.RawMessage, the output will be []any
mods, err := ctx.LoadModule(myStruct, "GuestModulesRaw")
if err != nil {
// you'd want to actually handle the error here
// return fmt.Errorf("loading guest modules: %v", err)
}
- for _, mod := range mods.([]interface{}) {
+ for _, mod := range mods.([]any) {
myStruct.guestModules = append(myStruct.guestModules, mod.(io.Writer))
}
@@ -104,13 +104,13 @@ func ExampleContext_LoadModule_map() {
},
}
- // since our input is map[string]json.RawMessage, the output will be map[string]interface{}
+ // since our input is map[string]json.RawMessage, the output will be map[string]any
mods, err := ctx.LoadModule(myStruct, "GuestModulesRaw")
if err != nil {
// you'd want to actually handle the error here
// return fmt.Errorf("loading guest modules: %v", err)
}
- for modName, mod := range mods.(map[string]interface{}) {
+ for modName, mod := range mods.(map[string]any) {
myStruct.guestModules[modName] = mod.(io.Writer)
}
diff --git a/duration_fuzz.go b/duration_fuzz.go
new file mode 100644
index 00000000..8a1f0c7c
--- /dev/null
+++ b/duration_fuzz.go
@@ -0,0 +1,25 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build gofuzz
+
+package caddy
+
+func FuzzParseDuration(data []byte) int {
+ _, err := ParseDuration(string(data))
+ if err != nil {
+ return 0
+ }
+ return 1
+}
diff --git a/cmd/proc_windows.go b/filepath.go
similarity index 50%
rename from cmd/proc_windows.go
rename to filepath.go
index 4a62c272..aad90779 100644
--- a/cmd/proc_windows.go
+++ b/filepath.go
@@ -12,33 +12,28 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package caddycmd
+//go:build !windows
+
+package caddy
import (
- "fmt"
"os"
- "os/exec"
"path/filepath"
- "strconv"
)
-func gracefullyStopProcess(pid int) error {
- fmt.Print("Forceful stop... ")
- // process on windows will not stop unless forced with /f
- cmd := exec.Command("taskkill", "/pid", strconv.Itoa(pid), "/f")
- if err := cmd.Run(); err != nil {
- return fmt.Errorf("taskkill: %v", err)
+// FastAbs is an optimized version of filepath.Abs for Unix systems,
+// since we don't expect the working directory to ever change once
+// Caddy is running. Avoid the os.Getwd() syscall overhead.
+// It's overall the same as stdlib's implementation, the difference
+// being cached working directory.
+func FastAbs(path string) (string, error) {
+ if filepath.IsAbs(path) {
+ return filepath.Clean(path), nil
}
- return nil
+ if wderr != nil {
+ return "", wderr
+ }
+ return filepath.Join(wd, path), nil
}
-// On Windows the app name passed in os.Args[0] will match how
-// caddy was started eg will match caddy or caddy.exe.
-// So return appname with .exe for consistency
-func getProcessName() string {
- base := filepath.Base(os.Args[0])
- if filepath.Ext(base) == "" {
- return base + ".exe"
- }
- return base
-}
+var wd, wderr = os.Getwd()
diff --git a/filepath_windows.go b/filepath_windows.go
new file mode 100644
index 00000000..aa70955e
--- /dev/null
+++ b/filepath_windows.go
@@ -0,0 +1,27 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddy
+
+import (
+ "path/filepath"
+)
+
+// FastAbs can't be optimized on Windows because there
+// are special file paths that require the use of syscall.FullPath
+// to handle correctly.
+// Just call stdlib's implementation which uses that function.
+func FastAbs(path string) (string, error) {
+ return filepath.Abs(path)
+}
diff --git a/filesystem.go b/filesystem.go
new file mode 100644
index 00000000..d6679e90
--- /dev/null
+++ b/filesystem.go
@@ -0,0 +1,24 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddy
+
+import "io/fs"
+
+type FileSystems interface {
+ Register(k string, v fs.FS)
+ Unregister(k string)
+ Get(k string) (v fs.FS, ok bool)
+ Default() fs.FS
+}
diff --git a/go.mod b/go.mod
index f7d7af5f..87b0e434 100644
--- a/go.mod
+++ b/go.mod
@@ -1,38 +1,157 @@
module github.com/caddyserver/caddy/v2
-go 1.14
+go 1.22.3
+
+toolchain go1.23.0
require (
- github.com/Masterminds/sprig/v3 v3.0.2
- github.com/alecthomas/chroma v0.7.2
- github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a
- github.com/caddyserver/certmagic v0.10.12
- github.com/cenkalti/backoff/v4 v4.0.2 // indirect
- github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac
- github.com/go-acme/lego/v3 v3.5.0
- github.com/gogo/protobuf v1.3.1
- github.com/google/cel-go v0.4.1
- github.com/imdario/mergo v0.3.9 // indirect
- github.com/jsternberg/zap-logfmt v1.2.0
- github.com/klauspost/compress v1.10.4
- github.com/klauspost/cpuid v1.2.3
- github.com/lucas-clemente/quic-go v0.15.3
- github.com/manifoldco/promptui v0.7.0 // indirect
- github.com/miekg/dns v1.1.29 // indirect
- github.com/naoina/go-stringutil v0.1.0 // indirect
- github.com/naoina/toml v0.1.1
- github.com/smallstep/certificates v0.14.2
- github.com/smallstep/cli v0.14.2
- github.com/smallstep/truststore v0.9.5
- github.com/vulcand/oxy v1.1.0
- github.com/yuin/goldmark v1.1.28
- github.com/yuin/goldmark-highlighting v0.0.0-20200307114337-60d527fdb691
- go.uber.org/zap v1.14.1
- golang.org/x/crypto v0.0.0-20200414155820-4f8f47aa7992
- golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
- golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect
- google.golang.org/genproto v0.0.0-20200413115906-b5235f65be36
- gopkg.in/natefinch/lumberjack.v2 v2.0.0
- gopkg.in/square/go-jose.v2 v2.5.0 // indirect
- gopkg.in/yaml.v2 v2.2.8
+ github.com/BurntSushi/toml v1.4.0
+ github.com/Masterminds/sprig/v3 v3.3.0
+ github.com/alecthomas/chroma/v2 v2.14.0
+ github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b
+ github.com/caddyserver/certmagic v0.21.5-0.20241105180249-4293198e094d
+ github.com/caddyserver/zerossl v0.1.3
+ github.com/dustin/go-humanize v1.0.1
+ github.com/go-chi/chi/v5 v5.0.12
+ github.com/google/cel-go v0.21.0
+ github.com/google/uuid v1.6.0
+ github.com/klauspost/compress v1.17.11
+ github.com/klauspost/cpuid/v2 v2.2.8
+ github.com/mholt/acmez/v2 v2.0.3
+ github.com/prometheus/client_golang v1.19.1
+ github.com/quic-go/quic-go v0.48.2
+ github.com/smallstep/certificates v0.26.1
+ github.com/smallstep/nosql v0.6.1
+ github.com/smallstep/truststore v0.13.0
+ github.com/spf13/cobra v1.8.1
+ github.com/spf13/pflag v1.0.5
+ github.com/stretchr/testify v1.9.0
+ github.com/tailscale/tscert v0.0.0-20240608151842-d3f834017e53
+ github.com/yuin/goldmark v1.7.8
+ github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0
+ go.opentelemetry.io/contrib/propagators/autoprop v0.42.0
+ go.opentelemetry.io/otel v1.31.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0
+ go.opentelemetry.io/otel/sdk v1.31.0
+ go.uber.org/automaxprocs v1.6.0
+ go.uber.org/zap v1.27.0
+ go.uber.org/zap/exp v0.3.0
+ golang.org/x/crypto v0.30.0
+ golang.org/x/crypto/x509roots/fallback v0.0.0-20241104001025-71ed71b4faf9
+ golang.org/x/net v0.32.0
+ golang.org/x/sync v0.10.0
+ golang.org/x/term v0.27.0
+ golang.org/x/time v0.7.0
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1
+ gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
+ dario.cat/mergo v1.0.1 // indirect
+ github.com/Microsoft/go-winio v0.6.0 // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/francoispqt/gojay v1.2.13 // indirect
+ github.com/fxamacker/cbor/v2 v2.6.0 // indirect
+ github.com/go-jose/go-jose/v3 v3.0.3 // indirect
+ github.com/go-kit/log v0.2.1 // indirect
+ github.com/golang/glog v1.2.2 // indirect
+ github.com/google/certificate-transparency-go v1.1.8-0.20240110162603-74a5dd331745 // indirect
+ github.com/google/go-tpm v0.9.0 // indirect
+ github.com/google/go-tspi v0.3.0 // indirect
+ github.com/google/pprof v0.0.0-20231212022811-ec68065c825e // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
+ github.com/onsi/ginkgo/v2 v2.13.2 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/quic-go/qpack v0.5.1 // indirect
+ github.com/smallstep/go-attestation v0.4.4-0.20240109183208-413678f90935 // indirect
+ github.com/smallstep/pkcs7 v0.0.0-20231024181729-3b98ecc1ca81 // indirect
+ github.com/smallstep/scep v0.0.0-20231024192529-aee96d7ad34d // indirect
+ github.com/x448/float16 v0.8.4 // indirect
+ github.com/zeebo/blake3 v0.2.4 // indirect
+ go.opentelemetry.io/contrib/propagators/aws v1.17.0 // indirect
+ go.opentelemetry.io/contrib/propagators/b3 v1.17.0 // indirect
+ go.opentelemetry.io/contrib/propagators/jaeger v1.17.0 // indirect
+ go.opentelemetry.io/contrib/propagators/ot v1.17.0 // indirect
+ go.uber.org/mock v0.4.0 // indirect
+ golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect
+)
+
+require (
+ filippo.io/edwards25519 v1.1.0 // indirect
+ github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
+ github.com/Masterminds/goutils v1.1.1 // indirect
+ github.com/Masterminds/semver/v3 v3.3.0 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cespare/xxhash v1.1.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0
+ github.com/chzyer/readline v1.5.1 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
+ github.com/dgraph-io/badger v1.6.2 // indirect
+ github.com/dgraph-io/badger/v2 v2.2007.4 // indirect
+ github.com/dgraph-io/ristretto v0.1.0 // indirect
+ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
+ github.com/dlclark/regexp2 v1.11.0 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/go-kit/kit v0.13.0 // indirect
+ github.com/go-logfmt/logfmt v0.6.0 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-sql-driver/mysql v1.7.1 // indirect
+ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/golang/snappy v0.0.4 // indirect
+ github.com/huandu/xstrings v1.5.0 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jackc/chunkreader/v2 v2.0.1 // indirect
+ github.com/jackc/pgconn v1.14.3 // indirect
+ github.com/jackc/pgio v1.0.0 // indirect
+ github.com/jackc/pgpassfile v1.0.0 // indirect
+ github.com/jackc/pgproto3/v2 v2.3.3 // indirect
+ github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
+ github.com/jackc/pgtype v1.14.0 // indirect
+ github.com/jackc/pgx/v4 v4.18.3 // indirect
+ github.com/libdns/libdns v0.2.2
+ github.com/manifoldco/promptui v0.9.0 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
+ github.com/miekg/dns v1.1.62 // indirect
+ github.com/mitchellh/copystructure v1.2.0 // indirect
+ github.com/mitchellh/go-ps v1.0.0 // indirect
+ github.com/mitchellh/reflectwalk v1.0.2 // indirect
+ github.com/pires/go-proxyproto v0.7.1-0.20240628150027-b718e7ce4964
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/prometheus/client_model v0.5.0 // indirect
+ github.com/prometheus/common v0.48.0 // indirect
+ github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/rs/xid v1.5.0 // indirect
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
+ github.com/shopspring/decimal v1.4.0 // indirect
+ github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/slackhq/nebula v1.6.1 // indirect
+ github.com/spf13/cast v1.7.0 // indirect
+ github.com/stoewer/go-strcase v1.2.0 // indirect
+ github.com/urfave/cli v1.22.14 // indirect
+ go.etcd.io/bbolt v1.3.9 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect
+ go.opentelemetry.io/otel/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/trace v1.31.0
+ go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+ go.step.sm/cli-utils v0.9.0 // indirect
+ go.step.sm/crypto v0.45.0
+ go.step.sm/linkedca v0.20.1 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/mod v0.18.0 // indirect
+ golang.org/x/sys v0.28.0
+ golang.org/x/text v0.21.0 // indirect
+ golang.org/x/tools v0.22.0 // indirect
+ google.golang.org/grpc v1.67.1 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
+ howett.net/plist v1.0.0 // indirect
)
diff --git a/go.sum b/go.sum
index bd5e30c6..98306f79 100644
--- a/go.sum
+++ b/go.sum
@@ -2,608 +2,417 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM=
-cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
-contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw=
-contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
+cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
+cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg=
+cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro=
+cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
+cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
+cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0=
+cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE=
+cloud.google.com/go/kms v1.16.0 h1:1yZsRPhmargZOmY+fVAh8IKiR9HzCb0U1zsxb5g2nRY=
+cloud.google.com/go/kms v1.16.0/go.mod h1:olQUXy2Xud+1GzYfiBO9N0RhjsJk5IJLU6n/ethLXVc=
+cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU=
+cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=
-github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/Azure/azure-sdk-for-go v32.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg=
-github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw=
-github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E=
-github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E=
-github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM=
-github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
-github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
+github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
-github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0=
-github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic=
-github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=
-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
-github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Masterminds/semver/v3 v3.0.1 h1:2kKm5lb7dKVrt5TYUiAavE6oFc1cFT0057UVGT+JqLk=
-github.com/Masterminds/semver/v3 v3.0.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/semver/v3 v3.0.3 h1:znjIyLfpXEDQjOIEWh+ehwpTU14UzUPub3c3sm36u14=
-github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/sprig/v3 v3.0.0 h1:KSQz7Nb08/3VU9E4ns29dDxcczhOD1q7O1UfM4G3t3g=
-github.com/Masterminds/sprig/v3 v3.0.0/go.mod h1:NEUY/Qq8Gdm2xgYA+NwJM6wmfdRV9xkh8h/Rld20R0U=
-github.com/Masterminds/sprig/v3 v3.0.2 h1:wz22D0CiSctrliXiI9ZO3HoNApweeRGftyDN+BQa3B8=
-github.com/Masterminds/sprig/v3 v3.0.2/go.mod h1:oesJ8kPONMONaZgtiHNzUShJbksypC5kWczhZAf6+aU=
-github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
+github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
+github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
+github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
+github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
+github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87/go.mod h1:iGLljf5n9GjT6kc0HBvyI1nOKnGQbNB66VzSNbK5iks=
-github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o=
-github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/ThomasRooney/gexpect v0.0.0-20161231170123-5482f0350944/go.mod h1:sPML5WwI6oxLRLPuuqbtoOKhtmpVDCYtwsps+I+vjIY=
-github.com/akamai/AkamaiOPEN-edgegrid-golang v0.9.0/go.mod h1:zpDJeKyp9ScW4NNrbdr+Eyxvry3ilGPewKoXw3XGN1k=
-github.com/akamai/AkamaiOPEN-edgegrid-golang v0.9.8/go.mod h1:aVvklgKsPENRkl29bNwrHISa1F+YLGTHArMxZMBqWM8=
-github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
-github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75 h1:3ILjVyslFbc4jl1w5TWuvvslFD/nDfR2H8tVaMVLrEY=
-github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75/go.mod h1:uAXEEpARkRhCZfEvy/y0Jcc888f9tHCc1W7/UeEtreE=
-github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38 h1:smF2tmSOzy2Mm+0dGI2AIUHY+w0BUc+4tn40djz7+6U=
-github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38/go.mod h1:r7bzyVFMNntcxPZXK3/+KdruV1H5KSlyVY0gc+NgInI=
-github.com/alecthomas/chroma v0.7.2-0.20200305040604-4f3623dce67a h1:3v1NrYWWqp2S72e4HLgxKt83B3l0lnORDholH/ihoMM=
-github.com/alecthomas/chroma v0.7.2-0.20200305040604-4f3623dce67a/go.mod h1:fv5SzZPFJbwp2NXJWpFIX7DZS4HgV1K4ew4Pc2OZD9s=
-github.com/alecthomas/chroma v0.7.2 h1:B76NU/zbQYIUhUowbi4fmvREmDUJLsUzKWTZmQd3ABY=
-github.com/alecthomas/chroma v0.7.2/go.mod h1:fv5SzZPFJbwp2NXJWpFIX7DZS4HgV1K4ew4Pc2OZD9s=
-github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721 h1:JHZL0hZKJ1VENNfmXvHbgYlbUOvpzYzvy2aZU5gXVeo=
-github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721/go.mod h1:QO9JBoKquHd+jz9nshCh40fOfO+JzsoXy8qTHF68zU0=
-github.com/alecthomas/kong v0.1.17-0.20190424132513-439c674f7ae0/go.mod h1:+inYUSluD+p4L8KdviBSgzcqEjUQOfC5fQDRFuc36lI=
-github.com/alecthomas/kong v0.2.1-0.20190708041108-0548c6b1afae/go.mod h1:+inYUSluD+p4L8KdviBSgzcqEjUQOfC5fQDRFuc36lI=
-github.com/alecthomas/kong-hcl v0.1.8-0.20190615233001-b21fea9723c8/go.mod h1:MRgZdU3vrFd05IQ89AxUZ0aYdF39BYoNFa324SodPCA=
-github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897 h1:p9Sln00KOTlrYkxI1zYWl1QLnEqAqEARBEYa8FQnQcY=
-github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190808125512-07798873deee h1:NYqDBPkhVYt68W3yoGoRRi32i3MLx2ey7SFkJ1v/UI0=
-github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190808125512-07798873deee/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
-github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
+github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE=
+github.com/alecthomas/assert/v2 v2.7.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
+github.com/alecthomas/chroma/v2 v2.2.0/go.mod h1:vf4zrexSH54oEjJ7EdB65tGNHmH3pGZmVkgTP5RHvAs=
+github.com/alecthomas/chroma/v2 v2.14.0 h1:R3+wzpnUArGcQz7fCETQBzO5n9IMNi13iIs46aU4V9E=
+github.com/alecthomas/chroma/v2 v2.14.0/go.mod h1:QolEbTfmUHIMVpBqxeDnNBj2uoeI4EbYP4i6n68SG4I=
+github.com/alecthomas/repr v0.0.0-20220113201626-b1b626ac65ae/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8=
+github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
+github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
-github.com/antlr/antlr4 v0.0.0-20190819145818-b43a4c3a8015 h1:StuiJFxQUsxSCzcby6NFZRdEhPkXD5vxN7TZ4MD6T84=
-github.com/antlr/antlr4 v0.0.0-20190819145818-b43a4c3a8015/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a h1:pv34s756C4pEXnjgPfGYgdhg/ZdajGhyOvzx8k+23nw=
-github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
-github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
+github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b h1:uUXgbcPDK3KpW29o4iy7GtuappbWT0l5NaMo9H9pJDw=
+github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
+github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
+github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
+github.com/aws/aws-sdk-go-v2/config v1.27.13 h1:WbKW8hOzrWoOA/+35S5okqO/2Ap8hkkFUzoW8Hzq24A=
+github.com/aws/aws-sdk-go-v2/config v1.27.13/go.mod h1:XLiyiTMnguytjRER7u5RIkhIqS8Nyz41SwAWb4xEjxs=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.13 h1:XDCJDzk/u5cN7Aple7D/MiAhx1Rjo/0nueJ0La8mRuE=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.13/go.mod h1:FMNcjQrmuBYvOTZDtOLCIu0esmxjF7RuA/89iSXWzQI=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk=
+github.com/aws/aws-sdk-go-v2/service/kms v1.31.1 h1:5wtyAwuUiJiM3DHYeGZmP5iMonM7DFBWAEaaVPHYZA0=
+github.com/aws/aws-sdk-go-v2/service/kms v1.31.1/go.mod h1:2snWQJQUKsbN66vAawJuOGX7dr37pfOq9hb0tZDGIqQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.6 h1:o5cTaeunSpfXiLTIBx5xo2enQmiChtu1IBbzXnfU9Hs=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.6/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0 h1:Qe0r0lVURDDeBQJ4yP+BOrJkvkiCo/3FH/t+wY11dmw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.7 h1:et3Ta53gotFR4ERLXXHIHl/Uuk1qYpP5uU7cvNql8ns=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.7/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
+github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
+github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bombsimon/wsl/v2 v2.0.0/go.mod h1:mf25kr/SqFEPhhcxW1+7pxzGlW+hIl/hYTKY95VwV8U=
-github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/caddyserver/certmagic v0.10.12 h1:aZtgzcIssiMSlP0jDdpDBbBzQ5INf5eKL9T6Nf3YzKM=
-github.com/caddyserver/certmagic v0.10.12/go.mod h1:Y8jcUBctgk/IhpAzlHKfimZNyXCkfGgRTC0orl8gROQ=
-github.com/cenkalti/backoff/v4 v4.0.0 h1:6VeaLF9aI+MAUQ95106HwWzYZgJJpZ4stumjj6RFYAU=
-github.com/cenkalti/backoff/v4 v4.0.0/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
-github.com/cenkalti/backoff/v4 v4.0.2 h1:JIufpQLbh4DkbQoii76ItQIUFzevQSqOLZca4eamEDs=
-github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
-github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/caddyserver/certmagic v0.21.5-0.20241105180249-4293198e094d h1:+zOduGxxC4WBAnlDf5Uf0TXbWXRqjUXkJKevDZZa79A=
+github.com/caddyserver/certmagic v0.21.5-0.20241105180249-4293198e094d/go.mod h1:swUXjQ1T9ZtMv95qj7/InJvWLXURU85r+CfG0T+ZbDE=
+github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA=
+github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
-github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
-github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
+github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
+github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
+github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/cloudflare-go v0.10.2 h1:VBodKICVPnwmDxstcW3biKcDSpFIfS/RELUXsZSBYK4=
-github.com/cloudflare/cloudflare-go v0.10.2/go.mod h1:qhVI5MKwBGhdNU89ZRz2plgYutcJ5PCekLxXn56w6SY=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.18+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU=
-github.com/cpu/goacmedns v0.0.1/go.mod h1:sesf/pNnCYwUevQEQfEwY0Y3DydlQWSGZbaMElOWxok=
-github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E=
-github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 h1:y5HC9v93H5EPKqaS1UYVg1uYah5Xf51mBfIoWehClUQ=
-github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964/go.mod h1:Xd9hchkHSWYkEqJwUGisez3G1QY8Ryz0sdWrLPMGjLk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgraph-io/badger v1.5.3 h1:5oWIuRvwn93cie+OSt1zSnkaIQ1JFQM8bGlIv6O6Sts=
-github.com/dgraph-io/badger v1.5.3/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8=
+github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE=
+github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o=
+github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk=
+github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
-github.com/dlclark/regexp2 v1.1.6 h1:CqB4MjHw0MFCDj+PHHjiESmHX+N7t0tJzKvC6M97BRg=
-github.com/dlclark/regexp2 v1.1.6/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk=
-github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/dnsimple/dnsimple-go v0.30.0/go.mod h1:O5TJ0/U6r7AfT8niYNlmohpLbCSG+c71tQlGr9SeGrg=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
+github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
+github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac h1:opbrjaN/L8gg6Xh5D04Tem+8xVcz6ajZlGCs49mQgyg=
-github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/exoscale/egoscale v0.18.1/go.mod h1:Z7OOdzzTOz1Q1PjQXumlz9Wn/CddH0zSYdCF3rnBKXE=
-github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8=
-github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA=
+github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-acme/lego/v3 v3.4.0 h1:deB9NkelA+TfjGHVw8J7iKl/rMtffcGMWSMmptvMv0A=
-github.com/go-acme/lego/v3 v3.4.0/go.mod h1:xYbLDuxq3Hy4bMUT1t9JIuz6GWIWb3m5X+TeTHYaT7M=
-github.com/go-acme/lego/v3 v3.5.0 h1:/0+NJQK+hNwRznhCi+19lbEa4xufhe7wJZOVd5j486s=
-github.com/go-acme/lego/v3 v3.5.0/go.mod h1:TXodhTGOiWEqXDdgrzBoCtJ5R4L9lfOE68CTM0KGkT0=
-github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
-github.com/go-cmd/cmd v1.0.5/go.mod h1:y8q8qlK5wQibcw63djSl/ntiHUHXHGdCkPk0j4QeW4s=
-github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA=
-github.com/go-critic/go-critic v0.4.0/go.mod h1:7/14rZGnZbY6E38VEGk2kVhoq6itzc1E68facVDK23g=
+github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s=
+github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.44.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
-github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
-github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
+github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
+github.com/go-kit/kit v0.4.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
+github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
+github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-stack/stack v1.6.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
-github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg=
-github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
-github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
-github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk=
-github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
-github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
-github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
-github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
-github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
-github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
-github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
-github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
-github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM=
-github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o=
-github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
-github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
-github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/golangci-lint v1.17.2-0.20190910081718-bad04bb7378f/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg=
-github.com/golangci/golangci-lint v1.22.2/go.mod h1:2Bj42k6hPQFTRxkDb7S3TQ+EsnumZXOmIYNqlQrp0FI=
-github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU=
-github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
-github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
-github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
-github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
-github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/cel-go v0.4.1 h1:2kqc5arTucvtLJzXVUbmiUh7n2xjizwZijPrpEsagAE=
-github.com/google/cel-go v0.4.1/go.mod h1:F0UncVAXNlNjl/4C8hqGdoV6APmuFpetoMJSLIQLBPU=
-github.com/google/cel-spec v0.3.0/go.mod h1:MjQm800JAGhOZXI7vatnVpmIaFTR6L8FHcKk+piiKpI=
-github.com/google/certificate-transparency-go v1.1.0/go.mod h1:i+Q7XY+ArBveOUT36jiHGfuSK1fHICIg6sUkRxPAbCs=
+github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
+github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.21.0 h1:cl6uW/gxN+Hy50tNYvI691+sXxioCnstFzLp2WO4GCI=
+github.com/google/cel-go v0.21.0/go.mod h1:rHUlWCcBKgyEk+eV03RPdZUekPp6YcJwV0FxuUksYxc=
+github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/certificate-transparency-go v1.1.8-0.20240110162603-74a5dd331745 h1:heyoXNxkRT155x4jTAiSv5BVSVkueifPUm+Q8LUXMRo=
+github.com/google/certificate-transparency-go v1.1.8-0.20240110162603-74a5dd331745/go.mod h1:zN0wUQgV9LjwLZeFHnrAbQi8hzMVvEWePyk+MhPOk7k=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk=
+github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU=
+github.com/google/go-tpm-tools v0.4.4 h1:oiQfAIkc6xTy9Fl5NKTeTJkBTlXdHsxAofmQyxBKY98=
+github.com/google/go-tpm-tools v0.4.4/go.mod h1:T8jXkp2s+eltnCDIsXR84/MTcVU9Ja7bh3Mit0pa4AY=
+github.com/google/go-tspi v0.3.0 h1:ADtq8RKfP+jrTyIWIZDIYcKOMecRqNJFOew2IT0Inus=
+github.com/google/go-tspi v0.3.0/go.mod h1:xfMGI3G0PhxCdNVcYr1C4C+EizojDg/TXuX5by8CiHI=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/monologue v0.0.0-20190606152607-4b11a32b5934/go.mod h1:6NTfaQoUpg5QmPsCUWLR3ig33FHrKXhTtWzF0DVdmuk=
-github.com/google/monologue v0.0.0-20191220140058-35abc9683a6c/go.mod h1:6NTfaQoUpg5QmPsCUWLR3ig33FHrKXhTtWzF0DVdmuk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20231212022811-ec68065c825e h1:bwOy7hAFd0C91URzMIEBfr6BAz29yk7Qj0cy6S7DJlU=
+github.com/google/pprof v0.0.0-20231212022811-ec68065c825e/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/trillian v1.2.2-0.20190612132142-05461f4df60a/go.mod h1:YPmUVn5NGwgnDUgqlVyFGMTgaWlnSvH7W5p+NdOG8UA=
-github.com/google/trillian-examples v0.0.0-20190603134952-4e75ba15216c/go.mod h1:WgL3XZ3pA8/9cm7yxqWrZE6iZkESB2ItGxy5Fo6k2lk=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
+github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/csrf v1.6.0/go.mod h1:7tSf8kmjNYr7IWDCYhd3U8Ck34iQ/Yw5CJu7bAkHEGI=
-github.com/gorilla/handlers v1.4.1/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/gravitational/trace v0.0.0-20190726142706-a535a178675f/go.mod h1:RvdOUHE4SHqR3oXlFFKnGzms8a5dugHygGw1bqDstYI=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
-github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0=
-github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo=
-github.com/iij/doapi v0.0.0-20190504054126-0bbf12d6d7df/go.mod h1:QMZY7/J/KSQEhKWFeDesPjMj+wCHReeknARU3wqlyN4=
-github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
-github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
+github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
+github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
+github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
+github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
+github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
+github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
+github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw=
+github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
+github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
+github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jsternberg/zap-logfmt v1.2.0 h1:1v+PK4/B48cy8cfQbxL4FmmNZrjnIMr2BsnyEmXqv2o=
-github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU=
-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=
-github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
-github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.10.4 h1:jFzIFaf586tquEB5EhzQG0HwGNSlgAJpG53G6Ss11wc=
-github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs=
-github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/kolo/xmlrpc v0.0.0-20190717152603-07c4ee3fd181/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
+github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
-github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/labbsr0x/bindman-dns-webhook v1.0.2/go.mod h1:p6b+VCXIR8NYKpDr8/dg1HKfQoRHCdcsROXKvmoehKA=
-github.com/labbsr0x/goh v1.0.1/go.mod h1:8K2UhVoaWXcCU7Lxoa2omWnC8gyW8px7/lmO61c027w=
-github.com/letsencrypt/pkcs11key v2.0.1-0.20170608213348-396559074696+incompatible/go.mod h1:iGYXKqDXt0cpBthCHdr9ZdsQwyGlYFh/+8xa4WzIQ34=
-github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/linode/linodego v0.10.0/go.mod h1:cziNP7pbvE3mXIPneHj0oRY8L1WtGEIKlZ8LANE4eXA=
-github.com/liquidweb/liquidweb-go v1.6.0/go.mod h1:UDcVnAMDkZxpw4Y7NOHkqoeiGacVLEIG/i5J9cyixzQ=
-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
-github.com/lucas-clemente/quic-go v0.15.3 h1:i6n4Jr7673z9TlurAjc87+GlE/BN10++r9XZIPS9j6I=
-github.com/lucas-clemente/quic-go v0.15.3/go.mod h1:oj40DjNLuNugvtXWg4PwaYgv7tAbzAabrT57CC69EhI=
-github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a h1:weJVJJRzAJBFRlAiJQROKQs8oC9vOxvm4rZmBBk0ONw=
-github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
-github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8=
+github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s=
+github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
-github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailgun/minheap v0.0.0-20170619185613-3dbe6c6bf55f/go.mod h1:V3EvCedtJTvUYzJF2GZMRB0JMlai+6cBu3VCTQz33GQ=
-github.com/mailgun/multibuf v0.0.0-20150714184110-565402cd71fb/go.mod h1:E0vRBBIQUHcRtmL/oR6w/jehh4FJqJFxe86gBnw9gXc=
-github.com/mailgun/timetools v0.0.0-20141028012446-7e6055773c51 h1:Kg/NPZLLC3aAFr1YToMs98dbCdhootQ1hZIvZU28hAQ=
-github.com/mailgun/timetools v0.0.0-20141028012446-7e6055773c51/go.mod h1:RYmqHbhWwIz3z9eVmQ2rx82rulEMG0t+Q1bzfc9DYN4=
-github.com/mailgun/ttlmap v0.0.0-20170619185759-c1c17f74874f/go.mod h1:8heskWJ5c0v5J9WH89ADhyal1DOZcayll8fSbhB+/9A=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/manifoldco/promptui v0.3.1/go.mod h1:zoCNXiJnyM03LlBgTsWv8mq28s7aTC71UgKasqRJHww=
-github.com/manifoldco/promptui v0.7.0 h1:3l11YT8tm9MnwGFQ4kETwkzpAwY2Jt9lCrumCUW4+z4=
-github.com/manifoldco/promptui v0.7.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ=
-github.com/marten-seemann/qpack v0.1.0 h1:/0M7lkda/6mus9B8u34Asqm8ZhHAAt9Ho0vniNuVSVg=
-github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI=
-github.com/marten-seemann/qtls v0.9.0 h1:8Zguhc72eS+DH5EAb0BpAPIy3HDXYcihQi4xoDZOnjQ=
-github.com/marten-seemann/qtls v0.9.0/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk=
-github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
-github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
+github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
+github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
+github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/mholt/acmez/v2 v2.0.3 h1:CgDBlEwg3QBp6s45tPQmFIBrkRIkBT4rW4orMM6p4sw=
+github.com/mholt/acmez/v2 v2.0.3/go.mod h1:pQ1ysaDeGrIMvJ9dfJMk5kJNkn7L2sb3UhyrX6Q91cw=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
-github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI=
-github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/miekg/dns v1.1.29 h1:xHBEhR+t5RzcFJjBLJlax2daXOrTYtr9z4WdKEfWFzg=
-github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
+github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
-github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
-github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0=
-github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
+github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
-github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
-github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
-github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/namedotcom/go v0.0.0-20180403034216-08470befbe04/go.mod h1:5sN+Lt1CaY4wsPvgQH/jsuJi4XO2ssZbdsIizr4CVC8=
-github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
-github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
-github.com/naoina/toml v0.1.1 h1:PT/lllxVVN0gzzSqSlHEmP8MJB4MY2U7STGxiouV4X8=
-github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
-github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
-github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
-github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
-github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/newrelic/go-agent v2.15.0+incompatible/go.mod h1:a8Fv1b/fYhFSReoTU6HDkTYIMZeSVNffmoS726Y0LzQ=
-github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM=
-github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
-github.com/nrdcg/auroradns v1.0.0/go.mod h1:6JPXKzIRzZzMqtTDgueIhTi6rFf1QvYE/HzqidhOhjw=
-github.com/nrdcg/auroradns v1.0.1/go.mod h1:y4pc0i9QXYlFCWrhWrUSIETnZgrf4KuwjDIWmmXo3JI=
-github.com/nrdcg/dnspod-go v0.4.0/go.mod h1:vZSoFSFeQVm2gWLMkyX61LZ8HI3BaqtHZWgPTGKr6KQ=
-github.com/nrdcg/goinwx v0.6.1/go.mod h1:XPiut7enlbEdntAqalBIqcYcTEVhpv/dKWgDCX2SwKQ=
-github.com/nrdcg/namesilo v0.2.1/go.mod h1:lwMvfQTyYq+BbjJd30ylEG4GPSS6PII0Tia4rRpRiyw=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34=
-github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
-github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs=
+github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
+github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
+github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
-github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
-github.com/ovh/go-ovh v0.0.0-20181109152953-ba5adb4cf014/go.mod h1:joRatxRJaZBsY3JAOEMcoOp05CnZzsx4scTxi95DHyQ=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/peterbourgon/diskv/v3 v3.0.1 h1:x06SQA46+PKIUftmEujdwSEpIx8kR+M9eLYsUxeYveU=
+github.com/peterbourgon/diskv/v3 v3.0.1/go.mod h1:kJ5Ny7vLdARGU3WUuy6uzO6T0nb/2gWcT1JiBvRmb5o=
+github.com/pires/go-proxyproto v0.7.1-0.20240628150027-b718e7ce4964 h1:ct/vxNBgHpASQ4sT8NaBX9LtsEtluZqaUJydLG50U3E=
+github.com/pires/go-proxyproto v0.7.1-0.20240628150027-b718e7ce4964/go.mod h1:iknsfgnH8EkjrMeMyvfKByp9TiBZCKZM0jx2xmKqnVY=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pquerna/otp v1.0.0/go.mod h1:Zad1CMQfSQZI5KLpahDiSUX4tMMREnXw98IvL1nhgMk=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
+github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
-github.com/rainycape/memcache v0.0.0-20150622160815-1031fa0ce2f2/go.mod h1:7tZKcyumwBO6qip7RNQ5r77yrssm9bfCowcLEBcU5IA=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
+github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
+github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE=
+github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
+github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
+github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
-github.com/sacloud/libsacloud v1.26.1/go.mod h1:79ZwATmHLIFZIMd7sxA3LwzVy/B77uj3LDoToVTxDoQ=
-github.com/samfoo/ansi v0.0.0-20160124022901-b6bd2ded7189 h1:CmSpbxmewNQbzqztaY0bke1qzHhyNyC29wYgh17Gxfo=
-github.com/samfoo/ansi v0.0.0-20160124022901-b6bd2ded7189/go.mod h1:UUwuHEJ9zkkPDxspIHOa59PUeSkGFljESGzbxntLmIg=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do=
-github.com/securego/gosec v0.0.0-20200106085552-9cb83e10afad/go.mod h1:7fJLcv5NlMd4t9waQEDLgpZeE3nv4D5DMz5JuZZGufg=
-github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
+github.com/schollz/jsonstore v1.1.0 h1:WZBDjgezFS34CHI+myb4s8GGpir3UMpy7vWoCeO0n6E=
+github.com/schollz/jsonstore v1.1.0/go.mod h1:15c6+9guw8vDRyozGjN3FoILt0wpruJk9Pi66vjaZfg=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc=
-github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
+github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
@@ -628,526 +437,343 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
-github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
-github.com/smallstep/assert v0.0.0-20180720014142-de77670473b5/go.mod h1:TC9A4+RjIOS+HyTH7wG17/gSqVv95uDw2J64dQZx7RE=
-github.com/smallstep/assert v0.0.0-20200103212524-b99dc1097b15 h1:kSImCuenAkXtCaBeQ1UhmzzJGRhSm8sVH7I3sHE2Qdg=
-github.com/smallstep/assert v0.0.0-20200103212524-b99dc1097b15/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc=
-github.com/smallstep/certificates v0.14.2 h1:EEGmyHNawWIw+QFMztFxegSa6NMnInhC/dRfpz38n6o=
-github.com/smallstep/certificates v0.14.2/go.mod h1:eleWnbKTXDdV9GxWNOtbdjBiitdK5SuO4FCXOvcdLEY=
-github.com/smallstep/certinfo v1.2.0/go.mod h1:1gQJekdPwPvUwFWGTi7bZELmQT09cxC9wJ0VBkBNiwU=
-github.com/smallstep/cli v0.14.2 h1:0Z1MtcgJfVS9RstNokWSNqE20xPwdiEhZgNuZxYRWRI=
-github.com/smallstep/cli v0.14.2/go.mod h1:JOTzEzQ4/l863KUqs9qlAqPagWPOqu6lc3C59S1nYzU=
-github.com/smallstep/nosql v0.2.0 h1:IscXK9m9hRyl5GoYgn+Iml//5Bpad3LyIj6R0dZosKM=
-github.com/smallstep/nosql v0.2.0/go.mod h1:qyxCqeyGwkuM6bfJSY3sg+aiXEiD0GbQOPzIF8/ZD8Q=
-github.com/smallstep/truststore v0.9.3/go.mod h1:PRSkpRIhAYBK/KLWkHNgRdYgzWMEy45bN7PSJCfKKGE=
-github.com/smallstep/truststore v0.9.5 h1:KQ6bFXUadu3PG57sFSIBsu2pb/35NqO+MyS2Pvi62bA=
-github.com/smallstep/truststore v0.9.5/go.mod h1:HwHKRcBi0RUxxw1LYDpTRhYC4jZUuxPpkHdVonlkoDM=
-github.com/smallstep/zcrypto v0.0.0-20200203191936-fbc32cf76bce/go.mod h1:+F24VU3UCxfVFvvqgm5jNUFQOm/L6ed13ImwWGFgg/g=
-github.com/smallstep/zlint v0.0.0-20180727184541-d84eaafe274f/go.mod h1:GeHHT7sJDI9ti3oEaFnvx1F4N8n3ZSw2YM1+sbEoxc4=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/slackhq/nebula v1.6.1 h1:/OCTR3abj0Sbf2nGoLUrdDXImrCv0ZVFpVPP5qa0DsM=
+github.com/slackhq/nebula v1.6.1/go.mod h1:UmkqnXe4O53QwToSl/gG7sM4BroQwAB7dd4hUaT6MlI=
+github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY=
+github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc=
+github.com/smallstep/certificates v0.26.1 h1:FIUliEBcExSfJJDhRFA/s8aZgMIFuorexnRSKQd884o=
+github.com/smallstep/certificates v0.26.1/go.mod h1:OQMrW39IrGKDViKSHrKcgSQArMZ8c7EcjhYKK7mYqis=
+github.com/smallstep/go-attestation v0.4.4-0.20240109183208-413678f90935 h1:kjYvkvS/Wdy0PVRDUAA0gGJIVSEZYhiAJtfwYgOYoGA=
+github.com/smallstep/go-attestation v0.4.4-0.20240109183208-413678f90935/go.mod h1:vNAduivU014fubg6ewygkAvQC0IQVXqdc8vaGl/0er4=
+github.com/smallstep/nosql v0.6.1 h1:X8IBZFTRIp1gmuf23ne/jlD/BWKJtDQbtatxEn7Et1Y=
+github.com/smallstep/nosql v0.6.1/go.mod h1:vrN+CftYYNnDM+DQqd863ATynvYFm/6FuY9D4TeAm2Y=
+github.com/smallstep/pkcs7 v0.0.0-20231024181729-3b98ecc1ca81 h1:B6cED3iLJTgxpdh4tuqByDjRRKan2EvtnOfHr2zHJVg=
+github.com/smallstep/pkcs7 v0.0.0-20231024181729-3b98ecc1ca81/go.mod h1:SoUAr/4M46rZ3WaLstHxGhLEgoYIDRqxQEXLOmOEB0Y=
+github.com/smallstep/scep v0.0.0-20231024192529-aee96d7ad34d h1:06LUHn4Ia2X6syjIaCMNaXXDNdU+1N/oOHynJbWgpXw=
+github.com/smallstep/scep v0.0.0-20231024192529-aee96d7ad34d/go.mod h1:4d0ub42ut1mMtvGyMensjuHYEUpRrASvkzLEJvoRQcU=
+github.com/smallstep/truststore v0.13.0 h1:90if9htAOblavbMeWlqNLnO9bsjjgVv2hQeQJCi/py4=
+github.com/smallstep/truststore v0.13.0/go.mod h1:3tmMp2aLKZ/OA/jnFUB0cYPcho402UG2knuJoPh4j7A=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
-github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
-github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
+github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
+github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tailscale/tscert v0.0.0-20240608151842-d3f834017e53 h1:uxMgm0C+EjytfAqyfBG55ZONKQ7mvd7x4YYCWsf8QHQ=
+github.com/tailscale/tscert v0.0.0-20240608151842-d3f834017e53/go.mod h1:kNGUQ3VESx3VZwRwA9MSCUegIl6+saPL8Noq82ozCaU=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
-github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
-github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
-github.com/timewasted/linode v0.0.0-20160829202747-37e84520dcf7/go.mod h1:imsgLplxEC/etjIhdr3dNzV3JeT27LbVu5pYWm0JCBY=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
-github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
-github.com/transip/gotransip v0.0.0-20190812104329-6d8d9179b66f/go.mod h1:i0f4R4o2HM0m3DZYQWsj6/MEowD57VzoH0v3d7igeFY=
-github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
-github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
-github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
-github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
-github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
-github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
+github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk=
+github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
-github.com/vulcand/oxy v1.1.0 h1:DbBijGo1+6cFqR9jarkMxasdj0lgWwrrFtue6ijek4Q=
-github.com/vulcand/oxy v1.1.0/go.mod h1:ADiMYHi8gkGl2987yQIzDRoXZilANF4WtKaQ92OppKY=
-github.com/vulcand/predicate v1.1.0/go.mod h1:mlccC5IRBoc2cIFmCB8ZM62I3VDb6p2GXESMHa3CnZg=
-github.com/vultr/govultr v0.1.4/go.mod h1:9H008Uxr/C4vFNGLqKx232C206GL0PBHzOP0809bGNA=
-github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yuin/goldmark v1.1.22/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.28 h1:3Ksz4BbKZVlaGbkXzHxoazZzASQKsfUuOZPr5CNxnC4=
-github.com/yuin/goldmark v1.1.28/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark-highlighting v0.0.0-20200307114337-60d527fdb691 h1:VWSxtAiQNh3zgHJpdpkpVYjTPqRE3P6UZCOPa1nRDio=
-github.com/yuin/goldmark-highlighting v0.0.0-20200307114337-60d527fdb691/go.mod h1:YLF3kDffRfUH/bTxOxHhV6lxwIB3Vfj91rEwNMS9MXo=
-github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
-github.com/zmap/rc2 v0.0.0-20190804163417-abaa70531248/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
-github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is=
-github.com/zmap/zcertificate v0.0.0-20190521191901-30e388164f71/go.mod h1:gIZi1KPgkZNUQzPZXsZrNnUnxy05nTc0+tmlqvIkhRw=
-github.com/zmap/zcrypto v0.0.0-20190329181646-dff83107394d/go.mod h1:ix3q2kpLy0ibAuFXlr7qOhPKwFRRSjuynGuTR8EUPCk=
-github.com/zmap/zlint v0.0.0-20190516161541-9047d02cf65a/go.mod h1:xwLbce0UzBXp44sIAL1cii+hoK8j4AxRKlymZA2AIcY=
-go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/etcd v3.3.13+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI=
-go.etcd.io/etcd v3.3.18+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic=
+github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
+github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ=
+github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I=
+github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
+github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
+github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
+github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
+github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
+go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
-go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
+go.opentelemetry.io/contrib/propagators/autoprop v0.42.0 h1:s2RzYOAqHVgG23q8fPWYChobUoZM6rJZ98EnylJr66w=
+go.opentelemetry.io/contrib/propagators/autoprop v0.42.0/go.mod h1:Mv/tWNtZn+NbALDb2XcItP0OM3lWWZjAfSroINxfW+Y=
+go.opentelemetry.io/contrib/propagators/aws v1.17.0 h1:IX8d7l2uRw61BlmZBOTQFaK+y22j6vytMVTs9wFrO+c=
+go.opentelemetry.io/contrib/propagators/aws v1.17.0/go.mod h1:pAlCYRWff4uGqRXOVn3WP8pDZ5E0K56bEoG7a1VSL4k=
+go.opentelemetry.io/contrib/propagators/b3 v1.17.0 h1:ImOVvHnku8jijXqkwCSyYKRDt2YrnGXD4BbhcpfbfJo=
+go.opentelemetry.io/contrib/propagators/b3 v1.17.0/go.mod h1:IkfUfMpKWmynvvE0264trz0sf32NRTZL4nuAN9AbWRc=
+go.opentelemetry.io/contrib/propagators/jaeger v1.17.0 h1:Zbpbmwav32Ea5jSotpmkWEl3a6Xvd4tw/3xxGO1i05Y=
+go.opentelemetry.io/contrib/propagators/jaeger v1.17.0/go.mod h1:tcTUAlmO8nuInPDSBVfG+CP6Mzjy5+gNV4mPxMbL0IA=
+go.opentelemetry.io/contrib/propagators/ot v1.17.0 h1:ufo2Vsz8l76eI47jFjuVyjyB3Ae2DmfiCV/o6Vc8ii0=
+go.opentelemetry.io/contrib/propagators/ot v1.17.0/go.mod h1:SbKPj5XGp8K/sGm05XblaIABgMgw2jDczP8gGeuaVLk=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.step.sm/cli-utils v0.9.0 h1:55jYcsQbnArNqepZyAwcato6Zy2MoZDRkWW+jF+aPfQ=
+go.step.sm/cli-utils v0.9.0/go.mod h1:Y/CRoWl1FVR9j+7PnAewufAwKmBOTzR6l9+7EYGAnp8=
+go.step.sm/crypto v0.45.0 h1:Z0WYAaaOYrJmKP9sJkPW+6wy3pgN3Ija8ek/D4serjc=
+go.step.sm/crypto v0.45.0/go.mod h1:6IYlT0L2jfj81nVyCPpvA5cORy0EVHPhieSgQyuwHIY=
+go.step.sm/linkedca v0.20.1 h1:bHDn1+UG1NgRrERkWbbCiAIvv4lD5NOFaswPDTyO5vU=
+go.step.sm/linkedca v0.20.1/go.mod h1:Vaq4+Umtjh7DLFI1KuIxeo598vfBzgSYZUjgVJ7Syxw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM=
-go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
+go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E=
-go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/ratelimit v0.0.0-20180316092928-c15da0234277/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
-go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
-go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
+go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
-golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876 h1:sKJQZMuxjOAR/Uo2LBfU90onWEf1dF4C+0hPJCc9Mpc=
-golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM=
-golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM=
-golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200414155820-4f8f47aa7992 h1:B4Wjn2mWOWzjcWfyRYlf00lQ1/9h5vRKmQnhIKhMFR0=
-golang.org/x/crypto v0.0.0-20200414155820-4f8f47aa7992/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
+golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto/x509roots/fallback v0.0.0-20241104001025-71ed71b4faf9 h1:4cEcP5+OjGppY79LCQ5Go2B1Boix2x0v6pvA01P3FoA=
+golang.org/x/crypto/x509roots/fallback v0.0.0-20241104001025-71ed71b4faf9/go.mod h1:kNa9WdvYnzFwC79zRpLRMJbdEFlhyM5RPFBBZp/wWH8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190301231341-16b79f2e4e95/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191027093000-83d349e8ac1a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
+golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
+golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180622082034-63fc586f45fe/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181128092732-4ed8d59d0b35/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190424175732-18eb32c0e2f0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY=
-golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 h1:xQwXv67TxFo9nC1GJFyab5eq/5B590r6RlnL/G8Sz7w=
-golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
+golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911151314-feee8acb394c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113232020-e2727e816f5a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200106190116-7be0a674c9fc h1:MR2F33ipDGog0C4eMhU6u9o3q6c3dvYis2aG6Jl12Wg=
-golang.org/x/tools v0.0.0-20200106190116-7be0a674c9fc/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb h1:iKlO7ROJc6SttHKlxzwGytRtBUqX4VARrNTgP2YLX5M=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
-google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.180.0 h1:M2D87Yo0rGBPWpo1orwfCLehUUL6E7/TYe5gvMQWDh4=
+google.golang.org/api v0.180.0/go.mod h1:51AiyoEg1MJPSZ9zvklA8VnRILPXxn1iVen9v25XHAE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190605220351-eb0b1bdb6ae6/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb h1:ADPHZzpzM4tk4V4S5cnCrr5SwzvlrPRmqqCuJDB8UTs=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171 h1:xes2Q2k+d/+YNXVw0FpZkIDJiaux4OVrRKXRAzH6A0U=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200413115906-b5235f65be36 h1:j7CmVRD4Kec0+f8VuBAc2Ak2MFfXm5Q2/RxuJLL+76E=
-google.golang.org/genproto v0.0.0-20200413115906-b5235f65be36/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw=
+google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw=
+google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg=
+google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
-google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
+google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/ns1/ns1-go.v2 v2.0.0-20190730140822-b51389932cbc h1:GAcf+t0o8gdJAdSFYdE9wChu4bIyguMVqz0RHiFL5VY=
-gopkg.in/ns1/ns1-go.v2 v2.0.0-20190730140822-b51389932cbc/go.mod h1:VV+3haRsgDiVLxyifmMBrBIuCWFBPYKbRssXB9z67Hw=
-gopkg.in/resty.v1 v1.9.1/go.mod h1:vo52Hzryw9PnPHcJfPsBiFW62XhNx5OczbV9y+IMpgc=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.4.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.0 h1:OZ4sdq+Y+SHfYB7vfthi1Ei8b0vkP8ZPQgUfUwdUSqo=
-gopkg.in/square/go-jose.v2 v2.5.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
-howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
-howett.net/plist v0.0.0-20200225050739-77e249a2e2ba h1:HiEs/6jQFMHpFqsdPBAk3ieVcsSS8IV+D93f43UuDPo=
-howett.net/plist v0.0.0-20200225050739-77e249a2e2ba/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
-launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM=
-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
-mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY=
-mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw=
-mvdan.cc/unparam v0.0.0-20191111180625-960b1ec0f2c2/go.mod h1:rCqoQrfAmpTX/h2APczwM7UymU/uvaOluiVPIYCSY/k=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
+howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
-sourcegraph.com/sqs/pbtypes v1.0.0/go.mod h1:3AciMUv4qUuRHRHhOG4TZOB+72GdPVz5k+c648qsFS4=
diff --git a/internal/filesystems/map.go b/internal/filesystems/map.go
new file mode 100644
index 00000000..e795ed1f
--- /dev/null
+++ b/internal/filesystems/map.go
@@ -0,0 +1,77 @@
+package filesystems
+
+import (
+ "io/fs"
+ "strings"
+ "sync"
+)
+
+const (
+ DefaultFilesystemKey = "default"
+)
+
+var DefaultFilesystem = &wrapperFs{key: DefaultFilesystemKey, FS: OsFS{}}
+
+// wrapperFs exists so we can easily add methods to wrapperFs down the line
+type wrapperFs struct {
+ key string
+ fs.FS
+}
+
+// FilesystemMap stores a map of filesystems
+// the empty key is treated as the default key
+// it includes a default filesystem, based off the os fs
+type FilesystemMap struct {
+ m sync.Map
+}
+
+// key normalizes k; note that the first invocation of key must not occur in a racy context.
+func (f *FilesystemMap) key(k string) string {
+ if k == "" {
+ k = DefaultFilesystemKey
+ }
+ return k
+}
+
+// Register will add the filesystem with key to later be retrieved
+// A call with a nil fs will call unregister, ensuring that a call to Default() will never be nil
+func (f *FilesystemMap) Register(k string, v fs.FS) {
+ k = f.key(k)
+ if v == nil {
+ f.Unregister(k)
+ return
+ }
+ f.m.Store(k, &wrapperFs{key: k, FS: v})
+}
+
+// Unregister will remove the filesystem with key from the filesystem map
+// if the key is the default key, it will set the default to the osFS instead of deleting it
+// modules should call this on cleanup to be safe
+func (f *FilesystemMap) Unregister(k string) {
+ k = f.key(k)
+ if k == DefaultFilesystemKey {
+ f.m.Store(k, DefaultFilesystem)
+ } else {
+ f.m.Delete(k)
+ }
+}
+
+// Get will get a filesystem with a given key
+func (f *FilesystemMap) Get(k string) (v fs.FS, ok bool) {
+ k = f.key(k)
+ c, ok := f.m.Load(strings.TrimSpace(k))
+ if !ok {
+ if k == DefaultFilesystemKey {
+ f.m.Store(k, DefaultFilesystem)
+ return DefaultFilesystem, true
+ }
+ return nil, ok
+ }
+ return c.(fs.FS), true
+}
+
+// Default will get the default filesystem in the filesystem map
+func (f *FilesystemMap) Default() fs.FS {
+ val, _ := f.Get(DefaultFilesystemKey)
+ return val
+}
diff --git a/internal/filesystems/os.go b/internal/filesystems/os.go
new file mode 100644
index 00000000..04b4d5b4
--- /dev/null
+++ b/internal/filesystems/os.go
@@ -0,0 +1,29 @@
+package filesystems
+
+import (
+ "io/fs"
+ "os"
+ "path/filepath"
+)
+
+// OsFS is a simple fs.FS implementation that uses the local
+// file system. (We do not use os.DirFS because we do our own
+// rooting or path prefixing without being constrained to a single
+// root folder. The standard os.DirFS implementation is problematic
+// since roots can be dynamic in our application.)
+//
+// OsFS also implements fs.StatFS, fs.GlobFS, fs.ReadDirFS, and fs.ReadFileFS.
+type OsFS struct{}
+
+func (OsFS) Open(name string) (fs.File, error) { return os.Open(name) }
+func (OsFS) Stat(name string) (fs.FileInfo, error) { return os.Stat(name) }
+func (OsFS) Glob(pattern string) ([]string, error) { return filepath.Glob(pattern) }
+func (OsFS) ReadDir(name string) ([]fs.DirEntry, error) { return os.ReadDir(name) }
+func (OsFS) ReadFile(name string) ([]byte, error) { return os.ReadFile(name) }
+
+var (
+ _ fs.StatFS = (*OsFS)(nil)
+ _ fs.GlobFS = (*OsFS)(nil)
+ _ fs.ReadDirFS = (*OsFS)(nil)
+ _ fs.ReadFileFS = (*OsFS)(nil)
+)
diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go
new file mode 100644
index 00000000..7ae09b60
--- /dev/null
+++ b/internal/metrics/metrics.go
@@ -0,0 +1,39 @@
+package metrics
+
+import (
+ "net/http"
+ "strconv"
+)
+
+func SanitizeCode(s int) string {
+ switch s {
+ case 0, 200:
+ return "200"
+ default:
+ return strconv.Itoa(s)
+ }
+}
+
+// Only support the list of "regular" HTTP methods, see
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
+var methodMap = map[string]string{
+ "GET": http.MethodGet, "get": http.MethodGet,
+ "HEAD": http.MethodHead, "head": http.MethodHead,
+ "PUT": http.MethodPut, "put": http.MethodPut,
+ "POST": http.MethodPost, "post": http.MethodPost,
+ "DELETE": http.MethodDelete, "delete": http.MethodDelete,
+ "CONNECT": http.MethodConnect, "connect": http.MethodConnect,
+ "OPTIONS": http.MethodOptions, "options": http.MethodOptions,
+ "TRACE": http.MethodTrace, "trace": http.MethodTrace,
+ "PATCH": http.MethodPatch, "patch": http.MethodPatch,
+}
+
+// SanitizeMethod sanitizes the method for use as a metric label. This helps
+// prevent high cardinality on the method label. The name is always upper case.
+func SanitizeMethod(m string) string {
+ if m, ok := methodMap[m]; ok {
+ return m
+ }
+
+ return "OTHER"
+}
diff --git a/internal/metrics/metrics_test.go b/internal/metrics/metrics_test.go
new file mode 100644
index 00000000..c3f5965b
--- /dev/null
+++ b/internal/metrics/metrics_test.go
@@ -0,0 +1,28 @@
+package metrics
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestSanitizeMethod(t *testing.T) {
+ tests := []struct {
+ method string
+ expected string
+ }{
+ {method: "get", expected: "GET"},
+ {method: "POST", expected: "POST"},
+ {method: "OPTIONS", expected: "OPTIONS"},
+ {method: "connect", expected: "CONNECT"},
+ {method: "trace", expected: "TRACE"},
+ {method: "UNKNOWN", expected: "OTHER"},
+ {method: strings.Repeat("ohno", 9999), expected: "OTHER"},
+ }
+
+ for _, d := range tests {
+ actual := SanitizeMethod(d.method)
+ if actual != d.expected {
+ t.Errorf("Not same: expected %#v, but got %#v", d.expected, actual)
+ }
+ }
+}
diff --git a/internal/ranges.go b/internal/ranges.go
new file mode 100644
index 00000000..e9429e26
--- /dev/null
+++ b/internal/ranges.go
@@ -0,0 +1,14 @@
+package internal
+
+// PrivateRangesCIDR returns a list of private CIDR range
+// strings, which can be used as a configuration shortcut.
+func PrivateRangesCIDR() []string {
+ return []string{
+ "192.168.0.0/16",
+ "172.16.0.0/12",
+ "10.0.0.0/8",
+ "127.0.0.1/8",
+ "fd00::/8",
+ "::1",
+ }
+}
diff --git a/internal/sockets.go b/internal/sockets.go
new file mode 100644
index 00000000..56ae9f4e
--- /dev/null
+++ b/internal/sockets.go
@@ -0,0 +1,56 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "fmt"
+ "io/fs"
+ "strconv"
+ "strings"
+)
+
+// SplitUnixSocketPermissionsBits takes a unix socket address in the
+// unusual "path|bits" format (e.g. /run/caddy.sock|0222) and tries
+// to split it into socket path (host) and permissions bits (port).
+// Colons (":") can't be used as separator, as socket paths on Windows
+// may include a drive letter (e.g. `unix/c:\absolute\path.sock`).
+// Permission bits will default to 0200 if none are specified.
+// Returns an error if the owner permission bits do not
+// include write permission (e.g. `0422` or `022`).
+// Symbolic permission representation (e.g. `u=w,g=w,o=w`)
+// is not supported and will throw an error for now!
+func SplitUnixSocketPermissionsBits(addr string) (path string, fileMode fs.FileMode, err error) {
+ addrSplit := strings.SplitN(addr, "|", 2)
+
+ if len(addrSplit) == 2 {
+ // parse octal permission bit string as uint32
+ fileModeUInt64, err := strconv.ParseUint(addrSplit[1], 8, 32)
+ if err != nil {
+ return "", 0, fmt.Errorf("could not parse octal permission bits in %s: %v", addr, err)
+ }
+ fileMode = fs.FileMode(fileModeUInt64)
+
+ // FileMode.String() returns a string like `-rwxr-xr--` for `u=rwx,g=rx,o=r` (`0754`)
+ if string(fileMode.String()[2]) != "w" {
+ return "", 0, fmt.Errorf("owner of the socket requires '-w-' (write, octal: '2') permissions at least; got '%s' in %s", fileMode.String()[1:4], addr)
+ }
+
+ return addrSplit[0], fileMode, nil
+ }
+
+ // default to 0200 (symbolic: `u=w,g=,o=`)
+ // if no permission bits are specified
+ return addr, 0o200, nil
+}
diff --git a/internal/testmocks/dummyverifier.go b/internal/testmocks/dummyverifier.go
new file mode 100644
index 00000000..1fbef32b
--- /dev/null
+++ b/internal/testmocks/dummyverifier.go
@@ -0,0 +1,41 @@
+package testmocks
+
+import (
+ "crypto/x509"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
+)
+
+func init() {
+ caddy.RegisterModule(new(dummyVerifier))
+}
+
+type dummyVerifier struct{}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (dummyVerifier) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ return nil
+}
+
+// CaddyModule implements caddy.Module.
+func (dummyVerifier) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "tls.client_auth.verifier.dummy",
+ New: func() caddy.Module {
+ return new(dummyVerifier)
+ },
+ }
+}
+
+// VerifyClientCertificate implements ClientCertificateVerifier.
+func (dummyVerifier) VerifyClientCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
+ return nil
+}
+
+var (
+ _ caddy.Module = dummyVerifier{}
+ _ caddytls.ClientCertificateVerifier = dummyVerifier{}
+ _ caddyfile.Unmarshaler = dummyVerifier{}
+)
diff --git a/listen.go b/listen.go
new file mode 100644
index 00000000..1a7051bb
--- /dev/null
+++ b/listen.go
@@ -0,0 +1,318 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !unix || solaris
+
+package caddy
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "os"
+ "slices"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+func reuseUnixSocket(_, _ string) (any, error) {
+ return nil, nil
+}
+
+func listenReusable(ctx context.Context, lnKey string, network, address string, config net.ListenConfig) (any, error) {
+ var socketFile *os.File
+
+ fd := slices.Contains([]string{"fd", "fdgram"}, network)
+ if fd {
+ socketFd, err := strconv.ParseUint(address, 0, strconv.IntSize)
+ if err != nil {
+ return nil, fmt.Errorf("invalid file descriptor: %v", err)
+ }
+
+ func() {
+ socketFilesMu.Lock()
+ defer socketFilesMu.Unlock()
+
+ socketFdWide := uintptr(socketFd)
+ var ok bool
+
+ socketFile, ok = socketFiles[socketFdWide]
+
+ if !ok {
+ socketFile = os.NewFile(socketFdWide, lnKey)
+ if socketFile != nil {
+ socketFiles[socketFdWide] = socketFile
+ }
+ }
+ }()
+
+ if socketFile == nil {
+ return nil, fmt.Errorf("invalid socket file descriptor: %d", socketFd)
+ }
+ }
+
+ datagram := slices.Contains([]string{"udp", "udp4", "udp6", "unixgram", "fdgram"}, network)
+ if datagram {
+ sharedPc, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
+ var (
+ pc net.PacketConn
+ err error
+ )
+ if fd {
+ pc, err = net.FilePacketConn(socketFile)
+ } else {
+ pc, err = config.ListenPacket(ctx, network, address)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return &sharedPacketConn{PacketConn: pc, key: lnKey}, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &fakeClosePacketConn{sharedPacketConn: sharedPc.(*sharedPacketConn)}, nil
+ }
+
+ sharedLn, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
+ var (
+ ln net.Listener
+ err error
+ )
+ if fd {
+ ln, err = net.FileListener(socketFile)
+ } else {
+ ln, err = config.Listen(ctx, network, address)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return &sharedListener{Listener: ln, key: lnKey}, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &fakeCloseListener{sharedListener: sharedLn.(*sharedListener), keepAlivePeriod: config.KeepAlive}, nil
+}
+
+// fakeCloseListener is a private wrapper over a listener that
+// is shared. The state of fakeCloseListener is not shared.
+// This allows one user of a socket to "close" the listener
+// while in reality the socket stays open for other users of
+// the listener. In this way, servers become hot-swappable
+// while the listener remains running. Listeners should be
+// re-wrapped in a new fakeCloseListener each time the listener
+// is reused. This type is atomic and values must not be copied.
+type fakeCloseListener struct {
+ closed int32 // accessed atomically; belongs to this struct only
+ *sharedListener // embedded, so we also become a net.Listener
+ keepAlivePeriod time.Duration
+}
+
+type canSetKeepAlive interface {
+ SetKeepAlivePeriod(d time.Duration) error
+ SetKeepAlive(bool) error
+}
+
+func (fcl *fakeCloseListener) Accept() (net.Conn, error) {
+ // if the listener is already "closed", return error
+ if atomic.LoadInt32(&fcl.closed) == 1 {
+ return nil, fakeClosedErr(fcl)
+ }
+
+ // call underlying accept
+ conn, err := fcl.sharedListener.Accept()
+ if err == nil {
+ // if 0, do nothing, Go's default is already set
+ // and if the connection allows setting KeepAlive, set it
+ if tconn, ok := conn.(canSetKeepAlive); ok && fcl.keepAlivePeriod != 0 {
+ if fcl.keepAlivePeriod > 0 {
+ err = tconn.SetKeepAlivePeriod(fcl.keepAlivePeriod)
+ } else { // negative
+ err = tconn.SetKeepAlive(false)
+ }
+ if err != nil {
+ Log().With(zap.String("server", fcl.sharedListener.key)).Warn("unable to set keepalive for new connection:", zap.Error(err))
+ }
+ }
+ return conn, nil
+ }
+
+ // since Accept() returned an error, it may be because our reference to
+ // the listener (this fakeCloseListener) may have been closed, i.e. the
+ // server is shutting down; in that case, we need to clear the deadline
+ // that we set when Close() was called, and return a non-temporary and
+ // non-timeout error value to the caller, masking the "true" error, so
+ // that server loops / goroutines won't retry, linger, and leak
+ if atomic.LoadInt32(&fcl.closed) == 1 {
+ // we dereference the sharedListener explicitly even though it's embedded
+ // so that it's clear in the code that side-effects are shared with other
+ // users of this listener, not just our own reference to it; we also don't
+ // do anything with the error because all we could do is log it, but we
+ // explicitly assign it to nothing so we don't forget it's there if needed
+ _ = fcl.sharedListener.clearDeadline()
+
+ if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+ return nil, fakeClosedErr(fcl)
+ }
+ }
+
+ return nil, err
+}
+
+// Close stops accepting new connections without closing the
+// underlying listener. The underlying listener is only closed
+// if the caller is the last known user of the socket.
+func (fcl *fakeCloseListener) Close() error {
+ if atomic.CompareAndSwapInt32(&fcl.closed, 0, 1) {
+ // There are two ways I know of to get an Accept()
+ // function to return to the server loop that called
+ // it: close the listener, or set a deadline in the
+ // past. Obviously, we can't close the socket yet
+ // since others may be using it (hence this whole
+ // file). But we can set the deadline in the past,
+ // and this is kind of cheating, but it works, and
+ // it apparently even works on Windows.
+ _ = fcl.sharedListener.setDeadline()
+ _, _ = listenerPool.Delete(fcl.sharedListener.key)
+ }
+ return nil
+}
+
+// sharedListener is a wrapper over an underlying listener. The listener
+// and the other fields on the struct are shared state that is synchronized,
+// so sharedListener structs must never be copied (always use a pointer).
+type sharedListener struct {
+ net.Listener
+ key string // uniquely identifies this listener
+ deadline bool // whether a deadline is currently set
+ deadlineMu sync.Mutex
+}
+
+func (sl *sharedListener) clearDeadline() error {
+ var err error
+ sl.deadlineMu.Lock()
+ if sl.deadline {
+ switch ln := sl.Listener.(type) {
+ case *net.TCPListener:
+ err = ln.SetDeadline(time.Time{})
+ }
+ sl.deadline = false
+ }
+ sl.deadlineMu.Unlock()
+ return err
+}
+
+func (sl *sharedListener) setDeadline() error {
+ timeInPast := time.Now().Add(-1 * time.Minute)
+ var err error
+ sl.deadlineMu.Lock()
+ if !sl.deadline {
+ switch ln := sl.Listener.(type) {
+ case *net.TCPListener:
+ err = ln.SetDeadline(timeInPast)
+ }
+ sl.deadline = true
+ }
+ sl.deadlineMu.Unlock()
+ return err
+}
+
+// Destruct is called by the UsagePool when the listener is
+// finally not being used anymore. It closes the socket.
+func (sl *sharedListener) Destruct() error {
+ return sl.Listener.Close()
+}
+
+// fakeClosePacketConn is like fakeCloseListener, but for PacketConns,
+// or more specifically, *net.UDPConn
+type fakeClosePacketConn struct {
+ closed int32 // accessed atomically; belongs to this struct only
+ *sharedPacketConn // embedded, so we also become a net.PacketConn; its key is used in Close
+}
+
+func (fcpc *fakeClosePacketConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) {
+ // if the listener is already "closed", return error
+ if atomic.LoadInt32(&fcpc.closed) == 1 {
+ return 0, nil, &net.OpError{
+ Op: "readfrom",
+ Net: fcpc.LocalAddr().Network(),
+ Addr: fcpc.LocalAddr(),
+ Err: errFakeClosed,
+ }
+ }
+
+ // call underlying readfrom
+ n, addr, err = fcpc.sharedPacketConn.ReadFrom(p)
+ if err != nil {
+ // this server was stopped, so clear the deadline and let
+ // any new server continue reading; but we will exit
+ if atomic.LoadInt32(&fcpc.closed) == 1 {
+ if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+ if err = fcpc.SetReadDeadline(time.Time{}); err != nil {
+ return
+ }
+ }
+ }
+ return
+ }
+
+ return
+}
+
+// Close won't close the underlying socket unless there is no more reference, then listenerPool will close it.
+func (fcpc *fakeClosePacketConn) Close() error {
+ if atomic.CompareAndSwapInt32(&fcpc.closed, 0, 1) {
+ _ = fcpc.SetReadDeadline(time.Now()) // unblock ReadFrom() calls to kick old servers out of their loops
+ _, _ = listenerPool.Delete(fcpc.sharedPacketConn.key)
+ }
+ return nil
+}
+
+func (fcpc *fakeClosePacketConn) Unwrap() net.PacketConn {
+ return fcpc.sharedPacketConn.PacketConn
+}
+
+// sharedPacketConn is like sharedListener, but for net.PacketConns.
+type sharedPacketConn struct {
+ net.PacketConn
+ key string
+}
+
+// Destruct closes the underlying socket.
+func (spc *sharedPacketConn) Destruct() error {
+ return spc.PacketConn.Close()
+}
+
+// Unwrap returns the underlying socket
+func (spc *sharedPacketConn) Unwrap() net.PacketConn {
+ return spc.PacketConn
+}
+
+// Interface guards (see https://github.com/caddyserver/caddy/issues/3998)
+var (
+ _ (interface {
+ Unwrap() net.PacketConn
+ }) = (*fakeClosePacketConn)(nil)
+)
+
+// socketFiles is a fd -> *os.File map used to make a FileListener/FilePacketConn from a socket file descriptor.
+var socketFiles = map[uintptr]*os.File{}
+
+// socketFilesMu synchronizes socketFiles insertions
+var socketFilesMu sync.Mutex
diff --git a/listen_unix.go b/listen_unix.go
new file mode 100644
index 00000000..d6ae0cb8
--- /dev/null
+++ b/listen_unix.go
@@ -0,0 +1,312 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Even though the filename ends in _unix.go, we still have to specify the
+// build constraint here, because the filename convention only works for
+// literal GOOS values, and "unix" is a shortcut unique to build tags.
+//go:build unix && !solaris
+
+package caddy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net"
+ "os"
+ "slices"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "syscall"
+
+ "go.uber.org/zap"
+ "golang.org/x/sys/unix"
+)
+
+// reuseUnixSocket copies and reuses the unix domain socket (UDS) if we already
+// have it open; if not, unlink it so we can have it.
+// No-op if not a unix network.
+//
+// NOTE(review): this reads and writes the unixSockets map without locking;
+// presumably the caller (NetworkAddress.Listen locks unixSocketsMu for unix
+// networks) provides the synchronization — confirm all callers do.
+func reuseUnixSocket(network, addr string) (any, error) {
+	socketKey := listenerKey(network, addr)
+
+	socket, exists := unixSockets[socketKey]
+	if exists {
+		// make copy of file descriptor
+		socketFile, err := socket.File() // does dup() deep down
+		if err != nil {
+			return nil, err
+		}
+
+		// use copied fd to make new Listener or PacketConn, then replace
+		// it in the map so that future copies always come from the most
+		// recent fd (as the previous ones will be closed, and we'd get
+		// "use of closed network connection" errors) -- note that we
+		// preserve the *pointer* to the counter (not just the value) so
+		// that all socket wrappers will refer to the same value
+		switch unixSocket := socket.(type) {
+		case *unixListener:
+			ln, err := net.FileListener(socketFile)
+			if err != nil {
+				return nil, err
+			}
+			atomic.AddInt32(unixSocket.count, 1)
+			unixSockets[socketKey] = &unixListener{ln.(*net.UnixListener), socketKey, unixSocket.count}
+
+		case *unixConn:
+			pc, err := net.FilePacketConn(socketFile)
+			if err != nil {
+				return nil, err
+			}
+			atomic.AddInt32(unixSocket.count, 1)
+			unixSockets[socketKey] = &unixConn{pc.(*net.UnixConn), socketKey, unixSocket.count}
+		}
+
+		return unixSockets[socketKey], nil
+	}
+
+	// from what I can tell after some quick research, it's quite common for programs to
+	// leave their socket file behind after they close, so the typical pattern is to
+	// unlink it before you bind to it -- this is often crucial if the last program using
+	// it was killed forcefully without a chance to clean up the socket, but there is a
+	// race, as the comment in net.UnixListener.close() explains... oh well, I guess?
+	if err := syscall.Unlink(addr); err != nil && !errors.Is(err, fs.ErrNotExist) {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+// listenReusable creates a new listener for the given network and address, and adds it to listenerPool.
+// For "fd"/"fdgram" networks the address is a file descriptor number and the
+// socket is adopted from that fd; otherwise a fresh socket is bound with the
+// reusePort control attached.
+func listenReusable(ctx context.Context, lnKey string, network, address string, config net.ListenConfig) (any, error) {
+	// even though SO_REUSEPORT lets us bind the socket multiple times,
+	// we still put it in the listenerPool so we can count how many
+	// configs are using this socket; necessary to ensure we can know
+	// whether to enforce shutdown delays, for example (see #5393).
+	var (
+		ln         io.Closer
+		err        error
+		socketFile *os.File
+	)
+
+	// "fd"/"fdgram" networks mean the address is the number of an
+	// already-open socket file descriptor (e.g. socket activation)
+	fd := slices.Contains([]string{"fd", "fdgram"}, network)
+	if fd {
+		socketFd, err := strconv.ParseUint(address, 0, strconv.IntSize)
+		if err != nil {
+			return nil, fmt.Errorf("invalid file descriptor: %v", err)
+		}
+
+		// cache the *os.File per fd so repeated listens on the same
+		// descriptor reuse a single File instead of duplicating it
+		func() {
+			socketFilesMu.Lock()
+			defer socketFilesMu.Unlock()
+
+			socketFdWide := uintptr(socketFd)
+			var ok bool
+
+			socketFile, ok = socketFiles[socketFdWide]
+
+			if !ok {
+				socketFile = os.NewFile(socketFdWide, lnKey)
+				if socketFile != nil {
+					socketFiles[socketFdWide] = socketFile
+				}
+			}
+		}()
+
+		if socketFile == nil {
+			return nil, fmt.Errorf("invalid socket file descriptor: %d", socketFd)
+		}
+	} else {
+		// wrap any Control function set by the user so we can also add our reusePort control without clobbering theirs
+		oldControl := config.Control
+		config.Control = func(network, address string, c syscall.RawConn) error {
+			if oldControl != nil {
+				if err := oldControl(network, address, c); err != nil {
+					return err
+				}
+			}
+			return reusePort(network, address, c)
+		}
+	}
+
+	// datagram networks produce a net.PacketConn; stream networks a net.Listener
+	datagram := slices.Contains([]string{"udp", "udp4", "udp6", "unixgram", "fdgram"}, network)
+	if datagram {
+		if fd {
+			ln, err = net.FilePacketConn(socketFile)
+		} else {
+			ln, err = config.ListenPacket(ctx, network, address)
+		}
+	} else {
+		if fd {
+			ln, err = net.FileListener(socketFile)
+		} else {
+			ln, err = config.Listen(ctx, network, address)
+		}
+	}
+
+	if err == nil {
+		// register (or bump the reference count for) this socket in the pool
+		listenerPool.LoadOrStore(lnKey, nil)
+	}
+
+	if datagram {
+		if !fd {
+			// TODO: Not 100% sure this is necessary, but we do this for net.UnixListener, so...
+			if unix, ok := ln.(*net.UnixConn); ok {
+				one := int32(1)
+				ln = &unixConn{unix, lnKey, &one}
+				unixSockets[lnKey] = ln.(*unixConn)
+			}
+		}
+		// lightly wrap the connection so that when it is closed,
+		// we can decrement the usage pool counter
+		if specificLn, ok := ln.(net.PacketConn); ok {
+			ln = deletePacketConn{specificLn, lnKey}
+		}
+	} else {
+		if !fd {
+			// if new listener is a unix socket, make sure we can reuse it later
+			// (we do our own "unlink on close" -- not required, but more tidy)
+			if unix, ok := ln.(*net.UnixListener); ok {
+				unix.SetUnlinkOnClose(false)
+				one := int32(1)
+				ln = &unixListener{unix, lnKey, &one}
+				unixSockets[lnKey] = ln.(*unixListener)
+			}
+		}
+		// lightly wrap the listener so that when it is closed,
+		// we can decrement the usage pool counter
+		if specificLn, ok := ln.(net.Listener); ok {
+			ln = deleteListener{specificLn, lnKey}
+		}
+	}
+
+	// other types, I guess we just return them directly
+	return ln, err
+}
+
+// reusePort sets SO_REUSEPORT. Ineffective for unix sockets.
+// The concrete option value is platform-dependent (see the
+// listen_unix_setopt*.go files). A failed setsockopt is logged
+// rather than returned, so it does not prevent the bind itself.
+func reusePort(network, address string, conn syscall.RawConn) error {
+	if IsUnixNetwork(network) {
+		return nil
+	}
+	return conn.Control(func(descriptor uintptr) {
+		if err := unix.SetsockoptInt(int(descriptor), unix.SOL_SOCKET, unixSOREUSEPORT, 1); err != nil {
+			Log().Error("setting SO_REUSEPORT",
+				zap.String("network", network),
+				zap.String("address", address),
+				zap.Uintptr("descriptor", descriptor),
+				zap.Error(err))
+		}
+	})
+}
+
+// unixListener wraps a *net.UnixListener with a shared reference count so
+// that the socket file is unlinked only when the last reference closes.
+type unixListener struct {
+	*net.UnixListener
+	mapKey string // key into unixSockets
+	count  *int32 // accessed atomically
+}
+
+// Close decrements the shared reference count; when it reaches zero, the
+// entry is removed from unixSockets and the socket file is unlinked.
+func (uln *unixListener) Close() error {
+	newCount := atomic.AddInt32(uln.count, -1)
+	if newCount == 0 {
+		// capture the socket filename now so we can unlink it after closing
+		file, err := uln.File()
+		var name string
+		if err == nil {
+			name = file.Name()
+		}
+		defer func() {
+			unixSocketsMu.Lock()
+			delete(unixSockets, uln.mapKey)
+			unixSocketsMu.Unlock()
+			if err == nil {
+				_ = syscall.Unlink(name)
+			}
+		}()
+	}
+	return uln.UnixListener.Close()
+}
+
+// unixConn wraps a *net.UnixConn (datagram unix socket) with the same
+// reference-counted close semantics as unixListener.
+type unixConn struct {
+	*net.UnixConn
+	mapKey string // key into unixSockets
+	count  *int32 // accessed atomically
+}
+
+// Close decrements the shared reference count; when it reaches zero, the
+// entry is removed from unixSockets and the socket file is unlinked.
+func (uc *unixConn) Close() error {
+	newCount := atomic.AddInt32(uc.count, -1)
+	if newCount == 0 {
+		// capture the socket filename now so we can unlink it after closing
+		file, err := uc.File()
+		var name string
+		if err == nil {
+			name = file.Name()
+		}
+		defer func() {
+			unixSocketsMu.Lock()
+			delete(unixSockets, uc.mapKey)
+			unixSocketsMu.Unlock()
+			if err == nil {
+				_ = syscall.Unlink(name)
+			}
+		}()
+	}
+	return uc.UnixConn.Close()
+}
+
+// Unwrap returns the underlying *net.UnixConn as a net.PacketConn.
+func (uc *unixConn) Unwrap() net.PacketConn {
+	return uc.UnixConn
+}
+
+// unixSockets keeps track of the currently-active unix sockets
+// so we can transfer their FDs gracefully during reloads.
+// Values are *unixListener or *unixConn; both expose File().
+var unixSockets = make(map[string]interface {
+	File() (*os.File, error)
+})
+
+// socketFiles is a fd -> *os.File map used to make a FileListener/FilePacketConn from a socket file descriptor.
+var socketFiles = map[uintptr]*os.File{}
+
+// socketFilesMu synchronizes socketFiles insertions
+var socketFilesMu sync.Mutex
+
+// deleteListener is a type that simply deletes itself
+// from the listenerPool when it closes. It is used
+// solely for the purpose of reference counting (i.e.
+// counting how many configs are using a given socket).
+type deleteListener struct {
+	net.Listener
+	lnKey string // key into listenerPool
+}
+
+// Close removes this listener's pool entry before closing the socket.
+func (dl deleteListener) Close() error {
+	_, _ = listenerPool.Delete(dl.lnKey)
+	return dl.Listener.Close()
+}
+
+// deletePacketConn is like deleteListener, but
+// for net.PacketConns.
+type deletePacketConn struct {
+	net.PacketConn
+	lnKey string // key into listenerPool
+}
+
+// Close removes this conn's pool entry before closing the socket.
+func (dl deletePacketConn) Close() error {
+	_, _ = listenerPool.Delete(dl.lnKey)
+	return dl.PacketConn.Close()
+}
+
+// Unwrap returns the underlying net.PacketConn.
+func (dl deletePacketConn) Unwrap() net.PacketConn {
+	return dl.PacketConn
+}
diff --git a/listen_unix_setopt.go b/listen_unix_setopt.go
new file mode 100644
index 00000000..13ee7b83
--- /dev/null
+++ b/listen_unix_setopt.go
@@ -0,0 +1,7 @@
+//go:build unix && !freebsd && !solaris
+
+package caddy
+
+import "golang.org/x/sys/unix"
+
+// unixSOREUSEPORT is the socket option used to allow multiple binds to
+// the same address/port; plain SO_REUSEPORT on most unix platforms.
+const unixSOREUSEPORT = unix.SO_REUSEPORT
diff --git a/listen_unix_setopt_freebsd.go b/listen_unix_setopt_freebsd.go
new file mode 100644
index 00000000..06520540
--- /dev/null
+++ b/listen_unix_setopt_freebsd.go
@@ -0,0 +1,7 @@
+//go:build freebsd
+
+package caddy
+
+import "golang.org/x/sys/unix"
+
+// unixSOREUSEPORT: FreeBSD uses SO_REUSEPORT_LB, the load-balancing
+// variant of SO_REUSEPORT, for the same purpose as on other unixes.
+const unixSOREUSEPORT = unix.SO_REUSEPORT_LB
diff --git a/listeners.go b/listeners.go
index 39bd8118..b22df77b 100644
--- a/listeners.go
+++ b/listeners.go
@@ -15,270 +15,233 @@
package caddy
import (
+ "context"
+ "crypto/tls"
+ "errors"
"fmt"
- "log"
+ "io"
+ "io/fs"
"net"
+ "net/netip"
+ "os"
"strconv"
"strings"
"sync"
"sync/atomic"
- "time"
+
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/http3"
+ "github.com/quic-go/quic-go/qlog"
+ "go.uber.org/zap"
+ "golang.org/x/time/rate"
+
+ "github.com/caddyserver/caddy/v2/internal"
)
-// Listen returns a listener suitable for use in a Caddy module.
-// Always be sure to close listeners when you are done with them.
-func Listen(network, addr string) (net.Listener, error) {
- lnKey := network + "/" + addr
-
- listenersMu.Lock()
- defer listenersMu.Unlock()
-
- // if listener already exists, increment usage counter, then return listener
- if lnGlobal, ok := listeners[lnKey]; ok {
- atomic.AddInt32(&lnGlobal.usage, 1)
- return &fakeCloseListener{
- usage: &lnGlobal.usage,
- deadline: &lnGlobal.deadline,
- deadlineMu: &lnGlobal.deadlineMu,
- key: lnKey,
- Listener: lnGlobal.ln,
- }, nil
- }
-
- // or, create new one and save it
- ln, err := net.Listen(network, addr)
- if err != nil {
- return nil, err
- }
-
- // make sure to start its usage counter at 1
- lnGlobal := &globalListener{usage: 1, ln: ln}
- listeners[lnKey] = lnGlobal
-
- return &fakeCloseListener{
- usage: &lnGlobal.usage,
- deadline: &lnGlobal.deadline,
- deadlineMu: &lnGlobal.deadlineMu,
- key: lnKey,
- Listener: ln,
- }, nil
-}
-
-// ListenPacket returns a net.PacketConn suitable for use in a Caddy module.
-// Always be sure to close the PacketConn when you are done.
-func ListenPacket(network, addr string) (net.PacketConn, error) {
- lnKey := network + "/" + addr
-
- listenersMu.Lock()
- defer listenersMu.Unlock()
-
- // if listener already exists, increment usage counter, then return listener
- if lnGlobal, ok := listeners[lnKey]; ok {
- atomic.AddInt32(&lnGlobal.usage, 1)
- log.Printf("[DEBUG] %s: Usage counter should not go above 2 or maybe 3, is now: %d", lnKey, atomic.LoadInt32(&lnGlobal.usage)) // TODO: remove
- return &fakeClosePacketConn{usage: &lnGlobal.usage, key: lnKey, PacketConn: lnGlobal.pc}, nil
- }
-
- // or, create new one and save it
- pc, err := net.ListenPacket(network, addr)
- if err != nil {
- return nil, err
- }
-
- // make sure to start its usage counter at 1
- lnGlobal := &globalListener{usage: 1, pc: pc}
- listeners[lnKey] = lnGlobal
-
- return &fakeClosePacketConn{usage: &lnGlobal.usage, key: lnKey, PacketConn: pc}, nil
-}
-
-// fakeCloseListener's Close() method is a no-op. This allows
-// stopping servers that are using the listener without giving
-// up the socket; thus, servers become hot-swappable while the
-// listener remains running. Listeners should be re-wrapped in
-// a new fakeCloseListener each time the listener is reused.
-// Other than the 'closed' field (which pertains to this value
-// only), the other fields in this struct should be pointers to
-// the associated globalListener's struct fields (except 'key'
-// which is there for read-only purposes, so it can be a copy).
-type fakeCloseListener struct {
- closed int32 // accessed atomically; belongs to this struct only
- usage *int32 // accessed atomically; global
- deadline *bool // protected by deadlineMu; global
- deadlineMu *sync.Mutex // global
- key string // global, but read-only, so can be copy
- net.Listener // global
-}
-
-// Accept accepts connections until Close() is called.
-func (fcl *fakeCloseListener) Accept() (net.Conn, error) {
- // if the listener is already "closed", return error
- if atomic.LoadInt32(&fcl.closed) == 1 {
- return nil, fcl.fakeClosedErr()
- }
-
- // wrap underlying accept
- conn, err := fcl.Listener.Accept()
- if err == nil {
- return conn, nil
- }
-
- // accept returned with error
- // TODO: This may be better as a condition variable so the deadline is cleared only once?
- fcl.deadlineMu.Lock()
- if *fcl.deadline {
- switch ln := fcl.Listener.(type) {
- case *net.TCPListener:
- ln.SetDeadline(time.Time{})
- case *net.UnixListener:
- ln.SetDeadline(time.Time{})
- }
- *fcl.deadline = false
- }
- fcl.deadlineMu.Unlock()
-
- if atomic.LoadInt32(&fcl.closed) == 1 {
- // if we canceled the Accept() by setting a deadline
- // on the listener, we need to make sure any callers of
- // Accept() think the listener was actually closed;
- // if we return the timeout error instead, callers might
- // simply retry, leaking goroutines for longer
- if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
- return nil, fcl.fakeClosedErr()
- }
- }
-
- return nil, err
-}
-
-// Close stops accepting new connections without
-// closing the underlying listener, unless no one
-// else is using it.
-func (fcl *fakeCloseListener) Close() error {
- if atomic.CompareAndSwapInt32(&fcl.closed, 0, 1) {
- // unfortunately, there is no way to cancel any
- // currently-blocking calls to Accept() that are
- // awaiting connections since we're not actually
- // closing the listener; so we cheat by setting
- // a deadline in the past, which forces it to
- // time out; note that this only works for
- // certain types of listeners...
- fcl.deadlineMu.Lock()
- if !*fcl.deadline {
- switch ln := fcl.Listener.(type) {
- case *net.TCPListener:
- ln.SetDeadline(time.Now().Add(-1 * time.Minute))
- case *net.UnixListener:
- ln.SetDeadline(time.Now().Add(-1 * time.Minute))
- }
- *fcl.deadline = true
- }
- fcl.deadlineMu.Unlock()
-
- // since we're no longer using this listener,
- // decrement the usage counter and, if no one
- // else is using it, close underlying listener
- if atomic.AddInt32(fcl.usage, -1) == 0 {
- listenersMu.Lock()
- delete(listeners, fcl.key)
- listenersMu.Unlock()
- err := fcl.Listener.Close()
- if err != nil {
- return err
- }
- }
-
- }
-
- return nil
-}
-
-func (fcl *fakeCloseListener) fakeClosedErr() error {
- return &net.OpError{
- Op: "accept",
- Net: fcl.Listener.Addr().Network(),
- Addr: fcl.Listener.Addr(),
- Err: errFakeClosed,
- }
-}
-
-type fakeClosePacketConn struct {
- closed int32 // accessed atomically
- usage *int32 // accessed atomically
- key string
- net.PacketConn
-}
-
-func (fcpc *fakeClosePacketConn) Close() error {
- log.Println("[DEBUG] Fake-closing underlying packet conn") // TODO: remove this
-
- if atomic.CompareAndSwapInt32(&fcpc.closed, 0, 1) {
- // since we're no longer using this listener,
- // decrement the usage counter and, if no one
- // else is using it, close underlying listener
- if atomic.AddInt32(fcpc.usage, -1) == 0 {
- listenersMu.Lock()
- delete(listeners, fcpc.key)
- listenersMu.Unlock()
- err := fcpc.PacketConn.Close()
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// ErrFakeClosed is the underlying error value returned by
-// fakeCloseListener.Accept() after Close() has been called,
-// indicating that it is pretending to be closed so that the
-// server using it can terminate, while the underlying
-// socket is actually left open.
-var errFakeClosed = fmt.Errorf("listener 'closed' 😉")
-
-// globalListener keeps global state for a listener
-// that may be shared by multiple servers. In other
-// words, values in this struct exist only once and
-// all other uses of these values point to the ones
-// in this struct. In particular, the usage count
-// (how many callers are using the listener), the
-// actual listener, and synchronization of the
-// listener's deadline changes are singular, global
-// values that must not be copied.
-type globalListener struct {
- usage int32 // accessed atomically
- deadline bool
- deadlineMu sync.Mutex
- ln net.Listener
- pc net.PacketConn
-}
-
-// NetworkAddress contains the individual components
-// for a parsed network address of the form accepted
-// by ParseNetworkAddress(). Network should be a
-// network value accepted by Go's net package. Port
-// ranges are given by [StartPort, EndPort].
+// NetworkAddress represents one or more network addresses.
+// It contains the individual components for a parsed network
+// address of the form accepted by ParseNetworkAddress().
type NetworkAddress struct {
- Network string
- Host string
+ // Should be a network value accepted by Go's net package or
+ // by a plugin providing a listener for that network type.
+ Network string
+
+ // The "main" part of the network address is the host, which
+ // often takes the form of a hostname, DNS name, IP address,
+ // or socket path.
+ Host string
+
+ // For addresses that contain a port, ranges are given by
+ // [StartPort, EndPort]; i.e. for a single port, StartPort
+ // and EndPort are the same. For no port, they are 0.
StartPort uint
EndPort uint
}
+// ListenAll calls Listen for all addresses represented by this struct, i.e. all ports in the range.
+// (If the address doesn't use ports or has 1 port only, then only 1 listener will be created.)
+// It returns an error if any listener failed to bind, and closes any listeners opened up to that point.
+func (na NetworkAddress) ListenAll(ctx context.Context, config net.ListenConfig) ([]any, error) {
+	var listeners []any
+	var err error
+
+	// if one of the addresses has a failure, we need to close
+	// any that did open a socket to avoid leaking resources
+	defer func() {
+		if err == nil {
+			return
+		}
+		for _, ln := range listeners {
+			if cl, ok := ln.(io.Closer); ok {
+				cl.Close()
+			}
+		}
+	}()
+
+	// an address can contain a port range, which represents multiple addresses;
+	// some addresses don't use ports at all and have a port range size of 1;
+	// whatever the case, iterate each address represented and bind a socket
+	for portOffset := uint(0); portOffset < na.PortRangeSize(); portOffset++ {
+		select {
+		case <-ctx.Done():
+			// assign to err so the deferred cleanup above closes any
+			// listeners already opened before cancellation; previously
+			// err stayed nil on this path and those sockets leaked
+			err = ctx.Err()
+			return nil, err
+		default:
+		}
+
+		// create (or reuse) the listener ourselves
+		var ln any
+		ln, err = na.Listen(ctx, portOffset, config)
+		if err != nil {
+			return nil, err
+		}
+		listeners = append(listeners, ln)
+	}
+
+	return listeners, nil
+}
+
+// Listen is similar to net.Listen, with a few differences:
+//
+// Listen announces on the network address using the port calculated by adding
+// portOffset to the start port. (For network types that do not use ports, the
+// portOffset is ignored.)
+//
+// First Listen checks if a plugin can provide a listener from this address. Otherwise,
+// the provided ListenConfig is used to create the listener. Its Control function,
+// if set, may be wrapped by an internally-used Control function. The provided
+// context may be used to cancel long operations early. The context is not used
+// to close the listener after it has been created.
+//
+// Caddy's listeners can overlap each other: multiple listeners may be created on
+// the same socket at the same time. This is useful because during config changes,
+// the new config is started while the old config is still running. How this is
+// accomplished varies by platform and network type. For example, on Unix, SO_REUSEPORT
+// is set except on Unix sockets, for which the file descriptor is duplicated and
+// reused; on Windows, the close logic is virtualized using timeouts. Like normal
+// listeners, be sure to Close() them when you are done.
+//
+// This method returns any type, as the implementations of listeners for various
+// network types are not interchangeable. The type of listener returned is switched
+// on the network type. Stream-based networks ("tcp", "unix", "unixpacket", etc.)
+// return a net.Listener; datagram-based networks ("udp", "unixgram", etc.) return
+// a net.PacketConn; and so forth. The actual concrete types are not guaranteed to
+// be standard, exported types (wrapping is necessary to provide graceful reloads).
+//
+// Unix sockets will be unlinked before being created, to ensure we can bind to
+// it even if the previous program using it exited uncleanly; it will also be
+// unlinked upon a graceful exit (or when a new config does not use that socket).
+// Listen synchronizes binds to unix domain sockets to avoid race conditions
+// while an existing socket is unlinked.
+func (na NetworkAddress) Listen(ctx context.Context, portOffset uint, config net.ListenConfig) (any, error) {
+	if na.IsUnixNetwork() {
+		// serialize unlink/rebind of unix sockets (see doc comment above)
+		unixSocketsMu.Lock()
+		defer unixSocketsMu.Unlock()
+	}
+
+	// check to see if plugin provides listener
+	if ln, err := getListenerFromPlugin(ctx, na.Network, na.Host, na.port(), portOffset, config); ln != nil || err != nil {
+		return ln, err
+	}
+
+	// create (or reuse) the listener ourselves
+	return na.listen(ctx, portOffset, config)
+}
+
+// listen resolves the final address string for the network type (splitting
+// off any unix-socket permission bits from the host), then binds or reuses
+// the socket for the address at portOffset.
+func (na NetworkAddress) listen(ctx context.Context, portOffset uint, config net.ListenConfig) (any, error) {
+	var (
+		ln           any
+		err          error
+		address      string
+		unixFileMode fs.FileMode
+	)
+
+	// split unix socket addr early so lnKey
+	// is independent of permissions bits
+	if na.IsUnixNetwork() {
+		address, unixFileMode, err = internal.SplitUnixSocketPermissionsBits(na.Host)
+		if err != nil {
+			return nil, err
+		}
+	} else if na.IsFdNetwork() {
+		address = na.Host
+	} else {
+		address = na.JoinHostPort(portOffset)
+	}
+
+	if strings.HasPrefix(na.Network, "ip") {
+		// raw IP networks are listened to directly as packet conns
+		ln, err = config.ListenPacket(ctx, na.Network, address)
+	} else {
+		if na.IsUnixNetwork() {
+			// if this is a unix socket, see if we already have it open
+			ln, err = reuseUnixSocket(na.Network, address)
+		}
+
+		if ln == nil && err == nil {
+			// otherwise, create a new listener
+			lnKey := listenerKey(na.Network, address)
+			ln, err = listenReusable(ctx, lnKey, na.Network, address, config)
+		}
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if ln == nil {
+		return nil, fmt.Errorf("unsupported network type: %s", na.Network)
+	}
+
+	if IsUnixNetwork(na.Network) {
+		// apply the permission bits parsed from the host; abstract
+		// sockets (addresses starting with "@") have no filesystem
+		// entry to chmod
+		isAbstractUnixSocket := strings.HasPrefix(address, "@")
+		if !isAbstractUnixSocket {
+			err = os.Chmod(address, unixFileMode)
+			if err != nil {
+				return nil, fmt.Errorf("unable to set permissions (%s) on %s: %v", unixFileMode, address, err)
+			}
+		}
+	}
+
+	return ln, nil
+}
+
// IsUnixNetwork returns true if na.Network is
// unix, unixgram, or unixpacket.
func (na NetworkAddress) IsUnixNetwork() bool {
- return isUnixNetwork(na.Network)
+ return IsUnixNetwork(na.Network)
+}
+
+// IsFdNetwork returns true if na.Network is
+// fd or fdgram.
+func (na NetworkAddress) IsFdNetwork() bool {
+	return IsFdNetwork(na.Network)
+}
// JoinHostPort is like net.JoinHostPort, but where the port
// is StartPort + offset.
func (na NetworkAddress) JoinHostPort(offset uint) string {
- if na.IsUnixNetwork() {
+ if na.IsUnixNetwork() || na.IsFdNetwork() {
return na.Host
}
- return net.JoinHostPort(na.Host, strconv.Itoa(int(na.StartPort+offset)))
+ return net.JoinHostPort(na.Host, strconv.FormatUint(uint64(na.StartPort+offset), 10))
+}
+
+// Expand returns one NetworkAddress for each port in the port range.
+func (na NetworkAddress) Expand() []NetworkAddress {
+	size := na.PortRangeSize()
+	addrs := make([]NetworkAddress, size)
+	for portOffset := uint(0); portOffset < size; portOffset++ {
+		addrs[portOffset] = na.At(portOffset)
+	}
+	return addrs
+}
+
+// At returns a NetworkAddress with a port range of just 1
+// at the given port offset; i.e. a NetworkAddress that
+// represents precisely 1 address only.
+func (na NetworkAddress) At(portOffset uint) NetworkAddress {
+	na2 := na // value receiver, so the original na is unchanged
+	na2.StartPort, na2.EndPort = na.StartPort+portOffset, na.StartPort+portOffset
+	return na2
+}
// PortRangeSize returns how many ports are in
@@ -286,17 +249,20 @@ func (na NetworkAddress) JoinHostPort(offset uint) string {
// so the size is the difference of start and
// end ports plus one.
func (na NetworkAddress) PortRangeSize() uint {
+ if na.EndPort < na.StartPort {
+ return 0
+ }
return (na.EndPort - na.StartPort) + 1
}
func (na NetworkAddress) isLoopback() bool {
- if na.IsUnixNetwork() {
+ if na.IsUnixNetwork() || na.IsFdNetwork() {
return true
}
if na.Host == "localhost" {
return true
}
- if ip := net.ParseIP(na.Host); ip != nil {
+ if ip, err := netip.ParseAddr(na.Host); err == nil {
return ip.IsLoopback()
}
return false
@@ -306,7 +272,7 @@ func (na NetworkAddress) isWildcardInterface() bool {
if na.Host == "" {
return true
}
- if ip := net.ParseIP(na.Host); ip != nil {
+ if ip, err := netip.ParseAddr(na.Host); err == nil {
return ip.IsUnspecified()
}
return false
@@ -319,15 +285,24 @@ func (na NetworkAddress) port() string {
return fmt.Sprintf("%d-%d", na.StartPort, na.EndPort)
}
-// String reconstructs the address string to the form expected
-// by ParseNetworkAddress(). If the address is a unix socket,
-// any non-zero port will be dropped.
+// String reconstructs the address string for human display.
+// The output can be parsed by ParseNetworkAddress(). If the
+// address is a unix socket, any non-zero port will be dropped.
func (na NetworkAddress) String() string {
+ if na.Network == "tcp" && (na.Host != "" || na.port() != "") {
+ na.Network = "" // omit default network value for brevity
+ }
return JoinNetworkAddress(na.Network, na.Host, na.port())
}
-func isUnixNetwork(netw string) bool {
- return netw == "unix" || netw == "unixgram" || netw == "unixpacket"
+// IsUnixNetwork returns true if netw is a unix network
+// (any network with the "unix" prefix: unix, unixgram, unixpacket).
+func IsUnixNetwork(netw string) bool {
+	return strings.HasPrefix(netw, "unix")
+}
+
+// IsFdNetwork returns true if netw is a fd network
+// (any network with the "fd" prefix: fd, fdgram).
+func IsFdNetwork(netw string) bool {
+	return strings.HasPrefix(netw, "fd")
+}
// ParseNetworkAddress parses addr into its individual
@@ -339,38 +314,56 @@ func isUnixNetwork(netw string) bool {
// Network addresses are distinct from URLs and do not
// use URL syntax.
func ParseNetworkAddress(addr string) (NetworkAddress, error) {
+ return ParseNetworkAddressWithDefaults(addr, "tcp", 0)
+}
+
+// ParseNetworkAddressWithDefaults is like ParseNetworkAddress but allows
+// the default network and port to be specified.
+func ParseNetworkAddressWithDefaults(addr, defaultNetwork string, defaultPort uint) (NetworkAddress, error) {
var host, port string
network, host, port, err := SplitNetworkAddress(addr)
- if network == "" {
- network = "tcp"
- }
if err != nil {
return NetworkAddress{}, err
}
- if isUnixNetwork(network) {
+ if network == "" {
+ network = defaultNetwork
+ }
+ if IsUnixNetwork(network) {
+ _, _, err := internal.SplitUnixSocketPermissionsBits(host)
+ return NetworkAddress{
+ Network: network,
+ Host: host,
+ }, err
+ }
+ if IsFdNetwork(network) {
return NetworkAddress{
Network: network,
Host: host,
}, nil
}
- ports := strings.SplitN(port, "-", 2)
- if len(ports) == 1 {
- ports = append(ports, ports[0])
- }
var start, end uint64
- start, err = strconv.ParseUint(ports[0], 10, 16)
- if err != nil {
- return NetworkAddress{}, fmt.Errorf("invalid start port: %v", err)
- }
- end, err = strconv.ParseUint(ports[1], 10, 16)
- if err != nil {
- return NetworkAddress{}, fmt.Errorf("invalid end port: %v", err)
- }
- if end < start {
- return NetworkAddress{}, fmt.Errorf("end port must not be less than start port")
- }
- if (end - start) > maxPortSpan {
- return NetworkAddress{}, fmt.Errorf("port range exceeds %d ports", maxPortSpan)
+ if port == "" {
+ start = uint64(defaultPort)
+ end = uint64(defaultPort)
+ } else {
+ before, after, found := strings.Cut(port, "-")
+ if !found {
+ after = before
+ }
+ start, err = strconv.ParseUint(before, 10, 16)
+ if err != nil {
+ return NetworkAddress{}, fmt.Errorf("invalid start port: %v", err)
+ }
+ end, err = strconv.ParseUint(after, 10, 16)
+ if err != nil {
+ return NetworkAddress{}, fmt.Errorf("invalid end port: %v", err)
+ }
+ if end < start {
+ return NetworkAddress{}, fmt.Errorf("end port must not be less than start port")
+ }
+ if (end - start) > maxPortSpan {
+ return NetworkAddress{}, fmt.Errorf("port range exceeds %d ports", maxPortSpan)
+ }
}
return NetworkAddress{
Network: network,
@@ -383,15 +376,32 @@ func ParseNetworkAddress(addr string) (NetworkAddress, error) {
// SplitNetworkAddress splits a into its network, host, and port components.
// Note that port may be a port range (:X-Y), or omitted for unix sockets.
func SplitNetworkAddress(a string) (network, host, port string, err error) {
- if idx := strings.Index(a, "/"); idx >= 0 {
- network = strings.ToLower(strings.TrimSpace(a[:idx]))
- a = a[idx+1:]
- }
- if isUnixNetwork(network) {
- host = a
- return
+ beforeSlash, afterSlash, slashFound := strings.Cut(a, "/")
+ if slashFound {
+ network = strings.ToLower(strings.TrimSpace(beforeSlash))
+ a = afterSlash
+ if IsUnixNetwork(network) || IsFdNetwork(network) {
+ host = a
+ return
+ }
}
+
host, port, err = net.SplitHostPort(a)
+ firstErr := err
+
+ if err != nil {
+ // in general, if there was an error, it was likely "missing port",
+ // so try removing square brackets around an IPv6 host, adding a bogus
+ // port to take advantage of standard library's robust parser, then
+ // strip the artificial port.
+ host, _, err = net.SplitHostPort(net.JoinHostPort(strings.Trim(a, "[]"), "0"))
+ port = ""
+ }
+
+ if err != nil {
+ err = errors.Join(firstErr, err)
+ }
+
return
}
@@ -405,7 +415,7 @@ func JoinNetworkAddress(network, host, port string) string {
if network != "" {
a = network + "/"
}
- if (host != "" && port == "") || isUnixNetwork(network) {
+ if (host != "" && port == "") || IsUnixNetwork(network) || IsFdNetwork(network) {
a += host
} else if port != "" {
a += net.JoinHostPort(host, port)
@@ -413,6 +423,263 @@ func JoinNetworkAddress(network, host, port string) string {
return a
}
+// ListenQUIC returns a http3.QUICEarlyListener suitable for use in a Caddy module.
+//
+// The network will be transformed into a QUIC-compatible type if the same address can be used with
+// different networks. Currently this just means that for tcp, udp will be used with the same
+// address instead.
+//
+// NOTE: This API is EXPERIMENTAL and may be changed or removed.
+func (na NetworkAddress) ListenQUIC(ctx context.Context, portOffset uint, config net.ListenConfig, tlsConf *tls.Config) (http3.QUICEarlyListener, error) {
+	// key the shared listener under a distinct "quic"-prefixed pseudo-network
+	// so it does not collide with a plain UDP listener on the same address
+	lnKey := listenerKey("quic"+na.Network, na.JoinHostPort(portOffset))
+
+	// reuse an existing QUIC listener for this key, or create one
+	sharedEarlyListener, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
+		lnAny, err := na.Listen(ctx, portOffset, config)
+		if err != nil {
+			return nil, err
+		}
+
+		ln := lnAny.(net.PacketConn)
+
+		h3ln := ln
+		for {
+			// retrieve the underlying socket, so quic-go can optimize.
+			if unwrapper, ok := h3ln.(interface{ Unwrap() net.PacketConn }); ok {
+				h3ln = unwrapper.Unwrap()
+			} else {
+				break
+			}
+		}
+
+		sqs := newSharedQUICState(tlsConf)
+		// http3.ConfigureTLSConfig only uses this field and tls App sets this field as well
+		//nolint:gosec
+		quicTlsConfig := &tls.Config{GetConfigForClient: sqs.getConfigForClient}
+		// Require clients to verify their source address when we're handling more than 1000 handshakes per second.
+		// TODO: make tunable?
+		limiter := rate.NewLimiter(1000, 1000)
+		tr := &quic.Transport{
+			Conn:                h3ln,
+			VerifySourceAddress: func(addr net.Addr) bool { return !limiter.Allow() },
+		}
+		earlyLn, err := tr.ListenEarly(
+			http3.ConfigureTLSConfig(quicTlsConfig),
+			&quic.Config{
+				Allow0RTT: true,
+				Tracer:    qlog.DefaultConnectionTracer,
+			},
+		)
+		if err != nil {
+			return nil, err
+		}
+		// TODO: figure out when to close the listener and the transport
+		// using the original net.PacketConn to close them properly
+		return &sharedQuicListener{EarlyListener: earlyLn, packetConn: ln, sqs: sqs, key: lnKey}, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	sql := sharedEarlyListener.(*sharedQuicListener)
+	// add current tls.Config to sqs, so GetConfigForClient will always return the latest tls.Config in case of context cancellation
+	ctx, cancel := sql.sqs.addState(tlsConf)
+
+	// wrap the shared listener so that closing the returned listener only
+	// releases this caller's reference instead of closing the shared socket
+	return &fakeCloseQuicListener{
+		sharedQuicListener: sql,
+		context:            ctx,
+		contextCancel:      cancel,
+	}, nil
+}
+
+// ListenerUsage returns the current usage count of the given listener address.
+// A count of 0 means no pooled listener matches the given network and address.
+func ListenerUsage(network, addr string) int {
+	count, _ := listenerPool.References(listenerKey(network, addr))
+	return count
+}
+
+// contextAndCancelFunc groups context and its cancelFunc
+type contextAndCancelFunc struct {
+	context.Context
+	context.CancelFunc
+}
+
+// sharedQUICState manages GetConfigForClient
+// see issue: https://github.com/caddyserver/caddy/pull/4849
+type sharedQUICState struct {
+	rmu           sync.RWMutex                         // guards tlsConfs and activeTlsConf
+	tlsConfs      map[*tls.Config]contextAndCancelFunc // registered configs, each with its cancelable lifetime
+	activeTlsConf *tls.Config                          // the config consulted by getConfigForClient
+}
+
+// newSharedQUICState creates a new sharedQUICState,
+// registering tlsConfig as the initial active config.
+func newSharedQUICState(tlsConfig *tls.Config) *sharedQUICState {
+	sqtc := &sharedQUICState{
+		tlsConfs:      make(map[*tls.Config]contextAndCancelFunc),
+		activeTlsConf: tlsConfig,
+	}
+	sqtc.addState(tlsConfig)
+	return sqtc
+}
+
+// getConfigForClient is used as tls.Config's GetConfigForClient field,
+// delegating every handshake to the currently-active tls.Config.
+func (sqs *sharedQUICState) getConfigForClient(ch *tls.ClientHelloInfo) (*tls.Config, error) {
+	sqs.rmu.RLock()
+	defer sqs.rmu.RUnlock()
+	return sqs.activeTlsConf.GetConfigForClient(ch)
+}
+
+// addState adds tls.Config and activeRequests to the map if not present and returns the corresponding context and its cancelFunc
+// so that when cancelled, the active tls.Config will change
+func (sqs *sharedQUICState) addState(tlsConfig *tls.Config) (context.Context, context.CancelFunc) {
+	sqs.rmu.Lock()
+	defer sqs.rmu.Unlock()
+
+	// config already registered: reuse its existing lifetime
+	if cacc, ok := sqs.tlsConfs[tlsConfig]; ok {
+		return cacc.Context, cacc.CancelFunc
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	// wrappedCancel unregisters this config and, if it was the active one,
+	// promotes any other registered config to active
+	wrappedCancel := func() {
+		cancel()
+
+		sqs.rmu.Lock()
+		defer sqs.rmu.Unlock()
+
+		delete(sqs.tlsConfs, tlsConfig)
+		if sqs.activeTlsConf == tlsConfig {
+			// select another tls.Config, if there is none,
+			// related sharedQuicListener will be destroyed anyway
+			for tc := range sqs.tlsConfs {
+				sqs.activeTlsConf = tc
+				break
+			}
+		}
+	}
+	sqs.tlsConfs[tlsConfig] = contextAndCancelFunc{ctx, wrappedCancel}
+	// there should be at most 2 tls.Configs
+	if len(sqs.tlsConfs) > 2 {
+		Log().Warn("quic listener tls configs are more than 2", zap.Int("number of configs", len(sqs.tlsConfs)))
+	}
+	return ctx, wrappedCancel
+}
+
+// sharedQuicListener is like sharedListener, but for quic.EarlyListeners.
+type sharedQuicListener struct {
+	*quic.EarlyListener
+	packetConn net.PacketConn // we have to hold these because quic-go won't close listeners it didn't create
+	sqs        *sharedQUICState
+	key        string // the listenerPool key this listener is stored under
+}
+
+// Destruct closes the underlying QUIC listener and its associated net.PacketConn.
+// It is called by the listener pool when the last reference is released.
+func (sql *sharedQuicListener) Destruct() error {
+	// close EarlyListener first to stop any operations being done to the net.PacketConn
+	_ = sql.EarlyListener.Close()
+	// then close the net.PacketConn
+	return sql.packetConn.Close()
+}
+
+// fakeClosedErr returns an error value that is not temporary
+// nor a timeout, suitable for making the caller think the
+// listener is actually closed
+func fakeClosedErr(l interface{ Addr() net.Addr }) error {
+	return &net.OpError{
+		Op:   "accept",
+		Net:  l.Addr().Network(),
+		Addr: l.Addr(),
+		Err:  errFakeClosed,
+	}
+}
+
+// errFakeClosed is the underlying error value returned by
+// fakeCloseListener.Accept() after Close() has been called,
+// indicating that it is pretending to be closed so that the
+// server using it can terminate, while the underlying
+// socket is actually left open.
+var errFakeClosed = fmt.Errorf("listener 'closed' 😉")
+
+// fakeCloseQuicListener wraps a shared QUIC listener so that Close
+// merely releases this user's pool reference instead of closing the
+// underlying socket, which other configs may still be using.
+type fakeCloseQuicListener struct {
+	closed              int32 // accessed atomically; belongs to this struct only
+	*sharedQuicListener       // embedded, so we also become a quic.EarlyListener
+	context             context.Context
+	contextCancel       context.CancelFunc
+}
+
+// Currently Accept ignores the passed context, however a situation where
+// someone would need a hotswappable QUIC-only (not http3, since it uses context.Background here)
+// server on which Accept would be called with non-empty contexts
+// (mind that the default net listeners' Accept doesn't take a context argument)
+// sounds way too rare for us to sacrifice efficiency here.
+func (fcql *fakeCloseQuicListener) Accept(_ context.Context) (quic.EarlyConnection, error) {
+	conn, err := fcql.sharedQuicListener.Accept(fcql.context)
+	if err == nil {
+		return conn, nil
+	}
+
+	// if the listener is "closed", return a fake closed error instead
+	if atomic.LoadInt32(&fcql.closed) == 1 && errors.Is(err, context.Canceled) {
+		return nil, fakeClosedErr(fcql)
+	}
+	return nil, err
+}
+
+// Close marks this wrapper as closed and releases its reference in the
+// listener pool; the shared socket is only closed when the last reference
+// is released. Safe to call more than once.
+func (fcql *fakeCloseQuicListener) Close() error {
+	if atomic.CompareAndSwapInt32(&fcql.closed, 0, 1) {
+		fcql.contextCancel()
+		_, _ = listenerPool.Delete(fcql.sharedQuicListener.key)
+	}
+	return nil
+}
+
+// RegisterNetwork registers a network type with Caddy so that if a listener is
+// created for that network type, getListener will be invoked to get the listener.
+// This should be called during init() and will panic if the network type is standard
+// or reserved, or if it is already registered. EXPERIMENTAL and subject to change.
+func RegisterNetwork(network string, getListener ListenerFunc) {
+	network = strings.TrimSpace(strings.ToLower(network))
+
+	// refuse to shadow network types the standard library (and Caddy itself,
+	// for fd/fdgram) already handles; note the HasPrefix argument order:
+	// we test whether the NETWORK starts with "ip:", not the reverse
+	if network == "tcp" || network == "tcp4" || network == "tcp6" ||
+		network == "udp" || network == "udp4" || network == "udp6" ||
+		network == "unix" || network == "unixpacket" || network == "unixgram" ||
+		strings.HasPrefix(network, "ip:") || strings.HasPrefix(network, "ip4:") || strings.HasPrefix(network, "ip6:") ||
+		network == "fd" || network == "fdgram" {
+		panic("network type " + network + " is reserved")
+	}
+
+	// network was already lowercased above, so it can index the map directly
+	if _, ok := networkTypes[network]; ok {
+		panic("network type " + network + " is already registered")
+	}
+
+	networkTypes[network] = getListener
+}
+
+// unixSocketsMu serializes operations on unix socket files.
+var unixSocketsMu sync.Mutex
+
+// getListenerFromPlugin returns a listener on the given network and address
+// if a plugin has registered the network name. It may return (nil, nil) if
+// no plugin can provide a listener.
+func getListenerFromPlugin(ctx context.Context, network, host, port string, portOffset uint, config net.ListenConfig) (any, error) {
+	// get listener from plugin if network type is registered
+	if getListener, ok := networkTypes[network]; ok {
+		Log().Debug("getting listener from plugin", zap.String("network", network))
+		return getListener(ctx, network, host, port, portOffset, config)
+	}
+
+	return nil, nil
+}
+
+// listenerKey returns the key that indexes a listener in the pool,
+// formed from its network and address.
+func listenerKey(network, addr string) string {
+	return network + "/" + addr
+}
+
+// ListenerFunc is a function that can return a listener given a network and address.
+// The listeners must be capable of overlapping: with Caddy, new configs are loaded
+// before old ones are unloaded, so listeners may overlap briefly if the configs
+// both need the same listener. EXPERIMENTAL and subject to change.
+type ListenerFunc func(ctx context.Context, network, host, portRange string, portOffset uint, cfg net.ListenConfig) (any, error)
+
+// networkTypes maps plugin-registered network names to their listener factories.
+var networkTypes = map[string]ListenerFunc{}
+
// ListenerWrapper is a type that wraps a listener
// so it can modify the input listener's methods.
// Modules that implement this interface are found
@@ -426,9 +693,7 @@ type ListenerWrapper interface {
WrapListener(net.Listener) net.Listener
}
-var (
-	listeners = make(map[string]*globalListener)
-	listenersMu sync.Mutex
-)
+// listenerPool stores and allows reuse of active listeners.
+var listenerPool = NewUsagePool()

+// maxPortSpan is the largest allowed span of a port range (end - start).
const maxPortSpan = 65535
diff --git a/listeners_fuzz.go b/listeners_fuzz.go
index 823d0beb..02b65ab2 100644
--- a/listeners_fuzz.go
+++ b/listeners_fuzz.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build gofuzz
+//go:build gofuzz
package caddy
diff --git a/listeners_test.go b/listeners_test.go
index b75e2dce..03945308 100644
--- a/listeners_test.go
+++ b/listeners_test.go
@@ -17,6 +17,8 @@ package caddy
import (
"reflect"
"testing"
+
+ "github.com/caddyserver/caddy/v2/internal"
)
func TestSplitNetworkAddress(t *testing.T) {
@@ -29,11 +31,26 @@ func TestSplitNetworkAddress(t *testing.T) {
}{
{
input: "",
- expectErr: true,
+ expectHost: "",
},
{
- input: "foo",
- expectErr: true,
+ input: "foo",
+ expectHost: "foo",
+ },
+ {
+ input: ":", // empty host & empty port
+ },
+ {
+ input: "::",
+ expectHost: "::",
+ },
+ {
+ input: "[::]",
+ expectHost: "::",
+ },
+ {
+ input: ":1234",
+ expectPort: "1234",
},
{
input: "foo:1234",
@@ -60,7 +77,7 @@ func TestSplitNetworkAddress(t *testing.T) {
{
input: "udp/",
expectNetwork: "udp",
- expectErr: true,
+ expectHost: "",
},
{
input: "unix//foo/bar",
@@ -80,10 +97,10 @@ func TestSplitNetworkAddress(t *testing.T) {
} {
actualNetwork, actualHost, actualPort, err := SplitNetworkAddress(tc.input)
if tc.expectErr && err == nil {
- t.Errorf("Test %d: Expected error but got: %v", i, err)
+ t.Errorf("Test %d: Expected error but got %v", i, err)
}
if !tc.expectErr && err != nil {
- t.Errorf("Test %d: Expected no error but got: %v", i, err)
+ t.Errorf("Test %d: Expected no error but got %v", i, err)
}
if actualNetwork != tc.expectNetwork {
t.Errorf("Test %d: Expected network '%s' but got '%s'", i, tc.expectNetwork, actualNetwork)
@@ -160,38 +177,58 @@ func TestJoinNetworkAddress(t *testing.T) {
func TestParseNetworkAddress(t *testing.T) {
for i, tc := range []struct {
- input string
- expectAddr NetworkAddress
- expectErr bool
+ input string
+ defaultNetwork string
+ defaultPort uint
+ expectAddr NetworkAddress
+ expectErr bool
}{
{
input: "",
- expectErr: true,
- },
- {
- input: ":",
- expectErr: true,
- },
- {
- input: ":1234",
expectAddr: NetworkAddress{
- Network: "tcp",
+ },
+ },
+ {
+ input: ":",
+ defaultNetwork: "udp",
+ expectAddr: NetworkAddress{
+ Network: "udp",
+ },
+ },
+ {
+ input: "[::]",
+ defaultNetwork: "udp",
+ defaultPort: 53,
+ expectAddr: NetworkAddress{
+ Network: "udp",
+ Host: "::",
+ StartPort: 53,
+ EndPort: 53,
+ },
+ },
+ {
+ input: ":1234",
+ defaultNetwork: "udp",
+ expectAddr: NetworkAddress{
+ Network: "udp",
Host: "",
StartPort: 1234,
EndPort: 1234,
},
},
{
- input: "tcp/:1234",
+ input: "udp/:1234",
+ defaultNetwork: "udp",
expectAddr: NetworkAddress{
- Network: "tcp",
+ Network: "udp",
Host: "",
StartPort: 1234,
EndPort: 1234,
},
},
{
- input: "tcp6/:1234",
+ input: "tcp6/:1234",
+ defaultNetwork: "tcp",
expectAddr: NetworkAddress{
Network: "tcp6",
Host: "",
@@ -200,7 +237,8 @@ func TestParseNetworkAddress(t *testing.T) {
},
},
{
- input: "tcp4/localhost:1234",
+ input: "tcp4/localhost:1234",
+ defaultNetwork: "tcp",
expectAddr: NetworkAddress{
Network: "tcp4",
Host: "localhost",
@@ -209,14 +247,16 @@ func TestParseNetworkAddress(t *testing.T) {
},
},
{
- input: "unix//foo/bar",
+ input: "unix//foo/bar",
+ defaultNetwork: "tcp",
expectAddr: NetworkAddress{
Network: "unix",
Host: "/foo/bar",
},
},
{
- input: "localhost:1234-1234",
+ input: "localhost:1234-1234",
+ defaultNetwork: "tcp",
expectAddr: NetworkAddress{
Network: "tcp",
Host: "localhost",
@@ -225,11 +265,13 @@ func TestParseNetworkAddress(t *testing.T) {
},
},
{
- input: "localhost:2-1",
- expectErr: true,
+ input: "localhost:2-1",
+ defaultNetwork: "tcp",
+ expectErr: true,
},
{
- input: "localhost:0",
+ input: "localhost:0",
+ defaultNetwork: "tcp",
expectAddr: NetworkAddress{
Network: "tcp",
Host: "localhost",
@@ -238,11 +280,139 @@ func TestParseNetworkAddress(t *testing.T) {
},
},
{
- input: "localhost:1-999999999999",
- expectErr: true,
+ input: "localhost:1-999999999999",
+ defaultNetwork: "tcp",
+ expectErr: true,
},
} {
- actualAddr, err := ParseNetworkAddress(tc.input)
+ actualAddr, err := ParseNetworkAddressWithDefaults(tc.input, tc.defaultNetwork, tc.defaultPort)
+ if tc.expectErr && err == nil {
+ t.Errorf("Test %d: Expected error but got: %v", i, err)
+ }
+ if !tc.expectErr && err != nil {
+ t.Errorf("Test %d: Expected no error but got: %v", i, err)
+ }
+
+ if actualAddr.Network != tc.expectAddr.Network {
+ t.Errorf("Test %d: Expected network '%v' but got '%v'", i, tc.expectAddr, actualAddr)
+ }
+ if !reflect.DeepEqual(tc.expectAddr, actualAddr) {
+ t.Errorf("Test %d: Expected addresses %v but got %v", i, tc.expectAddr, actualAddr)
+ }
+ }
+}
+
+func TestParseNetworkAddressWithDefaults(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ defaultNetwork string
+ defaultPort uint
+ expectAddr NetworkAddress
+ expectErr bool
+ }{
+ {
+ input: "",
+ expectAddr: NetworkAddress{
+ },
+ },
+ {
+ input: ":",
+ defaultNetwork: "udp",
+ expectAddr: NetworkAddress{
+ Network: "udp",
+ },
+ },
+ {
+ input: "[::]",
+ defaultNetwork: "udp",
+ defaultPort: 53,
+ expectAddr: NetworkAddress{
+ Network: "udp",
+ Host: "::",
+ StartPort: 53,
+ EndPort: 53,
+ },
+ },
+ {
+ input: ":1234",
+ defaultNetwork: "udp",
+ expectAddr: NetworkAddress{
+ Network: "udp",
+ Host: "",
+ StartPort: 1234,
+ EndPort: 1234,
+ },
+ },
+ {
+ input: "udp/:1234",
+ defaultNetwork: "udp",
+ expectAddr: NetworkAddress{
+ Network: "udp",
+ Host: "",
+ StartPort: 1234,
+ EndPort: 1234,
+ },
+ },
+ {
+ input: "tcp6/:1234",
+ defaultNetwork: "tcp",
+ expectAddr: NetworkAddress{
+ Network: "tcp6",
+ Host: "",
+ StartPort: 1234,
+ EndPort: 1234,
+ },
+ },
+ {
+ input: "tcp4/localhost:1234",
+ defaultNetwork: "tcp",
+ expectAddr: NetworkAddress{
+ Network: "tcp4",
+ Host: "localhost",
+ StartPort: 1234,
+ EndPort: 1234,
+ },
+ },
+ {
+ input: "unix//foo/bar",
+ defaultNetwork: "tcp",
+ expectAddr: NetworkAddress{
+ Network: "unix",
+ Host: "/foo/bar",
+ },
+ },
+ {
+ input: "localhost:1234-1234",
+ defaultNetwork: "tcp",
+ expectAddr: NetworkAddress{
+ Network: "tcp",
+ Host: "localhost",
+ StartPort: 1234,
+ EndPort: 1234,
+ },
+ },
+ {
+ input: "localhost:2-1",
+ defaultNetwork: "tcp",
+ expectErr: true,
+ },
+ {
+ input: "localhost:0",
+ defaultNetwork: "tcp",
+ expectAddr: NetworkAddress{
+ Network: "tcp",
+ Host: "localhost",
+ StartPort: 0,
+ EndPort: 0,
+ },
+ },
+ {
+ input: "localhost:1-999999999999",
+ defaultNetwork: "tcp",
+ expectErr: true,
+ },
+ } {
+ actualAddr, err := ParseNetworkAddressWithDefaults(tc.input, tc.defaultNetwork, tc.defaultPort)
if tc.expectErr && err == nil {
t.Errorf("Test %d: Expected error but got: %v", i, err)
}
@@ -307,3 +477,180 @@ func TestJoinHostPort(t *testing.T) {
}
}
}
+
+// TestExpand verifies that NetworkAddress.Expand fans a port range out
+// into one single-port NetworkAddress per port, yields an empty result
+// for an inverted range, and passes port-less unix addresses through.
+func TestExpand(t *testing.T) {
+	for i, tc := range []struct {
+		input  NetworkAddress
+		expect []NetworkAddress
+	}{
+		{
+			input: NetworkAddress{
+				Network:   "tcp",
+				Host:      "localhost",
+				StartPort: 2000,
+				EndPort:   2000,
+			},
+			expect: []NetworkAddress{
+				{
+					Network:   "tcp",
+					Host:      "localhost",
+					StartPort: 2000,
+					EndPort:   2000,
+				},
+			},
+		},
+		{
+			input: NetworkAddress{
+				Network:   "tcp",
+				Host:      "localhost",
+				StartPort: 2000,
+				EndPort:   2002,
+			},
+			expect: []NetworkAddress{
+				{
+					Network:   "tcp",
+					Host:      "localhost",
+					StartPort: 2000,
+					EndPort:   2000,
+				},
+				{
+					Network:   "tcp",
+					Host:      "localhost",
+					StartPort: 2001,
+					EndPort:   2001,
+				},
+				{
+					Network:   "tcp",
+					Host:      "localhost",
+					StartPort: 2002,
+					EndPort:   2002,
+				},
+			},
+		},
+		{
+			// inverted range expands to nothing
+			input: NetworkAddress{
+				Network:   "tcp",
+				Host:      "localhost",
+				StartPort: 2000,
+				EndPort:   1999,
+			},
+			expect: []NetworkAddress{},
+		},
+		{
+			// unix sockets have no ports and expand to themselves
+			input: NetworkAddress{
+				Network:   "unix",
+				Host:      "/foo/bar",
+				StartPort: 0,
+				EndPort:   0,
+			},
+			expect: []NetworkAddress{
+				{
+					Network:   "unix",
+					Host:      "/foo/bar",
+					StartPort: 0,
+					EndPort:   0,
+				},
+			},
+		},
+	} {
+		actual := tc.input.Expand()
+		if !reflect.DeepEqual(actual, tc.expect) {
+			t.Errorf("Test %d: Expected %+v but got %+v", i, tc.expect, actual)
+		}
+	}
+}
+
+// TestSplitUnixSocketPermissionsBits verifies parsing of unix socket
+// addresses with optional '|'-separated octal permission bits, including
+// the default 0200 perms, rejection of symbolic modes, invalid octal
+// digits, extra separators, and modes lacking owner write permission.
+func TestSplitUnixSocketPermissionsBits(t *testing.T) {
+	for i, tc := range []struct {
+		input          string
+		expectNetwork  string
+		expectPath     string
+		expectFileMode string
+		expectErr      bool
+	}{
+		{
+			input:          "./foo.socket",
+			expectPath:     "./foo.socket",
+			expectFileMode: "--w-------",
+		},
+		{
+			input:          `.\relative\path.socket`,
+			expectPath:     `.\relative\path.socket`,
+			expectFileMode: "--w-------",
+		},
+		{
+			// literal colon in resulting address
+			// and defaulting to 0200 bits
+			input:          "./foo.socket:0666",
+			expectPath:     "./foo.socket:0666",
+			expectFileMode: "--w-------",
+		},
+		{
+			input:          "./foo.socket|0220",
+			expectPath:     "./foo.socket",
+			expectFileMode: "--w--w----",
+		},
+		{
+			input:          "/var/run/foo|222",
+			expectPath:     "/var/run/foo",
+			expectFileMode: "--w--w--w-",
+		},
+		{
+			input:          "./foo.socket|0660",
+			expectPath:     "./foo.socket",
+			expectFileMode: "-rw-rw----",
+		},
+		{
+			input:          "./foo.socket|0666",
+			expectPath:     "./foo.socket",
+			expectFileMode: "-rw-rw-rw-",
+		},
+		{
+			input:          "/var/run/foo|666",
+			expectPath:     "/var/run/foo",
+			expectFileMode: "-rw-rw-rw-",
+		},
+		{
+			input:          `c:\absolute\path.socket|220`,
+			expectPath:     `c:\absolute\path.socket`,
+			expectFileMode: "--w--w----",
+		},
+		{
+			// symbolic permission representation is not supported for now
+			input:     "./foo.socket|u=rw,g=rw,o=rw",
+			expectErr: true,
+		},
+		{
+			// octal (base-8) permission representation has to be between
+			// `0` for no read, no write, no exec (`---`) and
+			// `7` for read (4), write (2), exec (1) (`rwx` => `4+2+1 = 7`)
+			input:     "./foo.socket|888",
+			expectErr: true,
+		},
+		{
+			// too many '|' permission-bits separators in address
+			input:     "./foo.socket|123456|0660",
+			expectErr: true,
+		},
+		{
+			// owner is missing write perms
+			input:     "./foo.socket|0522",
+			expectErr: true,
+		},
+	} {
+		actualPath, actualFileMode, err := internal.SplitUnixSocketPermissionsBits(tc.input)
+		if tc.expectErr && err == nil {
+			t.Errorf("Test %d: Expected error but got: %v", i, err)
+		}
+		if !tc.expectErr && err != nil {
+			t.Errorf("Test %d: Expected no error but got: %v", i, err)
+		}
+		if actualPath != tc.expectPath {
+			t.Errorf("Test %d: Expected path '%s' but got '%s'", i, tc.expectPath, actualPath)
+		}
+		// fileMode.Perm().String() parses 0 to "----------"
+		if !tc.expectErr && actualFileMode.Perm().String() != tc.expectFileMode {
+			t.Errorf("Test %d: Expected perms '%s' but got '%s'", i, tc.expectFileMode, actualFileMode.Perm().String())
+		}
+	}
+}
diff --git a/logging.go b/logging.go
index 8f3f8426..ca10beee 100644
--- a/logging.go
+++ b/logging.go
@@ -18,7 +18,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"log"
"os"
"strings"
@@ -27,7 +26,7 @@ import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
- "golang.org/x/crypto/ssh/terminal"
+ "golang.org/x/term"
)
func init() {
@@ -63,7 +62,7 @@ type Logging struct {
// in dependencies that are not designed specifically for use
// in Caddy. Because it is global and unstructured, the sink
// lacks most advanced features and customizations.
- Sink *StandardLibLog `json:"sink,omitempty"`
+ Sink *SinkLog `json:"sink,omitempty"`
// Logs are your logs, keyed by an arbitrary name of your
// choosing. The default log can be customized by defining
@@ -106,7 +105,7 @@ func (logging *Logging) openLogs(ctx Context) error {
// then set up any other custom logs
for name, l := range logging.Logs {
// the default log is already set up
- if name == "default" {
+ if name == DefaultLoggerName {
continue
}
@@ -139,7 +138,7 @@ func (logging *Logging) setupNewDefault(ctx Context) error {
// extract the user-defined default log, if any
newDefault := new(defaultCustomLog)
- if userDefault, ok := logging.Logs["default"]; ok {
+ if userDefault, ok := logging.Logs[DefaultLoggerName]; ok {
newDefault.CustomLog = userDefault
} else {
// if none, make one with our own default settings
@@ -148,15 +147,21 @@ func (logging *Logging) setupNewDefault(ctx Context) error {
if err != nil {
return fmt.Errorf("setting up default Caddy log: %v", err)
}
- logging.Logs["default"] = newDefault.CustomLog
+ logging.Logs[DefaultLoggerName] = newDefault.CustomLog
}
- // set up this new log
- err := newDefault.CustomLog.provision(ctx, logging)
+ // options for the default logger
+ options, err := newDefault.CustomLog.buildOptions()
if err != nil {
return fmt.Errorf("setting up default log: %v", err)
}
- newDefault.logger = zap.New(newDefault.CustomLog.core)
+
+ // set up this new log
+ err = newDefault.CustomLog.provision(ctx, logging)
+ if err != nil {
+ return fmt.Errorf("setting up default log: %v", err)
+ }
+ newDefault.logger = zap.New(newDefault.CustomLog.core, options...)
// redirect the default caddy logs
defaultLoggerMu.Lock()
@@ -202,6 +207,7 @@ func (logging *Logging) closeLogs() error {
func (logging *Logging) Logger(mod Module) *zap.Logger {
modID := string(mod.CaddyModule().ID)
var cores []zapcore.Core
+ var options []zap.Option
if logging != nil {
for _, l := range logging.Logs {
@@ -210,6 +216,13 @@ func (logging *Logging) Logger(mod Module) *zap.Logger {
cores = append(cores, l.core)
continue
}
+ if len(options) == 0 {
+ newOptions, err := l.buildOptions()
+ if err != nil {
+ Log().Error("building options for logger", zap.String("module", modID), zap.Error(err))
+ }
+ options = newOptions
+ }
cores = append(cores, &filteringCore{Core: l.core, cl: l})
}
}
@@ -217,7 +230,7 @@ func (logging *Logging) Logger(mod Module) *zap.Logger {
multiCore := zapcore.NewTee(cores...)
- return zap.New(multiCore).Named(modID)
+ return zap.New(multiCore, options...).Named(modID)
}
// openWriter opens a writer using opener, and returns true if
@@ -252,6 +265,17 @@ type WriterOpener interface {
OpenWriter() (io.WriteCloser, error)
}
+// IsWriterStandardStream returns true if the input is a
+// writer-opener to a standard stream (stdout, stderr).
+func IsWriterStandardStream(wo WriterOpener) bool {
+ switch wo.(type) {
+ case StdoutWriter, StderrWriter,
+ *StdoutWriter, *StderrWriter:
+ return true
+ }
+ return false
+}
+
type writerDestructor struct {
io.WriteCloser
}
@@ -260,39 +284,171 @@ func (wdest writerDestructor) Destruct() error {
return wdest.Close()
}
-// StandardLibLog configures the default Go standard library
+// BaseLog contains the common logging parameters for logging.
+type BaseLog struct {
+	// The module that writes out log entries for the sink.
+	WriterRaw json.RawMessage `json:"writer,omitempty" caddy:"namespace=caddy.logging.writers inline_key=output"`
+
+	// The encoder is how the log entries are formatted or encoded.
+	EncoderRaw json.RawMessage `json:"encoder,omitempty" caddy:"namespace=caddy.logging.encoders inline_key=format"`
+
+	// Tees entries through a zap.Core module which can extract
+	// log entry metadata and fields for further processing.
+	CoreRaw json.RawMessage `json:"core,omitempty" caddy:"namespace=caddy.logging.cores inline_key=module"`
+
+	// Level is the minimum level to emit, and is inclusive.
+	// Possible levels: DEBUG, INFO, WARN, ERROR, PANIC, and FATAL
+	Level string `json:"level,omitempty"`
+
+	// Sampling configures log entry sampling. If enabled,
+	// only some log entries will be emitted. This is useful
+	// for improving performance on extremely high-pressure
+	// servers.
+	Sampling *LogSampling `json:"sampling,omitempty"`
+
+	// If true, the log entry will include the caller's
+	// file name and line number. Default off.
+	WithCaller bool `json:"with_caller,omitempty"`
+
+	// If non-zero, and `with_caller` is true, this many
+	// stack frames will be skipped when determining the
+	// caller. Default 0.
+	WithCallerSkip int `json:"with_caller_skip,omitempty"`
+
+	// If not empty, the log entry will include a stack trace
+	// for all logs at the given level or higher. See `level`
+	// for possible values. Default off.
+	WithStacktrace string `json:"with_stacktrace,omitempty"`
+
+	writerOpener WriterOpener         // loaded from WriterRaw (or default stderr); opens writer
+	writer       io.WriteCloser       // the opened (possibly shared) output writer
+	encoder      zapcore.Encoder      // loaded from EncoderRaw (or production default)
+	levelEnabler zapcore.LevelEnabler // parsed from Level
+	core         zapcore.Core         // assembled core, possibly teed with CoreRaw
+}
+
+// provisionCommon sets up the pieces shared by all log configs:
+// it opens the writer (default stderr), parses the level, loads the
+// encoder (with optional secondary default-format configuration),
+// builds the core, and tees in a user-supplied core if configured.
+func (cl *BaseLog) provisionCommon(ctx Context, logging *Logging) error {
+	if cl.WriterRaw != nil {
+		mod, err := ctx.LoadModule(cl, "WriterRaw")
+		if err != nil {
+			return fmt.Errorf("loading log writer module: %v", err)
+		}
+		cl.writerOpener = mod.(WriterOpener)
+	}
+	// fall back to stderr when no writer is configured
+	if cl.writerOpener == nil {
+		cl.writerOpener = StderrWriter{}
+	}
+	var err error
+	cl.writer, _, err = logging.openWriter(cl.writerOpener)
+	if err != nil {
+		return fmt.Errorf("opening log writer using %#v: %v", cl.writerOpener, err)
+	}
+
+	// set up the log level
+	cl.levelEnabler, err = parseLevel(cl.Level)
+	if err != nil {
+		return err
+	}
+
+	if cl.EncoderRaw != nil {
+		mod, err := ctx.LoadModule(cl, "EncoderRaw")
+		if err != nil {
+			return fmt.Errorf("loading log encoder module: %v", err)
+		}
+		cl.encoder = mod.(zapcore.Encoder)
+
+		// if the encoder module needs the writer to determine
+		// the correct default to use for a nested encoder, we
+		// pass it down as a secondary provisioning step
+		if cfd, ok := mod.(ConfiguresFormatterDefault); ok {
+			if err := cfd.ConfigureDefaultFormat(cl.writerOpener); err != nil {
+				return fmt.Errorf("configuring default format for encoder module: %v", err)
+			}
+		}
+	}
+	if cl.encoder == nil {
+		cl.encoder = newDefaultProductionLogEncoder(cl.writerOpener)
+	}
+	cl.buildCore()
+	if cl.CoreRaw != nil {
+		mod, err := ctx.LoadModule(cl, "CoreRaw")
+		if err != nil {
+			return fmt.Errorf("loading log core module: %v", err)
+		}
+		core := mod.(zapcore.Core)
+		// tee entries to both our core and the user-supplied one
+		cl.core = zapcore.NewTee(cl.core, core)
+	}
+	return nil
+}
+
+// buildCore assembles cl.core from the encoder, writer, and level,
+// wrapping it in a sampler (with defaults filled in) when sampling
+// is configured. Discard writers short-circuit to a nop core.
+func (cl *BaseLog) buildCore() {
+	// logs which only discard their output don't need
+	// to perform encoding or any other processing steps
+	// at all, so just shortcut to a nop core instead
+	if _, ok := cl.writerOpener.(*DiscardWriter); ok {
+		cl.core = zapcore.NewNopCore()
+		return
+	}
+	c := zapcore.NewCore(
+		cl.encoder,
+		zapcore.AddSync(cl.writer),
+		cl.levelEnabler,
+	)
+	if cl.Sampling != nil {
+		// fill in sampling defaults for any unset fields
+		if cl.Sampling.Interval == 0 {
+			cl.Sampling.Interval = 1 * time.Second
+		}
+		if cl.Sampling.First == 0 {
+			cl.Sampling.First = 100
+		}
+		if cl.Sampling.Thereafter == 0 {
+			cl.Sampling.Thereafter = 100
+		}
+		c = zapcore.NewSamplerWithOptions(c, cl.Sampling.Interval,
+			cl.Sampling.First, cl.Sampling.Thereafter)
+	}
+	cl.core = c
+}
+
+// buildOptions returns the zap options derived from this log's caller
+// and stacktrace settings; they are passed to zap.New by the sink,
+// default, and per-module logger constructors. Returns an error if
+// the configured stacktrace level cannot be parsed.
+func (cl *BaseLog) buildOptions() ([]zap.Option, error) {
+	var options []zap.Option
+	if cl.WithCaller {
+		options = append(options, zap.AddCaller())
+		if cl.WithCallerSkip != 0 {
+			options = append(options, zap.AddCallerSkip(cl.WithCallerSkip))
+		}
+	}
+	if cl.WithStacktrace != "" {
+		levelEnabler, err := parseLevel(cl.WithStacktrace)
+		if err != nil {
+			// note: this method serves every logger (sink, default, and
+			// per-module), so don't misattribute the error to the default log
+			return options, fmt.Errorf("setting up log stacktrace level: %v", err)
+		}
+		options = append(options, zap.AddStacktrace(levelEnabler))
+	}
+	return options, nil
+}
+
+// SinkLog configures the default Go standard library
// global logger in the log package. This is necessary because
// module dependencies which are not built specifically for
// Caddy will use the standard logger. This is also known as
// the "sink" logger.
-type StandardLibLog struct {
-	// The module that writes out log entries for the sink.
-	WriterRaw json.RawMessage `json:"writer,omitempty" caddy:"namespace=caddy.logging.writers inline_key=output"`
-
-	writer io.WriteCloser
+type SinkLog struct {
+	BaseLog // common writer/encoder/level/core configuration
}
-func (sll *StandardLibLog) provision(ctx Context, logging *Logging) error {
-	if sll.WriterRaw != nil {
-		mod, err := ctx.LoadModule(sll, "WriterRaw")
-		if err != nil {
-			return fmt.Errorf("loading sink log writer module: %v", err)
-		}
-		wo := mod.(WriterOpener)
-
-		var isNew bool
-		sll.writer, isNew, err = logging.openWriter(wo)
-		if err != nil {
-			return fmt.Errorf("opening sink log writer %#v: %v", mod, err)
-		}
-
-		if isNew {
-			log.Printf("[INFO] Redirecting sink to: %s", wo)
-			log.SetOutput(sll.writer)
-			log.Printf("[INFO] Redirected sink to here (%s)", wo)
-		}
+// provision builds the sink logger and redirects the standard
+// library's global logger to it; zap.RedirectStdLog returns a
+// restore func which is registered for cleanup on config unload.
+func (sll *SinkLog) provision(ctx Context, logging *Logging) error {
+	if err := sll.provisionCommon(ctx, logging); err != nil {
+		return err
	}
+	options, err := sll.buildOptions()
+	if err != nil {
+		return err
+	}
+
+	logger := zap.New(sll.core, options...)
+	ctx.cleanupFuncs = append(ctx.cleanupFuncs, zap.RedirectStdLog(logger))
	return nil
}
@@ -306,21 +462,7 @@ func (sll *StandardLibLog) provision(ctx Context, logging *Logging) error {
// exclusive, and longer namespaces have priority. If neither
// are populated, all logs are emitted.
type CustomLog struct {
- // The writer defines where log entries are emitted.
- WriterRaw json.RawMessage `json:"writer,omitempty" caddy:"namespace=caddy.logging.writers inline_key=output"`
-
- // The encoder is how the log entries are formatted or encoded.
- EncoderRaw json.RawMessage `json:"encoder,omitempty" caddy:"namespace=caddy.logging.encoders inline_key=format"`
-
- // Level is the minimum level to emit, and is inclusive.
- // Possible levels: DEBUG, INFO, WARN, ERROR, PANIC, and FATAL
- Level string `json:"level,omitempty"`
-
- // Sampling configures log entry sampling. If enabled,
- // only some log entries will be emitted. This is useful
- // for improving performance on extremely high-pressure
- // servers.
- Sampling *LogSampling `json:"sampling,omitempty"`
+ BaseLog
// Include defines the names of loggers to emit in this
// log. For example, to include only logs emitted by the
@@ -331,39 +473,11 @@ type CustomLog struct {
// skipped by this log. For example, to exclude only
// HTTP access logs, you would exclude "http.log.access".
Exclude []string `json:"exclude,omitempty"`
-
- writerOpener WriterOpener
- writer io.WriteCloser
- encoder zapcore.Encoder
- levelEnabler zapcore.LevelEnabler
- core zapcore.Core
}
func (cl *CustomLog) provision(ctx Context, logging *Logging) error {
- // Replace placeholder for log level
- repl := NewReplacer()
- level, err := repl.ReplaceOrErr(cl.Level, true, true)
- if err != nil {
- return fmt.Errorf("invalid log level: %v", err)
- }
- level = strings.ToLower(level)
-
- // set up the log level
- switch level {
- case "debug":
- cl.levelEnabler = zapcore.DebugLevel
- case "", "info":
- cl.levelEnabler = zapcore.InfoLevel
- case "warn":
- cl.levelEnabler = zapcore.WarnLevel
- case "error":
- cl.levelEnabler = zapcore.ErrorLevel
- case "panic":
- cl.levelEnabler = zapcore.PanicLevel
- case "fatal":
- cl.levelEnabler = zapcore.FatalLevel
- default:
- return fmt.Errorf("unrecognized log level: %s", cl.Level)
+ if err := cl.provisionCommon(ctx, logging); err != nil {
+ return err
}
// If both Include and Exclude lists are populated, then each item must
@@ -395,75 +509,9 @@ func (cl *CustomLog) provision(ctx Context, logging *Logging) error {
return fmt.Errorf("when both include and exclude are populated, each element must be a superspace or subspace of one in the other list; check '%s' in include", allow)
}
}
-
- if cl.WriterRaw != nil {
- mod, err := ctx.LoadModule(cl, "WriterRaw")
- if err != nil {
- return fmt.Errorf("loading log writer module: %v", err)
- }
- cl.writerOpener = mod.(WriterOpener)
- }
- if cl.writerOpener == nil {
- cl.writerOpener = StderrWriter{}
- }
-
- cl.writer, _, err = logging.openWriter(cl.writerOpener)
- if err != nil {
- return fmt.Errorf("opening log writer using %#v: %v", cl.writerOpener, err)
- }
-
- if cl.EncoderRaw != nil {
- mod, err := ctx.LoadModule(cl, "EncoderRaw")
- if err != nil {
- return fmt.Errorf("loading log encoder module: %v", err)
- }
- cl.encoder = mod.(zapcore.Encoder)
- }
- if cl.encoder == nil {
- // only allow colorized output if this log is going to stdout or stderr
- var colorize bool
- switch cl.writerOpener.(type) {
- case StdoutWriter, StderrWriter,
- *StdoutWriter, *StderrWriter:
- colorize = true
- }
- cl.encoder = newDefaultProductionLogEncoder(colorize)
- }
-
- cl.buildCore()
-
return nil
}
-func (cl *CustomLog) buildCore() {
- // logs which only discard their output don't need
- // to perform encoding or any other processing steps
- // at all, so just shorcut to a nop core instead
- if _, ok := cl.writerOpener.(*DiscardWriter); ok {
- cl.core = zapcore.NewNopCore()
- return
- }
- c := zapcore.NewCore(
- cl.encoder,
- zapcore.AddSync(cl.writer),
- cl.levelEnabler,
- )
- if cl.Sampling != nil {
- if cl.Sampling.Interval == 0 {
- cl.Sampling.Interval = 1 * time.Second
- }
- if cl.Sampling.First == 0 {
- cl.Sampling.First = 100
- }
- if cl.Sampling.Thereafter == 0 {
- cl.Sampling.Thereafter = 100
- }
- c = zapcore.NewSampler(c, cl.Sampling.Interval,
- cl.Sampling.First, cl.Sampling.Thereafter)
- }
- cl.core = c
-}
-
func (cl *CustomLog) matchesModule(moduleID string) bool {
return cl.loggerAllowed(moduleID, true)
}
@@ -481,7 +529,7 @@ func (cl *CustomLog) loggerAllowed(name string, isModule bool) bool {
// append a dot so that partial names don't match
// (i.e. we don't want "foo.b" to match "foo.bar"); we
// will also have to append a dot when we do HasPrefix
- // below to compensate for when when namespaces are equal
+ // below to compensate for when namespaces are equal
if name != "" && name != "*" && name != "." {
name += "."
}
@@ -630,9 +678,9 @@ func (StderrWriter) OpenWriter() (io.WriteCloser, error) {
return notClosable{os.Stderr}, nil
}
-// OpenWriter returns ioutil.Discard that can't be closed.
+// OpenWriter returns io.Discard that can't be closed.
func (DiscardWriter) OpenWriter() (io.WriteCloser, error) {
- return notClosable{ioutil.Discard}, nil
+ return notClosable{io.Discard}, nil
}
// notClosable is an io.WriteCloser that can't be closed.
@@ -657,32 +705,66 @@ func newDefaultProductionLog() (*defaultCustomLog, error) {
if err != nil {
return nil, err
}
- cl.encoder = newDefaultProductionLogEncoder(true)
+ cl.encoder = newDefaultProductionLogEncoder(cl.writerOpener)
cl.levelEnabler = zapcore.InfoLevel
cl.buildCore()
+ logger := zap.New(cl.core)
+
+ // capture logs from other libraries which
+ // may not be using zap logging directly
+ _ = zap.RedirectStdLog(logger)
+
return &defaultCustomLog{
CustomLog: cl,
- logger: zap.New(cl.core),
+ logger: logger,
}, nil
}
-func newDefaultProductionLogEncoder(colorize bool) zapcore.Encoder {
+func newDefaultProductionLogEncoder(wo WriterOpener) zapcore.Encoder {
encCfg := zap.NewProductionEncoderConfig()
- if terminal.IsTerminal(int(os.Stdout.Fd())) {
+ if IsWriterStandardStream(wo) && term.IsTerminal(int(os.Stderr.Fd())) {
// if interactive terminal, make output more human-readable by default
encCfg.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) {
encoder.AppendString(ts.UTC().Format("2006/01/02 15:04:05.000"))
}
- if colorize {
+ if coloringEnabled {
encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
}
+
return zapcore.NewConsoleEncoder(encCfg)
}
return zapcore.NewJSONEncoder(encCfg)
}
+func parseLevel(levelInput string) (zapcore.LevelEnabler, error) {
+ repl := NewReplacer()
+ level, err := repl.ReplaceOrErr(levelInput, true, true)
+ if err != nil {
+ return nil, fmt.Errorf("invalid log level: %v", err)
+ }
+ level = strings.ToLower(level)
+
+ // set up the log level
+ switch level {
+ case "debug":
+ return zapcore.DebugLevel, nil
+ case "", "info":
+ return zapcore.InfoLevel, nil
+ case "warn":
+ return zapcore.WarnLevel, nil
+ case "error":
+ return zapcore.ErrorLevel, nil
+ case "panic":
+ return zapcore.PanicLevel, nil
+ case "fatal":
+ return zapcore.FatalLevel, nil
+ default:
+ return nil, fmt.Errorf("unrecognized log level: %s", level)
+ }
+}
+
// Log returns the current default logger.
func Log() *zap.Logger {
defaultLoggerMu.RLock()
@@ -691,12 +773,24 @@ func Log() *zap.Logger {
}
var (
+ coloringEnabled = os.Getenv("NO_COLOR") == "" && os.Getenv("TERM") != "xterm-mono"
defaultLogger, _ = newDefaultProductionLog()
defaultLoggerMu sync.RWMutex
)
var writers = NewUsagePool()
+// ConfiguresFormatterDefault is an optional interface that
+// encoder modules can implement to configure the default
+// format of their encoder. This is useful for encoders
+// which nest an encoder, that needs to know the writer
+// in order to determine the correct default.
+type ConfiguresFormatterDefault interface {
+ ConfigureDefaultFormat(WriterOpener) error
+}
+
+const DefaultLoggerName = "default"
+
// Interface guards
var (
_ io.WriteCloser = (*notClosable)(nil)
diff --git a/metrics.go b/metrics.go
new file mode 100644
index 00000000..0ee3853e
--- /dev/null
+++ b/metrics.go
@@ -0,0 +1,84 @@
+package caddy
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+
+ "github.com/caddyserver/caddy/v2/internal/metrics"
+)
+
+// define and register the metrics used in this package.
+func init() {
+ const ns, sub = "caddy", "admin"
+ adminMetrics.requestCount = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: ns,
+ Subsystem: sub,
+ Name: "http_requests_total",
+ Help: "Counter of requests made to the Admin API's HTTP endpoints.",
+ }, []string{"handler", "path", "code", "method"})
+ adminMetrics.requestErrors = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: ns,
+ Subsystem: sub,
+ Name: "http_request_errors_total",
+ Help: "Number of requests resulting in middleware errors.",
+ }, []string{"handler", "path", "method"})
+ globalMetrics.configSuccess = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "caddy_config_last_reload_successful",
+ Help: "Whether the last configuration reload attempt was successful.",
+ })
+ globalMetrics.configSuccessTime = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "caddy_config_last_reload_success_timestamp_seconds",
+ Help: "Timestamp of the last successful configuration reload.",
+ })
+}
+
+// adminMetrics is a collection of metrics that can be tracked for the admin API.
+var adminMetrics = struct {
+ requestCount *prometheus.CounterVec
+ requestErrors *prometheus.CounterVec
+}{}
+
+// globalMetrics is a collection of metrics that can be tracked for Caddy global state
+var globalMetrics = struct {
+ configSuccess prometheus.Gauge
+ configSuccessTime prometheus.Gauge
+}{}
+
+// Similar to promhttp.InstrumentHandlerCounter, but upper-cases method names
+// instead of lower-casing them.
+//
+// Unlike promhttp.InstrumentHandlerCounter, this assumes a "code" and "method"
+// label is present, and will panic otherwise.
+func instrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w)
+ next.ServeHTTP(d, r)
+ counter.With(prometheus.Labels{
+ "code": metrics.SanitizeCode(d.status),
+ "method": metrics.SanitizeMethod(r.Method),
+ }).Inc()
+ })
+}
+
+func newDelegator(w http.ResponseWriter) *delegator {
+ return &delegator{
+ ResponseWriter: w,
+ }
+}
+
+type delegator struct {
+ http.ResponseWriter
+ status int
+}
+
+func (d *delegator) WriteHeader(code int) {
+ d.status = code
+ d.ResponseWriter.WriteHeader(code)
+}
+
+// Unwrap returns the underlying ResponseWriter, necessary for
+// http.ResponseController to work correctly.
+func (d *delegator) Unwrap() http.ResponseWriter {
+ return d.ResponseWriter
+}
diff --git a/modules.go b/modules.go
index 0f4a563b..470c25e3 100644
--- a/modules.go
+++ b/modules.go
@@ -44,7 +44,7 @@ import (
// Provisioner, the Provision() method is called. 4) If the
// module is a Validator, the Validate() method is called.
// 5) The module will probably be type-asserted from
-// interface{} to some other, more useful interface expected
+// 'any' to some other, more useful interface expected
// by the host module. For example, HTTP handler modules are
// type-asserted as caddyhttp.MiddlewareHandler values.
// 6) When a module's containing Context is canceled, if it is
@@ -172,7 +172,7 @@ func GetModule(name string) (ModuleInfo, error) {
// GetModuleName returns a module's name (the last label of its ID)
// from an instance of its value. If the value is not a module, an
// empty string will be returned.
-func GetModuleName(instance interface{}) string {
+func GetModuleName(instance any) string {
var name string
if mod, ok := instance.(Module); ok {
name = mod.CaddyModule().ID.Name()
@@ -182,7 +182,7 @@ func GetModuleName(instance interface{}) string {
// GetModuleID returns a module's ID from an instance of its value.
// If the value is not a module, an empty string will be returned.
-func GetModuleID(instance interface{}) string {
+func GetModuleID(instance any) string {
var id string
if mod, ok := instance.(Module); ok {
id = string(mod.CaddyModule().ID)
@@ -259,7 +259,7 @@ func Modules() []string {
// where raw must be a JSON encoding of a map. It returns that value,
// along with the result of removing that key from raw.
func getModuleNameInline(moduleNameKey string, raw json.RawMessage) (string, json.RawMessage, error) {
- var tmp map[string]interface{}
+ var tmp map[string]any
err := json.Unmarshal(raw, &tmp)
if err != nil {
return "", nil, err
@@ -324,20 +324,20 @@ func ParseStructTag(tag string) (map[string]string, error) {
if pair == "" {
continue
}
- parts := strings.SplitN(pair, "=", 2)
- if len(parts) != 2 {
+ before, after, isCut := strings.Cut(pair, "=")
+ if !isCut {
return nil, fmt.Errorf("missing key in '%s' (pair %d)", pair, i)
}
- results[parts[0]] = parts[1]
+ results[before] = after
}
return results, nil
}
-// strictUnmarshalJSON is like json.Unmarshal but returns an error
+// StrictUnmarshalJSON is like json.Unmarshal but returns an error
// if any of the fields are unrecognized. Useful when decoding
// module configurations, where you want to be more sure they're
// correct.
-func strictUnmarshalJSON(data []byte, v interface{}) error {
+func StrictUnmarshalJSON(data []byte, v any) error {
dec := json.NewDecoder(bytes.NewReader(data))
dec.DisallowUnknownFields()
return dec.Decode(v)
diff --git a/modules/caddyevents/app.go b/modules/caddyevents/app.go
new file mode 100644
index 00000000..e78b00f8
--- /dev/null
+++ b/modules/caddyevents/app.go
@@ -0,0 +1,405 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyevents
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/google/uuid"
+ "go.uber.org/zap"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+func init() {
+ caddy.RegisterModule(App{})
+}
+
+// App implements a global eventing system within Caddy.
+// Modules can emit and subscribe to events, providing
+// hooks into deep parts of the code base that aren't
+// otherwise accessible. Events provide information about
+// what and when things are happening, and this facility
+// allows handlers to take action when events occur,
+// add information to the event's metadata, and even
+// control program flow in some cases.
+//
+// Events are propagated in a DOM-like fashion. An event
+// emitted from module `a.b.c` (the "origin") will first
+// invoke handlers listening to `a.b.c`, then `a.b`,
+// then `a`, then those listening regardless of origin.
+// If a handler returns the special error Aborted, then
+// propagation immediately stops and the event is marked
+// as aborted. Emitters may optionally choose to adjust
+// program flow based on an abort.
+//
+// Modules can subscribe to events by origin and/or name.
+// A handler is invoked only if it is subscribed to the
+// event by name and origin. Subscriptions should be
+// registered during the provisioning phase, before apps
+// are started.
+//
+// Event handlers are fired synchronously as part of the
+// regular flow of the program. This allows event handlers
+// to control the flow of the program if the origin permits
+// it and also allows handlers to convey new information
+// back into the origin module before it continues.
+// In essence, event handlers are similar to HTTP
+// middleware handlers.
+//
+// Event bindings/subscribers are unordered; i.e.
+// event handlers are invoked in an arbitrary order.
+// Event handlers should not rely on the logic of other
+// handlers to succeed.
+//
+// The entirety of this app module is EXPERIMENTAL and
+// subject to change. Pay attention to release notes.
+type App struct {
+ // Subscriptions bind handlers to one or more events
+ // either globally or scoped to specific modules or module
+ // namespaces.
+ Subscriptions []*Subscription `json:"subscriptions,omitempty"`
+
+ // Map of event name to map of module ID/namespace to handlers
+ subscriptions map[string]map[caddy.ModuleID][]Handler
+
+ logger *zap.Logger
+ started bool
+}
+
+// Subscription represents binding of one or more handlers to
+// one or more events.
+type Subscription struct {
+ // The name(s) of the event(s) to bind to. Default: all events.
+ Events []string `json:"events,omitempty"`
+
+ // The ID or namespace of the module(s) from which events
+ // originate to listen to for events. Default: all modules.
+ //
+ // Events propagate up, so events emitted by module "a.b.c"
+ // will also trigger the event for "a.b" and "a". Thus, to
+ // receive all events from "a.b.c" and "a.b.d", for example,
+ // one can subscribe to either "a.b" or all of "a" entirely.
+ Modules []caddy.ModuleID `json:"modules,omitempty"`
+
+ // The event handler modules. These implement the actual
+ // behavior to invoke when an event occurs. At least one
+ // handler is required.
+ HandlersRaw []json.RawMessage `json:"handlers,omitempty" caddy:"namespace=events.handlers inline_key=handler"`
+
+ // The decoded handlers; Go code that is subscribing to
+ // an event should set this field directly; HandlersRaw
+ // is meant for JSON configuration to fill out this field.
+ Handlers []Handler `json:"-"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (App) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "events",
+ New: func() caddy.Module { return new(App) },
+ }
+}
+
+// Provision sets up the app.
+func (app *App) Provision(ctx caddy.Context) error {
+ app.logger = ctx.Logger()
+ app.subscriptions = make(map[string]map[caddy.ModuleID][]Handler)
+
+ for _, sub := range app.Subscriptions {
+ if sub.HandlersRaw == nil {
+ continue
+ }
+ handlersIface, err := ctx.LoadModule(sub, "HandlersRaw")
+ if err != nil {
+ return fmt.Errorf("loading event subscriber modules: %v", err)
+ }
+ for _, h := range handlersIface.([]any) {
+ sub.Handlers = append(sub.Handlers, h.(Handler))
+ }
+ if len(sub.Handlers) == 0 {
+ // pointless to bind without any handlers
+ return fmt.Errorf("no handlers defined")
+ }
+ }
+
+ return nil
+}
+
+// Start runs the app.
+func (app *App) Start() error {
+ for _, sub := range app.Subscriptions {
+ if err := app.Subscribe(sub); err != nil {
+ return err
+ }
+ }
+
+ app.started = true
+
+ return nil
+}
+
+// Stop gracefully shuts down the app.
+func (app *App) Stop() error {
+ return nil
+}
+
+// Subscribe binds one or more event handlers to one or more events
+// according to the subscription s. For now, subscriptions can only
+// be created during the provision phase; new bindings cannot be
+// created after the events app has started.
+func (app *App) Subscribe(s *Subscription) error {
+ if app.started {
+ return fmt.Errorf("events already started; new subscriptions closed")
+ }
+
+ // handle special case of catch-alls (omission of event name or module space implies all)
+ if len(s.Events) == 0 {
+ s.Events = []string{""}
+ }
+ if len(s.Modules) == 0 {
+ s.Modules = []caddy.ModuleID{""}
+ }
+
+ for _, eventName := range s.Events {
+ if app.subscriptions[eventName] == nil {
+ app.subscriptions[eventName] = make(map[caddy.ModuleID][]Handler)
+ }
+ for _, originModule := range s.Modules {
+ app.subscriptions[eventName][originModule] = append(app.subscriptions[eventName][originModule], s.Handlers...)
+ }
+ }
+
+ return nil
+}
+
+// On is syntactic sugar for Subscribe() that binds a single handler
+// to a single event from any module. If the eventName is empty string,
+// it counts for all events.
+func (app *App) On(eventName string, handler Handler) error {
+ return app.Subscribe(&Subscription{
+ Events: []string{eventName},
+ Handlers: []Handler{handler},
+ })
+}
+
+// Emit creates and dispatches an event named eventName to all relevant handlers with
+// the metadata data. Events are emitted and propagated synchronously. The returned Event
+// value will have any additional information from the invoked handlers.
+//
+// Note that the data map is not copied, for efficiency. After Emit() is called, the
+// data passed in should not be changed in other goroutines.
+func (app *App) Emit(ctx caddy.Context, eventName string, data map[string]any) Event {
+ logger := app.logger.With(zap.String("name", eventName))
+
+ id, err := uuid.NewRandom()
+ if err != nil {
+ logger.Error("failed generating new event ID", zap.Error(err))
+ }
+
+ eventName = strings.ToLower(eventName)
+
+ e := Event{
+ Data: data,
+ id: id,
+ ts: time.Now(),
+ name: eventName,
+ origin: ctx.Module(),
+ }
+
+ logger = logger.With(
+ zap.String("id", e.id.String()),
+ zap.String("origin", e.origin.CaddyModule().String()))
+
+ // add event info to replacer, make sure it's in the context
+ repl, ok := ctx.Context.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+ if !ok {
+ repl = caddy.NewReplacer()
+ ctx.Context = context.WithValue(ctx.Context, caddy.ReplacerCtxKey, repl)
+ }
+ repl.Map(func(key string) (any, bool) {
+ switch key {
+ case "event":
+ return e, true
+ case "event.id":
+ return e.id, true
+ case "event.name":
+ return e.name, true
+ case "event.time":
+ return e.ts, true
+ case "event.time_unix":
+ return e.ts.UnixMilli(), true
+ case "event.module":
+ return e.origin.CaddyModule().ID, true
+ case "event.data":
+ return e.Data, true
+ }
+
+ if strings.HasPrefix(key, "event.data.") {
+ key = strings.TrimPrefix(key, "event.data.")
+ if val, ok := e.Data[key]; ok {
+ return val, true
+ }
+ }
+
+ return nil, false
+ })
+
+ logger = logger.WithLazy(zap.Any("data", e.Data))
+
+ logger.Debug("event")
+
+ // invoke handlers bound to the event by name and also all events; this for loop
+ // iterates twice at most: once for the event name, once for "" (all events)
+ for {
+ moduleID := e.origin.CaddyModule().ID
+
+ // implement propagation up the module tree (i.e. start with "a.b.c" then "a.b" then "a" then "")
+ for {
+ if app.subscriptions[eventName] == nil {
+ break // shortcut if event not bound at all
+ }
+
+ for _, handler := range app.subscriptions[eventName][moduleID] {
+ select {
+ case <-ctx.Done():
+ logger.Error("context canceled; event handling stopped")
+ return e
+ default:
+ }
+
+ // this log can be a useful sanity check to ensure your handlers are in fact being invoked
+ // (see https://github.com/mholt/caddy-events-exec/issues/6)
+ logger.Debug("invoking subscribed handler",
+ zap.String("subscribed_to", eventName),
+ zap.Any("handler", handler))
+
+ if err := handler.Handle(ctx, e); err != nil {
+ aborted := errors.Is(err, ErrAborted)
+
+ logger.Error("handler error",
+ zap.Error(err),
+ zap.Bool("aborted", aborted))
+
+ if aborted {
+ e.Aborted = err
+ return e
+ }
+ }
+ }
+
+ if moduleID == "" {
+ break
+ }
+ lastDot := strings.LastIndex(string(moduleID), ".")
+ if lastDot < 0 {
+ moduleID = "" // include handlers bound to events regardless of module
+ } else {
+ moduleID = moduleID[:lastDot]
+ }
+ }
+
+ // include handlers listening to all events
+ if eventName == "" {
+ break
+ }
+ eventName = ""
+ }
+
+ return e
+}
+
+// Event represents something that has happened or is happening.
+// An Event value is not synchronized, so it should be copied if
+// being used in goroutines.
+//
+// EXPERIMENTAL: As with the rest of this package, events are
+// subject to change.
+type Event struct {
+ // If non-nil, the event has been aborted, meaning
+ // propagation has stopped to other handlers and
+ // the code should stop what it was doing. Emitters
+ // may choose to use this as a signal to adjust their
+ // code path appropriately.
+ Aborted error
+
+ // The data associated with the event. Usually the
+ // original emitter will be the only one to set or
+ // change these values, but the field is exported
+ // so handlers can have full access if needed.
+ // However, this map is not synchronized, so
+ // handlers must not use this map directly in new
+ // goroutines; instead, copy the map to use it in a
+ // goroutine.
+ Data map[string]any
+
+ id uuid.UUID
+ ts time.Time
+ name string
+ origin caddy.Module
+}
+
+func (e Event) ID() uuid.UUID { return e.id }
+func (e Event) Timestamp() time.Time { return e.ts }
+func (e Event) Name() string { return e.name }
+func (e Event) Origin() caddy.Module { return e.origin }
+
+// CloudEvent exports event e as a structure that, when
+// serialized as JSON, is compatible with the
+// CloudEvents spec.
+func (e Event) CloudEvent() CloudEvent {
+ dataJSON, _ := json.Marshal(e.Data)
+ return CloudEvent{
+ ID: e.id.String(),
+ Source: e.origin.CaddyModule().String(),
+ SpecVersion: "1.0",
+ Type: e.name,
+ Time: e.ts,
+ DataContentType: "application/json",
+ Data: dataJSON,
+ }
+}
+
+// CloudEvent is a JSON-serializable structure that
+// is compatible with the CloudEvents specification.
+// See https://cloudevents.io.
+type CloudEvent struct {
+ ID string `json:"id"`
+ Source string `json:"source"`
+ SpecVersion string `json:"specversion"`
+ Type string `json:"type"`
+ Time time.Time `json:"time"`
+ DataContentType string `json:"datacontenttype,omitempty"`
+ Data json.RawMessage `json:"data,omitempty"`
+}
+
+// ErrAborted cancels an event.
+var ErrAborted = errors.New("event aborted")
+
+// Handler is a type that can handle events.
+type Handler interface {
+ Handle(context.Context, Event) error
+}
+
+// Interface guards
+var (
+ _ caddy.App = (*App)(nil)
+ _ caddy.Provisioner = (*App)(nil)
+)
diff --git a/modules/caddyevents/eventsconfig/caddyfile.go b/modules/caddyevents/eventsconfig/caddyfile.go
new file mode 100644
index 00000000..93a4c3d3
--- /dev/null
+++ b/modules/caddyevents/eventsconfig/caddyfile.go
@@ -0,0 +1,82 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package eventsconfig is for configuring caddyevents.App with the
+// Caddyfile. This code can't be in the caddyevents package because
+// the httpcaddyfile package imports caddyhttp, which imports
+// caddyevents: hence, it creates an import cycle.
+package eventsconfig
+
+import (
+ "encoding/json"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyevents"
+)
+
+func init() {
+ httpcaddyfile.RegisterGlobalOption("events", parseApp)
+}
+
+// parseApp configures the "events" global option from Caddyfile to set up the events app.
+// Syntax:
+//
+// events {
+//	on <event> <handler>
+// }
+//
+// If <event> is *, then it will bind to all events.
+func parseApp(d *caddyfile.Dispenser, _ any) (any, error) {
+ d.Next() // consume option name
+ app := new(caddyevents.App)
+ for d.NextBlock(0) {
+ switch d.Val() {
+ case "on":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ eventName := d.Val()
+ if eventName == "*" {
+ eventName = ""
+ }
+
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ handlerName := d.Val()
+ modID := "events.handlers." + handlerName
+ unm, err := caddyfile.UnmarshalModule(d, modID)
+ if err != nil {
+ return nil, err
+ }
+
+ app.Subscriptions = append(app.Subscriptions, &caddyevents.Subscription{
+ Events: []string{eventName},
+ HandlersRaw: []json.RawMessage{
+ caddyconfig.JSONModuleObject(unm, "handler", handlerName, nil),
+ },
+ })
+
+ default:
+ return nil, d.ArgErr()
+ }
+ }
+
+ return httpcaddyfile.App{
+ Name: "events",
+ Value: caddyconfig.JSON(app, nil),
+ }, nil
+}
diff --git a/modules/caddyfs/filesystem.go b/modules/caddyfs/filesystem.go
new file mode 100644
index 00000000..b2fdcf7a
--- /dev/null
+++ b/modules/caddyfs/filesystem.go
@@ -0,0 +1,112 @@
+package caddyfs
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/fs"
+
+ "go.uber.org/zap"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+)
+
+func init() {
+ caddy.RegisterModule(Filesystems{})
+ httpcaddyfile.RegisterGlobalOption("filesystem", parseFilesystems)
+}
+
+type moduleEntry struct {
+ Key string `json:"name,omitempty"`
+ FileSystemRaw json.RawMessage `json:"file_system,omitempty" caddy:"namespace=caddy.fs inline_key=backend"`
+ fileSystem fs.FS
+}
+
+// Filesystems loads caddy.fs modules into the global filesystem map
+type Filesystems struct {
+ Filesystems []*moduleEntry `json:"filesystems"`
+
+ defers []func()
+}
+
+func parseFilesystems(d *caddyfile.Dispenser, existingVal any) (any, error) {
+ p := &Filesystems{}
+ current, ok := existingVal.(*Filesystems)
+ if ok {
+ p = current
+ }
+ x := &moduleEntry{}
+ err := x.UnmarshalCaddyfile(d)
+ if err != nil {
+ return nil, err
+ }
+ p.Filesystems = append(p.Filesystems, x)
+ return p, nil
+}
+
+// CaddyModule returns the Caddy module information.
+func (Filesystems) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "caddy.filesystems",
+ New: func() caddy.Module { return new(Filesystems) },
+ }
+}
+
+func (xs *Filesystems) Start() error { return nil }
+func (xs *Filesystems) Stop() error { return nil }
+
+func (xs *Filesystems) Provision(ctx caddy.Context) error {
+ // load the filesystem module
+ for _, f := range xs.Filesystems {
+ if len(f.FileSystemRaw) > 0 {
+ mod, err := ctx.LoadModule(f, "FileSystemRaw")
+ if err != nil {
+ return fmt.Errorf("loading file system module: %v", err)
+ }
+ f.fileSystem = mod.(fs.FS)
+ }
+ // register that module
+ ctx.Logger().Debug("registering fs", zap.String("fs", f.Key))
+ ctx.Filesystems().Register(f.Key, f.fileSystem)
+ // remember to unregister the module when we are done
+ xs.defers = append(xs.defers, func() {
+ ctx.Logger().Debug("unregistering fs", zap.String("fs", f.Key))
+ ctx.Filesystems().Unregister(f.Key)
+ })
+ }
+ return nil
+}
+
+func (f *Filesystems) Cleanup() error {
+ for _, v := range f.defers {
+ v()
+ }
+ return nil
+}
+
+func (f *moduleEntry) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ for d.Next() {
+ // key required for now
+ if !d.Args(&f.Key) {
+ return d.ArgErr()
+ }
+ // get the module json
+ if !d.NextArg() {
+ return d.ArgErr()
+ }
+ name := d.Val()
+ modID := "caddy.fs." + name
+ unm, err := caddyfile.UnmarshalModule(d, modID)
+ if err != nil {
+ return err
+ }
+ fsys, ok := unm.(fs.FS)
+ if !ok {
+ return d.Errf("module %s (%T) is not a supported file system implementation (requires fs.FS)", modID, unm)
+ }
+ f.FileSystemRaw = caddyconfig.JSONModuleObject(fsys, "backend", name, nil)
+ }
+ return nil
+}
diff --git a/modules/caddyhttp/app.go b/modules/caddyhttp/app.go
index 8e7f3eaa..850d3aa8 100644
--- a/modules/caddyhttp/app.go
+++ b/modules/caddyhttp/app.go
@@ -15,18 +15,24 @@
package caddyhttp
import (
+ "cmp"
"context"
"crypto/tls"
"fmt"
+ "maps"
"net"
"net/http"
"strconv"
+ "sync"
"time"
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/modules/caddytls"
- "github.com/lucas-clemente/quic-go/http3"
"go.uber.org/zap"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/h2c"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyevents"
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
)
func init() {
@@ -45,24 +51,32 @@ func init() {
//
// Placeholder | Description
// ------------|---------------
+// `{http.request.body}` | The request body (⚠️ inefficient; use only for debugging)
// `{http.request.cookie.*}` | HTTP request cookie
+// `{http.request.duration}` | Time up to now spent handling the request (after decoding headers from client)
+// `{http.request.duration_ms}` | Same as 'duration', but in milliseconds.
+// `{http.request.uuid}` | The request unique identifier
// `{http.request.header.*}` | Specific request header field
-// `{http.request.host.labels.*}` | Request host labels (0-based from right); e.g. for foo.example.com: 0=com, 1=example, 2=foo
// `{http.request.host}` | The host part of the request's Host header
+// `{http.request.host.labels.*}` | Request host labels (0-based from right); e.g. for foo.example.com: 0=com, 1=example, 2=foo
// `{http.request.hostport}` | The host and port from the request's Host header
// `{http.request.method}` | The request method
// `{http.request.orig_method}` | The request's original method
+// `{http.request.orig_uri}` | The request's original URI
+// `{http.request.orig_uri.path}` | The request's original path
+// `{http.request.orig_uri.path.*}` | Parts of the original path, split by `/` (0-based from left)
// `{http.request.orig_uri.path.dir}` | The request's original directory
// `{http.request.orig_uri.path.file}` | The request's original filename
-// `{http.request.orig_uri.path}` | The request's original path
// `{http.request.orig_uri.query}` | The request's original query string (without `?`)
-// `{http.request.orig_uri}` | The request's original URI
// `{http.request.port}` | The port part of the request's Host header
// `{http.request.proto}` | The protocol of the request
-// `{http.request.remote.host}` | The host part of the remote client's address
+// `{http.request.local.host}` | The host (IP) part of the local address the connection arrived on
+// `{http.request.local.port}` | The port part of the local address the connection arrived on
+// `{http.request.local}` | The local address the connection arrived on
+// `{http.request.remote.host}` | The host (IP) part of the remote client's address
// `{http.request.remote.port}` | The port part of the remote client's address
// `{http.request.remote}` | The address of the remote client
-// `{http.request.scheme}` | The request scheme
+// `{http.request.scheme}` | The request scheme, typically `http` or `https`
// `{http.request.tls.version}` | The TLS version name
// `{http.request.tls.cipher_suite}` | The TLS cipher suite
// `{http.request.tls.resumed}` | The TLS connection resumed a previous connection
@@ -70,18 +84,28 @@ func init() {
// `{http.request.tls.proto_mutual}` | The negotiated next protocol was advertised by the server
// `{http.request.tls.server_name}` | The server name requested by the client, if any
// `{http.request.tls.client.fingerprint}` | The SHA256 checksum of the client certificate
+// `{http.request.tls.client.public_key}` | The public key of the client certificate.
+// `{http.request.tls.client.public_key_sha256}` | The SHA256 checksum of the client's public key.
+// `{http.request.tls.client.certificate_pem}` | The PEM-encoded value of the certificate.
+// `{http.request.tls.client.certificate_der_base64}` | The base64-encoded value of the certificate.
// `{http.request.tls.client.issuer}` | The issuer DN of the client certificate
// `{http.request.tls.client.serial}` | The serial number of the client certificate
// `{http.request.tls.client.subject}` | The subject DN of the client certificate
+// `{http.request.tls.client.san.dns_names.*}` | SAN DNS names (index optional)
+// `{http.request.tls.client.san.emails.*}` | SAN email addresses (index optional)
+// `{http.request.tls.client.san.ips.*}` | SAN IP addresses (index optional)
+// `{http.request.tls.client.san.uris.*}` | SAN URIs (index optional)
+// `{http.request.uri}` | The full request URI
+// `{http.request.uri.path}` | The path component of the request URI
// `{http.request.uri.path.*}` | Parts of the path, split by `/` (0-based from left)
// `{http.request.uri.path.dir}` | The directory, excluding leaf filename
// `{http.request.uri.path.file}` | The filename of the path, excluding directory
-// `{http.request.uri.path}` | The path component of the request URI
-// `{http.request.uri.query.*}` | Individual query string value
// `{http.request.uri.query}` | The query string (without `?`)
-// `{http.request.uri}` | The full request URI
+// `{http.request.uri.query.*}` | Individual query string value
// `{http.response.header.*}` | Specific response header field
// `{http.vars.*}` | Custom variables in the HTTP handler chain
+// `{http.shutting_down}` | True if the HTTP app is shutting down
+// `{http.time_until_shutdown}` | Time until HTTP server shutdown, if scheduled
type App struct {
// HTTPPort specifies the port to use for HTTP (as opposed to HTTPS),
// which is used when setting up HTTP->HTTPS redirects or ACME HTTP
@@ -94,18 +118,34 @@ type App struct {
HTTPSPort int `json:"https_port,omitempty"`
// GracePeriod is how long to wait for active connections when shutting
- // down the server. Once the grace period is over, connections will
- // be forcefully closed.
+ // down the servers. During the grace period, no new connections are
+ // accepted, idle connections are closed, and active connections will
+ // be given the full length of time to become idle and close.
+ // Once the grace period is over, connections will be forcefully closed.
+ // If zero, the grace period is eternal. Default: 0.
GracePeriod caddy.Duration `json:"grace_period,omitempty"`
+ // ShutdownDelay is how long to wait before initiating the grace
+ // period. When this app is stopping (e.g. during a config reload or
+ // process exit), all servers will be shut down. Normally this immediately
+ // initiates the grace period. However, if this delay is configured, servers
+ // will not be shut down until the delay is over. During this time, servers
+ // continue to function normally and allow new connections. At the end, the
+ // grace period will begin. This can be useful to allow downstream load
+ // balancers time to move this instance out of the rotation without hiccups.
+ //
+ // When shutdown has been scheduled, placeholders {http.shutting_down} (bool)
+ // and {http.time_until_shutdown} (duration) may be useful for health checks.
+ ShutdownDelay caddy.Duration `json:"shutdown_delay,omitempty"`
+
// Servers is the list of servers, keyed by arbitrary names chosen
// at your discretion for your own convenience; the keys do not
// affect functionality.
Servers map[string]*Server `json:"servers,omitempty"`
- servers []*http.Server
- h3servers []*http3.Server
- h3listeners []net.PacketConn
+ // If set, metrics observations will be enabled.
+ // This setting is EXPERIMENTAL and subject to change.
+ Metrics *Metrics `json:"metrics,omitempty"`
ctx caddy.Context
logger *zap.Logger
@@ -132,7 +172,12 @@ func (app *App) Provision(ctx caddy.Context) error {
}
app.tlsApp = tlsAppIface.(*caddytls.TLS)
app.ctx = ctx
- app.logger = ctx.Logger(app)
+ app.logger = ctx.Logger()
+
+ eventsAppIface, err := ctx.App("events")
+ if err != nil {
+ return fmt.Errorf("getting events app: %v", err)
+ }
repl := caddy.NewReplacer()
@@ -144,15 +189,106 @@ func (app *App) Provision(ctx caddy.Context) error {
return err
}
+ if app.Metrics != nil {
+ app.Metrics.init = sync.Once{}
+ app.Metrics.httpMetrics = &httpMetrics{}
+ }
// prepare each server
+ oldContext := ctx.Context
for srvName, srv := range app.Servers {
+ ctx.Context = context.WithValue(oldContext, ServerCtxKey, srv)
+ srv.name = srvName
srv.tlsApp = app.tlsApp
+ srv.events = eventsAppIface.(*caddyevents.App)
+ srv.ctx = ctx
srv.logger = app.logger.Named("log")
srv.errorLogger = app.logger.Named("log.error")
+ srv.shutdownAtMu = new(sync.RWMutex)
+
+ if srv.Metrics != nil {
+ srv.logger.Warn("per-server 'metrics' is deprecated; use 'metrics' in the root 'http' app instead")
+ app.Metrics = cmp.Or[*Metrics](app.Metrics, &Metrics{
+ init: sync.Once{},
+ httpMetrics: &httpMetrics{},
+ })
+ app.Metrics.PerHost = app.Metrics.PerHost || srv.Metrics.PerHost
+ }
// only enable access logs if configured
if srv.Logs != nil {
srv.accessLogger = app.logger.Named("log.access")
+ if srv.Logs.Trace {
+ srv.traceLogger = app.logger.Named("log.trace")
+ }
+ }
+
+ // if no protocols configured explicitly, enable all except h2c
+ if len(srv.Protocols) == 0 {
+ srv.Protocols = []string{"h1", "h2", "h3"}
+ }
+
+ srvProtocolsUnique := map[string]struct{}{}
+ for _, srvProtocol := range srv.Protocols {
+ srvProtocolsUnique[srvProtocol] = struct{}{}
+ }
+ _, h1ok := srvProtocolsUnique["h1"]
+ _, h2ok := srvProtocolsUnique["h2"]
+ _, h2cok := srvProtocolsUnique["h2c"]
+
+ // the Go standard library does not let us serve only HTTP/2 using
+ // http.Server; we would probably need to write our own server
+ if !h1ok && (h2ok || h2cok) {
+ return fmt.Errorf("server %s: cannot enable HTTP/2 or H2C without enabling HTTP/1.1; add h1 to protocols or remove h2/h2c", srvName)
+ }
+
+ if srv.ListenProtocols != nil {
+ if len(srv.ListenProtocols) != len(srv.Listen) {
+ return fmt.Errorf("server %s: listener protocols count does not match address count: %d != %d",
+ srvName, len(srv.ListenProtocols), len(srv.Listen))
+ }
+
+ for i, lnProtocols := range srv.ListenProtocols {
+ if lnProtocols != nil {
+ // populate empty listen protocols with server protocols
+ lnProtocolsDefault := false
+ var lnProtocolsInclude []string
+ srvProtocolsInclude := maps.Clone(srvProtocolsUnique)
+
+ // keep existing listener protocols unless they are empty
+ for _, lnProtocol := range lnProtocols {
+ if lnProtocol == "" {
+ lnProtocolsDefault = true
+ } else {
+ lnProtocolsInclude = append(lnProtocolsInclude, lnProtocol)
+ delete(srvProtocolsInclude, lnProtocol)
+ }
+ }
+
+ // append server protocols to listener protocols if any listener protocols were empty
+ if lnProtocolsDefault {
+ for _, srvProtocol := range srv.Protocols {
+ if _, ok := srvProtocolsInclude[srvProtocol]; ok {
+ lnProtocolsInclude = append(lnProtocolsInclude, srvProtocol)
+ }
+ }
+ }
+
+ lnProtocolsIncludeUnique := map[string]struct{}{}
+ for _, lnProtocol := range lnProtocolsInclude {
+ lnProtocolsIncludeUnique[lnProtocol] = struct{}{}
+ }
+ _, h1ok := lnProtocolsIncludeUnique["h1"]
+ _, h2ok := lnProtocolsIncludeUnique["h2"]
+ _, h2cok := lnProtocolsIncludeUnique["h2c"]
+
+ // check if any listener protocols contain h2 or h2c without h1
+ if !h1ok && (h2ok || h2cok) {
+ return fmt.Errorf("server %s, listener %d: cannot enable HTTP/2 or H2C without enabling HTTP/1.1; add h1 to protocols or remove h2/h2c", srvName, i)
+ }
+
+ srv.ListenProtocols[i] = lnProtocolsInclude
+ }
+ }
}
// if not explicitly configured by the user, disallow TLS
@@ -165,19 +301,31 @@ func (app *App) Provision(ctx caddy.Context) error {
// domain fronting is desired and access is not restricted
// based on hostname
if srv.StrictSNIHost == nil && srv.hasTLSClientAuth() {
- app.logger.Info("enabling strict SNI-Host matching because TLS client auth is configured",
- zap.String("server_name", srvName),
- )
+ app.logger.Warn("enabling strict SNI-Host enforcement because TLS client auth is configured",
+ zap.String("server_id", srvName))
trueBool := true
srv.StrictSNIHost = &trueBool
}
+ // set up the trusted proxies source
+ for srv.TrustedProxiesRaw != nil {
+ val, err := ctx.LoadModule(srv, "TrustedProxiesRaw")
+ if err != nil {
+ return fmt.Errorf("loading trusted proxies modules: %v", err)
+ }
+ srv.trustedProxies = val.(IPRangeSource)
+ }
+
+ // set the default client IP header to read from
+ if srv.ClientIPHeaders == nil {
+ srv.ClientIPHeaders = []string{"X-Forwarded-For"}
+ }
+
// process each listener address
for i := range srv.Listen {
lnOut, err := repl.ReplaceOrErr(srv.Listen[i], true, true)
if err != nil {
- return fmt.Errorf("server %s, listener %d: %v",
- srvName, i, err)
+ return fmt.Errorf("server %s, listener %d: %v", srvName, i, err)
}
srv.Listen[i] = lnOut
}
@@ -189,7 +337,7 @@ func (app *App) Provision(ctx caddy.Context) error {
return fmt.Errorf("loading listener wrapper modules: %v", err)
}
var hasTLSPlaceholder bool
- for i, val := range vals.([]interface{}) {
+ for i, val := range vals.([]any) {
if _, ok := val.(*tlsPlaceholderWrapper); ok {
if i == 0 {
// putting the tls placeholder wrapper first is nonsensical because
@@ -212,12 +360,11 @@ func (app *App) Provision(ctx caddy.Context) error {
srv.listenerWrappers = append([]caddy.ListenerWrapper{new(tlsPlaceholderWrapper)}, srv.listenerWrappers...)
}
}
-
// pre-compile the primary handler chain, and be sure to wrap it in our
// route handler so that important security checks are done, etc.
primaryRoute := emptyHandler
if srv.Routes != nil {
- err := srv.Routes.ProvisionHandlers(ctx)
+ err := srv.Routes.ProvisionHandlers(ctx, app.Metrics)
if err != nil {
return fmt.Errorf("server %s: setting up route handlers: %v", srvName, err)
}
@@ -229,26 +376,45 @@ func (app *App) Provision(ctx caddy.Context) error {
if srv.Errors != nil {
err := srv.Errors.Routes.Provision(ctx)
if err != nil {
- return fmt.Errorf("server %s: setting up server error handling routes: %v", srvName, err)
+ return fmt.Errorf("server %s: setting up error handling routes: %v", srvName, err)
}
srv.errorHandlerChain = srv.Errors.Routes.Compile(errorEmptyHandler)
}
+ // provision the named routes (they get compiled at runtime)
+ for name, route := range srv.NamedRoutes {
+ err := route.Provision(ctx, app.Metrics)
+ if err != nil {
+			return fmt.Errorf("server %s: setting up named route '%s' handlers: %v", srvName, name, err)
+ }
+ }
+
// prepare the TLS connection policies
err = srv.TLSConnPolicies.Provision(ctx)
if err != nil {
return fmt.Errorf("server %s: setting up TLS connection policies: %v", srvName, err)
}
- }
+ // if there is no idle timeout, set a sane default; users have complained
+ // before that aggressive CDNs leave connections open until the server
+ // closes them, so if we don't close them it leads to resource exhaustion
+ if srv.IdleTimeout == 0 {
+ srv.IdleTimeout = defaultIdleTimeout
+ }
+ if srv.ReadHeaderTimeout == 0 {
+ srv.ReadHeaderTimeout = defaultReadHeaderTimeout // see #6663
+ }
+ }
+ ctx.Context = oldContext
return nil
}
// Validate ensures the app's configuration is valid.
func (app *App) Validate() error {
- // each server must use distinct listener addresses
lnAddrs := make(map[string]string)
+
for srvName, srv := range app.Servers {
+ // each server must use distinct listener addresses
for _, addr := range srv.Listen {
listenAddr, err := caddy.ParseNetworkAddress(addr)
if err != nil {
@@ -257,115 +423,208 @@ func (app *App) Validate() error {
// check that every address in the port range is unique to this server;
// we do not use <= here because PortRangeSize() adds 1 to EndPort for us
for i := uint(0); i < listenAddr.PortRangeSize(); i++ {
- addr := caddy.JoinNetworkAddress(listenAddr.Network, listenAddr.Host, strconv.Itoa(int(listenAddr.StartPort+i)))
+ addr := caddy.JoinNetworkAddress(listenAddr.Network, listenAddr.Host, strconv.FormatUint(uint64(listenAddr.StartPort+i), 10))
if sn, ok := lnAddrs[addr]; ok {
return fmt.Errorf("server %s: listener address repeated: %s (already claimed by server '%s')", srvName, addr, sn)
}
lnAddrs[addr] = srvName
}
}
- }
+ // logger names must not have ports
+ if srv.Logs != nil {
+ for host := range srv.Logs.LoggerNames {
+ if _, _, err := net.SplitHostPort(host); err == nil {
+ return fmt.Errorf("server %s: logger name must not have a port: %s", srvName, host)
+ }
+ }
+ }
+ }
return nil
}
// Start runs the app. It finishes automatic HTTPS if enabled,
// including management of certificates.
func (app *App) Start() error {
+ // get a logger compatible with http.Server
+ serverLogger, err := zap.NewStdLogAt(app.logger.Named("stdlib"), zap.DebugLevel)
+ if err != nil {
+ return fmt.Errorf("failed to set up server logger: %v", err)
+ }
+
for srvName, srv := range app.Servers {
- s := &http.Server{
+ srv.server = &http.Server{
ReadTimeout: time.Duration(srv.ReadTimeout),
ReadHeaderTimeout: time.Duration(srv.ReadHeaderTimeout),
WriteTimeout: time.Duration(srv.WriteTimeout),
IdleTimeout: time.Duration(srv.IdleTimeout),
MaxHeaderBytes: srv.MaxHeaderBytes,
Handler: srv,
+ ErrorLog: serverLogger,
+ ConnContext: func(ctx context.Context, c net.Conn) context.Context {
+ return context.WithValue(ctx, ConnCtxKey, c)
+ },
+ }
+ h2server := new(http2.Server)
+
+ // disable HTTP/2, which we enabled by default during provisioning
+ if !srv.protocol("h2") {
+ srv.server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
+ for _, cp := range srv.TLSConnPolicies {
+ // the TLSConfig was already provisioned, so... manually remove it
+ for i, np := range cp.TLSConfig.NextProtos {
+ if np == "h2" {
+ cp.TLSConfig.NextProtos = append(cp.TLSConfig.NextProtos[:i], cp.TLSConfig.NextProtos[i+1:]...)
+ break
+ }
+ }
+ // remove it from the parent connection policy too, just to keep things tidy
+ for i, alpn := range cp.ALPN {
+ if alpn == "h2" {
+ cp.ALPN = append(cp.ALPN[:i], cp.ALPN[i+1:]...)
+ break
+ }
+ }
+ }
+ } else {
+ //nolint:errcheck
+ http2.ConfigureServer(srv.server, h2server)
}
- for _, lnAddr := range srv.Listen {
+ // this TLS config is used by the std lib to choose the actual TLS config for connections
+ // by looking through the connection policies to find the first one that matches
+ tlsCfg := srv.TLSConnPolicies.TLSConfig(app.ctx)
+ srv.configureServer(srv.server)
+
+ // enable H2C if configured
+ if srv.protocol("h2c") {
+ srv.server.Handler = h2c.NewHandler(srv, h2server)
+ }
+
+ for lnIndex, lnAddr := range srv.Listen {
listenAddr, err := caddy.ParseNetworkAddress(lnAddr)
if err != nil {
return fmt.Errorf("%s: parsing listen address '%s': %v", srvName, lnAddr, err)
}
- for portOffset := uint(0); portOffset < listenAddr.PortRangeSize(); portOffset++ {
- // create the listener for this socket
- hostport := listenAddr.JoinHostPort(portOffset)
- ln, err := caddy.Listen(listenAddr.Network, hostport)
- if err != nil {
- return fmt.Errorf("%s: listening on %s: %v", listenAddr.Network, hostport, err)
- }
- // wrap listener before TLS (up to the TLS placeholder wrapper)
- var lnWrapperIdx int
- for i, lnWrapper := range srv.listenerWrappers {
- if _, ok := lnWrapper.(*tlsPlaceholderWrapper); ok {
- lnWrapperIdx = i + 1 // mark the next wrapper's spot
- break
- }
- ln = lnWrapper.WrapListener(ln)
- }
+ srv.addresses = append(srv.addresses, listenAddr)
+
+ protocols := srv.Protocols
+ if srv.ListenProtocols != nil && srv.ListenProtocols[lnIndex] != nil {
+ protocols = srv.ListenProtocols[lnIndex]
+ }
+
+ protocolsUnique := map[string]struct{}{}
+ for _, protocol := range protocols {
+ protocolsUnique[protocol] = struct{}{}
+ }
+ _, h1ok := protocolsUnique["h1"]
+ _, h2ok := protocolsUnique["h2"]
+ _, h2cok := protocolsUnique["h2c"]
+ _, h3ok := protocolsUnique["h3"]
+
+ for portOffset := uint(0); portOffset < listenAddr.PortRangeSize(); portOffset++ {
+ hostport := listenAddr.JoinHostPort(portOffset)
// enable TLS if there is a policy and if this is not the HTTP port
useTLS := len(srv.TLSConnPolicies) > 0 && int(listenAddr.StartPort+portOffset) != app.httpPort()
- if useTLS {
- // create TLS listener
- tlsCfg := srv.TLSConnPolicies.TLSConfig(app.ctx)
- ln = tls.NewListener(ln, tlsCfg)
- /////////
- // TODO: HTTP/3 support is experimental for now
- if srv.ExperimentalHTTP3 {
- app.logger.Info("enabling experimental HTTP/3 listener",
- zap.String("addr", hostport),
- )
- h3ln, err := caddy.ListenPacket("udp", hostport)
- if err != nil {
- return fmt.Errorf("getting HTTP/3 UDP listener: %v", err)
- }
- h3srv := &http3.Server{
- Server: &http.Server{
- Addr: hostport,
- Handler: srv,
- TLSConfig: tlsCfg,
- },
- }
- go h3srv.Serve(h3ln)
- app.h3servers = append(app.h3servers, h3srv)
- app.h3listeners = append(app.h3listeners, h3ln)
- srv.h3server = h3srv
+ // enable HTTP/3 if configured
+ if h3ok && useTLS {
+ app.logger.Info("enabling HTTP/3 listener", zap.String("addr", hostport))
+ if err := srv.serveHTTP3(listenAddr.At(portOffset), tlsCfg); err != nil {
+ return err
}
- /////////
}
- // finish wrapping listener where we left off before TLS
- for i := lnWrapperIdx; i < len(srv.listenerWrappers); i++ {
- ln = srv.listenerWrappers[i].WrapListener(ln)
+ if h3ok && !useTLS {
+ // Can only serve h3 with TLS enabled
+ app.logger.Warn("HTTP/3 skipped because it requires TLS",
+ zap.String("network", listenAddr.Network),
+ zap.String("addr", hostport))
}
- // if binding to port 0, the OS chooses a port for us;
- // but the user won't know the port unless we print it
- if listenAddr.StartPort == 0 && listenAddr.EndPort == 0 {
- app.logger.Info("port 0 listener",
- zap.String("input_address", lnAddr),
- zap.String("actual_address", ln.Addr().String()),
- )
+ if h1ok || h2ok && useTLS || h2cok {
+ // create the listener for this socket
+ lnAny, err := listenAddr.Listen(app.ctx, portOffset, net.ListenConfig{KeepAlive: time.Duration(srv.KeepAliveInterval)})
+ if err != nil {
+ return fmt.Errorf("listening on %s: %v", listenAddr.At(portOffset), err)
+ }
+ ln, ok := lnAny.(net.Listener)
+ if !ok {
+ return fmt.Errorf("network '%s' cannot handle HTTP/1 or HTTP/2 connections", listenAddr.Network)
+ }
+
+ // wrap listener before TLS (up to the TLS placeholder wrapper)
+ var lnWrapperIdx int
+ for i, lnWrapper := range srv.listenerWrappers {
+ if _, ok := lnWrapper.(*tlsPlaceholderWrapper); ok {
+ lnWrapperIdx = i + 1 // mark the next wrapper's spot
+ break
+ }
+ ln = lnWrapper.WrapListener(ln)
+ }
+
+ if useTLS {
+ // create TLS listener - this enables and terminates TLS
+ ln = tls.NewListener(ln, tlsCfg)
+ }
+
+ // finish wrapping listener where we left off before TLS
+ for i := lnWrapperIdx; i < len(srv.listenerWrappers); i++ {
+ ln = srv.listenerWrappers[i].WrapListener(ln)
+ }
+
+				// handle HTTP/2 when using a TLS listener wrapper
+ if h2ok {
+ http2lnWrapper := &http2Listener{
+ Listener: ln,
+ server: srv.server,
+ h2server: h2server,
+ }
+ srv.h2listeners = append(srv.h2listeners, http2lnWrapper)
+ ln = http2lnWrapper
+ }
+
+ // if binding to port 0, the OS chooses a port for us;
+ // but the user won't know the port unless we print it
+ if !listenAddr.IsUnixNetwork() && !listenAddr.IsFdNetwork() && listenAddr.StartPort == 0 && listenAddr.EndPort == 0 {
+ app.logger.Info("port 0 listener",
+ zap.String("input_address", lnAddr),
+ zap.String("actual_address", ln.Addr().String()))
+ }
+
+ app.logger.Debug("starting server loop",
+ zap.String("address", ln.Addr().String()),
+ zap.Bool("tls", useTLS),
+ zap.Bool("http3", srv.h3server != nil))
+
+ srv.listeners = append(srv.listeners, ln)
+
+ // enable HTTP/1 if configured
+ if h1ok {
+ //nolint:errcheck
+ go srv.server.Serve(ln)
+ }
}
- app.logger.Debug("starting server loop",
- zap.String("address", ln.Addr().String()),
- zap.Bool("http3", srv.ExperimentalHTTP3),
- zap.Bool("tls", useTLS),
- )
-
- go s.Serve(ln)
- app.servers = append(app.servers, s)
+ if h2ok && !useTLS {
+ // Can only serve h2 with TLS enabled
+ app.logger.Warn("HTTP/2 skipped because it requires TLS",
+ zap.String("network", listenAddr.Network),
+ zap.String("addr", hostport))
+ }
}
}
+
+ srv.logger.Info("server running",
+ zap.String("name", srvName),
+ zap.Strings("protocols", srv.Protocols))
}
// finish automatic HTTPS by finally beginning
// certificate management
- err := app.automaticHTTPSPhase2()
+ err = app.automaticHTTPSPhase2()
if err != nil {
return fmt.Errorf("finalizing automatic HTTPS: %v", err)
}
@@ -376,44 +635,127 @@ func (app *App) Start() error {
// Stop gracefully shuts down the HTTP server.
func (app *App) Stop() error {
ctx := context.Background()
+
+ // see if any listeners in our config will be closing or if they are continuing
+ // through a reload; because if any are closing, we will enforce shutdown delay
+ var delay bool
+ scheduledTime := time.Now().Add(time.Duration(app.ShutdownDelay))
+ if app.ShutdownDelay > 0 {
+ for _, server := range app.Servers {
+ for _, na := range server.addresses {
+ for _, addr := range na.Expand() {
+ if caddy.ListenerUsage(addr.Network, addr.JoinHostPort(0)) < 2 {
+ app.logger.Debug("listener closing and shutdown delay is configured", zap.String("address", addr.String()))
+ server.shutdownAtMu.Lock()
+ server.shutdownAt = scheduledTime
+ server.shutdownAtMu.Unlock()
+ delay = true
+ } else {
+ app.logger.Debug("shutdown delay configured but listener will remain open", zap.String("address", addr.String()))
+ }
+ }
+ }
+ }
+ }
+
+ // honor scheduled/delayed shutdown time
+ if delay {
+ app.logger.Info("shutdown scheduled",
+ zap.Duration("delay_duration", time.Duration(app.ShutdownDelay)),
+ zap.Time("time", scheduledTime))
+ time.Sleep(time.Duration(app.ShutdownDelay))
+ }
+
+ // enforce grace period if configured
if app.GracePeriod > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Duration(app.GracePeriod))
defer cancel()
+ app.logger.Info("servers shutting down; grace period initiated", zap.Duration("duration", time.Duration(app.GracePeriod)))
+ } else {
+ app.logger.Info("servers shutting down with eternal grace period")
}
- for _, s := range app.servers {
- err := s.Shutdown(ctx)
- if err != nil {
- return err
+
+ // goroutines aren't guaranteed to be scheduled right away,
+ // so we'll use one WaitGroup to wait for all the goroutines
+ // to start their server shutdowns, and another to wait for
+ // them to finish; we'll always block for them to start so
+ // that when we return the caller can be confident* that the
+ // old servers are no longer accepting new connections
+ // (* the scheduler might still pause them right before
+ // calling Shutdown(), but it's unlikely)
+ var startedShutdown, finishedShutdown sync.WaitGroup
+
+ // these will run in goroutines
+ stopServer := func(server *Server) {
+ defer finishedShutdown.Done()
+ startedShutdown.Done()
+
+ if err := server.server.Shutdown(ctx); err != nil {
+ app.logger.Error("server shutdown",
+ zap.Error(err),
+ zap.Strings("addresses", server.Listen))
+ }
+ }
+ stopH3Server := func(server *Server) {
+ defer finishedShutdown.Done()
+ startedShutdown.Done()
+
+ if server.h3server == nil {
+ return
+ }
+
+ if err := server.h3server.Shutdown(ctx); err != nil {
+ app.logger.Error("HTTP/3 server shutdown",
+ zap.Error(err),
+ zap.Strings("addresses", server.Listen))
+ }
+ }
+ stopH2Listener := func(server *Server) {
+ defer finishedShutdown.Done()
+ startedShutdown.Done()
+
+ for i, s := range server.h2listeners {
+ if err := s.Shutdown(ctx); err != nil {
+ app.logger.Error("http2 listener shutdown",
+ zap.Error(err),
+ zap.Int("index", i))
+ }
}
}
- // close the http3 servers; it's unclear whether the bug reported in
- // https://github.com/caddyserver/caddy/pull/2727#issuecomment-526856566
- // was ever truly fixed, since it seemed racey/nondeterministic; but
- // recent tests in 2020 were unable to replicate the issue again after
- // repeated attempts (the bug manifested after a config reload; i.e.
- // reusing a http3 server or listener was problematic), but it seems
- // to be working fine now
- for _, s := range app.h3servers {
- // TODO: CloseGracefully, once implemented upstream
- // (see https://github.com/lucas-clemente/quic-go/issues/2103)
- err := s.Close()
- if err != nil {
- return err
+ for _, server := range app.Servers {
+ startedShutdown.Add(3)
+ finishedShutdown.Add(3)
+ go stopServer(server)
+ go stopH3Server(server)
+ go stopH2Listener(server)
+ }
+
+ // block until all the goroutines have been run by the scheduler;
+ // this means that they have likely called Shutdown() by now
+ startedShutdown.Wait()
+
+ // if the process is exiting, we need to block here and wait
+ // for the grace periods to complete, otherwise the process will
+ // terminate before the servers are finished shutting down; but
+ // we don't really need to wait for the grace period to finish
+ // if the process isn't exiting (but note that frequent config
+ // reloads with long grace periods for a sustained length of time
+ // may deplete resources)
+ if caddy.Exiting() {
+ finishedShutdown.Wait()
+ }
+
+ // run stop callbacks now that the server shutdowns are complete
+ for name, s := range app.Servers {
+ for _, stopHook := range s.onStopFuncs {
+ if err := stopHook(ctx); err != nil {
+ app.logger.Error("server stop hook", zap.String("server", name), zap.Error(err))
+ }
}
}
- // closing an http3.Server does not close their underlying listeners
- // since apparently the listener can be used both by servers and
- // clients at the same time; so we need to manually call Close()
- // on the underlying h3 listeners (see lucas-clemente/quic-go#2103)
- for _, pc := range app.h3listeners {
- err := pc.Close()
- if err != nil {
- return err
- }
- }
return nil
}
@@ -431,6 +773,21 @@ func (app *App) httpsPort() int {
return app.HTTPSPort
}
+const (
+ // defaultIdleTimeout is the default HTTP server timeout
+ // for closing idle connections; useful to avoid resource
+ // exhaustion behind hungry CDNs, for example (we've had
+ // several complaints without this).
+ defaultIdleTimeout = caddy.Duration(5 * time.Minute)
+
+ // defaultReadHeaderTimeout is the default timeout for
+ // reading HTTP headers from clients. Headers are generally
+ // small, often less than 1 KB, so it shouldn't take a
+ // long time even on legitimately slow connections or
+ // busy servers to read it.
+ defaultReadHeaderTimeout = caddy.Duration(time.Minute)
+)
+
// Interface guards
var (
_ caddy.App = (*App)(nil)
diff --git a/modules/caddyhttp/autohttps.go b/modules/caddyhttp/autohttps.go
index f62543be..4449e1f4 100644
--- a/modules/caddyhttp/autohttps.go
+++ b/modules/caddyhttp/autohttps.go
@@ -17,13 +17,15 @@ package caddyhttp
import (
"fmt"
"net/http"
+ "slices"
"strconv"
"strings"
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/modules/caddytls"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
)
// AutoHTTPSConfig is used to disable automatic HTTPS
@@ -31,13 +33,20 @@ import (
// HTTPS is enabled automatically and by default when
// qualifying hostnames are available from the config.
type AutoHTTPSConfig struct {
- // If true, automatic HTTPS will be entirely disabled.
+ // If true, automatic HTTPS will be entirely disabled,
+ // including certificate management and redirects.
Disabled bool `json:"disable,omitempty"`
// If true, only automatic HTTP->HTTPS redirects will
- // be disabled.
+ // be disabled, but other auto-HTTPS features will
+ // remain enabled.
DisableRedir bool `json:"disable_redirects,omitempty"`
+ // If true, automatic certificate management will be
+ // disabled, but other auto-HTTPS features will
+ // remain enabled.
+ DisableCerts bool `json:"disable_certificates,omitempty"`
+
// Hosts/domain names listed here will not be included
// in automatic HTTPS (they will not have certificates
// loaded nor redirects applied).
@@ -56,17 +65,12 @@ type AutoHTTPSConfig struct {
// enabled. To force automated certificate management
// regardless of loaded certificates, set this to true.
IgnoreLoadedCerts bool `json:"ignore_loaded_certificates,omitempty"`
-}
-// Skipped returns true if name is in skipSlice, which
-// should be either the Skip or SkipCerts field on ahc.
-func (ahc AutoHTTPSConfig) Skipped(name string, skipSlice []string) bool {
- for _, n := range skipSlice {
- if name == n {
- return true
- }
- }
- return false
+ // If true, automatic HTTPS will prefer wildcard names
+ // and ignore non-wildcard names if both are available.
+ // This allows for writing a config with top-level host
+ // matchers without having those names produce certificates.
+ PreferWildcard bool `json:"prefer_wildcard,omitempty"`
}
// automaticHTTPSPhase1 provisions all route matchers, determines
@@ -76,13 +80,20 @@ func (ahc AutoHTTPSConfig) Skipped(name string, skipSlice []string) bool {
// even servers to the app, which still need to be set up with the
// rest of them during provisioning.
func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) error {
+ logger := app.logger.Named("auto_https")
+
// this map acts as a set to store the domain names
// for which we will manage certificates automatically
uniqueDomainsForCerts := make(map[string]struct{})
// this maps domain names for automatic HTTP->HTTPS
- // redirects to their destination server address
- redirDomains := make(map[string]caddy.NetworkAddress)
+ // redirects to their destination server addresses
+ // (there might be more than 1 if bind is used; see
+ // https://github.com/caddyserver/caddy/issues/3443)
+ redirDomains := make(map[string][]caddy.NetworkAddress)
+
+ // the log configuration for an HTTPS enabled server
+ var logCfg *ServerLogConfig
for srvName, srv := range app.Servers {
// as a prerequisite, provision route matchers; this is
@@ -102,12 +113,13 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
srv.AutoHTTPS = new(AutoHTTPSConfig)
}
if srv.AutoHTTPS.Disabled {
+ logger.Info("automatic HTTPS is completely disabled for server", zap.String("server_name", srvName))
continue
}
// skip if all listeners use the HTTP port
if !srv.listenersUseAnyPortOtherThan(app.httpPort()) {
- app.logger.Info("server is listening only on the HTTP port, so no automatic HTTPS will be applied to this server",
+ logger.Warn("server is listening only on the HTTP port, so no automatic HTTPS will be applied to this server",
zap.String("server_name", srvName),
zap.Int("http_port", app.httpPort()),
)
@@ -121,7 +133,7 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
// needing to specify one empty policy to enable it
if srv.TLSConnPolicies == nil &&
!srv.listenersUseAnyPortOtherThan(app.httpsPort()) {
- app.logger.Info("server is listening only on the HTTPS port but has no TLS connection policies; adding one to enable TLS",
+ logger.Info("server is listening only on the HTTPS port but has no TLS connection policies; adding one to enable TLS",
zap.String("server_name", srvName),
zap.Int("https_port", app.httpsPort()),
)
@@ -142,7 +154,7 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
return fmt.Errorf("%s: route %d, matcher set %d, matcher %d, host matcher %d: %v",
srvName, routeIdx, matcherSetIdx, matcherIdx, hostMatcherIdx, err)
}
- if !srv.AutoHTTPS.Skipped(d, srv.AutoHTTPS.Skip) {
+ if !slices.Contains(srv.AutoHTTPS.Skip, d) {
serverDomainSet[d] = struct{}{}
}
}
@@ -151,6 +163,27 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
}
}
+ if srv.AutoHTTPS.PreferWildcard {
+ wildcards := make(map[string]struct{})
+ for d := range serverDomainSet {
+ if strings.HasPrefix(d, "*.") {
+ wildcards[d[2:]] = struct{}{}
+ }
+ }
+ for d := range serverDomainSet {
+ if strings.HasPrefix(d, "*.") {
+ continue
+ }
+ base := d
+ if idx := strings.Index(d, "."); idx != -1 {
+ base = d[idx+1:]
+ }
+ if _, ok := wildcards[base]; ok {
+ delete(serverDomainSet, d)
+ }
+ }
+ }
+
// nothing more to do here if there are no domains that qualify for
// automatic HTTPS and there are no explicit TLS connection policies:
// if there is at least one domain but no TLS conn policy (F&&T), we'll
@@ -162,32 +195,43 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
continue
}
+ // clone the logger so we can apply it to the HTTP server
+ // (not sure if necessary to clone it; but probably safer)
+ // (we choose one log cfg arbitrarily; not sure which is best)
+ if srv.Logs != nil {
+ logCfg = srv.Logs.clone()
+ }
+
// for all the hostnames we found, filter them so we have
// a deduplicated list of names for which to obtain certs
- for d := range serverDomainSet {
- if certmagic.SubjectQualifiesForCert(d) &&
- !srv.AutoHTTPS.Skipped(d, srv.AutoHTTPS.SkipCerts) {
- // if a certificate for this name is already loaded,
- // don't obtain another one for it, unless we are
- // supposed to ignore loaded certificates
- if !srv.AutoHTTPS.IgnoreLoadedCerts &&
- len(app.tlsApp.AllMatchingCertificates(d)) > 0 {
- app.logger.Info("skipping automatic certificate management because one or more matching certificates are already loaded",
- zap.String("domain", d),
- zap.String("server_name", srvName),
- )
- continue
- }
+ // (only if cert management not disabled for this server)
+ if srv.AutoHTTPS.DisableCerts {
+ logger.Warn("skipping automated certificate management for server because it is disabled", zap.String("server_name", srvName))
+ } else {
+ for d := range serverDomainSet {
+ if certmagic.SubjectQualifiesForCert(d) &&
+ !slices.Contains(srv.AutoHTTPS.SkipCerts, d) {
+ // if a certificate for this name is already loaded,
+ // don't obtain another one for it, unless we are
+ // supposed to ignore loaded certificates
+ if !srv.AutoHTTPS.IgnoreLoadedCerts && app.tlsApp.HasCertificateForSubject(d) {
+ logger.Info("skipping automatic certificate management because one or more matching certificates are already loaded",
+ zap.String("domain", d),
+ zap.String("server_name", srvName),
+ )
+ continue
+ }
- // most clients don't accept wildcards like *.tld... we
- // can handle that, but as a courtesy, warn the user
- if strings.Contains(d, "*") &&
- strings.Count(strings.Trim(d, "."), ".") == 1 {
- app.logger.Warn("most clients do not trust second-level wildcard certificates (*.tld)",
- zap.String("domain", d))
- }
+ // most clients don't accept wildcards like *.tld... we
+ // can handle that, but as a courtesy, warn the user
+ if strings.Contains(d, "*") &&
+ strings.Count(strings.Trim(d, "."), ".") == 1 {
+ logger.Warn("most clients do not trust second-level wildcard certificates (*.tld)",
+ zap.String("domain", d))
+ }
- uniqueDomainsForCerts[d] = struct{}{}
+ uniqueDomainsForCerts[d] = struct{}{}
+ }
}
}
@@ -198,19 +242,22 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
// nothing left to do if auto redirects are disabled
if srv.AutoHTTPS.DisableRedir {
+ logger.Info("automatic HTTP->HTTPS redirects are disabled", zap.String("server_name", srvName))
continue
}
- app.logger.Info("enabling automatic HTTP->HTTPS redirects",
- zap.String("server_name", srvName),
- )
+ logger.Info("enabling automatic HTTP->HTTPS redirects", zap.String("server_name", srvName))
// create HTTP->HTTPS redirects
- for _, addr := range srv.Listen {
+ for _, listenAddr := range srv.Listen {
// figure out the address we will redirect to...
- addr, err := caddy.ParseNetworkAddress(addr)
+ addr, err := caddy.ParseNetworkAddress(listenAddr)
if err != nil {
- return fmt.Errorf("%s: invalid listener address: %v", srvName, addr)
+ msg := "%s: invalid listener address: %v"
+ if strings.Count(listenAddr, ":") > 1 {
+ msg = msg + ", there are too many colons, so the port is ambiguous. Did you mean to wrap the IPv6 address with [] brackets?"
+ }
+ return fmt.Errorf(msg, srvName, listenAddr)
}
// this address might not have a hostname, i.e. might be a
@@ -220,7 +267,7 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
// an empty string to indicate a catch-all, which we have to
// treat special later
if len(serverDomainSet) == 0 {
- redirDomains[""] = addr
+ redirDomains[""] = append(redirDomains[""], addr)
continue
}
@@ -230,7 +277,7 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
// port, we'll have to choose one, so prefer the HTTPS port
if _, ok := redirDomains[d]; !ok ||
addr.StartPort == uint(app.httpsPort()) {
- redirDomains[d] = addr
+ redirDomains[d] = append(redirDomains[d], addr)
}
}
}
@@ -239,12 +286,15 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
// we now have a list of all the unique names for which we need certs;
// turn the set into a slice so that phase 2 can use it
app.allCertDomains = make([]string, 0, len(uniqueDomainsForCerts))
- var internal, external []string
+ var internal, tailscale []string
uniqueDomainsLoop:
for d := range uniqueDomainsForCerts {
- // whether or not there is already an automation policy for this
- // name, we should add it to the list to manage a cert for it
- app.allCertDomains = append(app.allCertDomains, d)
+ if !isTailscaleDomain(d) {
+ // whether or not there is already an automation policy for this
+ // name, we should add it to the list to manage a cert for it,
+ // unless it's a Tailscale domain, because we don't manage those
+ app.allCertDomains = append(app.allCertDomains, d)
+ }
// some names we've found might already have automation policies
// explicitly specified for them; we should exclude those from
@@ -252,25 +302,45 @@ uniqueDomainsLoop:
// one automation policy would be confusing and an error
if app.tlsApp.Automation != nil {
for _, ap := range app.tlsApp.Automation.Policies {
- for _, apHost := range ap.Subjects {
+ for _, apHost := range ap.Subjects() {
if apHost == d {
+ // if the automation policy has all internal subjects but no issuers,
+ // it will default to CertMagic's issuers which are public CAs; use
+ // our internal issuer instead
+ if len(ap.Issuers) == 0 && ap.AllInternalSubjects() {
+ iss := new(caddytls.InternalIssuer)
+ if err := iss.Provision(ctx); err != nil {
+ return err
+ }
+ ap.Issuers = append(ap.Issuers, iss)
+ }
continue uniqueDomainsLoop
}
}
}
}
- // if no automation policy exists for the name yet, we
- // will associate it with an implicit one
- if certmagic.SubjectQualifiesForPublicCert(d) {
- external = append(external, d)
- } else {
+ // if no automation policy exists for the name yet, we will associate it with an implicit one;
+ // we handle tailscale domains specially, and we also separate out identifiers that need the
+ // internal issuer (self-signed certs); certmagic does not consider public IP addresses to be
+ // disqualified for public certs, because there are public CAs that will issue certs for IPs.
+ // However, with auto-HTTPS, many times there is no issuer explicitly defined, and the default
+ // issuers do not (currently, as of 2024) issue IP certificates; so assign all IP subjects to
+ // the internal issuer when there are no explicit automation policies
+ shouldUseInternal := func(ident string) bool {
+ usingDefaultIssuersAndIsIP := certmagic.SubjectIsIP(ident) &&
+ (app.tlsApp == nil || app.tlsApp.Automation == nil || len(app.tlsApp.Automation.Policies) == 0)
+ return !certmagic.SubjectQualifiesForPublicCert(d) || usingDefaultIssuersAndIsIP
+ }
+ if isTailscaleDomain(d) {
+ tailscale = append(tailscale, d)
+ } else if shouldUseInternal(d) {
internal = append(internal, d)
}
}
// ensure there is an automation policy to handle these certs
- err := app.createAutomationPolicies(ctx, external, internal)
+ err := app.createAutomationPolicies(ctx, internal, tailscale)
if err != nil {
return err
}
@@ -278,9 +348,11 @@ uniqueDomainsLoop:
// we need to reduce the mapping, i.e. group domains by address
// since new routes are appended to servers by their address
domainsByAddr := make(map[string][]string)
- for domain, addr := range redirDomains {
- addrStr := addr.String()
- domainsByAddr[addrStr] = append(domainsByAddr[addrStr], domain)
+ for domain, addrs := range redirDomains {
+ for _, addr := range addrs {
+ addrStr := addr.String()
+ domainsByAddr[addrStr] = append(domainsByAddr[addrStr], domain)
+ }
}
// these keep track of the redirect server address(es)
@@ -301,31 +373,11 @@ uniqueDomainsLoop:
matcherSet = append(matcherSet, MatchHost(domains))
}
- // build the address to which to redirect
addr, err := caddy.ParseNetworkAddress(addrStr)
if err != nil {
return err
}
- redirTo := "https://{http.request.host}"
- if addr.StartPort != uint(app.httpsPort()) {
- redirTo += ":" + strconv.Itoa(int(addr.StartPort))
- }
- redirTo += "{http.request.uri}"
-
- // build the route
- redirRoute := Route{
- MatcherSets: []MatcherSet{matcherSet},
- Handlers: []MiddlewareHandler{
- StaticResponse{
- StatusCode: WeakString(strconv.Itoa(http.StatusPermanentRedirect)),
- Headers: http.Header{
- "Location": []string{redirTo},
- "Connection": []string{"close"},
- },
- Close: true,
- },
- },
- }
+ redirRoute := app.makeRedirRoute(addr.StartPort, matcherSet)
// use the network/host information from the address,
// but change the port to the HTTP port then rebuild
@@ -353,48 +405,41 @@ uniqueDomainsLoop:
// it's not something that should be relied on. We can change this
// if we want to.
appendCatchAll := func(routes []Route) []Route {
- redirTo := "https://{http.request.host}"
- if app.httpsPort() != DefaultHTTPSPort {
- redirTo += ":" + strconv.Itoa(app.httpsPort())
- }
- redirTo += "{http.request.uri}"
- routes = append(routes, Route{
- MatcherSets: []MatcherSet{{MatchProtocol("http")}},
- Handlers: []MiddlewareHandler{
- StaticResponse{
- StatusCode: WeakString(strconv.Itoa(http.StatusPermanentRedirect)),
- Headers: http.Header{
- "Location": []string{redirTo},
- "Connection": []string{"close"},
- },
- Close: true,
- },
- },
- })
- return routes
+ return append(routes, app.makeRedirRoute(uint(app.httpsPort()), MatcherSet{MatchProtocol("http")}))
}
redirServersLoop:
for redirServerAddr, routes := range redirServers {
// for each redirect listener, see if there's already a
// server configured to listen on that exact address; if so,
- // simply add the redirect route to the end of its route
- // list; otherwise, we'll create a new server for all the
- // listener addresses that are unused and serve the
- // remaining redirects from it
- for srvName, srv := range app.Servers {
- if srv.hasListenerAddress(redirServerAddr) {
- // user has configured a server for the same address
- // that the redirect runs from; simply append our
- // redirect route to the existing routes, with a
- // caveat that their config might override ours
- app.logger.Warn("user server is listening on same interface as automatic HTTP->HTTPS redirects; user-configured routes might override these redirects",
- zap.String("server_name", srvName),
- zap.String("interface", redirServerAddr),
- )
- srv.Routes = append(srv.Routes, appendCatchAll(routes)...)
- continue redirServersLoop
+ // insert the redirect route to the end of its route list
+ // after any other routes with host matchers; otherwise,
+ // we'll create a new server for all the listener addresses
+ // that are unused and serve the remaining redirects from it
+ for _, srv := range app.Servers {
+ // only look at servers which listen on an address which
+ // we want to add redirects to
+ if !srv.hasListenerAddress(redirServerAddr) {
+ continue
}
+
+ // find the index of the route after the last route with a host
+ // matcher, then insert the redirects there, but before any
+ // user-defined catch-all routes
+ // see https://github.com/caddyserver/caddy/issues/3212
+ insertIndex := srv.findLastRouteWithHostMatcher()
+
+ // add the redirects at the insert index, except for when
+ // we have a catch-all for HTTPS, in which case the user's
+ // defined catch-all should take precedence. See #4829
+ if len(uniqueDomainsForCerts) != 0 {
+ srv.Routes = append(srv.Routes[:insertIndex], append(routes, srv.Routes[insertIndex:]...)...)
+ }
+
+ // append our catch-all route in case the user didn't define their own
+ srv.Routes = appendCatchAll(srv.Routes)
+
+ continue redirServersLoop
}
// no server with this listener address exists;
@@ -414,19 +459,57 @@ redirServersLoop:
app.Servers["remaining_auto_https_redirects"] = &Server{
Listen: redirServerAddrsList,
Routes: appendCatchAll(redirRoutes),
+ Logs: logCfg,
}
}
+ logger.Debug("adjusted config",
+ zap.Reflect("tls", app.tlsApp),
+ zap.Reflect("http", app))
+
return nil
}
-// createAutomationPolicy ensures that automated certificates for this
+func (app *App) makeRedirRoute(redirToPort uint, matcherSet MatcherSet) Route {
+ redirTo := "https://{http.request.host}"
+
+ // since this is an external redirect, we should only append an explicit
+ // port if we know it is not the officially standardized HTTPS port, and,
+ // notably, also not the port that Caddy thinks is the HTTPS port (the
+ // configurable HTTPSPort parameter) - we can't change the standard HTTPS
+ // port externally, so that config parameter is for internal use only;
+ // we also do not append the port if it happens to be the HTTP port as
+ // well, obviously (for example, user defines the HTTP port explicitly
+ // in the list of listen addresses for a server)
+ if redirToPort != uint(app.httpPort()) &&
+ redirToPort != uint(app.httpsPort()) &&
+ redirToPort != DefaultHTTPPort &&
+ redirToPort != DefaultHTTPSPort {
+ redirTo += ":" + strconv.Itoa(int(redirToPort))
+ }
+
+ redirTo += "{http.request.uri}"
+ return Route{
+ MatcherSets: []MatcherSet{matcherSet},
+ Handlers: []MiddlewareHandler{
+ StaticResponse{
+ StatusCode: WeakString(strconv.Itoa(http.StatusPermanentRedirect)),
+ Headers: http.Header{
+ "Location": []string{redirTo},
+ },
+ Close: true,
+ },
+ },
+ }
+}
+
+// createAutomationPolicies ensures that automated certificates for this
// app are managed properly. This adds up to two automation policies:
// one for the public names, and one for the internal names. If a catch-all
// automation policy exists, it will be shallow-copied and used as the
// base for the new ones (this is important for preserving behavior the
// user intends to be "defaults").
-func (app *App) createAutomationPolicies(ctx caddy.Context, publicNames, internalNames []string) error {
+func (app *App) createAutomationPolicies(ctx caddy.Context, internalNames, tailscaleNames []string) error {
// before we begin, loop through the existing automation policies
// and, for any ACMEIssuers we find, make sure they're filled in
// with default values that might be specified in our HTTP app; also
@@ -440,44 +523,74 @@ func (app *App) createAutomationPolicies(ctx caddy.Context, publicNames, interna
app.tlsApp.Automation = new(caddytls.AutomationConfig)
}
for _, ap := range app.tlsApp.Automation.Policies {
+ // on-demand policies can have the tailscale manager added implicitly
+ // if there's no explicit manager configured -- for convenience
+ if ap.OnDemand && len(ap.Managers) == 0 {
+ var ts caddytls.Tailscale
+ if err := ts.Provision(ctx); err != nil {
+ return err
+ }
+ ap.Managers = []certmagic.Manager{ts}
+
+ // must reprovision the automation policy so that the underlying
+ // CertMagic config knows about the updated Managers
+ if err := ap.Provision(app.tlsApp); err != nil {
+ return fmt.Errorf("re-provisioning automation policy: %v", err)
+ }
+ }
+
// set up default issuer -- honestly, this is only
// really necessary because the HTTP app is opinionated
// and has settings which could be inferred as new
- // defaults for the ACMEIssuer in the TLS app
- if ap.Issuer == nil {
- ap.Issuer = new(caddytls.ACMEIssuer)
- }
- if acmeIssuer, ok := ap.Issuer.(*caddytls.ACMEIssuer); ok {
- err := app.fillInACMEIssuer(acmeIssuer)
+ // defaults for the ACMEIssuer in the TLS app (such as
+ // what the HTTP and HTTPS ports are)
+ if ap.Issuers == nil {
+ var err error
+ ap.Issuers, err = caddytls.DefaultIssuersProvisioned(ctx)
if err != nil {
return err
}
}
+ for _, iss := range ap.Issuers {
+ if acmeIssuer, ok := iss.(acmeCapable); ok {
+ err := app.fillInACMEIssuer(acmeIssuer.GetACMEIssuer())
+ if err != nil {
+ return err
+ }
+ }
+ }
// while we're here, is this the catch-all/base policy?
- if !foundBasePolicy && len(ap.Subjects) == 0 {
+ if !foundBasePolicy && len(ap.SubjectsRaw) == 0 {
basePolicy = ap
foundBasePolicy = true
}
}
if basePolicy == nil {
- // no base policy found, we will make one!
+ // no base policy found; we will make one
basePolicy = new(caddytls.AutomationPolicy)
}
- // if the basePolicy has an existing ACMEIssuer, let's
- // use it, otherwise we'll make one
- baseACMEIssuer, _ := basePolicy.Issuer.(*caddytls.ACMEIssuer)
+ // if the basePolicy has an existing ACMEIssuer (particularly to
+ // include any type that embeds/wraps an ACMEIssuer), let's use it
+ // (I guess we just use the first one?), otherwise we'll make one
+ var baseACMEIssuer *caddytls.ACMEIssuer
+ for _, iss := range basePolicy.Issuers {
+ if acmeWrapper, ok := iss.(acmeCapable); ok {
+ baseACMEIssuer = acmeWrapper.GetACMEIssuer()
+ break
+ }
+ }
if baseACMEIssuer == nil {
- // note that this happens if basePolicy.Issuer is nil
- // OR if it is not nil but is not an ACMEIssuer
+ // note that this happens if basePolicy.Issuers is empty
+ // OR if it is not empty but does not have an ACMEIssuer
baseACMEIssuer = new(caddytls.ACMEIssuer)
}
// if there was a base policy to begin with, we already
// filled in its issuer's defaults; if there wasn't, we
- // stil need to do that
+ // still need to do that
if !foundBasePolicy {
err := app.fillInACMEIssuer(baseACMEIssuer)
if err != nil {
@@ -486,8 +599,20 @@ func (app *App) createAutomationPolicies(ctx caddy.Context, publicNames, interna
}
// never overwrite any other issuer that might already be configured
- if basePolicy.Issuer == nil {
- basePolicy.Issuer = baseACMEIssuer
+ if basePolicy.Issuers == nil {
+ var err error
+ basePolicy.Issuers, err = caddytls.DefaultIssuersProvisioned(ctx)
+ if err != nil {
+ return err
+ }
+ for _, iss := range basePolicy.Issuers {
+ if acmeIssuer, ok := iss.(acmeCapable); ok {
+ err := app.fillInACMEIssuer(acmeIssuer.GetACMEIssuer())
+ if err != nil {
+ return err
+ }
+ }
+ }
}
if !foundBasePolicy {
@@ -495,7 +620,10 @@ func (app *App) createAutomationPolicies(ctx caddy.Context, publicNames, interna
// our base/catch-all policy - this will serve the
// public-looking names as well as any other names
// that don't match any other policy
- app.tlsApp.AddAutomationPolicy(basePolicy)
+ err := app.tlsApp.AddAutomationPolicy(basePolicy)
+ if err != nil {
+ return err
+ }
} else {
// a base policy already existed; we might have
// changed it, so re-provision it
@@ -540,9 +668,29 @@ func (app *App) createAutomationPolicies(ctx caddy.Context, publicNames, interna
// rather they just want to change the CA for the set
// of names that would normally use the production API;
// anyway, that gets into the weeds a bit...
- newPolicy.Subjects = internalNames
- newPolicy.Issuer = internalIssuer
+ newPolicy.SubjectsRaw = internalNames
+ newPolicy.Issuers = []certmagic.Issuer{internalIssuer}
+ err := app.tlsApp.AddAutomationPolicy(newPolicy)
+ if err != nil {
+ return err
+ }
+ }
+ // tailscale names go in their own automation policies because
+ // they require on-demand TLS to be enabled, which we obviously
+ // can't enable for everything
+ if len(tailscaleNames) > 0 {
+ policyCopy := *basePolicy
+ newPolicy := &policyCopy
+
+ var ts caddytls.Tailscale
+ if err := ts.Provision(ctx); err != nil {
+ return err
+ }
+
+ newPolicy.SubjectsRaw = tailscaleNames
+ newPolicy.Issuers = nil
+ newPolicy.Managers = append(newPolicy.Managers, ts)
err := app.tlsApp.AddAutomationPolicy(newPolicy)
if err != nil {
return err
@@ -621,8 +769,14 @@ func (app *App) automaticHTTPSPhase2() error {
)
err := app.tlsApp.Manage(app.allCertDomains)
if err != nil {
- return fmt.Errorf("managing certificates for %v: %s", app.allCertDomains, err)
+ return fmt.Errorf("managing certificates for %d domains: %s", len(app.allCertDomains), err)
}
app.allCertDomains = nil // no longer needed; allow GC to deallocate
return nil
}
+
+func isTailscaleDomain(name string) bool {
+ return strings.HasSuffix(strings.ToLower(name), ".ts.net")
+}
+
+type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer }
diff --git a/modules/caddyhttp/caddyauth/basicauth.go b/modules/caddyhttp/caddyauth/basicauth.go
index d709f94e..52a5a08c 100644
--- a/modules/caddyhttp/caddyauth/basicauth.go
+++ b/modules/caddyhttp/caddyauth/basicauth.go
@@ -16,9 +16,15 @@ package caddyauth
import (
"encoding/base64"
+ "encoding/hex"
"encoding/json"
"fmt"
+ weakrand "math/rand"
"net/http"
+ "strings"
+ "sync"
+
+ "golang.org/x/sync/singleflight"
"github.com/caddyserver/caddy/v2"
)
@@ -38,8 +44,27 @@ type HTTPBasicAuth struct {
// The name of the realm. Default: restricted
Realm string `json:"realm,omitempty"`
+ // If non-nil, a mapping of plaintext passwords to their
+ // hashes will be cached in memory (with random eviction).
+ // This can greatly improve the performance of traffic-heavy
+ // servers that use secure password hashing algorithms, with
+ // the downside that plaintext passwords will be stored in
+ // memory for a longer time (this should not be a problem
+ // as long as your machine is not compromised, at which point
+ // all bets are off, since basicauth necessitates plaintext
+ // passwords being received over the wire anyway). Note that
+ // a cache hit does not mean it is a valid password.
+ HashCache *Cache `json:"hash_cache,omitempty"`
+
Accounts map[string]Account `json:"-"`
Hash Comparer `json:"-"`
+
+ // fakePassword is used when a given user is not found,
+ // so that timing side-channels can be mitigated: it gives
+ // us something to hash and compare even if the user does
+ // not exist, which should have similar timing as a user
+ // account that does exist.
+ fakePassword []byte
}
// CaddyModule returns the Caddy module information.
@@ -67,6 +92,11 @@ func (hba *HTTPBasicAuth) Provision(ctx caddy.Context) error {
return fmt.Errorf("hash is required")
}
+ // if supported, generate a fake password we can compare against if needed
+ if hasher, ok := hba.Hash.(Hasher); ok {
+ hba.fakePassword = hasher.FakeHash()
+ }
+
repl := caddy.NewReplacer()
// load account list
@@ -78,20 +108,21 @@ func (hba *HTTPBasicAuth) Provision(ctx caddy.Context) error {
acct.Username = repl.ReplaceAll(acct.Username, "")
acct.Password = repl.ReplaceAll(acct.Password, "")
- acct.Salt = repl.ReplaceAll(acct.Salt, "")
if acct.Username == "" || acct.Password == "" {
return fmt.Errorf("account %d: username and password are required", i)
}
- acct.password, err = base64.StdEncoding.DecodeString(acct.Password)
- if err != nil {
- return fmt.Errorf("base64-decoding password: %v", err)
- }
- if acct.Salt != "" {
- acct.salt, err = base64.StdEncoding.DecodeString(acct.Salt)
+ // TODO: Remove support for redundantly-encoded b64-encoded hashes
+ // Passwords starting with '$' are likely in Modular Crypt Format,
+ // so we don't need to base64 decode them. But historically, we
+ // required redundant base64, so we try to decode it otherwise.
+ if strings.HasPrefix(acct.Password, "$") {
+ acct.password = []byte(acct.Password)
+ } else {
+ acct.password, err = base64.StdEncoding.DecodeString(acct.Password)
if err != nil {
- return fmt.Errorf("base64-decoding salt: %v", err)
+ return fmt.Errorf("base64-decoding password: %v", err)
}
}
@@ -99,6 +130,12 @@ func (hba *HTTPBasicAuth) Provision(ctx caddy.Context) error {
}
hba.AccountList = nil // allow GC to deallocate
+ if hba.HashCache != nil {
+ hba.HashCache.cache = make(map[string]bool)
+ hba.HashCache.mu = new(sync.RWMutex)
+ hba.HashCache.g = new(singleflight.Group)
+ }
+
return nil
}
@@ -109,23 +146,64 @@ func (hba HTTPBasicAuth) Authenticate(w http.ResponseWriter, req *http.Request)
return hba.promptForCredentials(w, nil)
}
- plaintextPassword := []byte(plaintextPasswordStr)
-
account, accountExists := hba.Accounts[username]
- // don't return early if account does not exist; we want
- // to try to avoid side-channels that leak existence
-
- same, err := hba.Hash.Compare(account.password, plaintextPassword, account.salt)
- if err != nil {
- return hba.promptForCredentials(w, err)
+ if !accountExists {
+ // don't return early if account does not exist; we want
+ // to try to avoid side-channels that leak existence, so
+ // we use a fake password to simulate realistic CPU cycles
+ account.password = hba.fakePassword
}
- if !same || !accountExists {
- return hba.promptForCredentials(w, nil)
+
+ same, err := hba.correctPassword(account, []byte(plaintextPasswordStr))
+ if err != nil || !same || !accountExists {
+ return hba.promptForCredentials(w, err)
}
return User{ID: username}, true, nil
}
+func (hba HTTPBasicAuth) correctPassword(account Account, plaintextPassword []byte) (bool, error) {
+ compare := func() (bool, error) {
+ return hba.Hash.Compare(account.password, plaintextPassword)
+ }
+
+ // if no caching is enabled, simply return the result of hashing + comparing
+ if hba.HashCache == nil {
+ return compare()
+ }
+
+ // compute a cache key that is unique for these input parameters
+ cacheKey := hex.EncodeToString(append(account.password, plaintextPassword...))
+
+ // fast track: if the result of the input is already cached, use it
+ hba.HashCache.mu.RLock()
+ same, ok := hba.HashCache.cache[cacheKey]
+ hba.HashCache.mu.RUnlock()
+ if ok {
+ return same, nil
+ }
+ // slow track: do the expensive op, then add it to the cache
+ // but perform it in a singleflight group so that multiple
+ // parallel requests using the same password don't cause a
+ // thundering herd problem by all performing the same hashing
+ // operation before the first one finishes and caches it.
+ v, err, _ := hba.HashCache.g.Do(cacheKey, func() (any, error) {
+ return compare()
+ })
+ if err != nil {
+ return false, err
+ }
+ same = v.(bool)
+ hba.HashCache.mu.Lock()
+ if len(hba.HashCache.cache) >= 1000 {
+ hba.HashCache.makeRoom() // keep cache size under control
+ }
+ hba.HashCache.cache[cacheKey] = same
+ hba.HashCache.mu.Unlock()
+
+ return same, nil
+}
+
func (hba HTTPBasicAuth) promptForCredentials(w http.ResponseWriter, err error) (User, bool, error) {
// browsers show a message that says something like:
// "The website says: "
@@ -138,6 +216,49 @@ func (hba HTTPBasicAuth) promptForCredentials(w http.ResponseWriter, err error)
return User{}, false, err
}
+// Cache enables caching of basic auth results. This is especially
+// helpful for secure password hashes which can be expensive to
+// compute on every HTTP request.
+type Cache struct {
+ mu *sync.RWMutex
+ g *singleflight.Group
+
+ // map of concatenated hashed password + plaintext password, to result
+ cache map[string]bool
+}
+
+// makeRoom deletes about 1/10 of the items in the cache
+// in order to keep its size under control. It must not be
+// called without a lock on c.mu.
+func (c *Cache) makeRoom() {
+ // we delete more than just 1 entry so that we don't have
+ // to do this on every request; assuming the capacity of
+ // the cache is on a long tail, we can save a lot of CPU
+ // time by doing a whole bunch of deletions now and then
+ // we won't have to do them again for a while
+ numToDelete := len(c.cache) / 10
+ if numToDelete < 1 {
+ numToDelete = 1
+ }
+ for deleted := 0; deleted <= numToDelete; deleted++ {
+ // Go maps are "nondeterministic" not actually random,
+ // so although we could just chop off the "front" of the
+ // map with less code, this is a heavily skewed eviction
+ // strategy; generating random numbers is cheap and
+ // ensures a much better distribution.
+ //nolint:gosec
+ rnd := weakrand.Intn(len(c.cache))
+ i := 0
+ for key := range c.cache {
+ if i == rnd {
+ delete(c.cache, key)
+ break
+ }
+ i++
+ }
+ }
+}
+
// Comparer is a type that can securely compare
// a plaintext password with a hashed password
// in constant-time. Comparers should hash the
@@ -145,25 +266,33 @@ func (hba HTTPBasicAuth) promptForCredentials(w http.ResponseWriter, err error)
// comparison.
type Comparer interface {
// Compare returns true if the result of hashing
- // plaintextPassword with salt is hashedPassword,
- // false otherwise. An error is returned only if
+ // plaintextPassword is hashedPassword, false
+ // otherwise. An error is returned only if
// there is a technical/configuration error.
- Compare(hashedPassword, plaintextPassword, salt []byte) (bool, error)
+ Compare(hashedPassword, plaintextPassword []byte) (bool, error)
}
-// Account contains a username, password, and salt (if applicable).
+// Hasher is a type that can generate a secure hash
+// given a plaintext. Hashing modules which implement
+// this interface can be used with the hash-password
+// subcommand as well as benefitting from anti-timing
+// features. A hasher also returns a fake hash which
+// can be used for timing side-channel mitigation.
+type Hasher interface {
+ Hash(plaintext []byte) ([]byte, error)
+ FakeHash() []byte
+}
+
+// Account contains a username and password.
type Account struct {
// A user's username.
Username string `json:"username"`
- // The user's hashed password, base64-encoded.
+ // The user's hashed password, in Modular Crypt Format (with `$` prefix)
+ // or base64-encoded.
Password string `json:"password"`
- // The user's password salt, base64-encoded; for
- // algorithms where external salt is needed.
- Salt string `json:"salt,omitempty"`
-
- password, salt []byte
+ password []byte
}
// Interface guards
diff --git a/modules/caddyhttp/caddyauth/caddyauth.go b/modules/caddyhttp/caddyauth/caddyauth.go
index a88bff60..f799d7a0 100644
--- a/modules/caddyhttp/caddyauth/caddyauth.go
+++ b/modules/caddyhttp/caddyauth/caddyauth.go
@@ -16,9 +16,11 @@ package caddyauth
import (
"fmt"
- "log"
"net/http"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
@@ -30,6 +32,11 @@ func init() {
// Authentication is a middleware which provides user authentication.
// Rejects requests with HTTP 401 if the request is not authenticated.
//
+// After a successful authentication, the placeholder
+// `{http.auth.user.id}` will be set to the username, and also
+// `{http.auth.user.*}` placeholders may be set for any authentication
+// modules that provide user metadata.
+//
// Its API is still experimental and may be subject to change.
type Authentication struct {
// A set of authentication providers. If none are specified,
@@ -37,6 +44,8 @@ type Authentication struct {
ProvidersRaw caddy.ModuleMap `json:"providers,omitempty" caddy:"namespace=http.authentication.providers"`
Providers map[string]Authenticator `json:"-"`
+
+ logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
@@ -49,12 +58,13 @@ func (Authentication) CaddyModule() caddy.ModuleInfo {
// Provision sets up a.
func (a *Authentication) Provision(ctx caddy.Context) error {
+ a.logger = ctx.Logger()
a.Providers = make(map[string]Authenticator)
mods, err := ctx.LoadModule(a, "ProvidersRaw")
if err != nil {
return fmt.Errorf("loading authentication providers: %v", err)
}
- for modName, modIface := range mods.(map[string]interface{}) {
+ for modName, modIface := range mods.(map[string]any) {
a.Providers[modName] = modIface.(Authenticator)
}
return nil
@@ -67,7 +77,9 @@ func (a Authentication) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
for provName, prov := range a.Providers {
user, authed, err = prov.Authenticate(w, r)
if err != nil {
- log.Printf("[ERROR] Authenticating with %s: %v", provName, err)
+ if c := a.logger.Check(zapcore.ErrorLevel, "auth provider returned error"); c != nil {
+ c.Write(zap.String("provider", provName), zap.Error(err))
+ }
continue
}
if authed {
diff --git a/modules/caddyhttp/caddyauth/caddyfile.go b/modules/caddyhttp/caddyauth/caddyfile.go
index 31acd61e..cc92477e 100644
--- a/modules/caddyhttp/caddyauth/caddyfile.go
+++ b/modules/caddyhttp/caddyauth/caddyfile.go
@@ -22,64 +22,71 @@ import (
)
func init() {
- httpcaddyfile.RegisterHandlerDirective("basicauth", parseCaddyfile)
+ httpcaddyfile.RegisterHandlerDirective("basicauth", parseCaddyfile) // deprecated
+ httpcaddyfile.RegisterHandlerDirective("basic_auth", parseCaddyfile)
}
// parseCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
-// basicauth [] [] {
-// []
-// ...
-// }
+// basic_auth [] [ []] {
+//
+// ...
+// }
//
// If no hash algorithm is supplied, bcrypt will be assumed.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive name
+
+ // "basicauth" is deprecated, replaced by "basic_auth"
+ if h.Val() == "basicauth" {
+ caddy.Log().Named("config.adapter.caddyfile").Warn("the 'basicauth' directive is deprecated, please use 'basic_auth' instead!")
+ }
+
var ba HTTPBasicAuth
+ ba.HashCache = new(Cache)
- for h.Next() {
- var cmp Comparer
- args := h.RemainingArgs()
+ var cmp Comparer
+ args := h.RemainingArgs()
- var hashName string
- switch len(args) {
- case 0:
- hashName = "bcrypt"
- case 1:
- hashName = args[0]
- default:
+ var hashName string
+ switch len(args) {
+ case 0:
+ hashName = "bcrypt"
+ case 1:
+ hashName = args[0]
+ case 2:
+ hashName = args[0]
+ ba.Realm = args[1]
+ default:
+ return nil, h.ArgErr()
+ }
+
+ switch hashName {
+ case "bcrypt":
+ cmp = BcryptHash{}
+ default:
+ return nil, h.Errf("unrecognized hash algorithm: %s", hashName)
+ }
+
+ ba.HashRaw = caddyconfig.JSONModuleObject(cmp, "algorithm", hashName, nil)
+
+ for h.NextBlock(0) {
+ username := h.Val()
+
+ var b64Pwd string
+ h.Args(&b64Pwd)
+ if h.NextArg() {
return nil, h.ArgErr()
}
- switch hashName {
- case "bcrypt":
- cmp = BcryptHash{}
- case "scrypt":
- cmp = ScryptHash{}
- default:
- return nil, h.Errf("unrecognized hash algorithm: %s", hashName)
+ if username == "" || b64Pwd == "" {
+ return nil, h.Err("username and password cannot be empty or missing")
}
- ba.HashRaw = caddyconfig.JSONModuleObject(cmp, "algorithm", hashName, nil)
-
- for h.NextBlock(0) {
- username := h.Val()
-
- var b64Pwd, b64Salt string
- h.Args(&b64Pwd, &b64Salt)
- if h.NextArg() {
- return nil, h.ArgErr()
- }
-
- if username == "" || b64Pwd == "" {
- return nil, h.Err("username and password cannot be empty or missing")
- }
-
- ba.AccountList = append(ba.AccountList, Account{
- Username: username,
- Password: b64Pwd,
- Salt: b64Salt,
- })
- }
+ ba.AccountList = append(ba.AccountList, Account{
+ Username: username,
+ Password: b64Pwd,
+ })
}
return Authentication{
diff --git a/modules/caddyhttp/caddyauth/command.go b/modules/caddyhttp/caddyauth/command.go
index 24f6c5a1..c9f44006 100644
--- a/modules/caddyhttp/caddyauth/command.go
+++ b/modules/caddyhttp/caddyauth/command.go
@@ -15,60 +15,101 @@
package caddyauth
import (
- "encoding/base64"
- "flag"
+ "bufio"
+ "bytes"
"fmt"
+ "os"
+ "os/signal"
+
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+
+ caddycmd "github.com/caddyserver/caddy/v2/cmd"
"github.com/caddyserver/caddy/v2"
- caddycmd "github.com/caddyserver/caddy/v2/cmd"
- "golang.org/x/crypto/bcrypt"
- "golang.org/x/crypto/scrypt"
)
func init() {
caddycmd.RegisterCommand(caddycmd.Command{
Name: "hash-password",
- Func: cmdHashPassword,
- Usage: "--plaintext [--salt ] [--algorithm ]",
+ Usage: "[--plaintext ] [--algorithm ]",
Short: "Hashes a password and writes base64",
Long: `
Convenient way to hash a plaintext password. The resulting
hash is written to stdout as a base64 string.
---algorithm may be bcrypt or scrypt. If script, the default
-parameters are used.
+--plaintext, when omitted, will be read from stdin. If
+Caddy is attached to a controlling tty, the plaintext will
+not be echoed.
-Use the --salt flag for algorithms which require a salt to
-be provided (scrypt).
+--algorithm currently only supports 'bcrypt', and is the default.
`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("hash-password", flag.ExitOnError)
- fs.String("algorithm", "bcrypt", "Name of the hash algorithm")
- fs.String("plaintext", "", "The plaintext password")
- fs.String("salt", "", "The password salt")
- return fs
- }(),
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().StringP("plaintext", "p", "", "The plaintext password")
+ cmd.Flags().StringP("algorithm", "a", "bcrypt", "Name of the hash algorithm")
+ cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdHashPassword)
+ },
})
}
func cmdHashPassword(fs caddycmd.Flags) (int, error) {
+ var err error
+
algorithm := fs.String("algorithm")
plaintext := []byte(fs.String("plaintext"))
- salt := []byte(fs.String("salt"))
if len(plaintext) == 0 {
- return caddy.ExitCodeFailedStartup, fmt.Errorf("password is required")
+ fd := int(os.Stdin.Fd())
+ if term.IsTerminal(fd) {
+ // ensure the terminal state is restored on SIGINT
+ state, _ := term.GetState(fd)
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+ go func() {
+ <-c
+ _ = term.Restore(fd, state)
+ os.Exit(caddy.ExitCodeFailedStartup)
+ }()
+ defer signal.Stop(c)
+
+ fmt.Fprint(os.Stderr, "Enter password: ")
+ plaintext, err = term.ReadPassword(fd)
+ fmt.Fprintln(os.Stderr)
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
+ }
+
+ fmt.Fprint(os.Stderr, "Confirm password: ")
+ confirmation, err := term.ReadPassword(fd)
+ fmt.Fprintln(os.Stderr)
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
+ }
+
+ if !bytes.Equal(plaintext, confirmation) {
+ return caddy.ExitCodeFailedStartup, fmt.Errorf("password does not match")
+ }
+ } else {
+ rd := bufio.NewReader(os.Stdin)
+ plaintext, err = rd.ReadBytes('\n')
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
+ }
+
+ plaintext = plaintext[:len(plaintext)-1] // Trailing newline
+ }
+
+ if len(plaintext) == 0 {
+ return caddy.ExitCodeFailedStartup, fmt.Errorf("plaintext is required")
+ }
}
var hash []byte
- var err error
+ var hashString string
switch algorithm {
case "bcrypt":
- hash, err = bcrypt.GenerateFromPassword(plaintext, bcrypt.DefaultCost)
- case "scrypt":
- def := ScryptHash{}
- def.SetDefaults()
- hash, err = scrypt.Key(plaintext, salt, def.N, def.R, def.P, def.KeyLength)
+ hash, err = BcryptHash{}.Hash(plaintext)
+ hashString = string(hash)
default:
return caddy.ExitCodeFailedStartup, fmt.Errorf("unrecognized hash algorithm: %s", algorithm)
}
@@ -76,9 +117,7 @@ func cmdHashPassword(fs caddycmd.Flags) (int, error) {
return caddy.ExitCodeFailedStartup, err
}
- hashBase64 := base64.StdEncoding.EncodeToString(hash)
-
- fmt.Println(hashBase64)
+ fmt.Println(hashString)
return 0, nil
}
diff --git a/modules/caddyhttp/caddyauth/hashes.go b/modules/caddyhttp/caddyauth/hashes.go
index 5a3173eb..ce3df901 100644
--- a/modules/caddyhttp/caddyauth/hashes.go
+++ b/modules/caddyhttp/caddyauth/hashes.go
@@ -15,16 +15,13 @@
package caddyauth
import (
- "crypto/subtle"
+ "golang.org/x/crypto/bcrypt"
"github.com/caddyserver/caddy/v2"
- "golang.org/x/crypto/bcrypt"
- "golang.org/x/crypto/scrypt"
)
func init() {
caddy.RegisterModule(BcryptHash{})
- caddy.RegisterModule(ScryptHash{})
}
// BcryptHash implements the bcrypt hash.
@@ -39,7 +36,7 @@ func (BcryptHash) CaddyModule() caddy.ModuleInfo {
}
// Compare compares passwords.
-func (BcryptHash) Compare(hashed, plaintext, _ []byte) (bool, error) {
+func (BcryptHash) Compare(hashed, plaintext []byte) (bool, error) {
err := bcrypt.CompareHashAndPassword(hashed, plaintext)
if err == bcrypt.ErrMismatchedHashAndPassword {
return false, nil
@@ -50,76 +47,20 @@ func (BcryptHash) Compare(hashed, plaintext, _ []byte) (bool, error) {
return true, nil
}
-// ScryptHash implements the scrypt KDF as a hash.
-type ScryptHash struct {
- // scrypt's N parameter. If unset or 0, a safe default is used.
- N int `json:"N,omitempty"`
-
- // scrypt's r parameter. If unset or 0, a safe default is used.
- R int `json:"r,omitempty"`
-
- // scrypt's p parameter. If unset or 0, a safe default is used.
- P int `json:"p,omitempty"`
-
- // scrypt's key length parameter (in bytes). If unset or 0, a
- // safe default is used.
- KeyLength int `json:"key_length,omitempty"`
+// Hash hashes plaintext using a random salt.
+func (BcryptHash) Hash(plaintext []byte) ([]byte, error) {
+ return bcrypt.GenerateFromPassword(plaintext, 14)
}
-// CaddyModule returns the Caddy module information.
-func (ScryptHash) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.authentication.hashes.scrypt",
- New: func() caddy.Module { return new(ScryptHash) },
- }
-}
-
-// Provision sets up s.
-func (s *ScryptHash) Provision(_ caddy.Context) error {
- s.SetDefaults()
- return nil
-}
-
-// SetDefaults sets safe default parameters, but does
-// not overwrite existing values. Each default parameter
-// is set independently; it does not check to ensure
-// that r*p < 2^30. The defaults chosen are those as
-// recommended in 2019 by
-// https://godoc.org/golang.org/x/crypto/scrypt.
-func (s *ScryptHash) SetDefaults() {
- if s.N == 0 {
- s.N = 32768
- }
- if s.R == 0 {
- s.R = 8
- }
- if s.P == 0 {
- s.P = 1
- }
- if s.KeyLength == 0 {
- s.KeyLength = 32
- }
-}
-
-// Compare compares passwords.
-func (s ScryptHash) Compare(hashed, plaintext, salt []byte) (bool, error) {
- ourHash, err := scrypt.Key(plaintext, salt, s.N, s.R, s.P, s.KeyLength)
- if err != nil {
- return false, err
- }
- if hashesMatch(hashed, ourHash) {
- return true, nil
- }
- return false, nil
-}
-
-func hashesMatch(pwdHash1, pwdHash2 []byte) bool {
- return subtle.ConstantTimeCompare(pwdHash1, pwdHash2) == 1
+// FakeHash returns a fake hash.
+func (BcryptHash) FakeHash() []byte {
+ // hashed with the following command:
+ // caddy hash-password --plaintext "antitiming" --algorithm "bcrypt"
+ return []byte("$2a$14$X3ulqf/iGxnf1k6oMZ.RZeJUoqI9PX2PM4rS5lkIKJXduLGXGPrt6")
}
// Interface guards
var (
- _ Comparer = (*BcryptHash)(nil)
- _ Comparer = (*ScryptHash)(nil)
- _ caddy.Provisioner = (*ScryptHash)(nil)
+ _ Comparer = (*BcryptHash)(nil)
+ _ Hasher = (*BcryptHash)(nil)
)
diff --git a/modules/caddyhttp/caddyhttp.go b/modules/caddyhttp/caddyhttp.go
index fda7a929..aacafc92 100644
--- a/modules/caddyhttp/caddyhttp.go
+++ b/modules/caddyhttp/caddyhttp.go
@@ -18,28 +18,44 @@ import (
"bytes"
"encoding/json"
"io"
- weakrand "math/rand"
"net"
"net/http"
+ "path"
+ "path/filepath"
"strconv"
- "time"
+ "strings"
"github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
- weakrand.Seed(time.Now().UnixNano())
-
caddy.RegisterModule(tlsPlaceholderWrapper{})
}
// RequestMatcher is a type that can match to a request.
// A route matcher MUST NOT modify the request, with the
// only exception being its context.
+//
+// Deprecated: Matchers should now implement RequestMatcherWithError.
+// You may remove any interface guards for RequestMatcher
+// but keep your Match() methods for backwards compatibility.
type RequestMatcher interface {
Match(*http.Request) bool
}
+// RequestMatcherWithError is like RequestMatcher but can return an error.
+// An error during matching will abort the request middleware chain and
+// invoke the error middleware chain.
+//
+// This will eventually replace RequestMatcher. Matcher modules
+// should implement both interfaces, and once all modules have
+// been updated to use RequestMatcherWithError, the RequestMatcher
+// interface may eventually be dropped.
+type RequestMatcherWithError interface {
+ MatchWithError(*http.Request) (bool, error)
+}
+
// Handler is like http.Handler except ServeHTTP may return an error.
//
// If any handler encounters an error, it should be returned for proper
@@ -76,7 +92,10 @@ type MiddlewareHandler interface {
}
// emptyHandler is used as a no-op handler.
-var emptyHandler Handler = HandlerFunc(func(http.ResponseWriter, *http.Request) error { return nil })
+var emptyHandler Handler = HandlerFunc(func(_ http.ResponseWriter, req *http.Request) error {
+ SetVar(req.Context(), "unhandled", true)
+ return nil
+})
// An implicit suffix middleware that, if reached, sets the StatusCode to the
// error stored in the ErrorCtxKey. This is to prevent situations where the
@@ -92,6 +111,45 @@ var errorEmptyHandler Handler = HandlerFunc(func(w http.ResponseWriter, r *http.
return nil
})
+// ResponseHandler pairs a response matcher with custom handling
+// logic. Either the status code can be changed to something else
+// while using the original response body, or, if a status code
+// is not set, it can execute a custom route list; this is useful
+// for executing handler routes based on the properties of an HTTP
+// response that has not been written out to the client yet.
+//
+// To use this type, provision it at module load time, then when
+// ready to use, match the response against its matcher; if it
+// matches (or doesn't have a matcher), change the status code on
+// the response if configured; otherwise invoke the routes by
+// calling `rh.Routes.Compile(next).ServeHTTP(rw, req)` (or similar).
+type ResponseHandler struct {
+ // The response matcher for this handler. If empty/nil,
+ // it always matches.
+ Match *ResponseMatcher `json:"match,omitempty"`
+
+ // To write the original response body but with a different
+ // status code, set this field to the desired status code.
+ // If set, this takes priority over routes.
+ StatusCode WeakString `json:"status_code,omitempty"`
+
+ // The list of HTTP routes to execute if no status code is
+ // specified. If evaluated, the original response body
+ // will not be written.
+ Routes RouteList `json:"routes,omitempty"`
+}
+
+// Provision sets up the routes in rh.
+func (rh *ResponseHandler) Provision(ctx caddy.Context) error {
+ if rh.Routes != nil {
+ err := rh.Routes.Provision(ctx)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// WeakString is a type that unmarshals any JSON value
// as a string literal, with the following exceptions:
//
@@ -181,6 +239,74 @@ func StatusCodeMatches(actual, configured int) bool {
return false
}
+// SanitizedPathJoin performs filepath.Join(root, reqPath) that
+// is safe against directory traversal attacks. It uses logic
+// similar to that in the Go standard library, specifically
+// in the implementation of http.Dir. The root is assumed to
+// be a trusted path, but reqPath is not; and the output will
+// never be outside of root. The resulting path can be used
+// with the local file system. If root is empty, the current
+// directory is assumed. If the cleaned request path is deemed
+// not local according to lexical processing (i.e. ignoring links),
+// it will be rejected as unsafe and only the root will be returned.
+func SanitizedPathJoin(root, reqPath string) string {
+ if root == "" {
+ root = "."
+ }
+
+ relPath := path.Clean("/" + reqPath)[1:] // clean path and trim the leading /
+ if relPath != "" && !filepath.IsLocal(relPath) {
+ // path is unsafe (see https://github.com/golang/go/issues/56336#issuecomment-1416214885)
+ return root
+ }
+
+ path := filepath.Join(root, filepath.FromSlash(relPath))
+
+ // filepath.Join also cleans the path, and cleaning strips
+ // the trailing slash, so we need to re-add it afterwards.
+ // if the length is 1, then it's a path to the root,
+ // and that should return ".", so we don't append the separator.
+ if strings.HasSuffix(reqPath, "/") && len(reqPath) > 1 {
+ path += separator
+ }
+
+ return path
+}
+
+// CleanPath cleans path p according to path.Clean(), but only
+// merges repeated slashes if collapseSlashes is true, and always
+// preserves trailing slashes.
+func CleanPath(p string, collapseSlashes bool) string {
+ if collapseSlashes {
+ return cleanPath(p)
+ }
+
+ // insert an invalid/impossible URI character into each two consecutive
+ // slashes to expand empty path segments; then clean the path as usual,
+ // and then remove the remaining temporary characters.
+ const tmpCh = 0xff
+ var sb strings.Builder
+ for i, ch := range p {
+ if ch == '/' && i > 0 && p[i-1] == '/' {
+ sb.WriteByte(tmpCh)
+ }
+ sb.WriteRune(ch)
+ }
+ halfCleaned := cleanPath(sb.String())
+ halfCleaned = strings.ReplaceAll(halfCleaned, string([]byte{tmpCh}), "")
+
+ return halfCleaned
+}
+
+// cleanPath does path.Clean(p) but preserves any trailing slash.
+func cleanPath(p string) string {
+ cleaned := path.Clean(p)
+ if cleaned != "/" && strings.HasSuffix(p, "/") {
+ cleaned = cleaned + "/"
+ }
+ return cleaned
+}
+
// tlsPlaceholderWrapper is a no-op listener wrapper that marks
// where the TLS listener should be in a chain of listener wrappers.
// It should only be used if another listener wrapper must be placed
@@ -196,6 +322,8 @@ func (tlsPlaceholderWrapper) CaddyModule() caddy.ModuleInfo {
func (tlsPlaceholderWrapper) WrapListener(ln net.Listener) net.Listener { return ln }
+func (tlsPlaceholderWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { return nil }
+
const (
// DefaultHTTPPort is the default port for HTTP.
DefaultHTTPPort = 80
@@ -204,5 +332,10 @@ const (
DefaultHTTPSPort = 443
)
+const separator = string(filepath.Separator)
+
// Interface guard
-var _ caddy.ListenerWrapper = (*tlsPlaceholderWrapper)(nil)
+var (
+ _ caddy.ListenerWrapper = (*tlsPlaceholderWrapper)(nil)
+ _ caddyfile.Unmarshaler = (*tlsPlaceholderWrapper)(nil)
+)
diff --git a/modules/caddyhttp/caddyhttp_test.go b/modules/caddyhttp/caddyhttp_test.go
new file mode 100644
index 00000000..aeed0135
--- /dev/null
+++ b/modules/caddyhttp/caddyhttp_test.go
@@ -0,0 +1,205 @@
+package caddyhttp
+
+import (
+ "net/url"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+func TestSanitizedPathJoin(t *testing.T) {
+ // For reference:
+ // %2e = .
+ // %2f = /
+ // %5c = \
+ for i, tc := range []struct {
+ inputRoot string
+ inputPath string
+ expect string
+ expectWindows string
+ }{
+ {
+ inputPath: "",
+ expect: ".",
+ },
+ {
+ inputPath: "/",
+ expect: ".",
+ },
+ {
+ // fileserver.MatchFile passes an inputPath of "//" for some try_files values.
+ // See https://github.com/caddyserver/caddy/issues/6352
+ inputPath: "//",
+ expect: filepath.FromSlash("./"),
+ },
+ {
+ inputPath: "/foo",
+ expect: "foo",
+ },
+ {
+ inputPath: "/foo/",
+ expect: filepath.FromSlash("foo/"),
+ },
+ {
+ inputPath: "/foo/bar",
+ expect: filepath.FromSlash("foo/bar"),
+ },
+ {
+ inputRoot: "/a",
+ inputPath: "/foo/bar",
+ expect: filepath.FromSlash("/a/foo/bar"),
+ },
+ {
+ inputPath: "/foo/../bar",
+ expect: "bar",
+ },
+ {
+ inputRoot: "/a/b",
+ inputPath: "/foo/../bar",
+ expect: filepath.FromSlash("/a/b/bar"),
+ },
+ {
+ inputRoot: "/a/b",
+ inputPath: "/..%2fbar",
+ expect: filepath.FromSlash("/a/b/bar"),
+ },
+ {
+ inputRoot: "/a/b",
+ inputPath: "/%2e%2e%2fbar",
+ expect: filepath.FromSlash("/a/b/bar"),
+ },
+ {
+ // inputPath fails the IsLocal test so only the root is returned,
+ // but with a trailing slash since one was included in inputPath
+ inputRoot: "/a/b",
+ inputPath: "/%2e%2e%2f%2e%2e%2f",
+ expect: filepath.FromSlash("/a/b/"),
+ },
+ {
+ inputRoot: "/a/b",
+ inputPath: "/foo%2fbar",
+ expect: filepath.FromSlash("/a/b/foo/bar"),
+ },
+ {
+ inputRoot: "/a/b",
+ inputPath: "/foo%252fbar",
+ expect: filepath.FromSlash("/a/b/foo%2fbar"),
+ },
+ {
+ inputRoot: "C:\\www",
+ inputPath: "/foo/bar",
+ expect: filepath.Join("C:\\www", "foo", "bar"),
+ },
+ {
+ inputRoot: "C:\\www",
+ inputPath: "/D:\\foo\\bar",
+ expect: filepath.Join("C:\\www", "D:\\foo\\bar"),
+ expectWindows: "C:\\www", // inputPath fails IsLocal on Windows
+ },
+ {
+ inputRoot: `C:\www`,
+ inputPath: `/..\windows\win.ini`,
+ expect: `C:\www/..\windows\win.ini`,
+ expectWindows: `C:\www`,
+ },
+ {
+ inputRoot: `C:\www`,
+ inputPath: `/..\..\..\..\..\..\..\..\..\..\windows\win.ini`,
+ expect: `C:\www/..\..\..\..\..\..\..\..\..\..\windows\win.ini`,
+ expectWindows: `C:\www`,
+ },
+ {
+ inputRoot: `C:\www`,
+ inputPath: `/..%5cwindows%5cwin.ini`,
+ expect: `C:\www/..\windows\win.ini`,
+ expectWindows: `C:\www`,
+ },
+ {
+ inputRoot: `C:\www`,
+ inputPath: `/..%5c..%5c..%5c..%5c..%5c..%5c..%5c..%5c..%5c..%5cwindows%5cwin.ini`,
+ expect: `C:\www/..\..\..\..\..\..\..\..\..\..\windows\win.ini`,
+ expectWindows: `C:\www`,
+ },
+ {
+ // https://github.com/golang/go/issues/56336#issuecomment-1416214885
+ inputRoot: "root",
+ inputPath: "/a/b/../../c",
+ expect: filepath.FromSlash("root/c"),
+ },
+ } {
+ // we don't *need* to use an actual parsed URL, but it
+ // adds some authenticity to the tests since real-world
+ // values will be coming in from URLs; thus, the test
+ // corpus can contain paths as encoded by clients, which
+ // more closely emulates the actual attack vector
+ u, err := url.Parse("http://test:9999" + tc.inputPath)
+ if err != nil {
+ t.Fatalf("Test %d: invalid URL: %v", i, err)
+ }
+ actual := SanitizedPathJoin(tc.inputRoot, u.Path)
+ if runtime.GOOS == "windows" && tc.expectWindows != "" {
+ tc.expect = tc.expectWindows
+ }
+ if actual != tc.expect {
+ t.Errorf("Test %d: SanitizedPathJoin('%s', '%s') => '%s' (expected '%s')",
+ i, tc.inputRoot, tc.inputPath, actual, tc.expect)
+ }
+ }
+}
+
+func TestCleanPath(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ mergeSlashes bool
+ expect string
+ }{
+ {
+ input: "/foo",
+ expect: "/foo",
+ },
+ {
+ input: "/foo/",
+ expect: "/foo/",
+ },
+ {
+ input: "//foo",
+ expect: "//foo",
+ },
+ {
+ input: "//foo",
+ mergeSlashes: true,
+ expect: "/foo",
+ },
+ {
+ input: "/foo//bar/",
+ mergeSlashes: true,
+ expect: "/foo/bar/",
+ },
+ {
+ input: "/foo/./.././bar",
+ expect: "/bar",
+ },
+ {
+ input: "/foo//./..//./bar",
+ expect: "/foo//bar",
+ },
+ {
+ input: "/foo///./..//./bar",
+ expect: "/foo///bar",
+ },
+ {
+ input: "/foo///./..//.",
+ expect: "/foo//",
+ },
+ {
+ input: "/foo//./bar",
+ expect: "/foo//bar",
+ },
+ } {
+ actual := CleanPath(tc.input, tc.mergeSlashes)
+ if actual != tc.expect {
+ t.Errorf("Test %d [input='%s' mergeSlashes=%t]: Got '%s', expected '%s'",
+ i, tc.input, tc.mergeSlashes, actual, tc.expect)
+ }
+ }
+}
diff --git a/modules/caddyhttp/celmatcher.go b/modules/caddyhttp/celmatcher.go
index 19c7bca3..3d118ea7 100644
--- a/modules/caddyhttp/celmatcher.go
+++ b/modules/caddyhttp/celmatcher.go
@@ -15,24 +15,31 @@
package caddyhttp
import (
+ "crypto/x509/pkix"
"encoding/json"
+ "errors"
"fmt"
"net/http"
"reflect"
"regexp"
"strings"
+ "time"
- "github.com/caddyserver/caddy/v2"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/gogo/protobuf/proto"
"github.com/google/cel-go/cel"
- "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/ext"
+ "github.com/google/cel-go/interpreter"
"github.com/google/cel-go/interpreter/functions"
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ "github.com/google/cel-go/parser"
+ "go.uber.org/zap"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
@@ -42,7 +49,8 @@ func init() {
// MatchExpression matches requests by evaluating a
// [CEL](https://github.com/google/cel-spec) expression.
// This enables complex logic to be expressed using a comfortable,
-// familiar syntax.
+// familiar syntax. Please refer to
+// [the standard definitions of CEL functions and operators](https://github.com/google/cel-spec/blob/master/doc/langdef.md#standard-definitions).
//
// This matcher's JSON interface is actually a string, not a struct.
// The generated docs are not correct because this type has custom
@@ -54,11 +62,18 @@ type MatchExpression struct {
// The CEL expression to evaluate. Any Caddy placeholders
// will be expanded and situated into proper CEL function
// calls before evaluating.
- Expr string
+ Expr string `json:"expr,omitempty"`
+
+ // Name is an optional name for this matcher.
+ // This is used to populate the name for regexp
+ // matchers that appear in the expression.
+ Name string `json:"name,omitempty"`
expandedExpr string
prg cel.Program
- ta ref.TypeAdapter
+ ta types.Adapter
+
+ log *zap.Logger
}
// CaddyModule returns the Caddy module information.
@@ -71,34 +86,95 @@ func (MatchExpression) CaddyModule() caddy.ModuleInfo {
// MarshalJSON marshals m's expression.
func (m MatchExpression) MarshalJSON() ([]byte, error) {
- return json.Marshal(m.Expr)
+ // if the name is empty, then we can marshal just the expression string
+ if m.Name == "" {
+ return json.Marshal(m.Expr)
+ }
+ // otherwise, we need to marshal the full object, using an
+ // anonymous struct to avoid infinite recursion
+ return json.Marshal(struct {
+ Expr string `json:"expr"`
+ Name string `json:"name"`
+ }{
+ Expr: m.Expr,
+ Name: m.Name,
+ })
}
// UnmarshalJSON unmarshals m's expression.
func (m *MatchExpression) UnmarshalJSON(data []byte) error {
- return json.Unmarshal(data, &m.Expr)
+ // if the data is a string, then it's just the expression
+ if data[0] == '"' {
+ return json.Unmarshal(data, &m.Expr)
+ }
+ // otherwise, it's a full object, so unmarshal it,
+	// using a temp map to avoid infinite recursion
+ var tmpJson map[string]any
+ err := json.Unmarshal(data, &tmpJson)
+ *m = MatchExpression{
+ Expr: tmpJson["expr"].(string),
+ Name: tmpJson["name"].(string),
+ }
+ return err
}
// Provision sets ups m.
-func (m *MatchExpression) Provision(_ caddy.Context) error {
+func (m *MatchExpression) Provision(ctx caddy.Context) error {
+ m.log = ctx.Logger()
+
// replace placeholders with a function call - this is just some
// light (and possibly naïve) syntactic sugar
m.expandedExpr = placeholderRegexp.ReplaceAllString(m.Expr, placeholderExpansion)
+ // as a second pass, we'll strip the escape character from an escaped
+ // placeholder, so that it can be used as an input to other CEL functions
+ m.expandedExpr = escapedPlaceholderRegexp.ReplaceAllString(m.expandedExpr, escapedPlaceholderExpansion)
+
// our type adapter expands CEL's standard type support
m.ta = celTypeAdapter{}
+ // initialize the CEL libraries from the Matcher implementations which
+ // have been configured to support CEL.
+ matcherLibProducers := []CELLibraryProducer{}
+ for _, info := range caddy.GetModules("http.matchers") {
+ p, ok := info.New().(CELLibraryProducer)
+ if ok {
+ matcherLibProducers = append(matcherLibProducers, p)
+ }
+ }
+
+ // add the matcher name to the context so that the matcher name
+ // can be used by regexp matchers being provisioned
+ ctx = ctx.WithValue(MatcherNameCtxKey, m.Name)
+
+ // Assemble the compilation and program options from the different library
+ // producers into a single cel.Library implementation.
+ matcherEnvOpts := []cel.EnvOption{}
+ matcherProgramOpts := []cel.ProgramOption{}
+ for _, producer := range matcherLibProducers {
+ l, err := producer.CELLibrary(ctx)
+ if err != nil {
+ return fmt.Errorf("error initializing CEL library for %T: %v", producer, err)
+ }
+ matcherEnvOpts = append(matcherEnvOpts, l.CompileOptions()...)
+ matcherProgramOpts = append(matcherProgramOpts, l.ProgramOptions()...)
+ }
+ matcherLib := cel.Lib(NewMatcherCELLibrary(matcherEnvOpts, matcherProgramOpts))
+
// create the CEL environment
env, err := cel.NewEnv(
- cel.Declarations(
- decls.NewIdent("request", httpRequestObjectType, nil),
- decls.NewFunction(placeholderFuncName,
- decls.NewOverload(placeholderFuncName+"_httpRequest_string",
- []*exprpb.Type{httpRequestObjectType, decls.String},
- decls.Any)),
- ),
+ cel.Function(CELPlaceholderFuncName, cel.SingletonBinaryBinding(m.caddyPlaceholderFunc), cel.Overload(
+ CELPlaceholderFuncName+"_httpRequest_string",
+ []*cel.Type{httpRequestObjectType, cel.StringType},
+ cel.AnyType,
+ )),
+ cel.Variable(CELRequestVarName, httpRequestObjectType),
cel.CustomTypeAdapter(m.ta),
ext.Strings(),
+ ext.Bindings(),
+ ext.Lists(),
+ ext.Math(),
+ matcherLib,
)
if err != nil {
return fmt.Errorf("setting up CEL environment: %v", err)
@@ -106,26 +182,18 @@ func (m *MatchExpression) Provision(_ caddy.Context) error {
// parse and type-check the expression
checked, issues := env.Compile(m.expandedExpr)
- if issues != nil && issues.Err() != nil {
+ if issues.Err() != nil {
return fmt.Errorf("compiling CEL program: %s", issues.Err())
}
// request matching is a boolean operation, so we don't really know
// what to do if the expression returns a non-boolean type
- if !proto.Equal(checked.ResultType(), decls.Bool) {
- return fmt.Errorf("CEL request matcher expects return type of bool, not %s", checked.ResultType())
+ if checked.OutputType() != cel.BoolType {
+ return fmt.Errorf("CEL request matcher expects return type of bool, not %s", checked.OutputType())
}
// compile the "program"
- m.prg, err = env.Program(checked,
- cel.Functions(
- &functions.Overload{
- Operator: placeholderFuncName,
- Binary: m.caddyPlaceholderFunc,
- },
- ),
- )
-
+ m.prg, err = env.Program(checked, cel.EvalOptions(cel.OptOptimize))
if err != nil {
return fmt.Errorf("compiling CEL program: %s", err)
}
@@ -134,21 +202,55 @@ func (m *MatchExpression) Provision(_ caddy.Context) error {
// Match returns true if r matches m.
func (m MatchExpression) Match(r *http.Request) bool {
- out, _, _ := m.prg.Eval(map[string]interface{}{
- "request": celHTTPRequest{r},
- })
- if outBool, ok := out.Value().(bool); ok {
- return outBool
+ match, err := m.MatchWithError(r)
+ if err != nil {
+ SetVar(r.Context(), MatcherErrorVarKey, err)
}
- return false
+ return match
+}
+// MatchWithError returns true if r matches m.
+func (m MatchExpression) MatchWithError(r *http.Request) (bool, error) {
+ celReq := celHTTPRequest{r}
+ out, _, err := m.prg.Eval(celReq)
+ if err != nil {
+ m.log.Error("evaluating expression", zap.Error(err))
+ return false, err
+ }
+ if outBool, ok := out.Value().(bool); ok {
+ return outBool, nil
+ }
+ return false, nil
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchExpression) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- m.Expr = strings.Join(d.RemainingArgs(), " ")
+ d.Next() // consume matcher name
+
+ // if there's multiple args, then we need to keep the raw
+ // tokens because the user may have used quotes within their
+ // CEL expression (e.g. strings) and we should retain that
+ if d.CountRemainingArgs() > 1 {
+ m.Expr = strings.Join(d.RemainingArgsRaw(), " ")
+ return nil
}
+
+ // there should at least be one arg
+ if !d.NextArg() {
+ return d.ArgErr()
+ }
+
+ // if there's only one token, then we can safely grab the
+ // cleaned token (no quotes) and use that as the expression
+ // because there's no valid CEL expression that is only a
+ // quoted string; commonly quotes are used in Caddyfile to
+ // define the expression
+ m.Expr = d.Val()
+
+ // use the named matcher's name, to fill regexp
+ // matchers names by default
+ m.Name = d.GetContextString(caddyfile.MatcherNameCtxKey)
+
return nil
}
@@ -158,14 +260,18 @@ func (m MatchExpression) caddyPlaceholderFunc(lhs, rhs ref.Val) ref.Val {
celReq, ok := lhs.(celHTTPRequest)
if !ok {
return types.NewErr(
- "invalid request of type '%v' to "+placeholderFuncName+"(request, placeholderVarName)",
- lhs.Type())
+ "invalid request of type '%v' to %s(request, placeholderVarName)",
+ lhs.Type(),
+ CELPlaceholderFuncName,
+ )
}
phStr, ok := rhs.(types.String)
if !ok {
return types.NewErr(
- "invalid placeholder variable name of type '%v' to "+placeholderFuncName+"(request, placeholderVarName)",
- rhs.Type())
+ "invalid placeholder variable name of type '%v' to %s(request, placeholderVarName)",
+ rhs.Type(),
+ CELPlaceholderFuncName,
+ )
}
repl := celReq.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
@@ -175,58 +281,530 @@ func (m MatchExpression) caddyPlaceholderFunc(lhs, rhs ref.Val) ref.Val {
}
// httpRequestCELType is the type representation of a native HTTP request.
-var httpRequestCELType = types.NewTypeValue("http.Request", traits.ReceiverType)
+var httpRequestCELType = cel.ObjectType("http.Request", traits.ReceiverType)
-// cellHTTPRequest wraps an http.Request with
-// methods to satisfy the ref.Val interface.
+// celHTTPRequest wraps an http.Request with ref.Val interface methods.
+//
+// This type also implements the interpreter.Activation interface which
+// drops allocation costs for CEL expression evaluations by roughly half.
type celHTTPRequest struct{ *http.Request }
-func (cr celHTTPRequest) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (cr celHTTPRequest) ResolveName(name string) (any, bool) {
+ if name == CELRequestVarName {
+ return cr, true
+ }
+ return nil, false
+}
+
+func (cr celHTTPRequest) Parent() interpreter.Activation {
+ return nil
+}
+
+func (cr celHTTPRequest) ConvertToNative(typeDesc reflect.Type) (any, error) {
return cr.Request, nil
}
+
func (celHTTPRequest) ConvertToType(typeVal ref.Type) ref.Val {
panic("not implemented")
}
+
func (cr celHTTPRequest) Equal(other ref.Val) ref.Val {
if o, ok := other.Value().(celHTTPRequest); ok {
return types.Bool(o.Request == cr.Request)
}
return types.ValOrErr(other, "%v is not comparable type", other)
}
-func (celHTTPRequest) Type() ref.Type { return httpRequestCELType }
-func (cr celHTTPRequest) Value() interface{} { return cr }
+func (celHTTPRequest) Type() ref.Type { return httpRequestCELType }
+func (cr celHTTPRequest) Value() any { return cr }
+
+var pkixNameCELType = cel.ObjectType("pkix.Name", traits.ReceiverType)
+
+// celPkixName wraps a pkix.Name with
+// methods to satisfy the ref.Val interface.
+type celPkixName struct{ *pkix.Name }
+
+func (pn celPkixName) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return pn.Name, nil
+}
+
+func (pn celPkixName) ConvertToType(typeVal ref.Type) ref.Val {
+ if typeVal.TypeName() == "string" {
+ return types.String(pn.Name.String())
+ }
+ panic("not implemented")
+}
+
+func (pn celPkixName) Equal(other ref.Val) ref.Val {
+ if o, ok := other.Value().(string); ok {
+ return types.Bool(pn.Name.String() == o)
+ }
+ return types.ValOrErr(other, "%v is not comparable type", other)
+}
+func (celPkixName) Type() ref.Type { return pkixNameCELType }
+func (pn celPkixName) Value() any { return pn }
// celTypeAdapter can adapt our custom types to a CEL value.
type celTypeAdapter struct{}
-func (celTypeAdapter) NativeToValue(value interface{}) ref.Val {
+func (celTypeAdapter) NativeToValue(value any) ref.Val {
switch v := value.(type) {
case celHTTPRequest:
return v
+ case pkix.Name:
+ return celPkixName{&v}
+ case time.Time:
+ return types.Timestamp{Time: v}
case error:
- types.NewErr(v.Error())
+ return types.WrapErr(v)
}
return types.DefaultTypeAdapter.NativeToValue(value)
}
+// CELLibraryProducer provides CEL libraries that expose a Matcher
+// implementation as a first class function within the CEL expression
+// matcher.
+type CELLibraryProducer interface {
+ // CELLibrary creates a cel.Library which makes it possible to use the
+ // target object within CEL expression matchers.
+ CELLibrary(caddy.Context) (cel.Library, error)
+}
+
+// CELMatcherImpl creates a new cel.Library based on the following pieces of
+// data:
+//
+// - macroName: the function name to be used within CEL. This will be a macro
+// and not a function proper.
+// - funcName: the function overload name generated by the CEL macro used to
+// represent the matcher.
+// - matcherDataTypes: the argument types to the macro.
+// - fac: a matcherFactory implementation which converts from CEL constant
+// values to a Matcher instance.
+//
+// Note, macro names and function names must not collide with other macros or
+// functions exposed within CEL expressions, or an error will be produced
+// during the expression matcher plan time.
+//
+// The existing CELMatcherImpl support methods are configured to support a
+// limited set of function signatures. For strong type validation you may need
+// to provide a custom macro which does a more detailed analysis of the CEL
+// literal provided to the macro as an argument.
+func CELMatcherImpl(macroName, funcName string, matcherDataTypes []*cel.Type, fac any) (cel.Library, error) {
+ requestType := cel.ObjectType("http.Request")
+ var macro parser.Macro
+ switch len(matcherDataTypes) {
+ case 1:
+ matcherDataType := matcherDataTypes[0]
+ switch matcherDataType.String() {
+ case "list(string)":
+ macro = parser.NewGlobalVarArgMacro(macroName, celMatcherStringListMacroExpander(funcName))
+ case cel.StringType.String():
+ macro = parser.NewGlobalMacro(macroName, 1, celMatcherStringMacroExpander(funcName))
+ case CELTypeJSON.String():
+ macro = parser.NewGlobalMacro(macroName, 1, celMatcherJSONMacroExpander(funcName))
+ default:
+ return nil, fmt.Errorf("unsupported matcher data type: %s", matcherDataType)
+ }
+ case 2:
+ if matcherDataTypes[0] == cel.StringType && matcherDataTypes[1] == cel.StringType {
+ macro = parser.NewGlobalMacro(macroName, 2, celMatcherStringListMacroExpander(funcName))
+ matcherDataTypes = []*cel.Type{cel.ListType(cel.StringType)}
+ } else {
+ return nil, fmt.Errorf("unsupported matcher data type: %s, %s", matcherDataTypes[0], matcherDataTypes[1])
+ }
+ case 3:
+ if matcherDataTypes[0] == cel.StringType && matcherDataTypes[1] == cel.StringType && matcherDataTypes[2] == cel.StringType {
+ macro = parser.NewGlobalMacro(macroName, 3, celMatcherStringListMacroExpander(funcName))
+ matcherDataTypes = []*cel.Type{cel.ListType(cel.StringType)}
+ } else {
+ return nil, fmt.Errorf("unsupported matcher data type: %s, %s, %s", matcherDataTypes[0], matcherDataTypes[1], matcherDataTypes[2])
+ }
+ }
+ envOptions := []cel.EnvOption{
+ cel.Macros(macro),
+ cel.Function(funcName,
+ cel.Overload(funcName, append([]*cel.Type{requestType}, matcherDataTypes...), cel.BoolType),
+ cel.SingletonBinaryBinding(CELMatcherRuntimeFunction(funcName, fac))),
+ }
+ programOptions := []cel.ProgramOption{
+ cel.CustomDecorator(CELMatcherDecorator(funcName, fac)),
+ }
+ return NewMatcherCELLibrary(envOptions, programOptions), nil
+}
+
+// CELMatcherFactory converts a constant CEL value into a RequestMatcher.
+// Deprecated: Use CELMatcherWithErrorFactory instead.
+type CELMatcherFactory = func(data ref.Val) (RequestMatcher, error)
+
+// CELMatcherWithErrorFactory converts a constant CEL value into a RequestMatcherWithError.
+type CELMatcherWithErrorFactory = func(data ref.Val) (RequestMatcherWithError, error)
+
+// matcherCELLibrary is a simplistic configurable cel.Library implementation.
+type matcherCELLibrary struct {
+ envOptions []cel.EnvOption
+ programOptions []cel.ProgramOption
+}
+
+// NewMatcherCELLibrary creates a matcherLibrary from option sets.
+func NewMatcherCELLibrary(envOptions []cel.EnvOption, programOptions []cel.ProgramOption) cel.Library {
+ return &matcherCELLibrary{
+ envOptions: envOptions,
+ programOptions: programOptions,
+ }
+}
+
+func (lib *matcherCELLibrary) CompileOptions() []cel.EnvOption {
+ return lib.envOptions
+}
+
+func (lib *matcherCELLibrary) ProgramOptions() []cel.ProgramOption {
+ return lib.programOptions
+}
+
+// CELMatcherDecorator matches a call overload generated by a CEL macro
+// that takes a single argument, and optimizes the implementation to precompile
+// the matcher and return a function that references the precompiled and
+// provisioned matcher.
+func CELMatcherDecorator(funcName string, fac any) interpreter.InterpretableDecorator {
+ return func(i interpreter.Interpretable) (interpreter.Interpretable, error) {
+ call, ok := i.(interpreter.InterpretableCall)
+ if !ok {
+ return i, nil
+ }
+ if call.OverloadID() != funcName {
+ return i, nil
+ }
+ callArgs := call.Args()
+ reqAttr, ok := callArgs[0].(interpreter.InterpretableAttribute)
+ if !ok {
+ return nil, errors.New("missing 'req' argument")
+ }
+ nsAttr, ok := reqAttr.Attr().(interpreter.NamespacedAttribute)
+ if !ok {
+ return nil, errors.New("missing 'req' argument")
+ }
+ varNames := nsAttr.CandidateVariableNames()
+ if len(varNames) != 1 || len(varNames) == 1 && varNames[0] != CELRequestVarName {
+ return nil, errors.New("missing 'req' argument")
+ }
+ matcherData, ok := callArgs[1].(interpreter.InterpretableConst)
+ if !ok {
+ // If the matcher arguments are not constant, then this means
+ // they contain a Caddy placeholder reference and the evaluation
+			// and matcher provisioning should be handled dynamically.
+ return i, nil
+ }
+
+ if factory, ok := fac.(CELMatcherWithErrorFactory); ok {
+ matcher, err := factory(matcherData.Value())
+ if err != nil {
+ return nil, err
+ }
+ return interpreter.NewCall(
+ i.ID(), funcName, funcName+"_opt",
+ []interpreter.Interpretable{reqAttr},
+ func(args ...ref.Val) ref.Val {
+ // The request value, guaranteed to be of type celHTTPRequest
+ celReq := args[0]
+ // If needed this call could be changed to convert the value
+ // to a *http.Request using CEL's ConvertToNative method.
+ httpReq := celReq.Value().(celHTTPRequest)
+ match, err := matcher.MatchWithError(httpReq.Request)
+ if err != nil {
+ return types.WrapErr(err)
+ }
+ return types.Bool(match)
+ },
+ ), nil
+ }
+
+ if factory, ok := fac.(CELMatcherFactory); ok {
+ matcher, err := factory(matcherData.Value())
+ if err != nil {
+ return nil, err
+ }
+ return interpreter.NewCall(
+ i.ID(), funcName, funcName+"_opt",
+ []interpreter.Interpretable{reqAttr},
+ func(args ...ref.Val) ref.Val {
+ // The request value, guaranteed to be of type celHTTPRequest
+ celReq := args[0]
+ // If needed this call could be changed to convert the value
+ // to a *http.Request using CEL's ConvertToNative method.
+ httpReq := celReq.Value().(celHTTPRequest)
+ if m, ok := matcher.(RequestMatcherWithError); ok {
+ match, err := m.MatchWithError(httpReq.Request)
+ if err != nil {
+ return types.WrapErr(err)
+ }
+ return types.Bool(match)
+ }
+ return types.Bool(matcher.Match(httpReq.Request))
+ },
+ ), nil
+ }
+
+ return nil, fmt.Errorf("invalid matcher factory, must be CELMatcherFactory or CELMatcherWithErrorFactory: %T", fac)
+ }
+}
+
+// CELMatcherRuntimeFunction creates a function binding for when the input to the matcher
+// is dynamically resolved rather than a set of static constant values.
+func CELMatcherRuntimeFunction(funcName string, fac any) functions.BinaryOp {
+ return func(celReq, matcherData ref.Val) ref.Val {
+ if factory, ok := fac.(CELMatcherWithErrorFactory); ok {
+ matcher, err := factory(matcherData)
+ if err != nil {
+ return types.WrapErr(err)
+ }
+ httpReq := celReq.Value().(celHTTPRequest)
+ match, err := matcher.MatchWithError(httpReq.Request)
+ if err != nil {
+ return types.WrapErr(err)
+ }
+ return types.Bool(match)
+ }
+ if factory, ok := fac.(CELMatcherFactory); ok {
+ matcher, err := factory(matcherData)
+ if err != nil {
+ return types.WrapErr(err)
+ }
+ httpReq := celReq.Value().(celHTTPRequest)
+ if m, ok := matcher.(RequestMatcherWithError); ok {
+ match, err := m.MatchWithError(httpReq.Request)
+ if err != nil {
+ return types.WrapErr(err)
+ }
+ return types.Bool(match)
+ }
+ return types.Bool(matcher.Match(httpReq.Request))
+ }
+ return types.NewErr("CELMatcherRuntimeFunction invalid matcher factory: %T", fac)
+ }
+}
+
+// celMatcherStringListMacroExpander validates that the macro is called
+// with a variable number of string arguments (at least one).
+//
+// The arguments are collected into a single list argument the following
+// function call returned: (request, [args])
+func celMatcherStringListMacroExpander(funcName string) cel.MacroFactory {
+ return func(eh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ matchArgs := []ast.Expr{}
+ if len(args) == 0 {
+ return nil, eh.NewError(0, "matcher requires at least one argument")
+ }
+ for _, arg := range args {
+ if isCELStringExpr(arg) {
+ matchArgs = append(matchArgs, arg)
+ } else {
+ return nil, eh.NewError(arg.ID(), "matcher arguments must be string constants")
+ }
+ }
+ return eh.NewCall(funcName, eh.NewIdent(CELRequestVarName), eh.NewList(matchArgs...)), nil
+ }
+}
+
+// celMatcherStringMacroExpander validates that the macro is called with a
+// single string argument.
+//
+// The following function call is returned: (request, arg)
+func celMatcherStringMacroExpander(funcName string) parser.MacroExpander {
+ return func(eh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ if len(args) != 1 {
+ return nil, eh.NewError(0, "matcher requires one argument")
+ }
+ if isCELStringExpr(args[0]) {
+ return eh.NewCall(funcName, eh.NewIdent(CELRequestVarName), args[0]), nil
+ }
+ return nil, eh.NewError(args[0].ID(), "matcher argument must be a string literal")
+ }
+}
+
+// celMatcherJSONMacroExpander validates that the macro is called with a
+// single map literal argument.
+//
+// The following function call is returned: (request, arg)
+func celMatcherJSONMacroExpander(funcName string) parser.MacroExpander {
+ return func(eh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ if len(args) != 1 {
+ return nil, eh.NewError(0, "matcher requires a map literal argument")
+ }
+ arg := args[0]
+
+ switch arg.Kind() {
+ case ast.StructKind:
+ return nil, eh.NewError(arg.ID(),
+ fmt.Sprintf("matcher input must be a map literal, not a %s", arg.AsStruct().TypeName()))
+ case ast.MapKind:
+ mapExpr := arg.AsMap()
+ for _, entry := range mapExpr.Entries() {
+ isStringPlaceholder := isCELStringExpr(entry.AsMapEntry().Key())
+ if !isStringPlaceholder {
+ return nil, eh.NewError(entry.ID(), "matcher map keys must be string literals")
+ }
+ isStringListPlaceholder := isCELStringExpr(entry.AsMapEntry().Value()) ||
+ isCELStringListLiteral(entry.AsMapEntry().Value())
+ if !isStringListPlaceholder {
+ return nil, eh.NewError(entry.AsMapEntry().Value().ID(), "matcher map values must be string or list literals")
+ }
+ }
+ return eh.NewCall(funcName, eh.NewIdent(CELRequestVarName), arg), nil
+ case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.SelectKind:
+ // appeasing the linter :)
+ }
+
+ return nil, eh.NewError(arg.ID(), "matcher requires a map literal argument")
+ }
+}
+
+// CELValueToMapStrList converts a CEL value to a map[string][]string
+//
+// Earlier validation stages should guarantee that the value has this type
+// at compile time, and that the runtime value type is map[string]any.
+// The reason for the slight difference in value type is that CEL allows for
+// map literals containing heterogeneous values, in this case string and list
+// of string.
+func CELValueToMapStrList(data ref.Val) (map[string][]string, error) {
+ mapStrType := reflect.TypeOf(map[string]any{})
+ mapStrRaw, err := data.ConvertToNative(mapStrType)
+ if err != nil {
+ return nil, err
+ }
+ mapStrIface := mapStrRaw.(map[string]any)
+ mapStrListStr := make(map[string][]string, len(mapStrIface))
+ for k, v := range mapStrIface {
+ switch val := v.(type) {
+ case string:
+ mapStrListStr[k] = []string{val}
+ case types.String:
+ mapStrListStr[k] = []string{string(val)}
+ case []string:
+ mapStrListStr[k] = val
+ case []ref.Val:
+ convVals := make([]string, len(val))
+ for i, elem := range val {
+ strVal, ok := elem.(types.String)
+ if !ok {
+ return nil, fmt.Errorf("unsupported value type in header match: %T", val)
+ }
+ convVals[i] = string(strVal)
+ }
+ mapStrListStr[k] = convVals
+ default:
+ return nil, fmt.Errorf("unsupported value type in header match: %T", val)
+ }
+ }
+ return mapStrListStr, nil
+}
+
+// isCELStringExpr indicates whether the expression is a supported string expression
+func isCELStringExpr(e ast.Expr) bool {
+ return isCELStringLiteral(e) || isCELCaddyPlaceholderCall(e) || isCELConcatCall(e)
+}
+
+// isCELStringLiteral returns whether the expression is a CEL string literal.
+func isCELStringLiteral(e ast.Expr) bool {
+ switch e.Kind() {
+ case ast.LiteralKind:
+ constant := e.AsLiteral()
+ switch constant.Type() {
+ case types.StringType:
+ return true
+ }
+ case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+ // appeasing the linter :)
+ }
+ return false
+}
+
+// isCELCaddyPlaceholderCall returns whether the expression is a caddy placeholder call.
+func isCELCaddyPlaceholderCall(e ast.Expr) bool {
+ switch e.Kind() {
+ case ast.CallKind:
+ call := e.AsCall()
+ if call.FunctionName() == CELPlaceholderFuncName {
+ return true
+ }
+ case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+ // appeasing the linter :)
+ }
+ return false
+}
+
+// isCELConcatCall tests whether the expression is a concat function (+) with string, placeholder, or
+// other concat call arguments.
+func isCELConcatCall(e ast.Expr) bool {
+ switch e.Kind() {
+ case ast.CallKind:
+ call := e.AsCall()
+ if call.Target().Kind() != ast.UnspecifiedExprKind {
+ return false
+ }
+ if call.FunctionName() != operators.Add {
+ return false
+ }
+ for _, arg := range call.Args() {
+ if !isCELStringExpr(arg) {
+ return false
+ }
+ }
+ return true
+ case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+ // appeasing the linter :)
+ }
+ return false
+}
+
+// isCELStringListLiteral returns whether the expression resolves to a list literal
+// containing only string constants or a placeholder call.
+func isCELStringListLiteral(e ast.Expr) bool {
+ switch e.Kind() {
+ case ast.ListKind:
+ list := e.AsList()
+ for _, elem := range list.Elements() {
+ if !isCELStringExpr(elem) {
+ return false
+ }
+ }
+ return true
+ case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+ // appeasing the linter :)
+ }
+ return false
+}
+
// Variables used for replacing Caddy placeholders in CEL
// expressions with a proper CEL function call; this is
// just for syntactic sugar.
var (
- placeholderRegexp = regexp.MustCompile(`{([\w.-]+)}`)
- placeholderExpansion = `caddyPlaceholder(request, "${1}")`
+ // The placeholder may not be preceded by a backslash; the expansion
+ // will include the preceding character if it is not a backslash.
+ placeholderRegexp = regexp.MustCompile(`([^\\]|^){([a-zA-Z][\w.-]+)}`)
+ placeholderExpansion = `${1}ph(req, "${2}")`
+
+ // As a second pass, we need to strip the escape character in front of
+ // the placeholder, if it exists.
+ escapedPlaceholderRegexp = regexp.MustCompile(`\\{([a-zA-Z][\w.-]+)}`)
+ escapedPlaceholderExpansion = `{${1}}`
+
+ CELTypeJSON = cel.MapType(cel.StringType, cel.DynType)
)
-var httpRequestObjectType = decls.NewObjectType("http.Request")
+var httpRequestObjectType = cel.ObjectType("http.Request")
// The name of the CEL function which accesses Replacer values.
-const placeholderFuncName = "caddyPlaceholder"
+const CELPlaceholderFuncName = "ph"
+
+// The name of the CEL request variable.
+const CELRequestVarName = "req"
+
+const MatcherNameCtxKey = "matcher_name"
// Interface guards
var (
- _ caddy.Provisioner = (*MatchExpression)(nil)
- _ RequestMatcher = (*MatchExpression)(nil)
- _ caddyfile.Unmarshaler = (*MatchExpression)(nil)
- _ json.Marshaler = (*MatchExpression)(nil)
- _ json.Unmarshaler = (*MatchExpression)(nil)
+ _ caddy.Provisioner = (*MatchExpression)(nil)
+ _ RequestMatcherWithError = (*MatchExpression)(nil)
+ _ caddyfile.Unmarshaler = (*MatchExpression)(nil)
+ _ json.Marshaler = (*MatchExpression)(nil)
+ _ json.Unmarshaler = (*MatchExpression)(nil)
)
diff --git a/modules/caddyhttp/celmatcher_test.go b/modules/caddyhttp/celmatcher_test.go
new file mode 100644
index 00000000..a7e91529
--- /dev/null
+++ b/modules/caddyhttp/celmatcher_test.go
@@ -0,0 +1,575 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+var (
+ clientCert = []byte(`-----BEGIN CERTIFICATE-----
+MIIB9jCCAV+gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1DYWRk
+eSBUZXN0IENBMB4XDTE4MDcyNDIxMzUwNVoXDTI4MDcyMTIxMzUwNVowHTEbMBkG
+A1UEAwwSY2xpZW50LmxvY2FsZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
+iQKBgQDFDEpzF0ew68teT3xDzcUxVFaTII+jXH1ftHXxxP4BEYBU4q90qzeKFneF
+z83I0nC0WAQ45ZwHfhLMYHFzHPdxr6+jkvKPASf0J2v2HDJuTM1bHBbik5Ls5eq+
+fVZDP8o/VHKSBKxNs8Goc2NTsr5b07QTIpkRStQK+RJALk4x9QIDAQABo0swSTAJ
+BgNVHRMEAjAAMAsGA1UdDwQEAwIHgDAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A
+AAEwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADgYEANSjz2Sk+
+eqp31wM9il1n+guTNyxJd+FzVAH+hCZE5K+tCgVDdVFUlDEHHbS/wqb2PSIoouLV
+3Q9fgDkiUod+uIK0IynzIKvw+Cjg+3nx6NQ0IM0zo8c7v398RzB4apbXKZyeeqUH
+9fNwfEi+OoXR6s+upSKobCmLGLGi9Na5s5g=
+-----END CERTIFICATE-----`)
+
+ matcherTests = []struct {
+ name string
+ expression *MatchExpression
+ urlTarget string
+ httpMethod string
+ httpHeader *http.Header
+ wantErr bool
+ wantResult bool
+ clientCertificate []byte
+ }{
+ {
+ name: "boolean matches succeed for placeholder http.request.tls.client.subject",
+ expression: &MatchExpression{
+ Expr: "{http.request.tls.client.subject} == 'CN=client.localdomain'",
+ },
+ clientCertificate: clientCert,
+ urlTarget: "https://example.com/foo",
+ wantResult: true,
+ },
+ {
+ name: "header matches (MatchHeader)",
+ expression: &MatchExpression{
+ Expr: `header({'Field': 'foo'})`,
+ },
+ urlTarget: "https://example.com/foo",
+ httpHeader: &http.Header{"Field": []string{"foo", "bar"}},
+ wantResult: true,
+ },
+ {
+ name: "header matches an escaped placeholder value (MatchHeader)",
+ expression: &MatchExpression{
+ Expr: `header({'Field': '\\\{foobar}'})`,
+ },
+ urlTarget: "https://example.com/foo",
+ httpHeader: &http.Header{"Field": []string{"{foobar}"}},
+ wantResult: true,
+ },
+ {
+ name: "header matches an placeholder replaced during the header matcher (MatchHeader)",
+ expression: &MatchExpression{
+ Expr: `header({'Field': '\{http.request.uri.path}'})`,
+ },
+ urlTarget: "https://example.com/foo",
+ httpHeader: &http.Header{"Field": []string{"/foo"}},
+ wantResult: true,
+ },
+ {
+ name: "header error, invalid escape sequence (MatchHeader)",
+ expression: &MatchExpression{
+ Expr: `header({'Field': '\\{foobar}'})`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "header error, needs to be JSON syntax with field as key (MatchHeader)",
+ expression: &MatchExpression{
+ Expr: `header('foo')`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "header_regexp matches (MatchHeaderRE)",
+ expression: &MatchExpression{
+ Expr: `header_regexp('Field', 'fo{2}')`,
+ },
+ urlTarget: "https://example.com/foo",
+ httpHeader: &http.Header{"Field": []string{"foo", "bar"}},
+ wantResult: true,
+ },
+ {
+ name: "header_regexp matches with name (MatchHeaderRE)",
+ expression: &MatchExpression{
+ Expr: `header_regexp('foo', 'Field', 'fo{2}')`,
+ },
+ urlTarget: "https://example.com/foo",
+ httpHeader: &http.Header{"Field": []string{"foo", "bar"}},
+ wantResult: true,
+ },
+ {
+ name: "header_regexp does not match (MatchHeaderRE)",
+ expression: &MatchExpression{
+ Expr: `header_regexp('foo', 'Nope', 'fo{2}')`,
+ },
+ urlTarget: "https://example.com/foo",
+ httpHeader: &http.Header{"Field": []string{"foo", "bar"}},
+ wantResult: false,
+ },
+ {
+ name: "header_regexp error (MatchHeaderRE)",
+ expression: &MatchExpression{
+ Expr: `header_regexp('foo')`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "host matches localhost (MatchHost)",
+ expression: &MatchExpression{
+ Expr: `host('localhost')`,
+ },
+ urlTarget: "http://localhost",
+ wantResult: true,
+ },
+ {
+ name: "host matches (MatchHost)",
+ expression: &MatchExpression{
+ Expr: `host('*.example.com')`,
+ },
+ urlTarget: "https://foo.example.com",
+ wantResult: true,
+ },
+ {
+ name: "host does not match (MatchHost)",
+ expression: &MatchExpression{
+ Expr: `host('example.net', '*.example.com')`,
+ },
+ urlTarget: "https://foo.example.org",
+ wantResult: false,
+ },
+ {
+ name: "host error (MatchHost)",
+ expression: &MatchExpression{
+ Expr: `host(80)`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "method does not match (MatchMethod)",
+ expression: &MatchExpression{
+ Expr: `method('PUT')`,
+ },
+ urlTarget: "https://foo.example.com",
+ httpMethod: "GET",
+ wantResult: false,
+ },
+ {
+ name: "method matches (MatchMethod)",
+ expression: &MatchExpression{
+ Expr: `method('DELETE', 'PUT', 'POST')`,
+ },
+ urlTarget: "https://foo.example.com",
+ httpMethod: "PUT",
+ wantResult: true,
+ },
+ {
+ name: "method error not enough arguments (MatchMethod)",
+ expression: &MatchExpression{
+ Expr: `method()`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "path matches substring (MatchPath)",
+ expression: &MatchExpression{
+ Expr: `path('*substring*')`,
+ },
+ urlTarget: "https://example.com/foo/substring/bar.txt",
+ wantResult: true,
+ },
+ {
+ name: "path does not match (MatchPath)",
+ expression: &MatchExpression{
+ Expr: `path('/foo')`,
+ },
+ urlTarget: "https://example.com/foo/bar",
+ wantResult: false,
+ },
+ {
+ name: "path matches end url fragment (MatchPath)",
+ expression: &MatchExpression{
+ Expr: `path('/foo')`,
+ },
+ urlTarget: "https://example.com/FOO",
+ wantResult: true,
+ },
+ {
+ name: "path matches end fragment with substring prefix (MatchPath)",
+ expression: &MatchExpression{
+ Expr: `path('/foo*')`,
+ },
+ urlTarget: "https://example.com/FOOOOO",
+ wantResult: true,
+ },
+ {
+ name: "path matches one of multiple (MatchPath)",
+ expression: &MatchExpression{
+ Expr: `path('/foo', '/foo/*', '/bar', '/bar/*', '/baz', '/baz*')`,
+ },
+ urlTarget: "https://example.com/foo",
+ wantResult: true,
+ },
+ {
+ name: "path_regexp with empty regex matches empty path (MatchPathRE)",
+ expression: &MatchExpression{
+ Expr: `path_regexp('')`,
+ },
+ urlTarget: "https://example.com/",
+ wantResult: true,
+ },
+ {
+ name: "path_regexp with slash regex matches empty path (MatchPathRE)",
+ expression: &MatchExpression{
+ Expr: `path_regexp('/')`,
+ },
+ urlTarget: "https://example.com/",
+ wantResult: true,
+ },
+ {
+ name: "path_regexp matches end url fragment (MatchPathRE)",
+ expression: &MatchExpression{
+ Expr: `path_regexp('^/foo')`,
+ },
+ urlTarget: "https://example.com/foo/",
+ wantResult: true,
+ },
+ {
+ name: "path_regexp does not match fragment at end (MatchPathRE)",
+ expression: &MatchExpression{
+ Expr: `path_regexp('bar_at_start', '^/bar')`,
+ },
+ urlTarget: "https://example.com/foo/bar",
+ wantResult: false,
+ },
+ {
+ name: "protocol matches (MatchProtocol)",
+ expression: &MatchExpression{
+ Expr: `protocol('HTTPs')`,
+ },
+ urlTarget: "https://example.com",
+ wantResult: true,
+ },
+ {
+ name: "protocol does not match (MatchProtocol)",
+ expression: &MatchExpression{
+ Expr: `protocol('grpc')`,
+ },
+ urlTarget: "https://example.com",
+ wantResult: false,
+ },
+ {
+ name: "protocol invocation error no args (MatchProtocol)",
+ expression: &MatchExpression{
+ Expr: `protocol()`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "protocol invocation error too many args (MatchProtocol)",
+ expression: &MatchExpression{
+ Expr: `protocol('grpc', 'https')`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "protocol invocation error wrong arg type (MatchProtocol)",
+ expression: &MatchExpression{
+ Expr: `protocol(true)`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "query does not match against a specific value (MatchQuery)",
+ expression: &MatchExpression{
+ Expr: `query({"debug": "1"})`,
+ },
+ urlTarget: "https://example.com/foo",
+ wantResult: false,
+ },
+ {
+ name: "query matches against a specific value (MatchQuery)",
+ expression: &MatchExpression{
+ Expr: `query({"debug": "1"})`,
+ },
+ urlTarget: "https://example.com/foo/?debug=1",
+ wantResult: true,
+ },
+ {
+ name: "query matches against multiple values (MatchQuery)",
+ expression: &MatchExpression{
+ Expr: `query({"debug": ["0", "1", {http.request.uri.query.debug}+"1"]})`,
+ },
+ urlTarget: "https://example.com/foo/?debug=1",
+ wantResult: true,
+ },
+ {
+ name: "query matches against a wildcard (MatchQuery)",
+ expression: &MatchExpression{
+ Expr: `query({"debug": ["*"]})`,
+ },
+ urlTarget: "https://example.com/foo/?debug=something",
+ wantResult: true,
+ },
+ {
+ name: "query matches against a placeholder value (MatchQuery)",
+ expression: &MatchExpression{
+ Expr: `query({"debug": {http.request.uri.query.debug}})`,
+ },
+ urlTarget: "https://example.com/foo/?debug=1",
+ wantResult: true,
+ },
+ {
+ name: "query error bad map key type (MatchQuery)",
+ expression: &MatchExpression{
+ Expr: `query({1: "1"})`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "query error typed struct instead of map (MatchQuery)",
+ expression: &MatchExpression{
+ Expr: `query(Message{field: "1"})`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "query error bad map value type (MatchQuery)",
+ expression: &MatchExpression{
+ Expr: `query({"debug": 1})`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "query error no args (MatchQuery)",
+ expression: &MatchExpression{
+ Expr: `query()`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "remote_ip error no args (MatchRemoteIP)",
+ expression: &MatchExpression{
+ Expr: `remote_ip()`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "remote_ip single IP match (MatchRemoteIP)",
+ expression: &MatchExpression{
+ Expr: `remote_ip('192.0.2.1')`,
+ },
+ urlTarget: "https://example.com/foo",
+ wantResult: true,
+ },
+ {
+ name: "vars value (VarsMatcher)",
+ expression: &MatchExpression{
+ Expr: `vars({'foo': 'bar'})`,
+ },
+ urlTarget: "https://example.com/foo",
+ wantResult: true,
+ },
+ {
+ name: "vars matches placeholder, needs escape (VarsMatcher)",
+ expression: &MatchExpression{
+ Expr: `vars({'\{http.request.uri.path}': '/foo'})`,
+ },
+ urlTarget: "https://example.com/foo",
+ wantResult: true,
+ },
+ {
+ name: "vars error wrong syntax (VarsMatcher)",
+ expression: &MatchExpression{
+ Expr: `vars('foo', 'bar')`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "vars error no args (VarsMatcher)",
+ expression: &MatchExpression{
+ Expr: `vars()`,
+ },
+ wantErr: true,
+ },
+ {
+ name: "vars_regexp value (MatchVarsRE)",
+ expression: &MatchExpression{
+ Expr: `vars_regexp('foo', 'ba?r')`,
+ },
+ urlTarget: "https://example.com/foo",
+ wantResult: true,
+ },
+ {
+ name: "vars_regexp value with name (MatchVarsRE)",
+ expression: &MatchExpression{
+ Expr: `vars_regexp('name', 'foo', 'ba?r')`,
+ },
+ urlTarget: "https://example.com/foo",
+ wantResult: true,
+ },
+ {
+ name: "vars_regexp matches placeholder, needs escape (MatchVarsRE)",
+ expression: &MatchExpression{
+ Expr: `vars_regexp('\{http.request.uri.path}', '/fo?o')`,
+ },
+ urlTarget: "https://example.com/foo",
+ wantResult: true,
+ },
+ {
+ name: "vars_regexp error no args (MatchVarsRE)",
+ expression: &MatchExpression{
+ Expr: `vars_regexp()`,
+ },
+ wantErr: true,
+ },
+ }
+)
+
+func TestMatchExpressionMatch(t *testing.T) {
+ for _, tst := range matcherTests {
+ tc := tst
+ t.Run(tc.name, func(t *testing.T) {
+ caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
+ defer cancel()
+ err := tc.expression.Provision(caddyCtx)
+ if err != nil {
+ if !tc.wantErr {
+ t.Errorf("MatchExpression.Provision() error = %v, wantErr %v", err, tc.wantErr)
+ }
+ return
+ }
+
+ req := httptest.NewRequest(tc.httpMethod, tc.urlTarget, nil)
+ if tc.httpHeader != nil {
+ req.Header = *tc.httpHeader
+ }
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ ctx = context.WithValue(ctx, VarsCtxKey, map[string]any{
+ "foo": "bar",
+ })
+ req = req.WithContext(ctx)
+ addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
+
+ if tc.clientCertificate != nil {
+ block, _ := pem.Decode(clientCert)
+ if block == nil {
+ t.Fatalf("failed to decode PEM certificate")
+ }
+
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ t.Fatalf("failed to decode PEM certificate: %v", err)
+ }
+
+ req.TLS = &tls.ConnectionState{
+ PeerCertificates: []*x509.Certificate{cert},
+ }
+ }
+
+ matches, err := tc.expression.MatchWithError(req)
+ if err != nil {
+ t.Errorf("MatchExpression.Match() error = %v", err)
+ }
+ if matches != tc.wantResult {
+				t.Errorf("MatchExpression.Match() expected to return '%t' for expression: '%s'", tc.wantResult, tc.expression.Expr)
+ }
+ })
+ }
+}
+
+func BenchmarkMatchExpressionMatch(b *testing.B) {
+ for _, tst := range matcherTests {
+ tc := tst
+ if tc.wantErr {
+ continue
+ }
+ b.Run(tst.name, func(b *testing.B) {
+ tc.expression.Provision(caddy.Context{})
+ req := httptest.NewRequest(tc.httpMethod, tc.urlTarget, nil)
+ if tc.httpHeader != nil {
+ req.Header = *tc.httpHeader
+ }
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ ctx = context.WithValue(ctx, VarsCtxKey, map[string]any{
+ "foo": "bar",
+ })
+ req = req.WithContext(ctx)
+ addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
+ if tc.clientCertificate != nil {
+ block, _ := pem.Decode(clientCert)
+ if block == nil {
+ b.Fatalf("failed to decode PEM certificate")
+ }
+
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ b.Fatalf("failed to decode PEM certificate: %v", err)
+ }
+
+ req.TLS = &tls.ConnectionState{
+ PeerCertificates: []*x509.Certificate{cert},
+ }
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ tc.expression.MatchWithError(req)
+ }
+ })
+ }
+}
+
+func TestMatchExpressionProvision(t *testing.T) {
+ tests := []struct {
+ name string
+ expression *MatchExpression
+ wantErr bool
+ }{
+ {
+ name: "boolean matches succeed",
+ expression: &MatchExpression{
+ Expr: "{http.request.uri.query} != ''",
+ },
+ wantErr: false,
+ },
+ {
+ name: "reject expressions with non-boolean results",
+ expression: &MatchExpression{
+ Expr: "{http.request.uri.query}",
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
+ defer cancel()
+ if err := tt.expression.Provision(ctx); (err != nil) != tt.wantErr {
+ t.Errorf("MatchExpression.Provision() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/modules/caddyhttp/encode/brotli/brotli_precompressed.go b/modules/caddyhttp/encode/brotli/brotli_precompressed.go
new file mode 100644
index 00000000..fbd04418
--- /dev/null
+++ b/modules/caddyhttp/encode/brotli/brotli_precompressed.go
@@ -0,0 +1,31 @@
+package caddybrotli
+
+import (
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
+)
+
+func init() {
+ caddy.RegisterModule(BrotliPrecompressed{})
+}
+
+// BrotliPrecompressed provides the file extension for files precompressed with brotli encoding.
+type BrotliPrecompressed struct{}
+
+// CaddyModule returns the Caddy module information.
+func (BrotliPrecompressed) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.precompressed.br",
+ New: func() caddy.Module { return new(BrotliPrecompressed) },
+ }
+}
+
+// AcceptEncoding returns the name of the encoding as
+// used in the Accept-Encoding request headers.
+func (BrotliPrecompressed) AcceptEncoding() string { return "br" }
+
+// Suffix returns the filename suffix of precompressed files.
+func (BrotliPrecompressed) Suffix() string { return ".br" }
+
+// Interface guards
+var _ encode.Precompressed = (*BrotliPrecompressed)(nil)
diff --git a/modules/caddyhttp/encode/caddyfile.go b/modules/caddyhttp/encode/caddyfile.go
index 9d9646c0..8b865708 100644
--- a/modules/caddyhttp/encode/caddyfile.go
+++ b/modules/caddyhttp/encode/caddyfile.go
@@ -15,7 +15,7 @@
package encode
import (
- "fmt"
+ "strconv"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
@@ -39,54 +39,87 @@ func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error)
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
-// encode [] {
-// gzip []
-// zstd
-// }
+// encode [] {
+// gzip []
+// zstd
+// minimum_length
+// # response matcher block
+// match {
+// status
+// header []
+// }
+// # or response matcher single line syntax
+// match [header []] | [status ]
+// }
//
// Specifying the formats on the first line will use those formats' defaults.
func (enc *Encode) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- for _, arg := range d.RemainingArgs() {
- mod, err := caddy.GetModule("http.encoders." + arg)
- if err != nil {
- return fmt.Errorf("finding encoder module '%s': %v", mod, err)
- }
- encoding, ok := mod.New().(Encoding)
- if !ok {
- return fmt.Errorf("module %s is not an HTTP encoding", mod)
- }
- if enc.EncodingsRaw == nil {
- enc.EncodingsRaw = make(caddy.ModuleMap)
- }
- enc.EncodingsRaw[arg] = caddyconfig.JSON(encoding, nil)
- }
+ d.Next() // consume directive name
- for d.NextBlock(0) {
- name := d.Val()
- mod, err := caddy.GetModule("http.encoders." + name)
+ prefer := []string{}
+ remainingArgs := d.RemainingArgs()
+
+ responseMatchers := make(map[string]caddyhttp.ResponseMatcher)
+ for d.NextBlock(0) {
+ switch d.Val() {
+ case "minimum_length":
+ if !d.NextArg() {
+ return d.ArgErr()
+ }
+ minLength, err := strconv.Atoi(d.Val())
if err != nil {
- return fmt.Errorf("getting encoder module '%s': %v", name, err)
+ return err
}
- unm, ok := mod.New().(caddyfile.Unmarshaler)
- if !ok {
- return fmt.Errorf("encoder module '%s' is not a Caddyfile unmarshaler", mod)
+ enc.MinLength = minLength
+ case "match":
+ err := caddyhttp.ParseNamedResponseMatcher(d.NewFromNextSegment(), responseMatchers)
+ if err != nil {
+ return err
}
- err = unm.UnmarshalCaddyfile(d.NewFromNextSegment())
+ matcher := responseMatchers["match"]
+ enc.Matcher = &matcher
+ default:
+ name := d.Val()
+ modID := "http.encoders." + name
+ unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return err
}
encoding, ok := unm.(Encoding)
if !ok {
- return fmt.Errorf("module %s is not an HTTP encoding", mod)
+ return d.Errf("module %s is not an HTTP encoding; is %T", modID, unm)
}
if enc.EncodingsRaw == nil {
enc.EncodingsRaw = make(caddy.ModuleMap)
}
enc.EncodingsRaw[name] = caddyconfig.JSON(encoding, nil)
+ prefer = append(prefer, name)
}
}
+ if len(prefer) == 0 && len(remainingArgs) == 0 {
+ remainingArgs = []string{"zstd", "gzip"}
+ }
+
+ for _, arg := range remainingArgs {
+ mod, err := caddy.GetModule("http.encoders." + arg)
+ if err != nil {
+ return d.Errf("finding encoder module '%s': %v", mod, err)
+ }
+ encoding, ok := mod.New().(Encoding)
+ if !ok {
+ return d.Errf("module %s is not an HTTP encoding", mod)
+ }
+ if enc.EncodingsRaw == nil {
+ enc.EncodingsRaw = make(caddy.ModuleMap)
+ }
+ enc.EncodingsRaw[arg] = caddyconfig.JSON(encoding, nil)
+ prefer = append(prefer, arg)
+ }
+
+ // use the order in which the encoders were defined.
+ enc.Prefer = prefer
+
return nil
}
diff --git a/modules/caddyhttp/encode/encode.go b/modules/caddyhttp/encode/encode.go
index 52205aa2..597772cc 100644
--- a/modules/caddyhttp/encode/encode.go
+++ b/modules/caddyhttp/encode/encode.go
@@ -20,10 +20,11 @@
package encode
import (
- "bytes"
"fmt"
"io"
+ "math"
"net/http"
+ "slices"
"sort"
"strconv"
"strings"
@@ -43,12 +44,16 @@ type Encode struct {
// will be chosen based on the client's Accept-Encoding header.
EncodingsRaw caddy.ModuleMap `json:"encodings,omitempty" caddy:"namespace=http.encoders"`
- // If the client has no strong preference, choose this encoding. TODO: Not yet implemented
- // Prefer []string `json:"prefer,omitempty"`
+ // If the client has no strong preference, choose these encodings in order.
+ Prefer []string `json:"prefer,omitempty"`
// Only encode responses that are at least this many bytes long.
MinLength int `json:"minimum_length,omitempty"`
+	// Only encode responses that match against this ResponseMatcher.
+ // The default is a collection of text-based Content-Type headers.
+ Matcher *caddyhttp.ResponseMatcher `json:"match,omitempty"`
+
writerPools map[string]*sync.Pool // TODO: these pools do not get reused through config reloads...
}
@@ -66,7 +71,7 @@ func (enc *Encode) Provision(ctx caddy.Context) error {
if err != nil {
return fmt.Errorf("loading encoder modules: %v", err)
}
- for modName, modIface := range mods.(map[string]interface{}) {
+ for modName, modIface := range mods.(map[string]any) {
err = enc.addEncoding(modIface.(Encoding))
if err != nil {
return fmt.Errorf("adding encoding %s: %v", modName, err)
@@ -75,17 +80,101 @@ func (enc *Encode) Provision(ctx caddy.Context) error {
if enc.MinLength == 0 {
enc.MinLength = defaultMinLength
}
+
+ if enc.Matcher == nil {
+ // common text-based content types
+ // list based on https://developers.cloudflare.com/speed/optimization/content/brotli/content-compression/#compression-between-cloudflare-and-website-visitors
+ enc.Matcher = &caddyhttp.ResponseMatcher{
+ Headers: http.Header{
+ "Content-Type": []string{
+ "application/atom+xml*",
+ "application/eot*",
+ "application/font*",
+ "application/geo+json*",
+ "application/graphql+json*",
+ "application/javascript*",
+ "application/json*",
+ "application/ld+json*",
+ "application/manifest+json*",
+ "application/opentype*",
+ "application/otf*",
+ "application/rss+xml*",
+ "application/truetype*",
+ "application/ttf*",
+ "application/vnd.api+json*",
+ "application/vnd.ms-fontobject*",
+ "application/wasm*",
+ "application/x-httpd-cgi*",
+ "application/x-javascript*",
+ "application/x-opentype*",
+ "application/x-otf*",
+ "application/x-perl*",
+ "application/x-protobuf*",
+ "application/x-ttf*",
+ "application/xhtml+xml*",
+ "application/xml*",
+ "font/ttf*",
+ "font/otf*",
+ "image/svg+xml*",
+ "image/vnd.microsoft.icon*",
+ "image/x-icon*",
+ "multipart/bag*",
+ "multipart/mixed*",
+ "text/*",
+ },
+ },
+ }
+ }
+
return nil
}
-func (enc *Encode) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
- for _, encName := range acceptedEncodings(r) {
+// Validate ensures that enc's configuration is valid.
+func (enc *Encode) Validate() error {
+ check := make(map[string]bool)
+ for _, encName := range enc.Prefer {
if _, ok := enc.writerPools[encName]; !ok {
- continue // encoding not offered
+ return fmt.Errorf("encoding %s not enabled", encName)
+ }
+
+ if _, ok := check[encName]; ok {
+ return fmt.Errorf("encoding %s is duplicated in prefer", encName)
+ }
+ check[encName] = true
+ }
+
+ return nil
+}
+
+func isEncodeAllowed(h http.Header) bool {
+ return !strings.Contains(h.Get("Cache-Control"), "no-transform")
+}
+
+func (enc *Encode) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
+ if isEncodeAllowed(r.Header) {
+ for _, encName := range AcceptedEncodings(r, enc.Prefer) {
+ if _, ok := enc.writerPools[encName]; !ok {
+ continue // encoding not offered
+ }
+ w = enc.openResponseWriter(encName, w, r.Method == http.MethodConnect)
+ defer w.(*responseWriter).Close()
+
+ // to comply with RFC 9110 section 8.8.3(.3), we modify the Etag when encoding
+ // by appending a hyphen and the encoder name; the problem is, the client will
+ // send back that Etag in a If-None-Match header, but upstream handlers that set
+ // the Etag in the first place don't know that we appended to their Etag! so here
+ // we have to strip our addition so the upstream handlers can still honor client
+ // caches without knowing about our changes...
+ if etag := r.Header.Get("If-None-Match"); etag != "" && !strings.HasPrefix(etag, "W/") {
+ ourSuffix := "-" + encName + `"`
+ if strings.HasSuffix(etag, ourSuffix) {
+ etag = strings.TrimSuffix(etag, ourSuffix) + `"`
+ r.Header.Set("If-None-Match", etag)
+ }
+ }
+
+ break
}
- w = enc.openResponseWriter(encName, w)
- defer w.(*responseWriter).Close()
- break
}
return next.ServeHTTP(w, r)
}
@@ -102,7 +191,7 @@ func (enc *Encode) addEncoding(e Encoding) error {
enc.writerPools = make(map[string]*sync.Pool)
}
enc.writerPools[ae] = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
return e.NewEncoder()
},
}
@@ -112,22 +201,22 @@ func (enc *Encode) addEncoding(e Encoding) error {
// openResponseWriter creates a new response writer that may (or may not)
// encode the response with encodingName. The returned response writer MUST
// be closed after the handler completes.
-func (enc *Encode) openResponseWriter(encodingName string, w http.ResponseWriter) *responseWriter {
+func (enc *Encode) openResponseWriter(encodingName string, w http.ResponseWriter, isConnect bool) *responseWriter {
var rw responseWriter
- return enc.initResponseWriter(&rw, encodingName, w)
+ return enc.initResponseWriter(&rw, encodingName, w, isConnect)
}
// initResponseWriter initializes the responseWriter instance
// allocated in openResponseWriter, enabling mid-stack inlining.
-func (enc *Encode) initResponseWriter(rw *responseWriter, encodingName string, wrappedRW http.ResponseWriter) *responseWriter {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
-
- // The allocation of ResponseWriterWrapper might be optimized as well.
- rw.ResponseWriterWrapper = &caddyhttp.ResponseWriterWrapper{ResponseWriter: wrappedRW}
+func (enc *Encode) initResponseWriter(rw *responseWriter, encodingName string, wrappedRW http.ResponseWriter, isConnect bool) *responseWriter {
+ if rww, ok := wrappedRW.(*caddyhttp.ResponseWriterWrapper); ok {
+ rw.ResponseWriter = rww
+ } else {
+ rw.ResponseWriter = &caddyhttp.ResponseWriterWrapper{ResponseWriter: wrappedRW}
+ }
rw.encodingName = encodingName
- rw.buf = buf
rw.config = enc
+ rw.isConnect = isConnect
return rw
}
@@ -136,39 +225,106 @@ func (enc *Encode) initResponseWriter(rw *responseWriter, encodingName string, w
// using the encoding represented by encodingName and
// configured by config.
type responseWriter struct {
- *caddyhttp.ResponseWriterWrapper
+ http.ResponseWriter
encodingName string
w Encoder
- buf *bytes.Buffer
config *Encode
statusCode int
+ wroteHeader bool
+ isConnect bool
}
// WriteHeader stores the status to write when the time comes
// to actually write the header.
func (rw *responseWriter) WriteHeader(status int) {
rw.statusCode = status
+
+ // See #5849 and RFC 9110 section 15.4.5 (https://www.rfc-editor.org/rfc/rfc9110.html#section-15.4.5) - 304
+ // Not Modified must have certain headers set as if it was a 200 response, and according to the issue
+ // we would miss the Vary header in this case when compression was also enabled; note that we set this
+ // header in the responseWriter.init() method but that is only called if we are writing a response body
+ if status == http.StatusNotModified && !hasVaryValue(rw.Header(), "Accept-Encoding") {
+ rw.Header().Add("Vary", "Accept-Encoding")
+ }
+
+ // write status immediately if status is 2xx and the request is CONNECT
+ // since it means the response is successful.
+ // see: https://github.com/caddyserver/caddy/issues/6733#issuecomment-2525058845
+ if rw.isConnect && 200 <= status && status <= 299 {
+ rw.ResponseWriter.WriteHeader(status)
+ rw.wroteHeader = true
+ }
+
+ // write status immediately when status code is informational
+ // see: https://caddy.community/t/disappear-103-early-hints-response-with-encode-enable-caddy-v2-7-6/23081/5
+ if 100 <= status && status <= 199 {
+ rw.ResponseWriter.WriteHeader(status)
+ }
+}
+
+// Match determines whether encoding should be done based on the ResponseMatcher.
+func (enc *Encode) Match(rw *responseWriter) bool {
+ return enc.Matcher.Match(rw.statusCode, rw.Header())
+}
+
+// FlushError is an alternative Flush returning an error. It delays the actual Flush of the underlying
+// ResponseWriterWrapper until headers were written.
+func (rw *responseWriter) FlushError() error {
+ // WriteHeader wasn't called and is a CONNECT request, treat it as a success.
+ // otherwise, wait until header is written.
+ if rw.isConnect && !rw.wroteHeader && rw.statusCode == 0 {
+ rw.WriteHeader(http.StatusOK)
+ }
+
+ if !rw.wroteHeader {
+ // flushing the underlying ResponseWriter will write header and status code,
+ // but we need to delay that until we can determine if we must encode and
+ // therefore add the Content-Encoding header; this happens in the first call
+ // to rw.Write (see bug in #4314)
+ return nil
+ }
+ // also flushes the encoder, if any
+ // see: https://github.com/jjiang-stripe/caddy-slow-gzip
+ if rw.w != nil {
+ err := rw.w.Flush()
+ if err != nil {
+ return err
+ }
+ }
+ //nolint:bodyclose
+ return http.NewResponseController(rw.ResponseWriter).Flush()
}
// Write writes to the response. If the response qualifies,
// it is encoded using the encoder, which is initialized
// if not done so already.
func (rw *responseWriter) Write(p []byte) (int, error) {
- var n, written int
- var err error
+ // ignore zero data writes, probably head request
+ if len(p) == 0 {
+ return 0, nil
+ }
- if rw.buf != nil && rw.config.MinLength > 0 {
- written = rw.buf.Len()
- _, err := rw.buf.Write(p)
- if err != nil {
- return 0, err
+ // WriteHeader wasn't called and is a CONNECT request, treat it as a success.
+ // otherwise, determine if the response should be compressed.
+ if rw.isConnect && !rw.wroteHeader && rw.statusCode == 0 {
+ rw.WriteHeader(http.StatusOK)
+ }
+
+ // sniff content-type and determine content-length
+ if !rw.wroteHeader && rw.config.MinLength > 0 {
+ var gtMinLength bool
+ if len(p) > rw.config.MinLength {
+ gtMinLength = true
+ } else if cl, err := strconv.Atoi(rw.Header().Get("Content-Length")); err == nil && cl > rw.config.MinLength {
+ gtMinLength = true
+ }
+
+ if gtMinLength {
+ if rw.Header().Get("Content-Type") == "" {
+ rw.Header().Set("Content-Type", http.DetectContentType(p))
+ }
+ rw.init()
}
- rw.init()
- p = rw.buf.Bytes()
- defer func() {
- bufPool.Put(rw.buf)
- rw.buf = nil
- }()
}
// before we write to the response, we need to make
@@ -177,92 +333,107 @@ func (rw *responseWriter) Write(p []byte) (int, error) {
// and if so, that means we haven't written the
// header OR the default status code will be written
// by the standard library
- if rw.statusCode > 0 {
- rw.ResponseWriter.WriteHeader(rw.statusCode)
- rw.statusCode = 0
+ if !rw.wroteHeader {
+ if rw.statusCode != 0 {
+ rw.ResponseWriter.WriteHeader(rw.statusCode)
+ }
+ rw.wroteHeader = true
}
- switch {
- case rw.w != nil:
- n, err = rw.w.Write(p)
- default:
- n, err = rw.ResponseWriter.Write(p)
+ if rw.w != nil {
+ return rw.w.Write(p)
+ } else {
+ return rw.ResponseWriter.Write(p)
}
- n -= written
- if n < 0 {
- n = 0
- }
- return n, err
}
// Close writes any remaining buffered response and
// deallocates any active resources.
func (rw *responseWriter) Close() error {
- var err error
- // only attempt to write the remaining buffered response
- // if there are any bytes left to write; otherwise, if
- // the handler above us returned an error without writing
- // anything, we'd write to the response when we instead
- // should simply let the error propagate back down; this
- // is why the check for rw.buf.Len() > 0 is crucial
- if rw.buf != nil && rw.buf.Len() > 0 {
- rw.init()
- p := rw.buf.Bytes()
- defer func() {
- bufPool.Put(rw.buf)
- rw.buf = nil
- }()
- switch {
- case rw.w != nil:
- _, err = rw.w.Write(p)
- default:
- _, err = rw.ResponseWriter.Write(p)
+ // didn't write, probably head request
+ if !rw.wroteHeader {
+ cl, err := strconv.Atoi(rw.Header().Get("Content-Length"))
+ if err == nil && cl > rw.config.MinLength {
+ rw.init()
}
- } else if rw.statusCode != 0 {
- // it is possible that a body was not written, and
- // a header was not even written yet, even though
- // we are closing; ensure the proper status code is
- // written exactly once, or we risk breaking requests
- // that rely on If-None-Match, for example
- rw.ResponseWriter.WriteHeader(rw.statusCode)
- rw.statusCode = 0
+
+ // issue #5059, don't write status code if not set explicitly.
+ if rw.statusCode != 0 {
+ rw.ResponseWriter.WriteHeader(rw.statusCode)
+ }
+ rw.wroteHeader = true
}
+
+ var err error
if rw.w != nil {
- err2 := rw.w.Close()
- if err2 != nil && err == nil {
- err = err2
- }
+ err = rw.w.Close()
+ rw.w.Reset(nil)
rw.config.writerPools[rw.encodingName].Put(rw.w)
rw.w = nil
}
return err
}
-// init should be called before we write a response, if rw.buf has contents.
-func (rw *responseWriter) init() {
- if rw.Header().Get("Content-Encoding") == "" && rw.buf.Len() >= rw.config.MinLength {
- rw.w = rw.config.writerPools[rw.encodingName].Get().(Encoder)
- rw.w.Reset(rw.ResponseWriter)
- rw.Header().Del("Content-Length") // https://github.com/golang/go/issues/14975
- rw.Header().Set("Content-Encoding", rw.encodingName)
- rw.Header().Add("Vary", "Accept-Encoding")
- }
- rw.Header().Del("Accept-Ranges") // we don't know ranges for dynamically-encoded content
+// Unwrap returns the underlying ResponseWriter.
+func (rw *responseWriter) Unwrap() http.ResponseWriter {
+ return rw.ResponseWriter
}
-// acceptedEncodings returns the list of encodings that the
-// client supports, in descending order of preference. If
+// init should be called before we write a response body, to
+func (rw *responseWriter) init() {
+ hdr := rw.Header()
+ if hdr.Get("Content-Encoding") == "" && isEncodeAllowed(hdr) &&
+ rw.config.Match(rw) {
+ rw.w = rw.config.writerPools[rw.encodingName].Get().(Encoder)
+ rw.w.Reset(rw.ResponseWriter)
+ hdr.Del("Content-Length") // https://github.com/golang/go/issues/14975
+ hdr.Set("Content-Encoding", rw.encodingName)
+ if !hasVaryValue(hdr, "Accept-Encoding") {
+ hdr.Add("Vary", "Accept-Encoding")
+ }
+ hdr.Del("Accept-Ranges") // we don't know ranges for dynamically-encoded content
+
+ // strong ETags need to be distinct depending on the encoding ("selected representation")
+ // see RFC 9110 section 8.8.3.3:
+ // https://www.rfc-editor.org/rfc/rfc9110.html#name-example-entity-tags-varying
+ // I don't know a great way to do this... how about appending? That's a neat trick!
+ // (We have to strip the value we append from If-None-Match headers before
+ // sending subsequent requests back upstream, however, since upstream handlers
+ // don't know about our appending to their Etag since they've already done their work)
+ if etag := hdr.Get("Etag"); etag != "" && !strings.HasPrefix(etag, "W/") {
+ etag = fmt.Sprintf(`%s-%s"`, strings.TrimSuffix(etag, `"`), rw.encodingName)
+ hdr.Set("Etag", etag)
+ }
+ }
+}
+
+func hasVaryValue(hdr http.Header, target string) bool {
+ for _, vary := range hdr.Values("Vary") {
+ vals := strings.Split(vary, ",")
+ for _, val := range vals {
+ if strings.EqualFold(strings.TrimSpace(val), target) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// AcceptedEncodings returns the list of encodings that the
+// client supports, in descending order of preference.
+// The client preference via q-factor and the server
+// preference via Prefer setting are taken into account. If
// the Sec-WebSocket-Key header is present then non-identity
// encodings are not considered. See
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html.
-func acceptedEncodings(r *http.Request) []string {
+func AcceptedEncodings(r *http.Request, preferredOrder []string) []string {
acceptEncHeader := r.Header.Get("Accept-Encoding")
websocketKey := r.Header.Get("Sec-WebSocket-Key")
if acceptEncHeader == "" {
return []string{}
}
- var prefs []encodingPreference
+ prefs := []encodingPreference{}
for _, accepted := range strings.Split(acceptEncHeader, ",") {
parts := strings.Split(accepted, ";")
@@ -292,18 +463,26 @@ func acceptedEncodings(r *http.Request) []string {
continue
}
+ // set server preference
+ prefOrder := slices.Index(preferredOrder, encName)
+ if prefOrder > -1 {
+ prefOrder = len(preferredOrder) - prefOrder
+ }
+
prefs = append(prefs, encodingPreference{
- encoding: encName,
- q: qFactor,
+ encoding: encName,
+ q: qFactor,
+ preferOrder: prefOrder,
})
}
- // sort preferences by descending q-factor
- sort.Slice(prefs, func(i, j int) bool { return prefs[i].q > prefs[j].q })
-
- // TODO: If no preference, or same pref for all encodings,
- // and not websocket, use default encoding ordering (enc.Prefer)
- // for those which are accepted by the client
+ // sort preferences by descending q-factor first, then by preferOrder
+ sort.Slice(prefs, func(i, j int) bool {
+ if math.Abs(prefs[i].q-prefs[j].q) < 0.00001 {
+ return prefs[i].preferOrder > prefs[j].preferOrder
+ }
+ return prefs[i].q > prefs[j].q
+ })
prefEncNames := make([]string, len(prefs))
for i := range prefs {
@@ -315,14 +494,16 @@ func acceptedEncodings(r *http.Request) []string {
// encodingPreference pairs an encoding with its q-factor.
type encodingPreference struct {
- encoding string
- q float64
+ encoding string
+ q float64
+ preferOrder int
}
// Encoder is a type which can encode a stream of data.
type Encoder interface {
io.WriteCloser
Reset(io.Writer)
+	Flush() error // encoders buffer data by default to maximize the compression ratio
}
// Encoding is a type which can create encoders of its kind
@@ -332,10 +513,11 @@ type Encoding interface {
NewEncoder() Encoder
}
-var bufPool = sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
+// Precompressed is a type which returns the filename suffix of a
+// precompressed file and the Accept-Encoding header value to use when serving it.
+type Precompressed interface {
+ AcceptEncoding() string
+ Suffix() string
}
// defaultMinLength is the minimum length at which to compress content.
@@ -344,6 +526,6 @@ const defaultMinLength = 512
// Interface guards
var (
_ caddy.Provisioner = (*Encode)(nil)
+ _ caddy.Validator = (*Encode)(nil)
_ caddyhttp.MiddlewareHandler = (*Encode)(nil)
- _ caddyhttp.HTTPInterfaces = (*responseWriter)(nil)
)
diff --git a/modules/caddyhttp/encode/encode_test.go b/modules/caddyhttp/encode/encode_test.go
index 79eae3c5..83effa58 100644
--- a/modules/caddyhttp/encode/encode_test.go
+++ b/modules/caddyhttp/encode/encode_test.go
@@ -1,12 +1,308 @@
package encode
import (
+ "net/http"
+ "sync"
"testing"
)
func BenchmarkOpenResponseWriter(b *testing.B) {
enc := new(Encode)
for n := 0; n < b.N; n++ {
- enc.openResponseWriter("test", nil)
+ enc.openResponseWriter("test", nil, false)
+ }
+}
+
+func TestPreferOrder(t *testing.T) {
+ testCases := []struct {
+ name string
+ accept string
+ prefer []string
+ expected []string
+ }{
+ {
+ name: "PreferOrder(): 4 accept, 3 prefer",
+ accept: "deflate, gzip, br, zstd",
+ prefer: []string{"zstd", "br", "gzip"},
+ expected: []string{"zstd", "br", "gzip", "deflate"},
+ },
+ {
+ name: "PreferOrder(): 2 accept, 3 prefer",
+ accept: "deflate, zstd",
+ prefer: []string{"zstd", "br", "gzip"},
+ expected: []string{"zstd", "deflate"},
+ },
+ {
+ name: "PreferOrder(): 2 accept (1 empty), 3 prefer",
+ accept: "gzip,,zstd",
+ prefer: []string{"zstd", "br", "gzip"},
+ expected: []string{"zstd", "gzip", ""},
+ },
+ {
+ name: "PreferOrder(): 1 accept, 2 prefer",
+ accept: "gzip",
+ prefer: []string{"zstd", "gzip"},
+ expected: []string{"gzip"},
+ },
+ {
+ name: "PreferOrder(): 4 accept (1 duplicate), 1 prefer",
+ accept: "deflate, gzip, br, br",
+ prefer: []string{"br"},
+ expected: []string{"br", "br", "deflate", "gzip"},
+ },
+ {
+ name: "PreferOrder(): empty accept, 0 prefer",
+ accept: "",
+ prefer: []string{},
+ expected: []string{},
+ },
+ {
+ name: "PreferOrder(): empty accept, 1 prefer",
+ accept: "",
+ prefer: []string{"gzip"},
+ expected: []string{},
+ },
+ {
+ name: "PreferOrder(): with q-factor",
+ accept: "deflate;q=0.8, gzip;q=0.4, br;q=0.2, zstd",
+ prefer: []string{"gzip"},
+ expected: []string{"zstd", "deflate", "gzip", "br"},
+ },
+ {
+ name: "PreferOrder(): with q-factor, no prefer",
+ accept: "deflate;q=0.8, gzip;q=0.4, br;q=0.2, zstd",
+ prefer: []string{},
+ expected: []string{"zstd", "deflate", "gzip", "br"},
+ },
+ {
+ name: "PreferOrder(): q-factor=0 filtered out",
+ accept: "deflate;q=0.1, gzip;q=0.4, br;q=0.5, zstd;q=0",
+ prefer: []string{"gzip"},
+ expected: []string{"br", "gzip", "deflate"},
+ },
+ {
+ name: "PreferOrder(): q-factor=0 filtered out, no prefer",
+ accept: "deflate;q=0.1, gzip;q=0.4, br;q=0.5, zstd;q=0",
+ prefer: []string{},
+ expected: []string{"br", "gzip", "deflate"},
+ },
+ {
+ name: "PreferOrder(): with invalid q-factor",
+ accept: "br, deflate, gzip;q=2, zstd;q=0.1",
+ prefer: []string{"zstd", "gzip"},
+ expected: []string{"gzip", "br", "deflate", "zstd"},
+ },
+ {
+ name: "PreferOrder(): with invalid q-factor, no prefer",
+ accept: "br, deflate, gzip;q=2, zstd;q=0.1",
+ prefer: []string{},
+ expected: []string{"br", "deflate", "gzip", "zstd"},
+ },
+ }
+
+ enc := new(Encode)
+ r, _ := http.NewRequest("", "", nil)
+
+ for _, test := range testCases {
+ t.Run(test.name, func(t *testing.T) {
+ if test.accept == "" {
+ r.Header.Del("Accept-Encoding")
+ } else {
+ r.Header.Set("Accept-Encoding", test.accept)
+ }
+ enc.Prefer = test.prefer
+ result := AcceptedEncodings(r, enc.Prefer)
+ if !sliceEqual(result, test.expected) {
+ t.Errorf("AcceptedEncodings() actual: %s expected: %s",
+ result,
+ test.expected)
+ }
+ })
+ }
+}
+
+func sliceEqual(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func TestValidate(t *testing.T) {
+ type testCase struct {
+ name string
+ prefer []string
+ wantErr bool
+ }
+
+ var err error
+ var testCases []testCase
+ enc := new(Encode)
+
+ enc.writerPools = map[string]*sync.Pool{
+ "zstd": nil,
+ "gzip": nil,
+ "br": nil,
+ }
+ testCases = []testCase{
+ {
+ name: "ValidatePrefer (zstd, gzip & br enabled): valid order with all encoder",
+ prefer: []string{"zstd", "br", "gzip"},
+ wantErr: false,
+ },
+ {
+ name: "ValidatePrefer (zstd, gzip & br enabled): valid order with 2 out of 3 encoders",
+ prefer: []string{"br", "gzip"},
+ wantErr: false,
+ },
+ {
+ name: "ValidatePrefer (zstd, gzip & br enabled): valid order with 1 out of 3 encoders",
+ prefer: []string{"gzip"},
+ wantErr: false,
+ },
+ {
+ name: "ValidatePrefer (zstd, gzip & br enabled): 1 duplicated (once) encoder",
+ prefer: []string{"gzip", "zstd", "gzip"},
+ wantErr: true,
+ },
+ {
+ name: "ValidatePrefer (zstd, gzip & br enabled): 1 not enabled encoder in prefer list",
+ prefer: []string{"br", "zstd", "gzip", "deflate"},
+ wantErr: true,
+ },
+ {
+ name: "ValidatePrefer (zstd, gzip & br enabled): no prefer list",
+ prefer: []string{},
+ wantErr: false,
+ },
+ }
+
+ for _, test := range testCases {
+ t.Run(test.name, func(t *testing.T) {
+ enc.Prefer = test.prefer
+ err = enc.Validate()
+ if (err != nil) != test.wantErr {
+ t.Errorf("Validate() error = %v, wantErr = %v", err, test.wantErr)
+ }
+ })
+ }
+
+ enc.writerPools = map[string]*sync.Pool{
+ "zstd": nil,
+ "gzip": nil,
+ }
+ testCases = []testCase{
+ {
+ name: "ValidatePrefer (zstd & gzip enabled): 1 not enabled encoder in prefer list",
+ prefer: []string{"zstd", "br", "gzip"},
+ wantErr: true,
+ },
+ {
+ name: "ValidatePrefer (zstd & gzip enabled): 2 not enabled encoder in prefer list",
+ prefer: []string{"br", "zstd", "gzip", "deflate"},
+ wantErr: true,
+ },
+ {
+ name: "ValidatePrefer (zstd & gzip enabled): only not enabled encoder in prefer list",
+ prefer: []string{"deflate", "br", "gzip"},
+ wantErr: true,
+ },
+ {
+ name: "ValidatePrefer (zstd & gzip enabled): 1 duplicated (once) encoder in prefer list",
+ prefer: []string{"gzip", "zstd", "gzip"},
+ wantErr: true,
+ },
+ {
+ name: "ValidatePrefer (zstd & gzip enabled): 1 duplicated (twice) encoder in prefer list",
+ prefer: []string{"gzip", "zstd", "gzip", "gzip"},
+ wantErr: true,
+ },
+ {
+ name: "ValidatePrefer (zstd & gzip enabled): 1 duplicated encoder in prefer list",
+ prefer: []string{"zstd", "zstd", "gzip", "gzip"},
+ wantErr: true,
+ },
+ {
+ name: "ValidatePrefer (zstd & gzip enabled): 1 duplicated not enabled encoder in prefer list",
+ prefer: []string{"br", "br", "gzip"},
+ wantErr: true,
+ },
+ {
+ name: "ValidatePrefer (zstd & gzip enabled): 2 duplicated not enabled encoder in prefer list",
+ prefer: []string{"br", "deflate", "br", "deflate"},
+ wantErr: true,
+ },
+ {
+ name: "ValidatePrefer (zstd & gzip enabled): valid order zstd first",
+ prefer: []string{"zstd", "gzip"},
+ wantErr: false,
+ },
+ {
+ name: "ValidatePrefer (zstd & gzip enabled): valid order gzip first",
+ prefer: []string{"gzip", "zstd"},
+ wantErr: false,
+ },
+ }
+
+ for _, test := range testCases {
+ t.Run(test.name, func(t *testing.T) {
+ enc.Prefer = test.prefer
+ err = enc.Validate()
+ if (err != nil) != test.wantErr {
+ t.Errorf("Validate() error = %v, wantErr = %v", err, test.wantErr)
+ }
+ })
+ }
+}
+
+func TestIsEncodeAllowed(t *testing.T) {
+ testCases := []struct {
+ name string
+ headers http.Header
+ expected bool
+ }{
+ {
+ name: "Without any headers",
+ headers: http.Header{},
+ expected: true,
+ },
+ {
+ name: "Without Cache-Control HTTP header",
+ headers: http.Header{
+ "Accept-Encoding": {"gzip"},
+ },
+ expected: true,
+ },
+ {
+ name: "Cache-Control HTTP header ending with no-transform directive",
+ headers: http.Header{
+ "Accept-Encoding": {"gzip"},
+ "Cache-Control": {"no-cache; no-transform"},
+ },
+ expected: false,
+ },
+ {
+ name: "With Cache-Control HTTP header no-transform as Cache-Extension value",
+ headers: http.Header{
+ "Accept-Encoding": {"gzip"},
+ "Cache-Control": {`no-store; no-cache; community="no-transform"`},
+ },
+ expected: false,
+ },
+ }
+
+ for _, test := range testCases {
+ t.Run(test.name, func(t *testing.T) {
+ if result := isEncodeAllowed(test.headers); result != test.expected {
+ t.Errorf("The headers given to the isEncodeAllowed should return %t, %t given.",
+ result,
+ test.expected)
+ }
+ })
}
}
diff --git a/modules/caddyhttp/encode/gzip/gzip.go b/modules/caddyhttp/encode/gzip/gzip.go
index 8215b3fb..40f37ab8 100644
--- a/modules/caddyhttp/encode/gzip/gzip.go
+++ b/modules/caddyhttp/encode/gzip/gzip.go
@@ -15,14 +15,14 @@
package caddygzip
import (
- "compress/flate"
"fmt"
"strconv"
+ "github.com/klauspost/compress/gzip"
+
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
- "github.com/klauspost/compress/gzip"
)
func init() {
@@ -44,17 +44,16 @@ func (Gzip) CaddyModule() caddy.ModuleInfo {
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens.
func (g *Gzip) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if !d.NextArg() {
- continue
- }
- levelStr := d.Val()
- level, err := strconv.Atoi(levelStr)
- if err != nil {
- return err
- }
- g.Level = level
+ d.Next() // consume option name
+ if !d.NextArg() {
+ return nil
}
+ levelStr := d.Val()
+ level, err := strconv.Atoi(levelStr)
+ if err != nil {
+ return err
+ }
+ g.Level = level
return nil
}
@@ -68,11 +67,11 @@ func (g *Gzip) Provision(ctx caddy.Context) error {
// Validate validates g's configuration.
func (g Gzip) Validate() error {
- if g.Level < flate.NoCompression {
- return fmt.Errorf("quality too low; must be >= %d", flate.NoCompression)
+ if g.Level < gzip.StatelessCompression {
+ return fmt.Errorf("quality too low; must be >= %d", gzip.StatelessCompression)
}
- if g.Level > flate.BestCompression {
- return fmt.Errorf("quality too high; must be <= %d", flate.BestCompression)
+ if g.Level > gzip.BestCompression {
+ return fmt.Errorf("quality too high; must be <= %d", gzip.BestCompression)
}
return nil
}
diff --git a/modules/caddyhttp/encode/gzip/gzip_precompressed.go b/modules/caddyhttp/encode/gzip/gzip_precompressed.go
new file mode 100644
index 00000000..7103cc8d
--- /dev/null
+++ b/modules/caddyhttp/encode/gzip/gzip_precompressed.go
@@ -0,0 +1,28 @@
+package caddygzip
+
+import (
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
+)
+
+func init() {
+ caddy.RegisterModule(GzipPrecompressed{})
+}
+
+// GzipPrecompressed provides the file extension for files precompressed with gzip encoding.
+type GzipPrecompressed struct {
+ Gzip
+}
+
+// CaddyModule returns the Caddy module information.
+func (GzipPrecompressed) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.precompressed.gzip",
+ New: func() caddy.Module { return new(GzipPrecompressed) },
+ }
+}
+
+// Suffix returns the filename suffix of precompressed files.
+func (GzipPrecompressed) Suffix() string { return ".gz" }
+
+var _ encode.Precompressed = (*GzipPrecompressed)(nil)
diff --git a/modules/caddyhttp/encode/zstd/zstd.go b/modules/caddyhttp/encode/zstd/zstd.go
index 5182fc4e..1706de89 100644
--- a/modules/caddyhttp/encode/zstd/zstd.go
+++ b/modules/caddyhttp/encode/zstd/zstd.go
@@ -15,10 +15,13 @@
package caddyzstd
import (
+ "fmt"
+
+ "github.com/klauspost/compress/zstd"
+
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
- "github.com/klauspost/compress/zstd"
)
func init() {
@@ -26,7 +29,13 @@ func init() {
}
// Zstd can create Zstandard encoders.
-type Zstd struct{}
+type Zstd struct {
+ // The compression level. Accepted values: fastest, better, best, default.
+ Level string `json:"level,omitempty"`
+
+	// level is the parsed compression level, one of the zstd.EncoderLevel constants from zstd.SpeedFastest to zstd.SpeedBestCompression
+ level zstd.EncoderLevel
+}
// CaddyModule returns the Caddy module information.
func (Zstd) CaddyModule() caddy.ModuleInfo {
@@ -38,6 +47,37 @@ func (Zstd) CaddyModule() caddy.ModuleInfo {
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens.
func (z *Zstd) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ d.Next() // consume option name
+ if !d.NextArg() {
+ return nil
+ }
+ levelStr := d.Val()
+ if ok, _ := zstd.EncoderLevelFromString(levelStr); !ok {
+ return d.Errf("unexpected compression level, use one of '%s', '%s', '%s', '%s'",
+ zstd.SpeedFastest,
+ zstd.SpeedBetterCompression,
+ zstd.SpeedBestCompression,
+ zstd.SpeedDefault,
+ )
+ }
+ z.Level = levelStr
+ return nil
+}
+
+// Provision provisions z's configuration.
+func (z *Zstd) Provision(ctx caddy.Context) error {
+ if z.Level == "" {
+ z.Level = zstd.SpeedDefault.String()
+ }
+ var ok bool
+ if ok, z.level = zstd.EncoderLevelFromString(z.Level); !ok {
+ return fmt.Errorf("unexpected compression level, use one of '%s', '%s', '%s', '%s'",
+ zstd.SpeedFastest,
+ zstd.SpeedDefault,
+ zstd.SpeedBetterCompression,
+ zstd.SpeedBestCompression,
+ )
+ }
return nil
}
@@ -45,9 +85,18 @@ func (z *Zstd) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// used in the Accept-Encoding request headers.
func (Zstd) AcceptEncoding() string { return "zstd" }
-// NewEncoder returns a new gzip writer.
+// NewEncoder returns a new Zstandard writer.
func (z Zstd) NewEncoder() encode.Encoder {
- writer, _ := zstd.NewWriter(nil)
+ // The default of 8MB for the window is
+ // too large for many clients, so we limit
+ // it to 128K to lighten their load.
+ writer, _ := zstd.NewWriter(
+ nil,
+ zstd.WithWindowSize(128<<10),
+ zstd.WithEncoderConcurrency(1),
+ zstd.WithZeroFrames(true),
+ zstd.WithEncoderLevel(z.level),
+ )
return writer
}
@@ -55,4 +104,5 @@ func (z Zstd) NewEncoder() encode.Encoder {
var (
_ encode.Encoding = (*Zstd)(nil)
_ caddyfile.Unmarshaler = (*Zstd)(nil)
+ _ caddy.Provisioner = (*Zstd)(nil)
)
diff --git a/modules/caddyhttp/encode/zstd/zstd_precompressed.go b/modules/caddyhttp/encode/zstd/zstd_precompressed.go
new file mode 100644
index 00000000..522f4173
--- /dev/null
+++ b/modules/caddyhttp/encode/zstd/zstd_precompressed.go
@@ -0,0 +1,28 @@
+package caddyzstd
+
+import (
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
+)
+
+func init() {
+ caddy.RegisterModule(ZstdPrecompressed{})
+}
+
+// ZstdPrecompressed provides the file extension for files precompressed with zstandard encoding.
+type ZstdPrecompressed struct {
+ Zstd
+}
+
+// CaddyModule returns the Caddy module information.
+func (ZstdPrecompressed) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.precompressed.zstd",
+ New: func() caddy.Module { return new(ZstdPrecompressed) },
+ }
+}
+
+// Suffix returns the filename suffix of precompressed files.
+func (ZstdPrecompressed) Suffix() string { return ".zst" }
+
+var _ encode.Precompressed = (*ZstdPrecompressed)(nil)
diff --git a/modules/caddyhttp/errors.go b/modules/caddyhttp/errors.go
index 05930636..fc8ffbfa 100644
--- a/modules/caddyhttp/errors.go
+++ b/modules/caddyhttp/errors.go
@@ -15,8 +15,9 @@
package caddyhttp
import (
+ "errors"
"fmt"
- mathrand "math/rand"
+ weakrand "math/rand"
"path"
"runtime"
"strings"
@@ -30,7 +31,8 @@ import (
// set will be populated.
func Error(statusCode int, err error) HandlerError {
const idLen = 9
- if he, ok := err.(HandlerError); ok {
+ var he HandlerError
+ if errors.As(err, &he) {
if he.ID == "" {
he.ID = randString(idLen, true)
}
@@ -77,6 +79,9 @@ func (e HandlerError) Error() string {
return strings.TrimSpace(s)
}
+// Unwrap returns the underlying error value. See the `errors` package for info.
+func (e HandlerError) Unwrap() error { return e.Err }
+
// randString returns a string of n random characters.
// It is not even remotely secure OR a proper distribution.
// But it's good enough for some things. It excludes certain
@@ -92,7 +97,8 @@ func randString(n int, sameCase bool) string {
}
b := make([]byte, n)
for i := range b {
- b[i] = dict[mathrand.Int63()%int64(len(dict))]
+ //nolint:gosec
+ b[i] = dict[weakrand.Int63()%int64(len(dict))]
}
return string(b)
}
diff --git a/modules/caddyhttp/fileserver/browse.go b/modules/caddyhttp/fileserver/browse.go
index e5e137fc..0a623c79 100644
--- a/modules/caddyhttp/fileserver/browse.go
+++ b/modules/caddyhttp/fileserver/browse.go
@@ -16,37 +16,98 @@ package fileserver
import (
"bytes"
+ "context"
+ _ "embed"
"encoding/json"
- "html/template"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
"net/http"
"os"
"path"
"strings"
+ "sync"
+ "text/tabwriter"
+ "text/template"
+ "time"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/templates"
)
+// BrowseTemplate is the default template document to use for
+// file listings. Its default value is an embedded document.
+// You can override this value at program start, or
+// if you are running Caddy via config, you can specify a
+// custom template_file in the browse configuration.
+//
+//go:embed browse.html
+var BrowseTemplate string
+
// Browse configures directory browsing.
type Browse struct {
- // Use this template file instead of the default browse template.
+ // Filename of the template to use instead of the embedded browse template.
TemplateFile string `json:"template_file,omitempty"`
- template *template.Template
+ // Determines whether or not targets of symlinks should be revealed.
+ RevealSymlinks bool `json:"reveal_symlinks,omitempty"`
+
+ // Override the default sort.
+ // It includes the following options:
+ // - sort_by: name(default), namedirfirst, size, time
+ // - order: asc(default), desc
+ // eg.:
+ // - `sort time desc` will sort by time in descending order
+ // - `sort size` will sort by size in ascending order
+	// The first option must be `sort_by` and the second option, if present, must be `order`.
+ SortOptions []string `json:"sort,omitempty"`
+
+	// FileLimit limits the listing to at most this many DirEntry values, read in directory order.
+ FileLimit int `json:"file_limit,omitempty"`
}
-func (fsrv *FileServer) serveBrowse(dirPath string, w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
- // navigation on the client-side gets messed up if the
- // URL doesn't end in a trailing slash because hrefs like
- // "/b/c" on a path like "/a" end up going to "/b/c" instead
- // of "/a/b/c" - so we have to redirect in this case
- if !strings.HasSuffix(r.URL.Path, "/") {
- r.URL.Path += "/"
- http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)
- return nil
+const (
+ defaultDirEntryLimit = 10000
+)
+
+func (fsrv *FileServer) serveBrowse(fileSystem fs.FS, root, dirPath string, w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "browse enabled; listing directory contents"); c != nil {
+ c.Write(zap.String("path", dirPath), zap.String("root", root))
}
- dir, err := fsrv.openFile(dirPath, w)
+ // Navigation on the client-side gets messed up if the
+ // URL doesn't end in a trailing slash because hrefs to
+ // "b/c" at path "/a" end up going to "/b/c" instead
+ // of "/a/b/c" - so we have to redirect in this case
+ // so that the path is "/a/" and the client constructs
+ // relative hrefs "b/c" to be "/a/b/c".
+ //
+ // Only redirect if the last element of the path (the filename) was not
+ // rewritten; if the admin wanted to rewrite to the canonical path, they
+ // would have, and we have to be very careful not to introduce unwanted
+ // redirects and especially redirect loops! (Redirecting using the
+ // original URI is necessary because that's the URI the browser knows,
+ // we don't want to redirect from internally-rewritten URIs.)
+ // See https://github.com/caddyserver/caddy/issues/4205.
+ // We also redirect if the path is empty, because this implies the path
+ // prefix was fully stripped away by a `handle_path` handler for example.
+ // See https://github.com/caddyserver/caddy/issues/4466.
+ origReq := r.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request)
+ if r.URL.Path == "" || path.Base(origReq.URL.Path) == path.Base(r.URL.Path) {
+ if !strings.HasSuffix(origReq.URL.Path, "/") {
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "redirecting to trailing slash to preserve hrefs"); c != nil {
+ c.Write(zap.String("request_path", r.URL.Path))
+ }
+ return redirect(w, r, origReq.URL.Path+"/")
+ }
+ }
+
+ dir, err := fsrv.openFile(fileSystem, dirPath, w)
if err != nil {
return err
}
@@ -54,60 +115,159 @@ func (fsrv *FileServer) serveBrowse(dirPath string, w http.ResponseWriter, r *ht
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- // calling path.Clean here prevents weird breadcrumbs when URL paths are sketchy like /%2e%2e%2f
- listing, err := fsrv.loadDirectoryContents(dir, path.Clean(r.URL.Path), repl)
+ // TODO: not entirely sure if path.Clean() is necessary here but seems like a safe plan (i.e. /%2e%2e%2f) - someone could verify this
+ listing, err := fsrv.loadDirectoryContents(r.Context(), fileSystem, dir.(fs.ReadDirFile), root, path.Clean(r.URL.EscapedPath()), repl)
switch {
- case os.IsPermission(err):
+ case errors.Is(err, fs.ErrPermission):
return caddyhttp.Error(http.StatusForbidden, err)
- case os.IsNotExist(err):
+ case errors.Is(err, fs.ErrNotExist):
return fsrv.notFound(w, r, next)
case err != nil:
return caddyhttp.Error(http.StatusInternalServerError, err)
}
- fsrv.browseApplyQueryParams(w, r, &listing)
+ w.Header().Add("Vary", "Accept, Accept-Encoding")
+
+ // speed up browser/client experience and caching by supporting If-Modified-Since
+ if ifModSinceStr := r.Header.Get("If-Modified-Since"); ifModSinceStr != "" {
+ ifModSince, err := time.ParseInLocation(http.TimeFormat, ifModSinceStr, time.Local)
+ lastModTrunc := listing.lastModified.Truncate(time.Second)
+ if err == nil && (lastModTrunc.Equal(ifModSince) || lastModTrunc.Before(ifModSince)) {
+ w.WriteHeader(http.StatusNotModified)
+ return nil
+ }
+ }
+
+ fsrv.browseApplyQueryParams(w, r, listing)
+
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
- // write response as either JSON or HTML
- var buf *bytes.Buffer
acceptHeader := strings.ToLower(strings.Join(r.Header["Accept"], ","))
- if strings.Contains(acceptHeader, "application/json") {
- if buf, err = fsrv.browseWriteJSON(listing); err != nil {
+ w.Header().Set("Last-Modified", listing.lastModified.Format(http.TimeFormat))
+
+ switch {
+ case strings.Contains(acceptHeader, "application/json"):
+ if err := json.NewEncoder(buf).Encode(listing.Items); err != nil {
return caddyhttp.Error(http.StatusInternalServerError, err)
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
- } else {
- if buf, err = fsrv.browseWriteHTML(listing); err != nil {
+
+ case strings.Contains(acceptHeader, "text/plain"):
+ writer := tabwriter.NewWriter(buf, 0, 8, 1, '\t', tabwriter.AlignRight)
+
+ // Header on top
+ if _, err := fmt.Fprintln(writer, "Name\tSize\tModified"); err != nil {
+ return caddyhttp.Error(http.StatusInternalServerError, err)
+ }
+
+ // Lines to separate the header
+ if _, err := fmt.Fprintln(writer, "----\t----\t--------"); err != nil {
+ return caddyhttp.Error(http.StatusInternalServerError, err)
+ }
+
+ // Actual files
+ for _, item := range listing.Items {
+ if _, err := fmt.Fprintf(writer, "%s\t%s\t%s\n",
+ item.Name, item.HumanSize(), item.HumanModTime("January 2, 2006 at 15:04:05"),
+ ); err != nil {
+ return caddyhttp.Error(http.StatusInternalServerError, err)
+ }
+ }
+
+ if err := writer.Flush(); err != nil {
+ return caddyhttp.Error(http.StatusInternalServerError, err)
+ }
+
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+
+ default:
+ var fs http.FileSystem
+ if fsrv.Root != "" {
+ fs = http.Dir(repl.ReplaceAll(fsrv.Root, "."))
+ }
+
+ tplCtx := &templateContext{
+ TemplateContext: templates.TemplateContext{
+ Root: fs,
+ Req: r,
+ RespHeader: templates.WrappedHeader{Header: w.Header()},
+ },
+ browseTemplateContext: listing,
+ }
+
+ tpl, err := fsrv.makeBrowseTemplate(tplCtx)
+ if err != nil {
+ return fmt.Errorf("parsing browse template: %v", err)
+ }
+ if err := tpl.Execute(buf, tplCtx); err != nil {
return caddyhttp.Error(http.StatusInternalServerError, err)
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
}
- buf.WriteTo(w)
+ _, _ = buf.WriteTo(w)
return nil
}
-func (fsrv *FileServer) loadDirectoryContents(dir *os.File, urlPath string, repl *caddy.Replacer) (browseListing, error) {
- files, err := dir.Readdir(-1)
- if err != nil {
- return browseListing{}, err
+func (fsrv *FileServer) loadDirectoryContents(ctx context.Context, fileSystem fs.FS, dir fs.ReadDirFile, root, urlPath string, repl *caddy.Replacer) (*browseTemplateContext, error) {
+ dirLimit := defaultDirEntryLimit
+ if fsrv.Browse.FileLimit != 0 {
+ dirLimit = fsrv.Browse.FileLimit
+ }
+ files, err := dir.ReadDir(dirLimit)
+ if err != nil && err != io.EOF {
+ return nil, err
}
- // determine if user can browse up another folder
- curPathDir := path.Dir(strings.TrimSuffix(urlPath, "/"))
- canGoUp := strings.HasPrefix(curPathDir, fsrv.Root)
+ // user can presumably browse "up" to parent folder if path is longer than "/"
+ canGoUp := len(urlPath) > 1
- return fsrv.directoryListing(files, canGoUp, urlPath, repl), nil
+ return fsrv.directoryListing(ctx, fileSystem, files, canGoUp, root, urlPath, repl), nil
}
// browseApplyQueryParams applies query parameters to the listing.
// It mutates the listing and may set cookies.
-func (fsrv *FileServer) browseApplyQueryParams(w http.ResponseWriter, r *http.Request, listing *browseListing) {
- sortParam := r.URL.Query().Get("sort")
- orderParam := r.URL.Query().Get("order")
- limitParam := r.URL.Query().Get("limit")
+func (fsrv *FileServer) browseApplyQueryParams(w http.ResponseWriter, r *http.Request, listing *browseTemplateContext) {
+ var orderParam, sortParam string
- // first figure out what to sort by
+ // The configs in Caddyfile have lower priority than Query params,
+ // so put it at first.
+ for idx, item := range fsrv.Browse.SortOptions {
+ // Only `sort` & `order`, 2 params are allowed
+ if idx >= 2 {
+ break
+ }
+ switch item {
+ case sortByName, sortByNameDirFirst, sortBySize, sortByTime:
+ sortParam = item
+ case sortOrderAsc, sortOrderDesc:
+ orderParam = item
+ }
+ }
+
+ layoutParam := r.URL.Query().Get("layout")
+ limitParam := r.URL.Query().Get("limit")
+ offsetParam := r.URL.Query().Get("offset")
+ sortParamTmp := r.URL.Query().Get("sort")
+ if sortParamTmp != "" {
+ sortParam = sortParamTmp
+ }
+ orderParamTmp := r.URL.Query().Get("order")
+ if orderParamTmp != "" {
+ orderParam = orderParamTmp
+ }
+
+ switch layoutParam {
+ case "list", "grid", "":
+ listing.Layout = layoutParam
+ default:
+ listing.Layout = "list"
+ }
+
+ // figure out what to sort by
switch sortParam {
case "":
sortParam = sortByNameDirFirst
@@ -121,47 +281,70 @@ func (fsrv *FileServer) browseApplyQueryParams(w http.ResponseWriter, r *http.Re
// then figure out the order
switch orderParam {
case "":
- orderParam = "asc"
+ orderParam = sortOrderAsc
if orderCookie, orderErr := r.Cookie("order"); orderErr == nil {
orderParam = orderCookie.Value
}
- case "asc", "desc":
+ case sortOrderAsc, sortOrderDesc:
http.SetCookie(w, &http.Cookie{Name: "order", Value: orderParam, Secure: r.TLS != nil})
}
// finally, apply the sorting and limiting
- listing.applySortAndLimit(sortParam, orderParam, limitParam)
+ listing.applySortAndLimit(sortParam, orderParam, limitParam, offsetParam)
}
-func (fsrv *FileServer) browseWriteJSON(listing browseListing) (*bytes.Buffer, error) {
- buf := bufPool.Get().(*bytes.Buffer)
- err := json.NewEncoder(buf).Encode(listing.Items)
- bufPool.Put(buf)
- return buf, err
-}
+// makeBrowseTemplate creates the template to be used for directory listings.
+func (fsrv *FileServer) makeBrowseTemplate(tplCtx *templateContext) (*template.Template, error) {
+ var tpl *template.Template
+ var err error
-func (fsrv *FileServer) browseWriteHTML(listing browseListing) (*bytes.Buffer, error) {
- buf := bufPool.Get().(*bytes.Buffer)
- err := fsrv.Browse.template.Execute(buf, listing)
- bufPool.Put(buf)
- return buf, err
-}
+ if fsrv.Browse.TemplateFile != "" {
+ tpl = tplCtx.NewTemplate(path.Base(fsrv.Browse.TemplateFile))
+ tpl, err = tpl.ParseFiles(fsrv.Browse.TemplateFile)
+ if err != nil {
+ return nil, fmt.Errorf("parsing browse template file: %v", err)
+ }
+ } else {
+ tpl = tplCtx.NewTemplate("default_listing")
+ tpl, err = tpl.Parse(BrowseTemplate)
+ if err != nil {
+ return nil, fmt.Errorf("parsing default browse template: %v", err)
+ }
+ }
-// isSymlink return true if f is a symbolic link
-func isSymlink(f os.FileInfo) bool {
- return f.Mode()&os.ModeSymlink != 0
+ return tpl, nil
}
// isSymlinkTargetDir returns true if f's symbolic link target
// is a directory.
-func isSymlinkTargetDir(f os.FileInfo, root, urlPath string) bool {
+func (fsrv *FileServer) isSymlinkTargetDir(fileSystem fs.FS, f fs.FileInfo, root, urlPath string) bool {
if !isSymlink(f) {
return false
}
- target := sanitizedPathJoin(root, path.Join(urlPath, f.Name()))
- targetInfo, err := os.Stat(target)
+ target := caddyhttp.SanitizedPathJoin(root, path.Join(urlPath, f.Name()))
+ targetInfo, err := fs.Stat(fileSystem, target)
if err != nil {
return false
}
return targetInfo.IsDir()
}
+
+// isSymlink returns true if f is a symbolic link.
+func isSymlink(f fs.FileInfo) bool {
+ return f.Mode()&os.ModeSymlink != 0
+}
+
+// templateContext powers the context used when evaluating the browse template.
+// It combines browse-specific features with the standard templates handler
+// features.
+type templateContext struct {
+ templates.TemplateContext
+ *browseTemplateContext
+}
+
+// bufPool is used to increase the efficiency of file listings.
+var bufPool = sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+}
diff --git a/modules/caddyhttp/fileserver/browse.html b/modules/caddyhttp/fileserver/browse.html
new file mode 100644
index 00000000..d2d69819
--- /dev/null
+++ b/modules/caddyhttp/fileserver/browse.html
@@ -0,0 +1,1180 @@
+{{ $nonce := uuidv4 -}}
+{{ $nonceAttribute := print "nonce=" (quote $nonce) -}}
+{{ $csp := printf "default-src 'none'; img-src 'self'; object-src 'none'; base-uri 'none'; script-src 'nonce-%s'; style-src 'nonce-%s'; frame-ancestors 'self'; form-action 'self';" $nonce $nonce -}}
+{{/* To disable the Content-Security-Policy, set this to false */}}{{ $enableCsp := true -}}
+{{ if $enableCsp -}}
+ {{- .RespHeader.Set "Content-Security-Policy" $csp -}}
+{{ end -}}
+{{- define "icon"}}
+ {{- if .IsDir}}
+ {{- if .IsSymlink}}
+
+
+
+
+
+ {{- else}}
+
+
+
+
+ {{- end}}
+ {{- else if or (eq .Name "LICENSE") (eq .Name "README")}}
+
+
+
+
+
+
+ {{- else if .HasExt ".jpg" ".jpeg" ".png" ".gif" ".webp" ".tiff" ".bmp" ".heif" ".heic" ".svg"}}
+ {{- if eq .Tpl.Layout "grid"}}
+
+ {{- else}}
+
+
+
+
+
+
+
+ {{- end}}
+ {{- else if .HasExt ".mp4" ".mov" ".m4v" ".mpeg" ".mpg" ".avi" ".ogg" ".webm" ".mkv" ".vob" ".gifv" ".3gp"}}
+
+
+
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".mp3" ".m4a" ".aac" ".ogg" ".flac" ".wav" ".wma" ".midi" ".cda"}}
+
+
+
+
+
+
+
+ {{- else if .HasExt ".pdf"}}
+
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".csv" ".tsv"}}
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".txt" ".doc" ".docx" ".odt" ".fodt" ".rtf"}}
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".xls" ".xlsx" ".ods" ".fods"}}
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".ppt" ".pptx" ".odp" ".fodp"}}
+
+
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".zip" ".gz" ".xz" ".tar" ".7z" ".rar" ".xz" ".zst"}}
+
+
+
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".deb" ".dpkg"}}
+
+
+
+
+
+ {{- else if .HasExt ".rpm" ".exe" ".flatpak" ".appimage" ".jar" ".msi" ".apk"}}
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".ps1"}}
+
+
+
+
+
+
+ {{- else if .HasExt ".py" ".pyc" ".pyo"}}
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".bash" ".sh" ".com" ".bat" ".dll" ".so"}}
+
+
+
+
+ {{- else if .HasExt ".dmg"}}
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".iso" ".img"}}
+
+
+
+
+
+
+
+ {{- else if .HasExt ".md" ".mdown" ".markdown"}}
+
+
+
+
+
+
+ {{- else if .HasExt ".ttf" ".otf" ".woff" ".woff2" ".eof"}}
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".go"}}
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".html" ".htm"}}
+
+
+
+
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".js"}}
+
+
+
+
+
+
+
+ {{- else if .HasExt ".css"}}
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".json" ".json5" ".jsonc"}}
+
+
+
+
+
+
+
+ {{- else if .HasExt ".ts"}}
+
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".sql"}}
+
+
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".db" ".sqlite" ".bak" ".mdb"}}
+
+
+
+
+
+
+ {{- else if .HasExt ".eml" ".email" ".mailbox" ".mbox" ".msg"}}
+
+
+
+
+
+ {{- else if .HasExt ".crt" ".pem" ".x509" ".cer" ".ca-bundle"}}
+
+
+
+
+
+
+
+
+
+ {{- else if .HasExt ".key" ".keystore" ".jks" ".p12" ".pfx" ".pub"}}
+
+
+
+
+
+ {{- else}}
+ {{- if .IsSymlink}}
+
+
+
+
+
+
+
+ {{- else}}
+
+
+
+
+
+ {{- end}}
+ {{- end}}
+{{- end}}
+
+
+
+ {{html .Name}}
+
+
+
+
+
+{{- if eq .Layout "grid"}}
+
+{{- end}}
+
+
+
+
+
Folder Path
+
+ {{range $i, $crumb := .Breadcrumbs}}{{html $crumb.Text}} {{if ne $i 0}}/{{end}}{{end}}
+
+
+
+
+
+
+
+ {{- if eq .Layout "grid"}}
+ {{- range .Items}}
+
+ {{- end}}
+ {{- else}}
+
+ {{- end}}
+
+
+
+
+
+
+
+
diff --git a/modules/caddyhttp/fileserver/browse_test.go b/modules/caddyhttp/fileserver/browse_test.go
deleted file mode 100644
index b1f7092b..00000000
--- a/modules/caddyhttp/fileserver/browse_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package fileserver
-
-import (
- "html/template"
- "testing"
-
- "github.com/caddyserver/caddy/v2"
-)
-
-func BenchmarkBrowseWriteJSON(b *testing.B) {
- fsrv := new(FileServer)
- fsrv.Provision(caddy.Context{})
- listing := browseListing{
- Name: "test",
- Path: "test",
- CanGoUp: false,
- Items: make([]fileInfo, 100),
- NumDirs: 42,
- NumFiles: 420,
- Sort: "",
- Order: "",
- ItemsLimitedTo: 42,
- }
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- fsrv.browseWriteJSON(listing)
- }
-}
-
-func BenchmarkBrowseWriteHTML(b *testing.B) {
- fsrv := new(FileServer)
- fsrv.Provision(caddy.Context{})
- fsrv.Browse = &Browse{
- TemplateFile: "",
- template: template.New("test"),
- }
- listing := browseListing{
- Name: "test",
- Path: "test",
- CanGoUp: false,
- Items: make([]fileInfo, 100),
- NumDirs: 42,
- NumFiles: 420,
- Sort: "",
- Order: "",
- ItemsLimitedTo: 42,
- }
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- fsrv.browseWriteHTML(listing)
- }
-}
diff --git a/modules/caddyhttp/fileserver/browselisting.go b/modules/caddyhttp/fileserver/browselisting.go
deleted file mode 100644
index 9c7c4a20..00000000
--- a/modules/caddyhttp/fileserver/browselisting.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileserver
-
-import (
- "net/url"
- "os"
- "path"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/caddyserver/caddy/v2"
- "github.com/dustin/go-humanize"
-)
-
-func (fsrv *FileServer) directoryListing(files []os.FileInfo, canGoUp bool, urlPath string, repl *caddy.Replacer) browseListing {
- filesToHide := fsrv.transformHidePaths(repl)
-
- var (
- fileInfos []fileInfo
- dirCount, fileCount int
- )
-
- for _, f := range files {
- name := f.Name()
-
- if fileHidden(name, filesToHide) {
- continue
- }
-
- isDir := f.IsDir() || isSymlinkTargetDir(f, fsrv.Root, urlPath)
-
- if isDir {
- name += "/"
- dirCount++
- } else {
- fileCount++
- }
-
- u := url.URL{Path: "./" + name} // prepend with "./" to fix paths with ':' in the name
-
- fileInfos = append(fileInfos, fileInfo{
- IsDir: isDir,
- IsSymlink: isSymlink(f),
- Name: f.Name(),
- Size: f.Size(),
- URL: u.String(),
- ModTime: f.ModTime().UTC(),
- Mode: f.Mode(),
- })
- }
-
- return browseListing{
- Name: path.Base(urlPath),
- Path: urlPath,
- CanGoUp: canGoUp,
- Items: fileInfos,
- NumDirs: dirCount,
- NumFiles: fileCount,
- }
-}
-
-type browseListing struct {
- // The name of the directory (the last element of the path).
- Name string
-
- // The full path of the request.
- Path string
-
- // Whether the parent directory is browseable.
- CanGoUp bool
-
- // The items (files and folders) in the path.
- Items []fileInfo
-
- // The number of directories in the listing.
- NumDirs int
-
- // The number of files (items that aren't directories) in the listing.
- NumFiles int
-
- // Sort column used
- Sort string
-
- // Sorting order
- Order string
-
- // If ≠0 then Items have been limited to that many elements.
- ItemsLimitedTo int
-}
-
-// Breadcrumbs returns l.Path where every element maps
-// the link to the text to display.
-func (l browseListing) Breadcrumbs() []crumb {
- var result []crumb
-
- if len(l.Path) == 0 {
- return result
- }
-
- // skip trailing slash
- lpath := l.Path
- if lpath[len(lpath)-1] == '/' {
- lpath = lpath[:len(lpath)-1]
- }
-
- parts := strings.Split(lpath, "/")
- for i := range parts {
- txt := parts[i]
- if i == 0 && parts[i] == "" {
- txt = "/"
- }
- lnk := strings.Repeat("../", len(parts)-i-1)
- result = append(result, crumb{Link: lnk, Text: txt})
- }
-
- return result
-}
-
-func (l *browseListing) applySortAndLimit(sortParam, orderParam, limitParam string) {
- l.Sort = sortParam
- l.Order = orderParam
-
- if l.Order == "desc" {
- switch l.Sort {
- case sortByName:
- sort.Sort(sort.Reverse(byName(*l)))
- case sortByNameDirFirst:
- sort.Sort(sort.Reverse(byNameDirFirst(*l)))
- case sortBySize:
- sort.Sort(sort.Reverse(bySize(*l)))
- case sortByTime:
- sort.Sort(sort.Reverse(byTime(*l)))
- }
- } else {
- switch l.Sort {
- case sortByName:
- sort.Sort(byName(*l))
- case sortByNameDirFirst:
- sort.Sort(byNameDirFirst(*l))
- case sortBySize:
- sort.Sort(bySize(*l))
- case sortByTime:
- sort.Sort(byTime(*l))
- }
- }
-
- if limitParam != "" {
- limit, _ := strconv.Atoi(limitParam)
- if limit > 0 && limit <= len(l.Items) {
- l.Items = l.Items[:limit]
- l.ItemsLimitedTo = limit
- }
- }
-}
-
-// crumb represents part of a breadcrumb menu,
-// pairing a link with the text to display.
-type crumb struct {
- Link, Text string
-}
-
-// fileInfo contains serializable information
-// about a file or directory.
-type fileInfo struct {
- Name string `json:"name"`
- Size int64 `json:"size"`
- URL string `json:"url"`
- ModTime time.Time `json:"mod_time"`
- Mode os.FileMode `json:"mode"`
- IsDir bool `json:"is_dir"`
- IsSymlink bool `json:"is_symlink"`
-}
-
-// HumanSize returns the size of the file as a
-// human-readable string in IEC format (i.e.
-// power of 2 or base 1024).
-func (fi fileInfo) HumanSize() string {
- return humanize.IBytes(uint64(fi.Size))
-}
-
-// HumanModTime returns the modified time of the file
-// as a human-readable string given by format.
-func (fi fileInfo) HumanModTime(format string) string {
- return fi.ModTime.Format(format)
-}
-
-type byName browseListing
-type byNameDirFirst browseListing
-type bySize browseListing
-type byTime browseListing
-
-func (l byName) Len() int { return len(l.Items) }
-func (l byName) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
-
-func (l byName) Less(i, j int) bool {
- return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
-}
-
-func (l byNameDirFirst) Len() int { return len(l.Items) }
-func (l byNameDirFirst) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
-
-func (l byNameDirFirst) Less(i, j int) bool {
- // sort by name if both are dir or file
- if l.Items[i].IsDir == l.Items[j].IsDir {
- return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
- }
- // sort dir ahead of file
- return l.Items[i].IsDir
-}
-
-func (l bySize) Len() int { return len(l.Items) }
-func (l bySize) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
-
-func (l bySize) Less(i, j int) bool {
- const directoryOffset = -1 << 31 // = -math.MinInt32
-
- iSize, jSize := l.Items[i].Size, l.Items[j].Size
-
- // directory sizes depend on the file system; to
- // provide a consistent experience, put them up front
- // and sort them by name
- if l.Items[i].IsDir {
- iSize = directoryOffset
- }
- if l.Items[j].IsDir {
- jSize = directoryOffset
- }
- if l.Items[i].IsDir && l.Items[j].IsDir {
- return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
- }
-
- return iSize < jSize
-}
-
-func (l byTime) Len() int { return len(l.Items) }
-func (l byTime) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
-func (l byTime) Less(i, j int) bool { return l.Items[i].ModTime.Before(l.Items[j].ModTime) }
-
-const (
- sortByName = "name"
- sortByNameDirFirst = "name_dir_first"
- sortBySize = "size"
- sortByTime = "time"
-)
diff --git a/modules/caddyhttp/fileserver/browsetpl.go b/modules/caddyhttp/fileserver/browsetpl.go
deleted file mode 100644
index 567e8dd7..00000000
--- a/modules/caddyhttp/fileserver/browsetpl.go
+++ /dev/null
@@ -1,427 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileserver
-
-const defaultBrowseTemplate = `
-
-
- {{html .Name}}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- {{- if and (eq .Sort "namedirfirst") (ne .Order "desc")}}
-
- {{- else if and (eq .Sort "namedirfirst") (ne .Order "asc")}}
-
- {{- else}}
-
- {{- end}}
-
- {{- if and (eq .Sort "name") (ne .Order "desc")}}
- Name
- {{- else if and (eq .Sort "name") (ne .Order "asc")}}
- Name
- {{- else}}
- Name
- {{- end}}
-
-
- {{- if and (eq .Sort "size") (ne .Order "desc")}}
- Size
- {{- else if and (eq .Sort "size") (ne .Order "asc")}}
- Size
- {{- else}}
- Size
- {{- end}}
-
-
- {{- if and (eq .Sort "time") (ne .Order "desc")}}
- Modified
- {{- else if and (eq .Sort "time") (ne .Order "asc")}}
- Modified
- {{- else}}
- Modified
- {{- end}}
-
-
-
-
-
- {{- if .CanGoUp}}
-
-
-
-
- Go up
-
-
- —
- —
-
-
- {{- end}}
- {{- range .Items}}
-
-
-
-
- {{- if .IsDir}}
-
- {{- else}}
-
- {{- end}}
- {{html .Name}}
-
-
- {{- if .IsDir}}
- —
- {{- else}}
- {{.HumanSize}}
- {{- end}}
- {{.HumanModTime "01/02/2006 03:04:05 PM -07:00"}}
-
-
- {{- end}}
-
-
-
-
-
-
-
-`
diff --git a/modules/caddyhttp/fileserver/browsetplcontext.go b/modules/caddyhttp/fileserver/browsetplcontext.go
new file mode 100644
index 00000000..8e5d138f
--- /dev/null
+++ b/modules/caddyhttp/fileserver/browsetplcontext.go
@@ -0,0 +1,378 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileserver
+
+import (
+ "context"
+ "io/fs"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+func (fsrv *FileServer) directoryListing(ctx context.Context, fileSystem fs.FS, entries []fs.DirEntry, canGoUp bool, root, urlPath string, repl *caddy.Replacer) *browseTemplateContext {
+ filesToHide := fsrv.transformHidePaths(repl)
+
+ name, _ := url.PathUnescape(urlPath)
+
+ tplCtx := &browseTemplateContext{
+ Name: path.Base(name),
+ Path: urlPath,
+ CanGoUp: canGoUp,
+ }
+
+ for _, entry := range entries {
+ if err := ctx.Err(); err != nil {
+ break
+ }
+
+ name := entry.Name()
+
+ if fileHidden(name, filesToHide) {
+ continue
+ }
+
+ info, err := entry.Info()
+ if err != nil {
+ if c := fsrv.logger.Check(zapcore.ErrorLevel, "could not get info about directory entry"); c != nil {
+ c.Write(zap.String("name", entry.Name()), zap.String("root", root))
+ }
+ continue
+ }
+
+ // keep track of the most recently modified item in the listing
+ modTime := info.ModTime()
+ if tplCtx.lastModified.IsZero() || modTime.After(tplCtx.lastModified) {
+ tplCtx.lastModified = modTime
+ }
+
+ isDir := entry.IsDir() || fsrv.isSymlinkTargetDir(fileSystem, info, root, urlPath)
+
+ // add the slash after the escape of path to avoid escaping the slash as well
+ if isDir {
+ name += "/"
+ tplCtx.NumDirs++
+ } else {
+ tplCtx.NumFiles++
+ }
+
+ size := info.Size()
+
+ if !isDir {
+ // increase the total by the symlink's size, not the target's size,
+ // by incrementing before we follow the symlink
+ tplCtx.TotalFileSize += size
+ }
+
+ fileIsSymlink := isSymlink(info)
+ symlinkPath := ""
+ if fileIsSymlink {
+ path := caddyhttp.SanitizedPathJoin(root, path.Join(urlPath, info.Name()))
+ fileInfo, err := fs.Stat(fileSystem, path)
+ if err == nil {
+ size = fileInfo.Size()
+ }
+
+ if fsrv.Browse.RevealSymlinks {
+ symLinkTarget, err := filepath.EvalSymlinks(path)
+ if err == nil {
+ symlinkPath = symLinkTarget
+ }
+ }
+
+ // An error most likely means the symlink target doesn't exist,
+ // which isn't entirely unusual and shouldn't fail the listing.
+ // In this case, just use the size of the symlink itself, which
+ // was already set above.
+ }
+
+ if !isDir {
+ // increase the total including the symlink target's size
+ tplCtx.TotalFileSizeFollowingSymlinks += size
+ }
+
+ u := url.URL{Path: "./" + name} // prepend with "./" to fix paths with ':' in the name
+
+ tplCtx.Items = append(tplCtx.Items, fileInfo{
+ IsDir: isDir,
+ IsSymlink: fileIsSymlink,
+ Name: name,
+ Size: size,
+ URL: u.String(),
+ ModTime: modTime.UTC(),
+ Mode: info.Mode(),
+ Tpl: tplCtx, // a reference up to the template context is useful
+ SymlinkPath: symlinkPath,
+ })
+ }
+
+ return tplCtx
+}
+
+// browseTemplateContext provides the template context for directory listings.
+type browseTemplateContext struct {
+ // The name of the directory (the last element of the path).
+ Name string `json:"name"`
+
+ // The full path of the request.
+ Path string `json:"path"`
+
+ // Whether the parent directory is browsable.
+ CanGoUp bool `json:"can_go_up"`
+
+ // The items (files and folders) in the path.
+ Items []fileInfo `json:"items,omitempty"`
+
+	// If ≠0 then Items have been offset by that many elements.
+ Offset int `json:"offset,omitempty"`
+
+ // If ≠0 then Items have been limited to that many elements.
+ Limit int `json:"limit,omitempty"`
+
+ // The number of directories in the listing.
+ NumDirs int `json:"num_dirs"`
+
+ // The number of files (items that aren't directories) in the listing.
+ NumFiles int `json:"num_files"`
+
+ // The total size of all files in the listing. Only includes the
+ // size of the files themselves, not the size of symlink targets
+ // (i.e. the calculation of this value does not follow symlinks).
+ TotalFileSize int64 `json:"total_file_size"`
+
+ // The total size of all files in the listing, including the
+ // size of the files targeted by symlinks.
+ TotalFileSizeFollowingSymlinks int64 `json:"total_file_size_following_symlinks"`
+
+ // Sort column used
+ Sort string `json:"sort,omitempty"`
+
+ // Sorting order
+ Order string `json:"order,omitempty"`
+
+ // Display format (list or grid)
+ Layout string `json:"layout,omitempty"`
+
+ // The most recent file modification date in the listing.
+ // Used for HTTP header purposes.
+ lastModified time.Time
+}
+
+// Breadcrumbs returns l.Path where every element maps
+// the link to the text to display.
+func (l browseTemplateContext) Breadcrumbs() []crumb {
+ if len(l.Path) == 0 {
+ return []crumb{}
+ }
+
+ // skip trailing slash
+ lpath := l.Path
+ if lpath[len(lpath)-1] == '/' {
+ lpath = lpath[:len(lpath)-1]
+ }
+ parts := strings.Split(lpath, "/")
+ result := make([]crumb, len(parts))
+ for i, p := range parts {
+ if i == 0 && p == "" {
+ p = "/"
+ }
+ // the directory name could include an encoded slash in its path,
+ // so the item name should be unescaped in the loop rather than unescaping the
+ // entire path outside the loop.
+ p, _ = url.PathUnescape(p)
+ lnk := strings.Repeat("../", len(parts)-i-1)
+ result[i] = crumb{Link: lnk, Text: p}
+ }
+
+ return result
+}
+
+func (l *browseTemplateContext) applySortAndLimit(sortParam, orderParam, limitParam string, offsetParam string) {
+ l.Sort = sortParam
+ l.Order = orderParam
+
+ if l.Order == "desc" {
+ switch l.Sort {
+ case sortByName:
+ sort.Sort(sort.Reverse(byName(*l)))
+ case sortByNameDirFirst:
+ sort.Sort(sort.Reverse(byNameDirFirst(*l)))
+ case sortBySize:
+ sort.Sort(sort.Reverse(bySize(*l)))
+ case sortByTime:
+ sort.Sort(sort.Reverse(byTime(*l)))
+ }
+ } else {
+ switch l.Sort {
+ case sortByName:
+ sort.Sort(byName(*l))
+ case sortByNameDirFirst:
+ sort.Sort(byNameDirFirst(*l))
+ case sortBySize:
+ sort.Sort(bySize(*l))
+ case sortByTime:
+ sort.Sort(byTime(*l))
+ }
+ }
+
+ if offsetParam != "" {
+ offset, _ := strconv.Atoi(offsetParam)
+ if offset > 0 && offset <= len(l.Items) {
+ l.Items = l.Items[offset:]
+ l.Offset = offset
+ }
+ }
+
+ if limitParam != "" {
+ limit, _ := strconv.Atoi(limitParam)
+
+ if limit > 0 && limit <= len(l.Items) {
+ l.Items = l.Items[:limit]
+ l.Limit = limit
+ }
+ }
+}
+
+// crumb represents part of a breadcrumb menu,
+// pairing a link with the text to display.
+type crumb struct {
+ Link, Text string
+}
+
+// fileInfo contains serializable information
+// about a file or directory.
+type fileInfo struct {
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ URL string `json:"url"`
+ ModTime time.Time `json:"mod_time"`
+ Mode os.FileMode `json:"mode"`
+ IsDir bool `json:"is_dir"`
+ IsSymlink bool `json:"is_symlink"`
+ SymlinkPath string `json:"symlink_path,omitempty"`
+
+ // a pointer to the template context is useful inside nested templates
+ Tpl *browseTemplateContext `json:"-"`
+}
+
+// HasExt returns true if the filename has any of the given suffixes, case-insensitive.
+func (fi fileInfo) HasExt(exts ...string) bool {
+ return slices.ContainsFunc(exts, func(ext string) bool {
+ return strings.HasSuffix(strings.ToLower(fi.Name), strings.ToLower(ext))
+ })
+}
+
+// HumanSize returns the size of the file as a
+// human-readable string in IEC format (i.e.
+// power of 2 or base 1024).
+func (fi fileInfo) HumanSize() string {
+ return humanize.IBytes(uint64(fi.Size))
+}
+
+// HumanTotalFileSize returns the total size of all files
+// in the listing as a human-readable string in IEC format
+// (i.e. power of 2 or base 1024).
+func (btc browseTemplateContext) HumanTotalFileSize() string {
+ return humanize.IBytes(uint64(btc.TotalFileSize))
+}
+
+// HumanTotalFileSizeFollowingSymlinks is the same as HumanTotalFileSize
+// except the returned value reflects the size of symlink targets.
+func (btc browseTemplateContext) HumanTotalFileSizeFollowingSymlinks() string {
+ return humanize.IBytes(uint64(btc.TotalFileSizeFollowingSymlinks))
+}
+
+// HumanModTime returns the modified time of the file
+// as a human-readable string given by format.
+func (fi fileInfo) HumanModTime(format string) string {
+ return fi.ModTime.Format(format)
+}
+
+type (
+ byName browseTemplateContext
+ byNameDirFirst browseTemplateContext
+ bySize browseTemplateContext
+ byTime browseTemplateContext
+)
+
+func (l byName) Len() int { return len(l.Items) }
+func (l byName) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
+
+func (l byName) Less(i, j int) bool {
+ return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
+}
+
+func (l byNameDirFirst) Len() int { return len(l.Items) }
+func (l byNameDirFirst) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
+
+func (l byNameDirFirst) Less(i, j int) bool {
+ // sort by name if both are dir or file
+ if l.Items[i].IsDir == l.Items[j].IsDir {
+ return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
+ }
+ // sort dir ahead of file
+ return l.Items[i].IsDir
+}
+
+func (l bySize) Len() int { return len(l.Items) }
+func (l bySize) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
+
+func (l bySize) Less(i, j int) bool {
+	const directoryOffset = -1 << 31 // = math.MinInt32
+
+ iSize, jSize := l.Items[i].Size, l.Items[j].Size
+
+ // directory sizes depend on the file system; to
+ // provide a consistent experience, put them up front
+ // and sort them by name
+ if l.Items[i].IsDir {
+ iSize = directoryOffset
+ }
+ if l.Items[j].IsDir {
+ jSize = directoryOffset
+ }
+ if l.Items[i].IsDir && l.Items[j].IsDir {
+ return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
+ }
+
+ return iSize < jSize
+}
+
+func (l byTime) Len() int { return len(l.Items) }
+func (l byTime) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
+func (l byTime) Less(i, j int) bool { return l.Items[i].ModTime.Before(l.Items[j].ModTime) }
+
+const (
+ sortByName = "name"
+ sortByNameDirFirst = "namedirfirst"
+ sortBySize = "size"
+ sortByTime = "time"
+
+ sortOrderAsc = "asc"
+ sortOrderDesc = "desc"
+)
diff --git a/modules/caddyhttp/fileserver/browsetplcontext_test.go b/modules/caddyhttp/fileserver/browsetplcontext_test.go
new file mode 100644
index 00000000..184196fa
--- /dev/null
+++ b/modules/caddyhttp/fileserver/browsetplcontext_test.go
@@ -0,0 +1,106 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileserver
+
+import (
+ "testing"
+)
+
+func TestBreadcrumbs(t *testing.T) {
+ testdata := []struct {
+ path string
+ expected []crumb
+ }{
+ {"", []crumb{}},
+ {"/", []crumb{{Text: "/"}}},
+ {"/foo/", []crumb{
+ {Link: "../", Text: "/"},
+ {Link: "", Text: "foo"},
+ }},
+ {"/foo/bar/", []crumb{
+ {Link: "../../", Text: "/"},
+ {Link: "../", Text: "foo"},
+ {Link: "", Text: "bar"},
+ }},
+ {"/foo bar/", []crumb{
+ {Link: "../", Text: "/"},
+ {Link: "", Text: "foo bar"},
+ }},
+ {"/foo bar/baz/", []crumb{
+ {Link: "../../", Text: "/"},
+ {Link: "../", Text: "foo bar"},
+ {Link: "", Text: "baz"},
+ }},
+ {"/100%25 test coverage/is a lie/", []crumb{
+ {Link: "../../", Text: "/"},
+ {Link: "../", Text: "100% test coverage"},
+ {Link: "", Text: "is a lie"},
+ }},
+ {"/AC%2FDC/", []crumb{
+ {Link: "../", Text: "/"},
+ {Link: "", Text: "AC/DC"},
+ }},
+ {"/foo/%2e%2e%2f/bar", []crumb{
+ {Link: "../../../", Text: "/"},
+ {Link: "../../", Text: "foo"},
+ {Link: "../", Text: "../"},
+ {Link: "", Text: "bar"},
+ }},
+ {"/foo/../bar", []crumb{
+ {Link: "../../../", Text: "/"},
+ {Link: "../../", Text: "foo"},
+ {Link: "../", Text: ".."},
+ {Link: "", Text: "bar"},
+ }},
+ {"foo/bar/baz", []crumb{
+ {Link: "../../", Text: "foo"},
+ {Link: "../", Text: "bar"},
+ {Link: "", Text: "baz"},
+ }},
+ {"/qux/quux/corge/", []crumb{
+ {Link: "../../../", Text: "/"},
+ {Link: "../../", Text: "qux"},
+ {Link: "../", Text: "quux"},
+ {Link: "", Text: "corge"},
+ }},
+ {"/مجلد/", []crumb{
+ {Link: "../", Text: "/"},
+ {Link: "", Text: "مجلد"},
+ }},
+ {"/مجلد-1/مجلد-2", []crumb{
+ {Link: "../../", Text: "/"},
+ {Link: "../", Text: "مجلد-1"},
+ {Link: "", Text: "مجلد-2"},
+ }},
+ {"/مجلد%2F1", []crumb{
+ {Link: "../", Text: "/"},
+ {Link: "", Text: "مجلد/1"},
+ }},
+ }
+
+ for testNum, d := range testdata {
+ l := browseTemplateContext{Path: d.path}
+ actual := l.Breadcrumbs()
+ if len(actual) != len(d.expected) {
+ t.Errorf("Test %d: Got %d components but expected %d; got: %+v", testNum, len(actual), len(d.expected), actual)
+ continue
+ }
+ for i, c := range actual {
+ if c != d.expected[i] {
+ t.Errorf("Test %d crumb %d: got %#v but expected %#v at index %d", testNum, i, c, d.expected[i], i)
+ }
+ }
+ }
+}
diff --git a/modules/caddyhttp/fileserver/caddyfile.go b/modules/caddyhttp/fileserver/caddyfile.go
index 9b458b2f..80a37322 100644
--- a/modules/caddyhttp/fileserver/caddyfile.go
+++ b/modules/caddyhttp/fileserver/caddyfile.go
@@ -15,11 +15,16 @@
package fileserver
import (
+ "path/filepath"
+ "strconv"
"strings"
"github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
"github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite"
)
@@ -28,92 +33,221 @@ func init() {
httpcaddyfile.RegisterDirective("try_files", parseTryFiles)
}
-// parseCaddyfile parses the file_server directive. It enables the static file
-// server and configures it with this syntax:
-//
-// file_server [] [browse] {
-// root
-// hide
-// index
-// browse []
-// }
-//
+// parseCaddyfile parses the file_server directive.
+// See UnmarshalCaddyfile for the syntax.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
- var fsrv FileServer
+ fsrv := new(FileServer)
+ err := fsrv.UnmarshalCaddyfile(h.Dispenser)
+ if err != nil {
+ return fsrv, err
+ }
+ err = fsrv.FinalizeUnmarshalCaddyfile(h)
+ if err != nil {
+ return nil, err
+ }
+ return fsrv, err
+}
- for h.Next() {
- args := h.RemainingArgs()
- switch len(args) {
- case 0:
- case 1:
- if args[0] != "browse" {
- return nil, h.ArgErr()
+// UnmarshalCaddyfile parses the file_server directive. It enables
+// the static file server and configures it with this syntax:
+//
+// file_server [] [browse] {
+// fs
+// root
+// hide
+// index
+// browse []
+// precompressed
+// status
+// disable_canonical_uris
+// }
+//
+// The FinalizeUnmarshalCaddyfile method should be called after this
+// to finalize setup of hidden Caddyfiles.
+func (fsrv *FileServer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ d.Next() // consume directive name
+
+ args := d.RemainingArgs()
+ switch len(args) {
+ case 0:
+ case 1:
+ if args[0] != "browse" {
+ return d.ArgErr()
+ }
+ fsrv.Browse = new(Browse)
+ default:
+ return d.ArgErr()
+ }
+
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ switch d.Val() {
+ case "fs":
+ if !d.NextArg() {
+ return d.ArgErr()
+ }
+ if fsrv.FileSystem != "" {
+ return d.Err("file system already specified")
+ }
+ fsrv.FileSystem = d.Val()
+
+ case "hide":
+ fsrv.Hide = d.RemainingArgs()
+ if len(fsrv.Hide) == 0 {
+ return d.ArgErr()
+ }
+
+ case "index":
+ fsrv.IndexNames = d.RemainingArgs()
+ if len(fsrv.IndexNames) == 0 {
+ return d.ArgErr()
+ }
+
+ case "root":
+ if !d.Args(&fsrv.Root) {
+ return d.ArgErr()
+ }
+
+ case "browse":
+ if fsrv.Browse != nil {
+ return d.Err("browsing is already configured")
}
fsrv.Browse = new(Browse)
- default:
- return nil, h.ArgErr()
- }
-
- for h.NextBlock(0) {
- switch h.Val() {
- case "hide":
- fsrv.Hide = h.RemainingArgs()
- if len(fsrv.Hide) == 0 {
- return nil, h.ArgErr()
+ d.Args(&fsrv.Browse.TemplateFile)
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ switch d.Val() {
+ case "reveal_symlinks":
+ if fsrv.Browse.RevealSymlinks {
+ return d.Err("Symlinks path reveal is already enabled")
+ }
+ fsrv.Browse.RevealSymlinks = true
+ case "sort":
+ for d.NextArg() {
+ dVal := d.Val()
+ switch dVal {
+ case sortByName, sortByNameDirFirst, sortBySize, sortByTime, sortOrderAsc, sortOrderDesc:
+ fsrv.Browse.SortOptions = append(fsrv.Browse.SortOptions, dVal)
+ default:
+ return d.Errf("unknown sort option '%s'", dVal)
+ }
+ }
+ case "file_limit":
+ fileLimit := d.RemainingArgs()
+ if len(fileLimit) != 1 {
+ return d.Err("file_limit should have an integer value")
+ }
+ val, _ := strconv.Atoi(fileLimit[0])
+ if fsrv.Browse.FileLimit != 0 {
+ return d.Err("file_limit is already enabled")
+ }
+ fsrv.Browse.FileLimit = val
+ default:
+ return d.Errf("unknown subdirective '%s'", d.Val())
}
- case "index":
- fsrv.IndexNames = h.RemainingArgs()
- if len(fsrv.IndexNames) == 0 {
- return nil, h.ArgErr()
- }
- case "root":
- if !h.Args(&fsrv.Root) {
- return nil, h.ArgErr()
- }
- case "browse":
- if fsrv.Browse != nil {
- return nil, h.Err("browsing is already configured")
- }
- fsrv.Browse = new(Browse)
- h.Args(&fsrv.Browse.TemplateFile)
- default:
- return nil, h.Errf("unknown subdirective '%s'", h.Val())
}
+
+ case "precompressed":
+ fsrv.PrecompressedOrder = d.RemainingArgs()
+ if len(fsrv.PrecompressedOrder) == 0 {
+ fsrv.PrecompressedOrder = []string{"br", "zstd", "gzip"}
+ }
+
+ for _, format := range fsrv.PrecompressedOrder {
+ modID := "http.precompressed." + format
+ mod, err := caddy.GetModule(modID)
+ if err != nil {
+ return d.Errf("getting module named '%s': %v", modID, err)
+ }
+ inst := mod.New()
+ precompress, ok := inst.(encode.Precompressed)
+ if !ok {
+ return d.Errf("module %s is not a precompressor; is %T", modID, inst)
+ }
+ if fsrv.PrecompressedRaw == nil {
+ fsrv.PrecompressedRaw = make(caddy.ModuleMap)
+ }
+ fsrv.PrecompressedRaw[format] = caddyconfig.JSON(precompress, nil)
+ }
+
+ case "status":
+ if !d.NextArg() {
+ return d.ArgErr()
+ }
+ fsrv.StatusCode = caddyhttp.WeakString(d.Val())
+
+ case "disable_canonical_uris":
+ if d.NextArg() {
+ return d.ArgErr()
+ }
+ falseBool := false
+ fsrv.CanonicalURIs = &falseBool
+
+ case "pass_thru":
+ if d.NextArg() {
+ return d.ArgErr()
+ }
+ fsrv.PassThru = true
+
+ case "etag_file_extensions":
+ etagFileExtensions := d.RemainingArgs()
+ if len(etagFileExtensions) == 0 {
+ return d.ArgErr()
+ }
+ fsrv.EtagFileExtensions = etagFileExtensions
+
+ default:
+ return d.Errf("unknown subdirective '%s'", d.Val())
}
}
- // hide the Caddyfile (and any imported Caddyfiles)
+ return nil
+}
+
+// FinalizeUnmarshalCaddyfile finalizes the Caddyfile parsing which
+// requires having an httpcaddyfile.Helper to function, to setup hidden Caddyfiles.
+func (fsrv *FileServer) FinalizeUnmarshalCaddyfile(h httpcaddyfile.Helper) error {
+ // Hide the Caddyfile (and any imported Caddyfiles).
+ // This needs to be done in here instead of UnmarshalCaddyfile
+ // because UnmarshalCaddyfile only has access to the dispenser
+ // and not the helper, and only the helper has access to the
+ // Caddyfiles function.
if configFiles := h.Caddyfiles(); len(configFiles) > 0 {
for _, file := range configFiles {
+ file = filepath.Clean(file)
if !fileHidden(file, fsrv.Hide) {
+ // if there's no path separator, the file server module will hide all
+ // files by that name, rather than a specific one; but we want to hide
+ // only this specific file, so ensure there's always a path separator
+ if !strings.Contains(file, separator) {
+ file = "." + separator + file
+ }
fsrv.Hide = append(fsrv.Hide, file)
}
}
}
-
- return &fsrv, nil
+ return nil
}
// parseTryFiles parses the try_files directive. It combines a file matcher
// with a rewrite directive, so this is not a standard handler directive.
// A try_files directive has this syntax (notice no matcher tokens accepted):
//
-// try_files
+// try_files {
+//	    policy first_exist|first_exist_fallback|smallest_size|largest_size|most_recently_modified
+// }
//
// and is basically shorthand for:
//
-// @try_files {
-// file {
-// try_files
-// }
-// }
-// rewrite @try_files {http.matchers.file.relative}
+// @try_files file {
+// try_files
+//	    policy first_exist|first_exist_fallback|smallest_size|largest_size|most_recently_modified
+// }
+// rewrite @try_files {http.matchers.file.relative}
//
// This directive rewrites request paths only, preserving any other part
// of the URI, unless the part is explicitly given in the file list. For
// example, if any of the files in the list have a query string:
//
-// try_files {path} index.php?{query}&p={path}
+// try_files {path} index.php?{query}&p={path}
//
// then the query string will not be treated as part of the file name; and
// if that file matches, the given query string will replace any query string
@@ -128,6 +262,27 @@ func parseTryFiles(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error)
return nil, h.ArgErr()
}
+ // parse out the optional try policy
+ var tryPolicy string
+ for h.NextBlock(0) {
+ switch h.Val() {
+ case "policy":
+ if tryPolicy != "" {
+ return nil, h.Err("try policy already configured")
+ }
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ tryPolicy = h.Val()
+
+ switch tryPolicy {
+ case tryPolicyFirstExist, tryPolicyFirstExistFallback, tryPolicyLargestSize, tryPolicySmallestSize, tryPolicyMostRecentlyMod:
+ default:
+ return nil, h.Errf("unrecognized try policy: %s", tryPolicy)
+ }
+ }
+ }
+
// makeRoute returns a route that tries the files listed in try
// and then rewrites to the matched file; userQueryString is
// appended to the rewrite rule.
@@ -136,7 +291,7 @@ func parseTryFiles(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error)
URI: "{http.matchers.file.relative}" + userQueryString,
}
matcherSet := caddy.ModuleMap{
- "file": h.JSON(MatchFile{TryFiles: try}),
+ "file": h.JSON(MatchFile{TryFiles: try, TryPolicy: tryPolicy}),
}
return h.NewRoute(matcherSet, handler)
}
@@ -170,3 +325,5 @@ func parseTryFiles(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error)
return result, nil
}
+
+var _ caddyfile.Unmarshaler = (*FileServer)(nil)
diff --git a/modules/caddyhttp/fileserver/command.go b/modules/caddyhttp/fileserver/command.go
index 53aa7aa5..a04d7cad 100644
--- a/modules/caddyhttp/fileserver/command.go
+++ b/modules/caddyhttp/fileserver/command.go
@@ -16,24 +16,30 @@ package fileserver
import (
"encoding/json"
- "flag"
+ "fmt"
+ "io"
"log"
+ "os"
"strconv"
"time"
+ "github.com/caddyserver/certmagic"
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+
+ caddycmd "github.com/caddyserver/caddy/v2/cmd"
+
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
- caddycmd "github.com/caddyserver/caddy/v2/cmd"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
caddytpl "github.com/caddyserver/caddy/v2/modules/caddyhttp/templates"
- "github.com/caddyserver/certmagic"
)
func init() {
caddycmd.RegisterCommand(caddycmd.Command{
Name: "file-server",
- Func: cmdFileServer,
-		Usage: "[--domain <example.com>] [--root <path>] [--listen <addr>] [--browse]",
+		Usage: "[--domain <example.com>] [--root <path>] [--listen <addr>] [--browse] [--reveal-symlinks] [--access-log] [--precompressed]",
Short: "Spins up a production-ready file server",
Long: `
A simple but production-ready file server. Useful for quick deployments,
@@ -46,37 +52,108 @@ will be changed to the HTTPS port and the server will use HTTPS. If using
a public domain, ensure A/AAAA records are properly configured before
using this option.
+By default, Zstandard and Gzip compression are enabled. Use --no-compress
+to disable compression.
+
If --browse is enabled, requests for folders without an index file will
respond with a file listing.`,
- Flags: func() *flag.FlagSet {
- fs := flag.NewFlagSet("file-server", flag.ExitOnError)
- fs.String("domain", "", "Domain name at which to serve the files")
- fs.String("root", "", "The path to the root of the site")
- fs.String("listen", "", "The address to which to bind the listener")
- fs.Bool("browse", false, "Enable directory browsing")
- fs.Bool("templates", false, "Enable template rendering")
- return fs
- }(),
+ CobraFunc: func(cmd *cobra.Command) {
+ cmd.Flags().StringP("domain", "d", "", "Domain name at which to serve the files")
+ cmd.Flags().StringP("root", "r", "", "The path to the root of the site")
+ cmd.Flags().StringP("listen", "l", "", "The address to which to bind the listener")
+ cmd.Flags().BoolP("browse", "b", false, "Enable directory browsing")
+ cmd.Flags().BoolP("reveal-symlinks", "", false, "Show symlink paths when browse is enabled.")
+ cmd.Flags().BoolP("templates", "t", false, "Enable template rendering")
+ cmd.Flags().BoolP("access-log", "a", false, "Enable the access log")
+ cmd.Flags().BoolP("debug", "v", false, "Enable verbose debug logs")
+ cmd.Flags().IntP("file-limit", "f", defaultDirEntryLimit, "Max directories to read")
+ cmd.Flags().BoolP("no-compress", "", false, "Disable Zstandard and Gzip compression")
+ cmd.Flags().StringSliceP("precompressed", "p", []string{}, "Specify precompression file extensions. Compression preference implied from flag order.")
+ cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdFileServer)
+ cmd.AddCommand(&cobra.Command{
+ Use: "export-template",
+ Short: "Exports the default file browser template",
+ Example: "caddy file-server export-template > browse.html",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ _, err := io.WriteString(os.Stdout, BrowseTemplate)
+ return err
+ },
+ })
+ },
})
}
func cmdFileServer(fs caddycmd.Flags) (int, error) {
+ caddy.TrapSignals()
+
domain := fs.String("domain")
root := fs.String("root")
listen := fs.String("listen")
browse := fs.Bool("browse")
templates := fs.Bool("templates")
-
+ accessLog := fs.Bool("access-log")
+ fileLimit := fs.Int("file-limit")
+ debug := fs.Bool("debug")
+ revealSymlinks := fs.Bool("reveal-symlinks")
+ compress := !fs.Bool("no-compress")
+ precompressed, err := fs.GetStringSlice("precompressed")
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid precompressed flag: %v", err)
+ }
var handlers []json.RawMessage
+ if compress {
+ zstd, err := caddy.GetModule("http.encoders.zstd")
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
+ }
+
+ gzip, err := caddy.GetModule("http.encoders.gzip")
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, err
+ }
+
+ handlers = append(handlers, caddyconfig.JSONModuleObject(encode.Encode{
+ EncodingsRaw: caddy.ModuleMap{
+ "zstd": caddyconfig.JSON(zstd.New(), nil),
+ "gzip": caddyconfig.JSON(gzip.New(), nil),
+ },
+ Prefer: []string{"zstd", "gzip"},
+ }, "handler", "encode", nil))
+ }
+
if templates {
handler := caddytpl.Templates{FileRoot: root}
handlers = append(handlers, caddyconfig.JSONModuleObject(handler, "handler", "templates", nil))
}
handler := FileServer{Root: root}
+
+ if len(precompressed) != 0 {
+ // logic mirrors modules/caddyhttp/fileserver/caddyfile.go case "precompressed"
+ var order []string
+ for _, compression := range precompressed {
+ modID := "http.precompressed." + compression
+ mod, err := caddy.GetModule(modID)
+ if err != nil {
+ return caddy.ExitCodeFailedStartup, fmt.Errorf("getting module named '%s': %v", modID, err)
+ }
+ inst := mod.New()
+ precompress, ok := inst.(encode.Precompressed)
+ if !ok {
+ return caddy.ExitCodeFailedStartup, fmt.Errorf("module %s is not a precompressor; is %T", modID, inst)
+ }
+ if handler.PrecompressedRaw == nil {
+ handler.PrecompressedRaw = make(caddy.ModuleMap)
+ }
+ handler.PrecompressedRaw[compression] = caddyconfig.JSON(precompress, nil)
+ order = append(order, compression)
+ }
+ handler.PrecompressedOrder = order
+ }
+
if browse {
- handler.Browse = new(Browse)
+ handler.Browse = &Browse{RevealSymlinks: revealSymlinks, FileLimit: fileLimit}
}
handlers = append(handlers, caddyconfig.JSONModuleObject(handler, "handler", "file_server", nil))
@@ -105,24 +182,43 @@ func cmdFileServer(fs caddycmd.Flags) (int, error) {
}
}
server.Listen = []string{listen}
+ if accessLog {
+ server.Logs = &caddyhttp.ServerLogConfig{}
+ }
httpApp := caddyhttp.App{
Servers: map[string]*caddyhttp.Server{"static": server},
}
+	persist := false
	cfg := &caddy.Config{
-		Admin: &caddy.AdminConfig{Disabled: true},
+		Admin: &caddy.AdminConfig{
+			Disabled: true,
+			Config: &caddy.ConfigSettings{
+				Persist: &persist,
+ },
+ },
AppsRaw: caddy.ModuleMap{
"http": caddyconfig.JSON(httpApp, nil),
},
}
- err := caddy.Run(cfg)
+ if debug {
+ cfg.Logging = &caddy.Logging{
+ Logs: map[string]*caddy.CustomLog{
+ "default": {
+ BaseLog: caddy.BaseLog{Level: zap.DebugLevel.CapitalString()},
+ },
+ },
+ }
+ }
+
+ err = caddy.Run(cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
- log.Printf("Caddy 2 serving static files on %s", listen)
+ log.Printf("Caddy serving static files on %s", listen)
select {}
}
diff --git a/modules/caddyhttp/fileserver/matcher.go b/modules/caddyhttp/fileserver/matcher.go
index 1beb8bac..2bc665d4 100644
--- a/modules/caddyhttp/fileserver/matcher.go
+++ b/modules/caddyhttp/fileserver/matcher.go
@@ -16,11 +16,24 @@ package fileserver
import (
"fmt"
+ "io/fs"
"net/http"
"os"
"path"
+ "path/filepath"
+ "runtime"
+ "strconv"
"strings"
- "time"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/parser"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
@@ -34,7 +47,7 @@ func init() {
// MatchFile is an HTTP request matcher that can match
// requests based upon file existence.
//
-// Upon matching, two new placeholders will be made
+// Upon matching, four new placeholders will be made
// available:
//
// - `{http.matchers.file.relative}` The root-relative
@@ -42,7 +55,18 @@ func init() {
// requests.
// - `{http.matchers.file.absolute}` The absolute path
// of the matched file.
+// - `{http.matchers.file.type}` Set to "directory" if
+// the matched file is a directory, "file" otherwise.
+// - `{http.matchers.file.remainder}` Set to the remainder
+// of the path if the path was split by `split_path`.
+//
+// Even though file matching may depend on the OS path
+// separator, the placeholder values always use /.
type MatchFile struct {
+ // The file system implementation to use. By default, the
+ // local disk file system will be used.
+ FileSystem string `json:"fs,omitempty"`
+
// The root directory, used for creating absolute
// file paths, and required when working with
// relative paths; if not specified, `{http.vars.root}`
@@ -56,12 +80,17 @@ type MatchFile struct {
// directories are treated distinctly, so to match
// a directory, the filepath MUST end in a forward
// slash `/`. To match a regular file, there must
- // be no trailing slash. Accepts placeholders.
+ // be no trailing slash. Accepts placeholders. If
+ // the policy is "first_exist", then an error may
+ // be triggered as a fallback by configuring "="
+ // followed by a status code number,
+ // for example "=404".
TryFiles []string `json:"try_files,omitempty"`
// How to choose a file in TryFiles. Can be:
//
// - first_exist
+ // - first_exist_fallback
// - smallest_size
// - largest_size
// - most_recently_modified
@@ -79,6 +108,10 @@ type MatchFile struct {
// Each delimiter must appear at the end of a URI path
// component in order to be used as a split delimiter.
SplitPath []string `json:"split_path,omitempty"`
+
+ fsmap caddy.FileSystems
+
+ logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
@@ -91,14 +124,15 @@ func (MatchFile) CaddyModule() caddy.ModuleInfo {
// UnmarshalCaddyfile sets up the matcher from Caddyfile tokens. Syntax:
//
-//	file {
-//	    root       <path>
-//	    try_files  <files...>
-//	    try_policy first_exist|smallest_size|largest_size|most_recently_modified
-//	}
-//
+//	file {
+//	    root       <path>
+//	    try_files  <files...>
+//	    try_policy first_exist|smallest_size|largest_size|most_recently_modified
+//	}
func (m *MatchFile) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
for d.Next() {
+ m.TryFiles = append(m.TryFiles, d.RemainingArgs()...)
for d.NextBlock(0) {
switch d.Val() {
case "root":
@@ -107,7 +141,7 @@ func (m *MatchFile) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
}
m.Root = d.Val()
case "try_files":
- m.TryFiles = d.RemainingArgs()
+ m.TryFiles = append(m.TryFiles, d.RemainingArgs()...)
if len(m.TryFiles) == 0 {
return d.ArgErr()
}
@@ -116,22 +150,145 @@ func (m *MatchFile) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
return d.ArgErr()
}
m.TryPolicy = d.Val()
- case "split":
+ case "split_path":
m.SplitPath = d.RemainingArgs()
if len(m.SplitPath) == 0 {
return d.ArgErr()
}
+ default:
+ return d.Errf("unrecognized subdirective: %s", d.Val())
}
}
}
return nil
}
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+// expression file()
+// expression file({http.request.uri.path}, '/index.php')
+// expression file({'root': '/srv', 'try_files': [{http.request.uri.path}, '/index.php'], 'try_policy': 'first_exist', 'split_path': ['.php']})
+func (MatchFile) CELLibrary(ctx caddy.Context) (cel.Library, error) {
+ requestType := cel.ObjectType("http.Request")
+
+ matcherFactory := func(data ref.Val) (caddyhttp.RequestMatcherWithError, error) {
+ values, err := caddyhttp.CELValueToMapStrList(data)
+ if err != nil {
+ return nil, err
+ }
+
+ var root string
+ if len(values["root"]) > 0 {
+ root = values["root"][0]
+ }
+
+ var fsName string
+ if len(values["fs"]) > 0 {
+ fsName = values["fs"][0]
+ }
+
+ var try_policy string
+ if len(values["try_policy"]) > 0 {
+ try_policy = values["try_policy"][0]
+ }
+
+ m := MatchFile{
+ Root: root,
+ TryFiles: values["try_files"],
+ TryPolicy: try_policy,
+ SplitPath: values["split_path"],
+ FileSystem: fsName,
+ }
+
+ err = m.Provision(ctx)
+ return m, err
+ }
+
+ envOptions := []cel.EnvOption{
+ cel.Macros(parser.NewGlobalVarArgMacro("file", celFileMatcherMacroExpander())),
+ cel.Function("file", cel.Overload("file_request_map", []*cel.Type{requestType, caddyhttp.CELTypeJSON}, cel.BoolType)),
+ cel.Function("file_request_map",
+ cel.Overload("file_request_map", []*cel.Type{requestType, caddyhttp.CELTypeJSON}, cel.BoolType),
+ cel.SingletonBinaryBinding(caddyhttp.CELMatcherRuntimeFunction("file_request_map", matcherFactory))),
+ }
+
+ programOptions := []cel.ProgramOption{
+ cel.CustomDecorator(caddyhttp.CELMatcherDecorator("file_request_map", matcherFactory)),
+ }
+
+ return caddyhttp.NewMatcherCELLibrary(envOptions, programOptions), nil
+}
+
+func celFileMatcherMacroExpander() parser.MacroExpander {
+ return func(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ if len(args) == 0 {
+ return eh.NewCall("file",
+ eh.NewIdent(caddyhttp.CELRequestVarName),
+ eh.NewMap(),
+ ), nil
+ }
+ if len(args) == 1 {
+ arg := args[0]
+ if isCELStringLiteral(arg) || isCELCaddyPlaceholderCall(arg) {
+ return eh.NewCall("file",
+ eh.NewIdent(caddyhttp.CELRequestVarName),
+ eh.NewMap(eh.NewMapEntry(
+ eh.NewLiteral(types.String("try_files")),
+ eh.NewList(arg),
+ false,
+ )),
+ ), nil
+ }
+ if isCELTryFilesLiteral(arg) {
+ return eh.NewCall("file", eh.NewIdent(caddyhttp.CELRequestVarName), arg), nil
+ }
+ return nil, &common.Error{
+ Location: eh.OffsetLocation(arg.ID()),
+ Message: "matcher requires either a map or string literal argument",
+ }
+ }
+
+ for _, arg := range args {
+ if !(isCELStringLiteral(arg) || isCELCaddyPlaceholderCall(arg)) {
+ return nil, &common.Error{
+ Location: eh.OffsetLocation(arg.ID()),
+ Message: "matcher only supports repeated string literal arguments",
+ }
+ }
+ }
+ return eh.NewCall("file",
+ eh.NewIdent(caddyhttp.CELRequestVarName),
+ eh.NewMap(eh.NewMapEntry(
+ eh.NewLiteral(types.String("try_files")),
+ eh.NewList(args...),
+ false,
+ )),
+ ), nil
+ }
+}
+
// Provision sets up m's defaults.
-func (m *MatchFile) Provision(_ caddy.Context) error {
+func (m *MatchFile) Provision(ctx caddy.Context) error {
+ m.logger = ctx.Logger()
+
+ m.fsmap = ctx.Filesystems()
+
if m.Root == "" {
m.Root = "{http.vars.root}"
}
+
+ if m.FileSystem == "" {
+ m.FileSystem = "{http.vars.fs}"
+ }
+
+ // if list of files to try was omitted entirely, assume URL path
+ // (use placeholder instead of r.URL.Path; see issue #4146)
+ if m.TryFiles == nil {
+ m.TryFiles = []string{"{http.request.uri.path}"}
+ }
return nil
}
@@ -140,6 +297,7 @@ func (m MatchFile) Validate() error {
switch m.TryPolicy {
case "",
tryPolicyFirstExist,
+ tryPolicyFirstExistFallback,
tryPolicyLargestSize,
tryPolicySmallestSize,
tryPolicyMostRecentlyMod:
@@ -150,97 +308,237 @@ func (m MatchFile) Validate() error {
}
// Match returns true if r matches m. Returns true
-// if a file was matched. If so, two placeholders
+// if a file was matched. If so, four placeholders
// will be available:
-// - http.matchers.file.relative
-// - http.matchers.file.absolute
+// - http.matchers.file.relative: Path to file relative to site root
+// - http.matchers.file.absolute: Path to file including site root
+// - http.matchers.file.type: file or directory
+// - http.matchers.file.remainder: Portion remaining after splitting file path (if configured)
func (m MatchFile) Match(r *http.Request) bool {
- repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- rel, abs, matched := m.selectFile(r)
- if matched {
- repl.Set("http.matchers.file.relative", rel)
- repl.Set("http.matchers.file.absolute", abs)
+ match, err := m.selectFile(r)
+ if err != nil {
+ // nolint:staticcheck
+ caddyhttp.SetVar(r.Context(), caddyhttp.MatcherErrorVarKey, err)
}
- return matched
+ return match
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchFile) MatchWithError(r *http.Request) (bool, error) {
+ return m.selectFile(r)
}
// selectFile chooses a file according to m.TryPolicy by appending
// the paths in m.TryFiles to m.Root, with placeholder replacements.
-// It returns the root-relative path to the matched file, the full
-// or absolute path, and whether a match was made.
-func (m MatchFile) selectFile(r *http.Request) (rel, abs string, matched bool) {
+func (m MatchFile) selectFile(r *http.Request) (bool, error) {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- root := repl.ReplaceAll(m.Root, ".")
+ root := filepath.Clean(repl.ReplaceAll(m.Root, "."))
- // if list of files to try was omitted entirely,
- // assume URL path
- if m.TryFiles == nil {
- // m is not a pointer, so this is safe
- m.TryFiles = []string{r.URL.Path}
+ fsName := repl.ReplaceAll(m.FileSystem, "")
+
+ fileSystem, ok := m.fsmap.Get(fsName)
+ if !ok {
+ if c := m.logger.Check(zapcore.ErrorLevel, "use of unregistered filesystem"); c != nil {
+ c.Write(zap.String("fs", fsName))
+ }
+ return false, nil
+ }
+ type matchCandidate struct {
+ fullpath, relative, splitRemainder string
}
+ // makeCandidates evaluates placeholders in file and expands any glob expressions
+ // to build a list of file candidates. Special glob characters are escaped in
+ // placeholder replacements so globs cannot be expanded from placeholders, and
+ // globs are not evaluated on Windows because of its path separator character:
+ // escaping is not supported so we can't safely glob on Windows, or we can't
+ // support placeholders on Windows (pick one). (Actually, evaluating untrusted
+ // globs is not the end of the world since the file server will still hide any
+ // hidden files, it just might lead to unexpected behavior.)
+ makeCandidates := func(file string) []matchCandidate {
+ // first, evaluate placeholders in the file pattern
+ expandedFile, err := repl.ReplaceFunc(file, func(variable string, val any) (any, error) {
+ if runtime.GOOS == "windows" {
+ return val, nil
+ }
+ switch v := val.(type) {
+ case string:
+ return globSafeRepl.Replace(v), nil
+ case fmt.Stringer:
+ return globSafeRepl.Replace(v.String()), nil
+ }
+ return val, nil
+ })
+ if err != nil {
+ if c := m.logger.Check(zapcore.ErrorLevel, "evaluating placeholders"); c != nil {
+ c.Write(zap.Error(err))
+ }
+
+ expandedFile = file // "oh well," I guess?
+ }
+
+ // clean the path and split, if configured -- we must split before
+ // globbing so that the file system doesn't include the remainder
+ // ("afterSplit") in the filename; be sure to restore trailing slash
+ beforeSplit, afterSplit := m.firstSplit(path.Clean(expandedFile))
+ if strings.HasSuffix(file, "/") {
+ beforeSplit += "/"
+ }
+
+ // create the full path to the file by prepending the site root
+ fullPattern := caddyhttp.SanitizedPathJoin(root, beforeSplit)
+
+ // expand glob expressions, but not on Windows because Glob() doesn't
+ // support escaping on Windows due to path separator)
+ var globResults []string
+ if runtime.GOOS == "windows" {
+ globResults = []string{fullPattern} // precious Windows
+ } else {
+ globResults, err = fs.Glob(fileSystem, fullPattern)
+ if err != nil {
+ if c := m.logger.Check(zapcore.ErrorLevel, "expanding glob"); c != nil {
+ c.Write(zap.Error(err))
+ }
+ }
+ }
+
+ // for each glob result, combine all the forms of the path
+ var candidates []matchCandidate
+ for _, result := range globResults {
+ candidates = append(candidates, matchCandidate{
+ fullpath: result,
+ relative: strings.TrimPrefix(result, root),
+ splitRemainder: afterSplit,
+ })
+ }
+
+ return candidates
+ }
+
+ // setPlaceholders creates the placeholders for the matched file
+ setPlaceholders := func(candidate matchCandidate, isDir bool) {
+ repl.Set("http.matchers.file.relative", filepath.ToSlash(candidate.relative))
+ repl.Set("http.matchers.file.absolute", filepath.ToSlash(candidate.fullpath))
+ repl.Set("http.matchers.file.remainder", filepath.ToSlash(candidate.splitRemainder))
+
+ fileType := "file"
+ if isDir {
+ fileType = "directory"
+ }
+ repl.Set("http.matchers.file.type", fileType)
+ }
+
+ // match file according to the configured policy
switch m.TryPolicy {
- case "", tryPolicyFirstExist:
- for _, f := range m.TryFiles {
- suffix := m.firstSplit(path.Clean(repl.ReplaceAll(f, "")))
- fullpath := sanitizedPathJoin(root, suffix)
- if strictFileExists(fullpath) {
- return suffix, fullpath, true
+ case "", tryPolicyFirstExist, tryPolicyFirstExistFallback:
+ maxI := -1
+ if m.TryPolicy == tryPolicyFirstExistFallback {
+ maxI = len(m.TryFiles) - 1
+ }
+
+ for i, pattern := range m.TryFiles {
+ // If the pattern is a status code, emit an error,
+ // which short-circuits the middleware pipeline and
+ // writes an HTTP error response.
+ if err := parseErrorCode(pattern); err != nil {
+ return false, err
+ }
+
+ candidates := makeCandidates(pattern)
+ for _, c := range candidates {
+ // Skip the IO if using fallback policy and it's the latest item
+ if i == maxI {
+ setPlaceholders(c, false)
+
+ return true, nil
+ }
+
+ if info, exists := m.strictFileExists(fileSystem, c.fullpath); exists {
+ setPlaceholders(c, info.IsDir())
+ return true, nil
+ }
}
}
case tryPolicyLargestSize:
var largestSize int64
- var largestFilename string
- var largestSuffix string
- for _, f := range m.TryFiles {
- suffix := m.firstSplit(path.Clean(repl.ReplaceAll(f, "")))
- fullpath := sanitizedPathJoin(root, suffix)
- info, err := os.Stat(fullpath)
- if err == nil && info.Size() > largestSize {
- largestSize = info.Size()
- largestFilename = fullpath
- largestSuffix = suffix
+ var largest matchCandidate
+ var largestInfo os.FileInfo
+ for _, pattern := range m.TryFiles {
+ candidates := makeCandidates(pattern)
+ for _, c := range candidates {
+ info, err := fs.Stat(fileSystem, c.fullpath)
+ if err == nil && info.Size() > largestSize {
+ largestSize = info.Size()
+ largest = c
+ largestInfo = info
+ }
}
}
- return largestSuffix, largestFilename, true
+ if largestInfo == nil {
+ return false, nil
+ }
+ setPlaceholders(largest, largestInfo.IsDir())
+ return true, nil
case tryPolicySmallestSize:
var smallestSize int64
- var smallestFilename string
- var smallestSuffix string
- for _, f := range m.TryFiles {
- suffix := m.firstSplit(path.Clean(repl.ReplaceAll(f, "")))
- fullpath := sanitizedPathJoin(root, suffix)
- info, err := os.Stat(fullpath)
- if err == nil && (smallestSize == 0 || info.Size() < smallestSize) {
- smallestSize = info.Size()
- smallestFilename = fullpath
- smallestSuffix = suffix
+ var smallest matchCandidate
+ var smallestInfo os.FileInfo
+ for _, pattern := range m.TryFiles {
+ candidates := makeCandidates(pattern)
+ for _, c := range candidates {
+ info, err := fs.Stat(fileSystem, c.fullpath)
+ if err == nil && (smallestSize == 0 || info.Size() < smallestSize) {
+ smallestSize = info.Size()
+ smallest = c
+ smallestInfo = info
+ }
}
}
- return smallestSuffix, smallestFilename, true
+ if smallestInfo == nil {
+ return false, nil
+ }
+ setPlaceholders(smallest, smallestInfo.IsDir())
+ return true, nil
case tryPolicyMostRecentlyMod:
- var recentDate time.Time
- var recentFilename string
- var recentSuffix string
- for _, f := range m.TryFiles {
- suffix := m.firstSplit(path.Clean(repl.ReplaceAll(f, "")))
- fullpath := sanitizedPathJoin(root, suffix)
- info, err := os.Stat(fullpath)
- if err == nil &&
- (recentDate.IsZero() || info.ModTime().After(recentDate)) {
- recentDate = info.ModTime()
- recentFilename = fullpath
- recentSuffix = suffix
+ var recent matchCandidate
+ var recentInfo os.FileInfo
+ for _, pattern := range m.TryFiles {
+ candidates := makeCandidates(pattern)
+ for _, c := range candidates {
+ info, err := fs.Stat(fileSystem, c.fullpath)
+ if err == nil &&
+ (recentInfo == nil || info.ModTime().After(recentInfo.ModTime())) {
+ recent = c
+ recentInfo = info
+ }
}
}
- return recentSuffix, recentFilename, true
+ if recentInfo == nil {
+ return false, nil
+ }
+ setPlaceholders(recent, recentInfo.IsDir())
+ return true, nil
}
- return
+ return false, nil
+}
+
+// parseErrorCode checks if the input is a status
+// code number, prefixed by "=", and returns an
+// error if so.
+func parseErrorCode(input string) error {
+ if len(input) > 1 && input[0] == '=' {
+ code, err := strconv.Atoi(input[1:])
+ if err != nil || code < 100 || code > 999 {
+ return nil
+ }
+ return caddyhttp.Error(code, fmt.Errorf("%s", input[1:]))
+ }
+ return nil
}
// strictFileExists returns true if file exists
@@ -249,8 +547,8 @@ func (m MatchFile) selectFile(r *http.Request) (rel, abs string, matched bool) {
// the file must also be a directory; if it does
// NOT end in a forward slash, the file must NOT
// be a directory.
-func strictFileExists(file string) bool {
- stat, err := os.Stat(file)
+func (m MatchFile) strictFileExists(fileSystem fs.FS, file string) (os.FileInfo, bool) {
+ info, err := fs.Stat(fileSystem, file)
if err != nil {
// in reality, this can be any error
// such as permission or even obscure
@@ -260,47 +558,180 @@ func strictFileExists(file string) bool {
// the file exists, so we just treat any
// error as if it does not exist; see
// https://stackoverflow.com/a/12518877/1048862
- return false
+ return nil, false
}
- if strings.HasSuffix(file, "/") {
+ if strings.HasSuffix(file, separator) {
// by convention, file paths ending
- // in a slash must be a directory
- return stat.IsDir()
+ // in a path separator must be a directory
+ return info, info.IsDir()
}
// by convention, file paths NOT ending
- // in a slash must NOT be a directory
- return !stat.IsDir()
+ // in a path separator must NOT be a directory
+ return info, !info.IsDir()
}
// firstSplit returns the first result where the path
// can be split in two by a value in m.SplitPath. The
-// result is the first piece of the path that ends with
-// in the split value. Returns the path as-is if the
-// path cannot be split.
-func (m MatchFile) firstSplit(path string) string {
- lowerPath := strings.ToLower(path)
+// return values are the first piece of the path that
+// ends with the split substring and the remainder.
+// If the path cannot be split, the path is returned
+// as-is (with no remainder).
+func (m MatchFile) firstSplit(path string) (splitPart, remainder string) {
for _, split := range m.SplitPath {
- if idx := strings.Index(lowerPath, strings.ToLower(split)); idx > -1 {
+ if idx := indexFold(path, split); idx > -1 {
pos := idx + len(split)
// skip the split if it's not the final part of the filename
if pos != len(path) && !strings.HasPrefix(path[pos:], "/") {
continue
}
- return path[:pos]
+ return path[:pos], path[pos:]
}
}
- return path
+ return path, ""
}
+// There is no strings.IndexFold() function like there is strings.EqualFold(),
+// but we can use strings.EqualFold() to build our own case-insensitive
+// substring search (as of Go 1.14).
+func indexFold(haystack, needle string) int {
+ nlen := len(needle)
+	for i := 0; i+nlen <= len(haystack); i++ {
+ if strings.EqualFold(haystack[i:i+nlen], needle) {
+ return i
+ }
+ }
+ return -1
+}
+
+// isCELTryFilesLiteral returns whether the expression resolves to a map literal
+// containing only string keys whose values are string-list literals or placeholder calls.
+func isCELTryFilesLiteral(e ast.Expr) bool {
+ switch e.Kind() {
+ case ast.MapKind:
+ mapExpr := e.AsMap()
+ for _, entry := range mapExpr.Entries() {
+ mapKey := entry.AsMapEntry().Key()
+ mapVal := entry.AsMapEntry().Value()
+ if !isCELStringLiteral(mapKey) {
+ return false
+ }
+ mapKeyStr := mapKey.AsLiteral().ConvertToType(types.StringType).Value()
+ if mapKeyStr == "try_files" || mapKeyStr == "split_path" {
+ if !isCELStringListLiteral(mapVal) {
+ return false
+ }
+ } else if mapKeyStr == "try_policy" || mapKeyStr == "root" {
+ if !(isCELStringExpr(mapVal)) {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+
+ case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.SelectKind, ast.StructKind:
+ // appeasing the linter :)
+ }
+ return false
+}
+
+// isCELStringExpr indicates whether the expression is a supported string expression
+func isCELStringExpr(e ast.Expr) bool {
+ return isCELStringLiteral(e) || isCELCaddyPlaceholderCall(e) || isCELConcatCall(e)
+}
+
+// isCELStringLiteral returns whether the expression is a CEL string literal.
+func isCELStringLiteral(e ast.Expr) bool {
+ switch e.Kind() {
+ case ast.LiteralKind:
+ constant := e.AsLiteral()
+ switch constant.Type() {
+ case types.StringType:
+ return true
+ }
+ case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+ // appeasing the linter :)
+ }
+ return false
+}
+
+// isCELCaddyPlaceholderCall returns whether the expression is a caddy placeholder call.
+func isCELCaddyPlaceholderCall(e ast.Expr) bool {
+ switch e.Kind() {
+ case ast.CallKind:
+ call := e.AsCall()
+ if call.FunctionName() == caddyhttp.CELPlaceholderFuncName {
+ return true
+ }
+ case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+ // appeasing the linter :)
+ }
+ return false
+}
+
+// isCELConcatCall tests whether the expression is a concat function (+) with string, placeholder, or
+// other concat call arguments.
+func isCELConcatCall(e ast.Expr) bool {
+ switch e.Kind() {
+ case ast.CallKind:
+ call := e.AsCall()
+ if call.Target().Kind() != ast.UnspecifiedExprKind {
+ return false
+ }
+ if call.FunctionName() != operators.Add {
+ return false
+ }
+ for _, arg := range call.Args() {
+ if !isCELStringExpr(arg) {
+ return false
+ }
+ }
+ return true
+ case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+ // appeasing the linter :)
+ }
+ return false
+}
+
+// isCELStringListLiteral returns whether the expression resolves to a list literal
+// containing only string constants or a placeholder call.
+func isCELStringListLiteral(e ast.Expr) bool {
+ switch e.Kind() {
+ case ast.ListKind:
+ list := e.AsList()
+ for _, elem := range list.Elements() {
+ if !isCELStringExpr(elem) {
+ return false
+ }
+ }
+ return true
+ case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+ // appeasing the linter :)
+ }
+ return false
+}
+
+// globSafeRepl replaces special glob characters with escaped
+// equivalents. Note that the filepath godoc states that
+// escaping is not done on Windows because of the separator.
+var globSafeRepl = strings.NewReplacer(
+ "*", "\\*",
+ "[", "\\[",
+ "?", "\\?",
+)
+
const (
- tryPolicyFirstExist = "first_exist"
- tryPolicyLargestSize = "largest_size"
- tryPolicySmallestSize = "smallest_size"
- tryPolicyMostRecentlyMod = "most_recently_modified"
+ tryPolicyFirstExist = "first_exist"
+ tryPolicyFirstExistFallback = "first_exist_fallback"
+ tryPolicyLargestSize = "largest_size"
+ tryPolicySmallestSize = "smallest_size"
+ tryPolicyMostRecentlyMod = "most_recently_modified"
)
// Interface guards
var (
- _ caddy.Validator = (*MatchFile)(nil)
- _ caddyhttp.RequestMatcher = (*MatchFile)(nil)
+ _ caddy.Validator = (*MatchFile)(nil)
+ _ caddyhttp.RequestMatcherWithError = (*MatchFile)(nil)
+ _ caddyhttp.CELLibraryProducer = (*MatchFile)(nil)
)
diff --git a/modules/caddyhttp/fileserver/matcher_test.go b/modules/caddyhttp/fileserver/matcher_test.go
index aa84900a..b6697b9d 100644
--- a/modules/caddyhttp/fileserver/matcher_test.go
+++ b/modules/caddyhttp/fileserver/matcher_test.go
@@ -15,88 +15,404 @@
package fileserver
import (
+ "context"
"net/http"
+ "net/http/httptest"
"net/url"
+ "os"
+ "runtime"
"testing"
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/internal/filesystems"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
-func TestPhpFileMatcher(t *testing.T) {
+func TestFileMatcher(t *testing.T) {
+	// Windows doesn't like colons in file names
+ isWindows := runtime.GOOS == "windows"
+ if !isWindows {
+ filename := "with:in-name.txt"
+ f, err := os.Create("./testdata/" + filename)
+ if err != nil {
+ t.Fail()
+ return
+ }
+ t.Cleanup(func() {
+ os.Remove("./testdata/" + filename)
+ })
+ f.WriteString(filename)
+ f.Close()
+ }
for i, tc := range []struct {
- path string
+ path string
expectedPath string
- matched bool
+ expectedType string
+ matched bool
}{
{
- path: "/index.php",
- expectedPath: "/index.php",
- matched: true,
+ path: "/foo.txt",
+ expectedPath: "/foo.txt",
+ expectedType: "file",
+ matched: true,
},
{
- path: "/index.php/somewhere",
- expectedPath: "/index.php",
- matched: true,
+ path: "/foo.txt/",
+ expectedPath: "/foo.txt",
+ expectedType: "file",
+ matched: true,
},
{
- path: "/remote.php",
- expectedPath: "/remote.php",
- matched: true,
+ path: "/foo.txt?a=b",
+ expectedPath: "/foo.txt",
+ expectedType: "file",
+ matched: true,
},
{
- path: "/remote.php/somewhere",
- expectedPath: "/remote.php",
- matched: true,
+ path: "/foodir",
+ expectedPath: "/foodir/",
+ expectedType: "directory",
+ matched: true,
},
{
- path: "/missingfile.php",
+ path: "/foodir/",
+ expectedPath: "/foodir/",
+ expectedType: "directory",
+ matched: true,
+ },
+ {
+ path: "/foodir/foo.txt",
+ expectedPath: "/foodir/foo.txt",
+ expectedType: "file",
+ matched: true,
+ },
+ {
+ path: "/missingfile.php",
matched: false,
},
{
- path: "/notphp.php.txt",
- expectedPath: "/notphp.php.txt",
- matched: true,
+ path: "ملف.txt", // the path file name is not escaped
+ expectedPath: "/ملف.txt",
+ expectedType: "file",
+ matched: true,
},
{
- path: "/notphp.php.txt/",
- expectedPath: "/notphp.php.txt",
- matched: true,
+ path: url.PathEscape("ملف.txt"), // singly-escaped path
+ expectedPath: "/ملف.txt",
+ expectedType: "file",
+ matched: true,
},
{
- path: "/notphp.php.txt.suffixed",
- matched: false,
+ path: url.PathEscape(url.PathEscape("ملف.txt")), // doubly-escaped path
+ expectedPath: "/%D9%85%D9%84%D9%81.txt",
+ expectedType: "file",
+ matched: true,
},
{
- path: "/foo.php.php/index.php",
- expectedPath: "/foo.php.php/index.php",
- matched: true,
+ path: "./with:in-name.txt", // browsers send the request with the path as such
+ expectedPath: "/with:in-name.txt",
+ expectedType: "file",
+ matched: !isWindows,
},
} {
m := &MatchFile{
- Root: "./testdata",
- TryFiles: []string{"{http.request.uri.path}"},
- SplitPath: []string{".php"},
+ fsmap: &filesystems.FilesystemMap{},
+ Root: "./testdata",
+ TryFiles: []string{"{http.request.uri.path}", "{http.request.uri.path}/"},
}
- req := &http.Request{URL: &url.URL{Path: tc.path}}
+ u, err := url.Parse(tc.path)
+ if err != nil {
+ t.Errorf("Test %d: parsing path: %v", i, err)
+ }
+
+ req := &http.Request{URL: u}
repl := caddyhttp.NewTestReplacer(req)
- result := m.Match(req)
+ result, err := m.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d: unexpected error: %v", i, err)
+ }
if result != tc.matched {
- t.Fatalf("Test %d: match bool result: %v, expected: %v", i, result, tc.matched)
+ t.Errorf("Test %d: expected match=%t, got %t", i, tc.matched, result)
}
rel, ok := repl.Get("http.matchers.file.relative")
if !ok && result {
- t.Fatalf("Test %d: expected replacer value", i)
+ t.Errorf("Test %d: expected replacer value", i)
}
if !result {
continue
}
if rel != tc.expectedPath {
- t.Fatalf("Test %d: actual path: %v, expected: %v", i, rel, tc.expectedPath)
+ t.Errorf("Test %d: actual path: %v, expected: %v", i, rel, tc.expectedPath)
+ }
+
+ fileType, _ := repl.Get("http.matchers.file.type")
+ if fileType != tc.expectedType {
+ t.Errorf("Test %d: actual file type: %v, expected: %v", i, fileType, tc.expectedType)
}
}
-}
\ No newline at end of file
+}
+
+func TestPHPFileMatcher(t *testing.T) {
+ for i, tc := range []struct {
+ path string
+ expectedPath string
+ expectedType string
+ matched bool
+ }{
+ {
+ path: "/index.php",
+ expectedPath: "/index.php",
+ expectedType: "file",
+ matched: true,
+ },
+ {
+ path: "/index.php/somewhere",
+ expectedPath: "/index.php",
+ expectedType: "file",
+ matched: true,
+ },
+ {
+ path: "/remote.php",
+ expectedPath: "/remote.php",
+ expectedType: "file",
+ matched: true,
+ },
+ {
+ path: "/remote.php/somewhere",
+ expectedPath: "/remote.php",
+ expectedType: "file",
+ matched: true,
+ },
+ {
+ path: "/missingfile.php",
+ matched: false,
+ },
+ {
+ path: "/notphp.php.txt",
+ expectedPath: "/notphp.php.txt",
+ expectedType: "file",
+ matched: true,
+ },
+ {
+ path: "/notphp.php.txt/",
+ expectedPath: "/notphp.php.txt",
+ expectedType: "file",
+ matched: true,
+ },
+ {
+ path: "/notphp.php.txt.suffixed",
+ matched: false,
+ },
+ {
+ path: "/foo.php.php/index.php",
+ expectedPath: "/foo.php.php/index.php",
+ expectedType: "file",
+ matched: true,
+ },
+ {
+ // See https://github.com/caddyserver/caddy/issues/3623
+ path: "/%E2%C3",
+ expectedPath: "/%E2%C3",
+ expectedType: "file",
+ matched: false,
+ },
+ {
+ path: "/index.php?path={path}&{query}",
+ expectedPath: "/index.php",
+ expectedType: "file",
+ matched: true,
+ },
+ } {
+ m := &MatchFile{
+ fsmap: &filesystems.FilesystemMap{},
+ Root: "./testdata",
+ TryFiles: []string{"{http.request.uri.path}", "{http.request.uri.path}/index.php"},
+ SplitPath: []string{".php"},
+ }
+
+ u, err := url.Parse(tc.path)
+ if err != nil {
+ t.Errorf("Test %d: parsing path: %v", i, err)
+ }
+
+ req := &http.Request{URL: u}
+ repl := caddyhttp.NewTestReplacer(req)
+
+ result, err := m.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d: unexpected error: %v", i, err)
+ }
+ if result != tc.matched {
+ t.Errorf("Test %d: expected match=%t, got %t", i, tc.matched, result)
+ }
+
+ rel, ok := repl.Get("http.matchers.file.relative")
+ if !ok && result {
+ t.Errorf("Test %d: expected replacer value", i)
+ }
+ if !result {
+ continue
+ }
+
+ if rel != tc.expectedPath {
+ t.Errorf("Test %d: actual path: %v, expected: %v", i, rel, tc.expectedPath)
+ }
+
+ fileType, _ := repl.Get("http.matchers.file.type")
+ if fileType != tc.expectedType {
+ t.Errorf("Test %d: actual file type: %v, expected: %v", i, fileType, tc.expectedType)
+ }
+ }
+}
+
+func TestFirstSplit(t *testing.T) {
+ m := MatchFile{
+ SplitPath: []string{".php"},
+ fsmap: &filesystems.FilesystemMap{},
+ }
+ actual, remainder := m.firstSplit("index.PHP/somewhere")
+ expected := "index.PHP"
+ expectedRemainder := "/somewhere"
+ if actual != expected {
+ t.Errorf("Expected split %s but got %s", expected, actual)
+ }
+ if remainder != expectedRemainder {
+ t.Errorf("Expected remainder %s but got %s", expectedRemainder, remainder)
+ }
+}
+
+var expressionTests = []struct {
+ name string
+ expression *caddyhttp.MatchExpression
+ urlTarget string
+ httpMethod string
+ httpHeader *http.Header
+ wantErr bool
+ wantResult bool
+ clientCertificate []byte
+ expectedPath string
+}{
+ {
+ name: "file error no args (MatchFile)",
+ expression: &caddyhttp.MatchExpression{
+ Expr: `file()`,
+ },
+ urlTarget: "https://example.com/foo.txt",
+ wantResult: true,
+ },
+ {
+ name: "file error bad try files (MatchFile)",
+ expression: &caddyhttp.MatchExpression{
+ Expr: `file({"try_file": ["bad_arg"]})`,
+ },
+ urlTarget: "https://example.com/foo",
+ wantErr: true,
+ },
+ {
+ name: "file match short pattern index.php (MatchFile)",
+ expression: &caddyhttp.MatchExpression{
+ Expr: `file("index.php")`,
+ },
+ urlTarget: "https://example.com/foo",
+ wantResult: true,
+ },
+ {
+ name: "file match short pattern foo.txt (MatchFile)",
+ expression: &caddyhttp.MatchExpression{
+ Expr: `file({http.request.uri.path})`,
+ },
+ urlTarget: "https://example.com/foo.txt",
+ wantResult: true,
+ },
+ {
+ name: "file match index.php (MatchFile)",
+ expression: &caddyhttp.MatchExpression{
+ Expr: `file({"root": "./testdata", "try_files": [{http.request.uri.path}, "/index.php"]})`,
+ },
+ urlTarget: "https://example.com/foo",
+ wantResult: true,
+ },
+ {
+ name: "file match long pattern foo.txt (MatchFile)",
+ expression: &caddyhttp.MatchExpression{
+ Expr: `file({"root": "./testdata", "try_files": [{http.request.uri.path}]})`,
+ },
+ urlTarget: "https://example.com/foo.txt",
+ wantResult: true,
+ },
+ {
+ name: "file match long pattern foo.txt with concatenation (MatchFile)",
+ expression: &caddyhttp.MatchExpression{
+ Expr: `file({"root": ".", "try_files": ["./testdata" + {http.request.uri.path}]})`,
+ },
+ urlTarget: "https://example.com/foo.txt",
+ wantResult: true,
+ },
+ {
+ name: "file not match long pattern (MatchFile)",
+ expression: &caddyhttp.MatchExpression{
+ Expr: `file({"root": "./testdata", "try_files": [{http.request.uri.path}]})`,
+ },
+ urlTarget: "https://example.com/nopenope.txt",
+ wantResult: false,
+ },
+ {
+ name: "file match long pattern foo.txt with try_policy (MatchFile)",
+ expression: &caddyhttp.MatchExpression{
+ Expr: `file({"root": "./testdata", "try_policy": "largest_size", "try_files": ["foo.txt", "large.txt"]})`,
+ },
+ urlTarget: "https://example.com/",
+ wantResult: true,
+ expectedPath: "/large.txt",
+ },
+}
+
+func TestMatchExpressionMatch(t *testing.T) {
+ for _, tst := range expressionTests {
+ tc := tst
+ t.Run(tc.name, func(t *testing.T) {
+ caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
+ defer cancel()
+ err := tc.expression.Provision(caddyCtx)
+ if err != nil {
+ if !tc.wantErr {
+ t.Errorf("MatchExpression.Provision() error = %v, wantErr %v", err, tc.wantErr)
+ }
+ return
+ }
+
+ req := httptest.NewRequest(tc.httpMethod, tc.urlTarget, nil)
+ if tc.httpHeader != nil {
+ req.Header = *tc.httpHeader
+ }
+ repl := caddyhttp.NewTestReplacer(req)
+ repl.Set("http.vars.root", "./testdata")
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ matches, err := tc.expression.MatchWithError(req)
+ if err != nil {
+ t.Errorf("MatchExpression.Match() error = %v", err)
+ return
+ }
+ if matches != tc.wantResult {
+ t.Errorf("MatchExpression.Match() expected to return '%t', for expression : '%s'", tc.wantResult, tc.expression.Expr)
+ }
+
+ if tc.expectedPath != "" {
+ path, ok := repl.Get("http.matchers.file.relative")
+ if !ok {
+ t.Errorf("MatchExpression.Match() expected to return path '%s', but got none", tc.expectedPath)
+ }
+ if path != tc.expectedPath {
+ t.Errorf("MatchExpression.Match() expected to return path '%s', but got '%s'", tc.expectedPath, path)
+ }
+ }
+ })
+ }
+}
diff --git a/modules/caddyhttp/fileserver/staticfiles.go b/modules/caddyhttp/fileserver/staticfiles.go
index d6cf4d65..2b0caecf 100644
--- a/modules/caddyhttp/fileserver/staticfiles.go
+++ b/modules/caddyhttp/fileserver/staticfiles.go
@@ -16,41 +16,119 @@ package fileserver
import (
"bytes"
+ "errors"
"fmt"
- "html/template"
"io"
+ "io/fs"
weakrand "math/rand"
"mime"
"net/http"
"os"
"path"
"path/filepath"
+ "runtime"
"strconv"
"strings"
- "sync"
- "time"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
)
func init() {
- weakrand.Seed(time.Now().UnixNano())
-
caddy.RegisterModule(FileServer{})
}
-// FileServer implements a static file server responder for Caddy.
+// FileServer implements a handler that serves static files.
+//
+// The path of the file to serve is constructed by joining the site root
+// and the sanitized request path. Any and all files within the root and
+// links with targets outside the site root may therefore be accessed.
+// For example, with a site root of `/www`, requests to `/foo/bar.txt`
+// will serve the file at `/www/foo/bar.txt`.
+//
+// The request path is sanitized using the Go standard library's
+// path.Clean() function (https://pkg.go.dev/path#Clean) before being
+// joined to the root. Request paths must be valid and well-formed.
+//
+// For requests that access directories instead of regular files,
+// Caddy will attempt to serve an index file if present. For example,
+// a request to `/dir/` will attempt to serve `/dir/index.html` if
+// it exists. The index file names to try are configurable. If a
+// requested directory does not have an index file, Caddy writes a
+// 404 response. Alternatively, file browsing can be enabled with
+// the "browse" parameter which shows a list of files when directories
+// are requested if no index file is present. If "browse" is enabled,
+// Caddy may serve a JSON array of the directory listing when the `Accept`
+// header mentions `application/json` with the following structure:
+//
+// [{
+// "name": "",
+// "size": 0,
+// "url": "",
+// "mod_time": "",
+// "mode": 0,
+// "is_dir": false,
+// "is_symlink": false
+// }]
+//
+// with the `url` being relative to the request path and `mod_time` in the RFC 3339 format
+// with sub-second precision. For any other value for the `Accept` header, the
+// respective browse template is executed with `Content-Type: text/html`.
+//
+// By default, this handler will canonicalize URIs so that requests to
+// directories end with a slash, but requests to regular files do not.
+// This is enforced with HTTP redirects automatically and can be disabled.
+// Canonicalization redirects are not issued, however, if a URI rewrite
+// modified the last component of the path (the filename).
+//
+// This handler sets the Etag and Last-Modified headers for static files.
+// It does not perform MIME sniffing to determine Content-Type based on
+// contents, but does use the extension (if known); see the Go docs for
+// details: https://pkg.go.dev/mime#TypeByExtension
+//
+// The file server properly handles requests with If-Match,
+// If-Unmodified-Since, If-Modified-Since, If-None-Match, Range, and
+// If-Range headers. It includes the file's modification time in the
+// Last-Modified header of the response.
type FileServer struct {
+ // The file system implementation to use. By default, Caddy uses the local
+ // disk file system.
+ //
+	// If a non-default filesystem is used, it must first be registered in the globals section.
+ FileSystem string `json:"fs,omitempty"`
+
// The path to the root of the site. Default is `{http.vars.root}` if set,
- // or current working directory otherwise.
+ // or current working directory otherwise. This should be a trusted value.
+ //
+ // Note that a site root is not a sandbox. Although the file server does
+ // sanitize the request URI to prevent directory traversal, files (including
+ // links) within the site root may be directly accessed based on the request
+ // path. Files and folders within the root should be secure and trustworthy.
Root string `json:"root,omitempty"`
// A list of files or folders to hide; the file server will pretend as if
- // they don't exist. Accepts globular patterns like "*.hidden" or "/foo/*/bar".
+ // they don't exist. Accepts globular patterns like `*.ext` or `/foo/*/bar`
+ // as well as placeholders. Because site roots can be dynamic, this list
+ // uses file system paths, not request paths. To clarify, the base of
+ // relative paths is the current working directory, NOT the site root.
+ //
+ // Entries without a path separator (`/` or `\` depending on OS) will match
+ // any file or directory of that name regardless of its path. To hide only a
+ // specific file with a name that may not be unique, always use a path
+ // separator. For example, to hide all files or folder trees named "hidden",
+ // put "hidden" in the list. To hide only ./hidden, put "./hidden" in the list.
+ //
+ // When possible, all paths are resolved to their absolute form before
+	// comparisons are made. For maximum clarity and explicitness, use complete,
+ // absolute paths; or, for greater portability, use relative paths instead.
Hide []string `json:"hide,omitempty"`
// The names of files to try as index files if a folder is requested.
+ // Default: index.html, index.txt.
IndexNames []string `json:"index_names,omitempty"`
// Enables file listings if a directory was requested and no index
@@ -59,12 +137,41 @@ type FileServer struct {
// Use redirects to enforce trailing slashes for directories, or to
// remove trailing slash from URIs for files. Default is true.
+ //
+ // Canonicalization will not happen if the last element of the request's
+ // path (the filename) is changed in an internal rewrite, to avoid
+ // clobbering the explicit rewrite with implicit behavior.
CanonicalURIs *bool `json:"canonical_uris,omitempty"`
+ // Override the status code written when successfully serving a file.
+ // Particularly useful when explicitly serving a file as display for
+ // an error, like a 404 page. A placeholder may be used. By default,
+ // the status code will typically be 200, or 206 for partial content.
+ StatusCode caddyhttp.WeakString `json:"status_code,omitempty"`
+
// If pass-thru mode is enabled and a requested file is not found,
// it will invoke the next handler in the chain instead of returning
// a 404 error. By default, this is false (disabled).
PassThru bool `json:"pass_thru,omitempty"`
+
+ // Selection of encoders to use to check for precompressed files.
+ PrecompressedRaw caddy.ModuleMap `json:"precompressed,omitempty" caddy:"namespace=http.precompressed"`
+
+ // If the client has no strong preference (q-factor), choose these encodings in order.
+	// If no order is specified here, the first encoding from the Accept-Encoding header
+	// that both client and server support is used.
+ PrecompressedOrder []string `json:"precompressed_order,omitempty"`
+ precompressors map[string]encode.Precompressed
+
+ // List of file extensions to try to read Etags from.
+ // If set, file Etags will be read from sidecar files
+ // with any of these suffixes, instead of generating
+ // our own Etag.
+ EtagFileExtensions []string `json:"etag_file_extensions,omitempty"`
+
+ fsmap caddy.FileSystems
+
+ logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
@@ -77,6 +184,14 @@ func (FileServer) CaddyModule() caddy.ModuleInfo {
// Provision sets up the static files responder.
func (fsrv *FileServer) Provision(ctx caddy.Context) error {
+ fsrv.logger = ctx.Logger()
+
+ fsrv.fsmap = ctx.Filesystems()
+
+ if fsrv.FileSystem == "" {
+ fsrv.FileSystem = "{http.vars.fs}"
+ }
+
if fsrv.Root == "" {
fsrv.Root = "{http.vars.root}"
}
@@ -85,21 +200,59 @@ func (fsrv *FileServer) Provision(ctx caddy.Context) error {
fsrv.IndexNames = defaultIndexNames
}
- if fsrv.Browse != nil {
- var tpl *template.Template
- var err error
- if fsrv.Browse.TemplateFile != "" {
- tpl, err = template.ParseFiles(fsrv.Browse.TemplateFile)
- if err != nil {
- return fmt.Errorf("parsing browse template file: %v", err)
- }
- } else {
- tpl, err = template.New("default_listing").Parse(defaultBrowseTemplate)
- if err != nil {
- return fmt.Errorf("parsing default browse template: %v", err)
+ // for hide paths that are static (i.e. no placeholders), we can transform them into
+ // absolute paths before the server starts for very slight performance improvement
+ for i, h := range fsrv.Hide {
+ if !strings.Contains(h, "{") && strings.Contains(h, separator) {
+ if abs, err := caddy.FastAbs(h); err == nil {
+ fsrv.Hide[i] = abs
+ }
+ }
+ }
+
+ // support precompressed sidecar files
+ mods, err := ctx.LoadModule(fsrv, "PrecompressedRaw")
+ if err != nil {
+ return fmt.Errorf("loading encoder modules: %v", err)
+ }
+ for modName, modIface := range mods.(map[string]any) {
+ p, ok := modIface.(encode.Precompressed)
+ if !ok {
+ return fmt.Errorf("module %s is not precompressor", modName)
+ }
+ ae := p.AcceptEncoding()
+ if ae == "" {
+ return fmt.Errorf("precompressor does not specify an Accept-Encoding value")
+ }
+ suffix := p.Suffix()
+ if suffix == "" {
+ return fmt.Errorf("precompressor does not specify a Suffix value")
+ }
+ if _, ok := fsrv.precompressors[ae]; ok {
+ return fmt.Errorf("precompressor already added: %s", ae)
+ }
+ if fsrv.precompressors == nil {
+ fsrv.precompressors = make(map[string]encode.Precompressed)
+ }
+ fsrv.precompressors[ae] = p
+ }
+
+ if fsrv.Browse != nil {
+ // check sort options
+ for idx, sortOption := range fsrv.Browse.SortOptions {
+ switch idx {
+ case 0:
+ if sortOption != sortByName && sortOption != sortByNameDirFirst && sortOption != sortBySize && sortOption != sortByTime {
+ return fmt.Errorf("the first option must be one of the following: %s, %s, %s, %s, but got %s", sortByName, sortByNameDirFirst, sortBySize, sortByTime, sortOption)
+ }
+ case 1:
+ if sortOption != sortOrderAsc && sortOption != sortOrderDesc {
+ return fmt.Errorf("the second option must be one of the following: %s, %s, but got %s", sortOrderAsc, sortOrderDesc, sortOption)
+ }
+ default:
+ return fmt.Errorf("only max 2 sort options are allowed, but got %d", idx+1)
}
}
- fsrv.Browse.template = tpl
}
return nil
@@ -108,22 +261,50 @@ func (fsrv *FileServer) Provision(ctx caddy.Context) error {
func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+ if runtime.GOOS == "windows" {
+ // reject paths with Alternate Data Streams (ADS)
+ if strings.Contains(r.URL.Path, ":") {
+ return caddyhttp.Error(http.StatusBadRequest, fmt.Errorf("illegal ADS path"))
+ }
+ // reject paths with "8.3" short names
+ trimmedPath := strings.TrimRight(r.URL.Path, ". ") // Windows ignores trailing dots and spaces, sigh
+ if len(path.Base(trimmedPath)) <= 12 && strings.Contains(trimmedPath, "~") {
+ return caddyhttp.Error(http.StatusBadRequest, fmt.Errorf("illegal short name"))
+ }
+ // both of those could bypass file hiding or possibly leak information even if the file is not hidden
+ }
+
filesToHide := fsrv.transformHidePaths(repl)
root := repl.ReplaceAll(fsrv.Root, ".")
- suffix := repl.ReplaceAll(r.URL.Path, "")
- filename := sanitizedPathJoin(root, suffix)
+ fsName := repl.ReplaceAll(fsrv.FileSystem, "")
+
+ fileSystem, ok := fsrv.fsmap.Get(fsName)
+ if !ok {
+ return caddyhttp.Error(http.StatusNotFound, fmt.Errorf("filesystem not found"))
+ }
+
+ // remove any trailing `/` as it breaks fs.ValidPath() in the stdlib
+ filename := strings.TrimSuffix(caddyhttp.SanitizedPathJoin(root, r.URL.Path), "/")
+
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "sanitized path join"); c != nil {
+ c.Write(
+ zap.String("site_root", root),
+ zap.String("fs", fsName),
+ zap.String("request_path", r.URL.Path),
+ zap.String("result", filename),
+ )
+ }
// get information about the file
- info, err := os.Stat(filename)
+ info, err := fs.Stat(fileSystem, filename)
if err != nil {
- err = mapDirOpenError(err, filename)
- if os.IsNotExist(err) {
+ err = fsrv.mapDirOpenError(fileSystem, err, filename)
+ if errors.Is(err, fs.ErrNotExist) || errors.Is(err, fs.ErrInvalid) {
return fsrv.notFound(w, r, next)
- } else if os.IsPermission(err) {
+ } else if errors.Is(err, fs.ErrPermission) {
return caddyhttp.Error(http.StatusForbidden, err)
}
- // TODO: treat this as resource exhaustion like with os.Open? Or unnecessary here?
return caddyhttp.Error(http.StatusInternalServerError, err)
}
@@ -132,13 +313,20 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
var implicitIndexFile bool
if info.IsDir() && len(fsrv.IndexNames) > 0 {
for _, indexPage := range fsrv.IndexNames {
- indexPath := sanitizedPathJoin(filename, indexPage)
+ indexPage := repl.ReplaceAll(indexPage, "")
+ indexPath := caddyhttp.SanitizedPathJoin(filename, indexPage)
if fileHidden(indexPath, filesToHide) {
// pretend this file doesn't exist
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "hiding index file"); c != nil {
+ c.Write(
+ zap.String("filename", indexPath),
+ zap.Strings("files_to_hide", filesToHide),
+ )
+ }
continue
}
- indexInfo, err := os.Stat(indexPath)
+ indexInfo, err := fs.Stat(fileSystem, indexPath)
if err != nil {
continue
}
@@ -154,6 +342,9 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
info = indexInfo
filename = indexPath
implicitIndexFile = true
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "located index file"); c != nil {
+ c.Write(zap.String("filename", filename))
+ }
break
}
}
@@ -161,17 +352,27 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
// if still referencing a directory, delegate
// to browse or return an error
if info.IsDir() {
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "no index file in directory"); c != nil {
+ c.Write(
+ zap.String("path", filename),
+ zap.Strings("index_filenames", fsrv.IndexNames),
+ )
+ }
if fsrv.Browse != nil && !fileHidden(filename, filesToHide) {
- return fsrv.serveBrowse(filename, w, r, next)
+ return fsrv.serveBrowse(fileSystem, root, filename, w, r, next)
}
return fsrv.notFound(w, r, next)
}
- // TODO: content negotiation (brotli sidecar files, etc...)
-
// one last check to ensure the file isn't hidden (we might
// have changed the filename from when we last checked)
if fileHidden(filename, filesToHide) {
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "hiding file"); c != nil {
+ c.Write(
+ zap.String("filename", filename),
+ zap.Strings("files_to_hide", filesToHide),
+ )
+ }
return fsrv.notFound(w, r, next)
}
@@ -180,63 +381,189 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
// trailing slash - not enforcing this can break relative hrefs
// in HTML (see https://github.com/caddyserver/caddy/issues/2741)
if fsrv.CanonicalURIs == nil || *fsrv.CanonicalURIs {
- if implicitIndexFile && !strings.HasSuffix(r.URL.Path, "/") {
- return redirect(w, r, r.URL.Path+"/")
- } else if !implicitIndexFile && strings.HasSuffix(r.URL.Path, "/") {
- return redirect(w, r, r.URL.Path[:len(r.URL.Path)-1])
- }
- }
-
- // open the file
- file, err := fsrv.openFile(filename, w)
- if err != nil {
- if herr, ok := err.(caddyhttp.HandlerError); ok &&
- herr.StatusCode == http.StatusNotFound {
- return fsrv.notFound(w, r, next)
- }
- return err // error is already structured
- }
- defer file.Close()
-
- // set the ETag - note that a conditional If-None-Match request is handled
- // by http.ServeContent below, which checks against this ETag value
- w.Header().Set("ETag", calculateEtag(info))
-
- if w.Header().Get("Content-Type") == "" {
- mtyp := mime.TypeByExtension(filepath.Ext(filename))
- if mtyp == "" {
- // do not allow Go to sniff the content-type; see
- // https://www.youtube.com/watch?v=8t8JYpt0egE
- // TODO: If we want a Content-Type, consider writing a default of application/octet-stream - this is secure but violates spec
- w.Header()["Content-Type"] = nil
- } else {
- w.Header().Set("Content-Type", mtyp)
- }
- }
-
- // if this handler exists in an error context (i.e. is
- // part of a handler chain that is supposed to handle
- // a previous error), we have to serve the content
- // manually in order to write the correct status code
- if reqErr, ok := r.Context().Value(caddyhttp.ErrorCtxKey).(error); ok {
- statusCode := http.StatusInternalServerError
- if handlerErr, ok := reqErr.(caddyhttp.HandlerError); ok {
- if handlerErr.StatusCode > 0 {
- statusCode = handlerErr.StatusCode
+ // Only redirect if the last element of the path (the filename) was not
+ // rewritten; if the admin wanted to rewrite to the canonical path, they
+ // would have, and we have to be very careful not to introduce unwanted
+ // redirects and especially redirect loops!
+ // See https://github.com/caddyserver/caddy/issues/4205.
+ origReq := r.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request)
+ if path.Base(origReq.URL.Path) == path.Base(r.URL.Path) {
+ if implicitIndexFile && !strings.HasSuffix(origReq.URL.Path, "/") {
+ to := origReq.URL.Path + "/"
+				if c := fsrv.logger.Check(zapcore.DebugLevel, "redirecting to canonical URI (adding trailing slash for directory)"); c != nil {
+ c.Write(
+ zap.String("from_path", origReq.URL.Path),
+ zap.String("to_path", to),
+ )
+ }
+ return redirect(w, r, to)
+ } else if !implicitIndexFile && strings.HasSuffix(origReq.URL.Path, "/") {
+ to := origReq.URL.Path[:len(origReq.URL.Path)-1]
+				if c := fsrv.logger.Check(zapcore.DebugLevel, "redirecting to canonical URI (removing trailing slash for file)"); c != nil {
+ c.Write(
+ zap.String("from_path", origReq.URL.Path),
+ zap.String("to_path", to),
+ )
+ }
+ return redirect(w, r, to)
}
}
- w.WriteHeader(statusCode)
- if r.Method != "HEAD" {
- io.Copy(w, file)
+ }
+
+ var file fs.File
+ respHeader := w.Header()
+
+ // etag is usually unset, but if the user knows what they're doing, let them override it
+ etag := respHeader.Get("Etag")
+
+ // static file responses are often compressed, either on-the-fly
+ // or with precompressed sidecar files; in any case, the headers
+ // should contain "Vary: Accept-Encoding" even when not compressed
+ // so caches can craft a reliable key (according to REDbot results)
+ // see #5849
+ respHeader.Add("Vary", "Accept-Encoding")
+
+ // check for precompressed files
+ for _, ae := range encode.AcceptedEncodings(r, fsrv.PrecompressedOrder) {
+ precompress, ok := fsrv.precompressors[ae]
+ if !ok {
+ continue
}
- return nil
+ compressedFilename := filename + precompress.Suffix()
+ compressedInfo, err := fs.Stat(fileSystem, compressedFilename)
+ if err != nil || compressedInfo.IsDir() {
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "precompressed file not accessible"); c != nil {
+ c.Write(zap.String("filename", compressedFilename), zap.Error(err))
+ }
+ continue
+ }
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "opening compressed sidecar file"); c != nil {
+ c.Write(zap.String("filename", compressedFilename), zap.Error(err))
+ }
+ file, err = fsrv.openFile(fileSystem, compressedFilename, w)
+ if err != nil {
+ if c := fsrv.logger.Check(zapcore.WarnLevel, "opening precompressed file failed"); c != nil {
+ c.Write(zap.String("filename", compressedFilename), zap.Error(err))
+ }
+ if caddyErr, ok := err.(caddyhttp.HandlerError); ok && caddyErr.StatusCode == http.StatusServiceUnavailable {
+ return err
+ }
+ file = nil
+ continue
+ }
+ defer file.Close()
+ respHeader.Set("Content-Encoding", ae)
+ respHeader.Del("Accept-Ranges")
+
+ // try to get the etag from pre computed files if an etag suffix list was provided
+ if etag == "" && fsrv.EtagFileExtensions != nil {
+ etag, err = fsrv.getEtagFromFile(fileSystem, compressedFilename)
+ if err != nil {
+ return err
+ }
+ }
+
+ // don't assign info = compressedInfo because sidecars are kind
+ // of transparent; however we do need to set the Etag:
+ // https://caddy.community/t/gzipped-sidecar-file-wrong-same-etag/16793
+ if etag == "" {
+ etag = calculateEtag(compressedInfo)
+ }
+
+ break
+ }
+
+ // no precompressed file found, use the actual file
+ if file == nil {
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "opening file"); c != nil {
+ c.Write(zap.String("filename", filename))
+ }
+
+ // open the file
+ file, err = fsrv.openFile(fileSystem, filename, w)
+ if err != nil {
+ if herr, ok := err.(caddyhttp.HandlerError); ok &&
+ herr.StatusCode == http.StatusNotFound {
+ return fsrv.notFound(w, r, next)
+ }
+ return err // error is already structured
+ }
+ defer file.Close()
+ // try to get the etag from pre computed files if an etag suffix list was provided
+ if etag == "" && fsrv.EtagFileExtensions != nil {
+ etag, err = fsrv.getEtagFromFile(fileSystem, filename)
+ if err != nil {
+ return err
+ }
+ }
+ if etag == "" {
+ etag = calculateEtag(info)
+ }
+ }
+
+ // at this point, we're serving a file; Go std lib supports only
+ // GET and HEAD, which is sensible for a static file server - reject
+ // any other methods (see issue #5166)
+ if r.Method != http.MethodGet && r.Method != http.MethodHead {
+ // if we're in an error context, then it doesn't make sense
+ // to repeat the error; just continue because we're probably
+ // trying to write an error page response (see issue #5703)
+ if _, ok := r.Context().Value(caddyhttp.ErrorCtxKey).(error); !ok {
+ respHeader.Add("Allow", "GET, HEAD")
+ return caddyhttp.Error(http.StatusMethodNotAllowed, nil)
+ }
+ }
+
+ // set the Etag - note that a conditional If-None-Match request is handled
+ // by http.ServeContent below, which checks against this Etag value
+ if etag != "" {
+ respHeader.Set("Etag", etag)
+ }
+
+ if respHeader.Get("Content-Type") == "" {
+ mtyp := mime.TypeByExtension(filepath.Ext(filename))
+ if mtyp == "" {
+ // do not allow Go to sniff the content-type; see https://www.youtube.com/watch?v=8t8JYpt0egE
+ respHeader["Content-Type"] = nil
+ } else {
+ respHeader.Set("Content-Type", mtyp)
+ }
+ }
+
+ var statusCodeOverride int
+
+ // if this handler exists in an error context (i.e. is part of a
+ // handler chain that is supposed to handle a previous error),
+ // we should set status code to the one from the error instead
+ // of letting http.ServeContent set the default (usually 200)
+ if reqErr, ok := r.Context().Value(caddyhttp.ErrorCtxKey).(error); ok {
+ statusCodeOverride = http.StatusInternalServerError
+ if handlerErr, ok := reqErr.(caddyhttp.HandlerError); ok {
+ if handlerErr.StatusCode > 0 {
+ statusCodeOverride = handlerErr.StatusCode
+ }
+ }
+ }
+
+ // if a status code override is configured, run the replacer on it
+ if codeStr := fsrv.StatusCode.String(); codeStr != "" {
+ statusCodeOverride, err = strconv.Atoi(repl.ReplaceAll(codeStr, ""))
+ if err != nil {
+ return caddyhttp.Error(http.StatusInternalServerError, err)
+ }
+ }
+
+ // if we do have an override from the previous two parts, then
+ // we wrap the response writer to intercept the WriteHeader call
+ if statusCodeOverride > 0 {
+ w = statusOverrideResponseWriter{ResponseWriter: w, code: statusCodeOverride}
}
// let the standard library do what it does best; note, however,
// that errors generated by ServeContent are written immediately
// to the response, so we cannot handle them (but errors there
// are rare)
- http.ServeContent(w, r, info.Name(), info.ModTime(), file)
+ http.ServeContent(w, r, info.Name(), info.ModTime(), file.(io.ReadSeeker))
return nil
}
@@ -245,19 +572,29 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
// the response is configured to inform the client how to best handle it
// and a well-described handler error is returned (do not wrap the
// returned error value).
-func (fsrv *FileServer) openFile(filename string, w http.ResponseWriter) (*os.File, error) {
- file, err := os.Open(filename)
+func (fsrv *FileServer) openFile(fileSystem fs.FS, filename string, w http.ResponseWriter) (fs.File, error) {
+ file, err := fileSystem.Open(filename)
if err != nil {
- err = mapDirOpenError(err, filename)
- if os.IsNotExist(err) {
+ err = fsrv.mapDirOpenError(fileSystem, err, filename)
+ if errors.Is(err, fs.ErrNotExist) {
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "file not found"); c != nil {
+ c.Write(zap.String("filename", filename), zap.Error(err))
+ }
return nil, caddyhttp.Error(http.StatusNotFound, err)
- } else if os.IsPermission(err) {
+ } else if errors.Is(err, fs.ErrPermission) {
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "permission denied"); c != nil {
+ c.Write(zap.String("filename", filename), zap.Error(err))
+ }
return nil, caddyhttp.Error(http.StatusForbidden, err)
}
// maybe the server is under load and ran out of file descriptors?
// have client wait arbitrary seconds to help prevent a stampede
+ //nolint:gosec
backoff := weakrand.Intn(maxBackoff-minBackoff) + minBackoff
w.Header().Set("Retry-After", strconv.Itoa(backoff))
+ if c := fsrv.logger.Check(zapcore.DebugLevel, "retry after backoff"); c != nil {
+ c.Write(zap.String("filename", filename), zap.Int("backoff", backoff), zap.Error(err))
+ }
return nil, caddyhttp.Error(http.StatusServiceUnavailable, err)
}
return file, nil
@@ -269,88 +606,89 @@ func (fsrv *FileServer) openFile(filename string, w http.ResponseWriter) (*os.Fi
// Adapted from the Go standard library; originally written by Nathaniel Caza.
// https://go-review.googlesource.com/c/go/+/36635/
// https://go-review.googlesource.com/c/go/+/36804/
-func mapDirOpenError(originalErr error, name string) error {
- if os.IsNotExist(originalErr) || os.IsPermission(originalErr) {
+func (fsrv *FileServer) mapDirOpenError(fileSystem fs.FS, originalErr error, name string) error {
+ if errors.Is(originalErr, fs.ErrNotExist) || errors.Is(originalErr, fs.ErrPermission) {
return originalErr
}
- parts := strings.Split(name, string(filepath.Separator))
+ parts := strings.Split(name, separator)
for i := range parts {
if parts[i] == "" {
continue
}
- fi, err := os.Stat(strings.Join(parts[:i+1], string(filepath.Separator)))
+ fi, err := fs.Stat(fileSystem, strings.Join(parts[:i+1], separator))
if err != nil {
return originalErr
}
if !fi.IsDir() {
- return os.ErrNotExist
+ return fs.ErrNotExist
}
}
return originalErr
}
-// transformHidePaths performs replacements for all the elements of
-// fsrv.Hide and returns a new list of the transformed values.
+// transformHidePaths performs replacements for all the elements of fsrv.Hide and
+// makes them absolute paths (if they contain a path separator), then returns a
+// new list of the transformed values.
func (fsrv *FileServer) transformHidePaths(repl *caddy.Replacer) []string {
hide := make([]string, len(fsrv.Hide))
for i := range fsrv.Hide {
hide[i] = repl.ReplaceAll(fsrv.Hide[i], "")
+ if strings.Contains(hide[i], separator) {
+ abs, err := caddy.FastAbs(hide[i])
+ if err == nil {
+ hide[i] = abs
+ }
+ }
}
return hide
}
-// sanitizedPathJoin performs filepath.Join(root, reqPath) that
-// is safe against directory traversal attacks. It uses logic
-// similar to that in the Go standard library, specifically
-// in the implementation of http.Dir. The root is assumed to
-// be a trusted path, but reqPath is not.
-func sanitizedPathJoin(root, reqPath string) string {
- // TODO: Caddy 1 uses this:
- // prevent absolute path access on Windows, e.g. http://localhost:5000/C:\Windows\notepad.exe
- // if runtime.GOOS == "windows" && len(reqPath) > 0 && filepath.IsAbs(reqPath[1:]) {
- // TODO.
- // }
-
- // TODO: whereas std lib's http.Dir.Open() uses this:
- // if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) {
- // return nil, errors.New("http: invalid character in file path")
- // }
-
- // TODO: see https://play.golang.org/p/oh77BiVQFti for another thing to consider
-
- if root == "" {
- root = "."
- }
- return filepath.Join(root, filepath.FromSlash(path.Clean("/"+reqPath)))
-}
-
-// fileHidden returns true if filename is hidden
-// according to the hide list.
+// fileHidden returns true if filename is hidden according to the hide list.
+// filename must be a relative or absolute file system path, not a request
+// URI path. It is expected that all the paths in the hide list are absolute
+// paths or are singular filenames (without a path separator).
func fileHidden(filename string, hide []string) bool {
- nameOnly := filepath.Base(filename)
- sep := string(filepath.Separator)
+ if len(hide) == 0 {
+ return false
+ }
+
+ // all path comparisons use the complete absolute path if possible
+ filenameAbs, err := caddy.FastAbs(filename)
+ if err == nil {
+ filename = filenameAbs
+ }
+
+ var components []string
for _, h := range hide {
- // assuming h is a glob/shell-like pattern,
- // use it to compare the whole file path;
- // but if there is no separator in h, then
- // just compare against the file's name
- compare := filename
- if !strings.Contains(h, sep) {
- compare = nameOnly
- }
-
- hidden, err := filepath.Match(h, compare)
- if err != nil {
- // malformed pattern; fallback by checking prefix
- if strings.HasPrefix(filename, h) {
+ if !strings.Contains(h, separator) {
+ // if there is no separator in h, then we assume the user
+ // wants to hide any files or folders that match that
+ // name; thus we have to compare against each component
+ // of the filename, e.g. hiding "bar" would hide "/bar"
+ // as well as "/foo/bar/baz" but not "/barstool".
+ if len(components) == 0 {
+ components = strings.Split(filename, separator)
+ }
+ for _, c := range components {
+ if hidden, _ := filepath.Match(h, c); hidden {
+ return true
+ }
+ }
+ } else if strings.HasPrefix(filename, h) {
+ // if there is a separator in h, and filename is exactly
+ // prefixed with h, then we can do a prefix match so that
+ // "/foo" matches "/foo/bar" but not "/foobar".
+ withoutPrefix := strings.TrimPrefix(filename, h)
+ if strings.HasPrefix(withoutPrefix, separator) {
return true
}
}
- if hidden {
- // file name or path matches hide pattern
+
+ // in the general case, a glob match will suffice
+ if hidden, _ := filepath.Match(h, filename); hidden {
return true
}
}
@@ -367,35 +705,95 @@ func (fsrv *FileServer) notFound(w http.ResponseWriter, r *http.Request, next ca
return caddyhttp.Error(http.StatusNotFound, nil)
}
-// calculateEtag produces a strong etag by default, although, for
-// efficiency reasons, it does not actually consume the contents
-// of the file to make a hash of all the bytes. ¯\_(ツ)_/¯
-// Prefix the etag with "W/" to convert it into a weak etag.
-// See: https://tools.ietf.org/html/rfc7232#section-2.3
+// calculateEtag computes an entity tag using a strong validator
+// without consuming the contents of the file. It requires the
+// file info contain the correct size and modification time.
+// It strives to implement the semantics regarding ETags as defined
+// by RFC 9110 section 8.8.3 and 8.8.1. See
+// https://www.rfc-editor.org/rfc/rfc9110.html#section-8.8.3.
+//
+// As our implementation uses file modification timestamp and size,
+// note the following from RFC 9110 section 8.8.1: "A representation's
+// modification time, if defined with only one-second resolution,
+// might be a weak validator if it is possible for the representation to
+// be modified twice during a single second and retrieved between those
+// modifications." The ext4 file system, which underpins the vast majority
+// of Caddy deployments, stores mod times with millisecond precision,
+// which we consider precise enough to qualify as a strong validator.
func calculateEtag(d os.FileInfo) string {
- t := strconv.FormatInt(d.ModTime().Unix(), 36)
- s := strconv.FormatInt(d.Size(), 36)
- return `"` + t + s + `"`
+ mtime := d.ModTime()
+ if mtimeUnix := mtime.Unix(); mtimeUnix == 0 || mtimeUnix == 1 {
+ return "" // not useful anyway; see issue #5548
+ }
+ var sb strings.Builder
+ sb.WriteRune('"')
+ sb.WriteString(strconv.FormatInt(mtime.UnixNano(), 36))
+ sb.WriteString(strconv.FormatInt(d.Size(), 36))
+ sb.WriteRune('"')
+ return sb.String()
}
-func redirect(w http.ResponseWriter, r *http.Request, to string) error {
- for strings.HasPrefix(to, "//") {
- // prevent path-based open redirects
- to = strings.TrimPrefix(to, "/")
+// getEtagFromFile finds the first corresponding etag file for a given file in the file system and returns its content
+func (fsrv *FileServer) getEtagFromFile(fileSystem fs.FS, filename string) (string, error) {
+ for _, suffix := range fsrv.EtagFileExtensions {
+ etagFilename := filename + suffix
+ etag, err := fs.ReadFile(fileSystem, etagFilename)
+ if errors.Is(err, fs.ErrNotExist) {
+ continue
+ }
+ if err != nil {
+ return "", fmt.Errorf("cannot read etag from file %s: %v", etagFilename, err)
+ }
+
+ // Etags should not contain newline characters
+ etag = bytes.ReplaceAll(etag, []byte("\n"), []byte{})
+
+ return string(etag), nil
}
- http.Redirect(w, r, to, http.StatusPermanentRedirect)
+ return "", nil
+}
+
+// redirect performs a redirect to a given path. The 'toPath' parameter
+// MUST be solely a path, and MUST NOT include a query.
+func redirect(w http.ResponseWriter, r *http.Request, toPath string) error {
+ for strings.HasPrefix(toPath, "//") {
+ // prevent path-based open redirects
+ toPath = strings.TrimPrefix(toPath, "/")
+ }
+ // preserve the query string if present
+ if r.URL.RawQuery != "" {
+ toPath += "?" + r.URL.RawQuery
+ }
+ http.Redirect(w, r, toPath, http.StatusPermanentRedirect)
return nil
}
+// statusOverrideResponseWriter intercepts WriteHeader calls
+// to write the HTTP status code we want instead
+// of the one http.ServeContent will use by default (usually 200)
+type statusOverrideResponseWriter struct {
+ http.ResponseWriter
+ code int
+}
+
+// WriteHeader intercepts calls by the stdlib to WriteHeader
+// to instead write the HTTP status code we want.
+func (wr statusOverrideResponseWriter) WriteHeader(int) {
+ wr.ResponseWriter.WriteHeader(wr.code)
+}
+
+// Unwrap returns the underlying ResponseWriter, necessary for
+// http.ResponseController to work correctly.
+func (wr statusOverrideResponseWriter) Unwrap() http.ResponseWriter {
+ return wr.ResponseWriter
+}
+
var defaultIndexNames = []string{"index.html", "index.txt"}
-var bufPool = sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
-}
-
-const minBackoff, maxBackoff = 2, 5
+const (
+ minBackoff, maxBackoff = 2, 5
+ separator = string(filepath.Separator)
+)
// Interface guards
var (
diff --git a/modules/caddyhttp/fileserver/staticfiles_test.go b/modules/caddyhttp/fileserver/staticfiles_test.go
index 73762c77..5d6133c7 100644
--- a/modules/caddyhttp/fileserver/staticfiles_test.go
+++ b/modules/caddyhttp/fileserver/staticfiles_test.go
@@ -15,87 +15,116 @@
package fileserver
import (
- "net/url"
"path/filepath"
+ "runtime"
+ "strings"
"testing"
)
-func TestSanitizedPathJoin(t *testing.T) {
- // For easy reference:
- // %2e = .
- // %2f = /
- // %5c = \
+func TestFileHidden(t *testing.T) {
for i, tc := range []struct {
- inputRoot string
+ inputHide []string
inputPath string
- expect string
+ expect bool
}{
{
+ inputHide: nil,
inputPath: "",
- expect: ".",
+ expect: false,
},
{
- inputPath: "/",
- expect: ".",
+ inputHide: []string{".gitignore"},
+ inputPath: "/.gitignore",
+ expect: true,
},
{
+ inputHide: []string{".git"},
+ inputPath: "/.gitignore",
+ expect: false,
+ },
+ {
+ inputHide: []string{"/.git"},
+ inputPath: "/.gitignore",
+ expect: false,
+ },
+ {
+ inputHide: []string{".git"},
+ inputPath: "/.git",
+ expect: true,
+ },
+ {
+ inputHide: []string{".git"},
+ inputPath: "/.git/foo",
+ expect: true,
+ },
+ {
+ inputHide: []string{".git"},
+ inputPath: "/foo/.git/bar",
+ expect: true,
+ },
+ {
+ inputHide: []string{"/prefix"},
+ inputPath: "/prefix/foo",
+ expect: true,
+ },
+ {
+ inputHide: []string{"/foo/*/bar"},
+ inputPath: "/foo/asdf/bar",
+ expect: true,
+ },
+ {
+ inputHide: []string{"*.txt"},
+ inputPath: "/foo/bar.txt",
+ expect: true,
+ },
+ {
+ inputHide: []string{"/foo/bar/*.txt"},
+ inputPath: "/foo/bar/baz.txt",
+ expect: true,
+ },
+ {
+ inputHide: []string{"/foo/bar/*.txt"},
+ inputPath: "/foo/bar.txt",
+ expect: false,
+ },
+ {
+ inputHide: []string{"/foo/bar/*.txt"},
+ inputPath: "/foo/bar/index.html",
+ expect: false,
+ },
+ {
+ inputHide: []string{"/foo"},
inputPath: "/foo",
- expect: "foo",
+ expect: true,
},
{
- inputPath: "/foo/bar",
- expect: filepath.Join("foo", "bar"),
+ inputHide: []string{"/foo"},
+ inputPath: "/foobar",
+ expect: false,
},
{
- inputRoot: "/a",
- inputPath: "/foo/bar",
- expect: filepath.Join("/", "a", "foo", "bar"),
+ inputHide: []string{"first", "second"},
+ inputPath: "/second",
+ expect: true,
},
- {
- inputPath: "/foo/../bar",
- expect: "bar",
- },
- {
- inputRoot: "/a/b",
- inputPath: "/foo/../bar",
- expect: filepath.Join("/", "a", "b", "bar"),
- },
- {
- inputRoot: "/a/b",
- inputPath: "/..%2fbar",
- expect: filepath.Join("/", "a", "b", "bar"),
- },
- {
- inputRoot: "/a/b",
- inputPath: "/%2e%2e%2fbar",
- expect: filepath.Join("/", "a", "b", "bar"),
- },
- {
- inputRoot: "/a/b",
- inputPath: "/%2e%2e%2f%2e%2e%2f",
- expect: filepath.Join("/", "a", "b"),
- },
- {
- inputRoot: "C:\\www",
- inputPath: "/foo/bar",
- expect: filepath.Join("C:\\www", "foo", "bar"),
- },
- // TODO: test more windows paths... on windows... sigh.
} {
- // we don't *need* to use an actual parsed URL, but it
- // adds some authenticity to the tests since real-world
- // values will be coming in from URLs; thus, the test
- // corpus can contain paths as encoded by clients, which
- // more closely emulates the actual attack vector
- u, err := url.Parse("http://test:9999" + tc.inputPath)
- if err != nil {
- t.Fatalf("Test %d: invalid URL: %v", i, err)
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(tc.inputPath, "/") {
+ tc.inputPath, _ = filepath.Abs(tc.inputPath)
+ }
+ tc.inputPath = filepath.FromSlash(tc.inputPath)
+ for i := range tc.inputHide {
+ if strings.HasPrefix(tc.inputHide[i], "/") {
+ tc.inputHide[i], _ = filepath.Abs(tc.inputHide[i])
+ }
+ tc.inputHide[i] = filepath.FromSlash(tc.inputHide[i])
+ }
}
- actual := sanitizedPathJoin(tc.inputRoot, u.Path)
+
+ actual := fileHidden(tc.inputPath, tc.inputHide)
if actual != tc.expect {
- t.Errorf("Test %d: [%s %s] => %s (expected %s)", i, tc.inputRoot, tc.inputPath, actual, tc.expect)
+ t.Errorf("Test %d: Does %v hide %s? Got %t but expected %t",
+ i, tc.inputHide, tc.inputPath, actual, tc.expect)
}
}
}
-
-// TODO: test fileHidden
diff --git a/modules/caddyhttp/fileserver/testdata/%D9%85%D9%84%D9%81.txt b/modules/caddyhttp/fileserver/testdata/%D9%85%D9%84%D9%81.txt
new file mode 100644
index 00000000..0f4bf1a9
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/%D9%85%D9%84%D9%81.txt
@@ -0,0 +1 @@
+%D9%85%D9%84%D9%81.txt
\ No newline at end of file
diff --git a/modules/caddyhttp/fileserver/testdata/foo.txt b/modules/caddyhttp/fileserver/testdata/foo.txt
new file mode 100644
index 00000000..996f1789
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/foo.txt
@@ -0,0 +1 @@
+foo.txt
\ No newline at end of file
diff --git a/modules/caddyhttp/fileserver/testdata/foodir/bar.txt b/modules/caddyhttp/fileserver/testdata/foodir/bar.txt
new file mode 100644
index 00000000..df34bd20
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/foodir/bar.txt
@@ -0,0 +1 @@
+foodir/bar.txt
\ No newline at end of file
diff --git a/modules/caddyhttp/fileserver/testdata/foodir/foo.txt b/modules/caddyhttp/fileserver/testdata/foodir/foo.txt
new file mode 100644
index 00000000..0e3335b4
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/foodir/foo.txt
@@ -0,0 +1 @@
+foodir/foo.txt
\ No newline at end of file
diff --git a/modules/caddyhttp/fileserver/testdata/large.txt b/modules/caddyhttp/fileserver/testdata/large.txt
new file mode 100644
index 00000000..c3662374
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/large.txt
@@ -0,0 +1,3 @@
+This is a file with more content than the other files in this directory
+such that tests using the largest_size policy pick this file, or the
+smallest_size policy avoids this file.
\ No newline at end of file
diff --git a/modules/caddyhttp/fileserver/testdata/ملف.txt b/modules/caddyhttp/fileserver/testdata/ملف.txt
new file mode 100644
index 00000000..91858286
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/ملف.txt
@@ -0,0 +1 @@
+ملف.txt
\ No newline at end of file
diff --git a/modules/caddyhttp/headers/caddyfile.go b/modules/caddyhttp/headers/caddyfile.go
index d893cab3..e55e9fab 100644
--- a/modules/caddyhttp/headers/caddyfile.go
+++ b/modules/caddyhttp/headers/caddyfile.go
@@ -15,7 +15,9 @@
package headers
import (
+ "fmt"
"net/http"
+ "reflect"
"strings"
"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
@@ -23,157 +25,257 @@ import (
)
func init() {
- httpcaddyfile.RegisterHandlerDirective("header", parseCaddyfile)
- httpcaddyfile.RegisterHandlerDirective("request_header", parseReqHdrCaddyfile)
+ httpcaddyfile.RegisterDirective("header", parseCaddyfile)
+ httpcaddyfile.RegisterDirective("request_header", parseReqHdrCaddyfile)
}
// parseCaddyfile sets up the handler for response headers from
// Caddyfile tokens. Syntax:
//
-// header [<matcher>] [[+|-]<field> [<value>] [<replace>]] {
-// [+]<field> [<value> [<replace>]]
-// -<field>
-// [defer]
-// }
+// header [<matcher>] [[+|-|?|>]<field> [<value>|<find>] [<replace>]] {
+// [+]<field> [<value>|<find> [<replace>]]
+// ?<field> <default_value>
+// -<field>
+// ><field>
+// [defer]
+// }
//
// Either a block can be opened or a single header field can be configured
// in the first line, but not both in the same directive. Header operations
// are deferred to write-time if any headers are being deleted or if the
-// 'defer' subdirective is used.
-func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
- hdr := new(Handler)
+// 'defer' subdirective is used. + appends a header value, - deletes a field,
+// ? conditionally sets a value only if the header field is not already set,
+// and > sets a field with defer enabled.
+func parseCaddyfile(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
+ h.Next() // consume directive name
+ matcherSet, err := h.ExtractMatcherSet()
+ if err != nil {
+ return nil, err
+ }
+ h.Next() // consume the directive name again (matcher parsing resets)
- makeResponseOps := func() {
- if hdr.Response == nil {
- hdr.Response = &RespHeaderOps{
- HeaderOps: new(HeaderOps),
- }
+ makeHandler := func() Handler {
+ return Handler{
+ Response: &RespHeaderOps{
+ HeaderOps: &HeaderOps{},
+ },
}
}
+ handler, handlerWithRequire := makeHandler(), makeHandler()
- for h.Next() {
- // first see if headers are in the initial line
- var hasArgs bool
+ // first see if headers are in the initial line
+ var hasArgs bool
+ if h.NextArg() {
+ hasArgs = true
+ field := h.Val()
+ var value string
+ var replacement *string
if h.NextArg() {
- hasArgs = true
- field := h.Val()
- var value, replacement string
- if h.NextArg() {
- value = h.Val()
- }
- if h.NextArg() {
- replacement = h.Val()
- }
- makeResponseOps()
- CaddyfileHeaderOp(hdr.Response.HeaderOps, field, value, replacement)
- if len(hdr.Response.HeaderOps.Delete) > 0 {
- hdr.Response.Deferred = true
- }
+ value = h.Val()
}
-
- // if not, they should be in a block
- for h.NextBlock(0) {
- field := h.Val()
- if field == "defer" {
- hdr.Response.Deferred = true
- continue
- }
- if hasArgs {
- return nil, h.Err("cannot specify headers in both arguments and block")
- }
- var value, replacement string
- if h.NextArg() {
- value = h.Val()
- }
- if h.NextArg() {
- replacement = h.Val()
- }
- makeResponseOps()
- CaddyfileHeaderOp(hdr.Response.HeaderOps, field, value, replacement)
- if len(hdr.Response.HeaderOps.Delete) > 0 {
- hdr.Response.Deferred = true
- }
+ if h.NextArg() {
+ arg := h.Val()
+ replacement = &arg
+ }
+ err := applyHeaderOp(
+ handler.Response.HeaderOps,
+ handler.Response,
+ field,
+ value,
+ replacement,
+ )
+ if err != nil {
+ return nil, h.Err(err.Error())
+ }
+ if len(handler.Response.HeaderOps.Delete) > 0 {
+ handler.Response.Deferred = true
}
}
- return hdr, nil
+ // if not, they should be in a block
+ for h.NextBlock(0) {
+ field := h.Val()
+ if field == "defer" {
+ handler.Response.Deferred = true
+ continue
+ }
+ if hasArgs {
+ return nil, h.Err("cannot specify headers in both arguments and block") // because it would be weird
+ }
+
+ // sometimes it is habitual for users to suffix a field name with a colon,
+ // as if they were writing a curl command or something; see
+ // https://caddy.community/t/v2-reverse-proxy-please-add-cors-example-to-the-docs/7349/19
+ field = strings.TrimSuffix(field, ":")
+
+ var value string
+ var replacement *string
+ if h.NextArg() {
+ value = h.Val()
+ }
+ if h.NextArg() {
+ arg := h.Val()
+ replacement = &arg
+ }
+
+ handlerToUse := handler
+ if strings.HasPrefix(field, "?") {
+ handlerToUse = handlerWithRequire
+ }
+
+ err := applyHeaderOp(
+ handlerToUse.Response.HeaderOps,
+ handlerToUse.Response,
+ field,
+ value,
+ replacement,
+ )
+ if err != nil {
+ return nil, h.Err(err.Error())
+ }
+ }
+
+ var configValues []httpcaddyfile.ConfigValue
+ if !reflect.DeepEqual(handler, makeHandler()) {
+ configValues = append(configValues, h.NewRoute(matcherSet, handler)...)
+ }
+ if !reflect.DeepEqual(handlerWithRequire, makeHandler()) {
+ configValues = append(configValues, h.NewRoute(matcherSet, handlerWithRequire)...)
+ }
+
+ return configValues, nil
}
// parseReqHdrCaddyfile sets up the handler for request headers
// from Caddyfile tokens. Syntax:
//
-// request_header [<matcher>] [[+|-]<field> [<value>] [<replace>]]
-//
-func parseReqHdrCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
- hdr := new(Handler)
- for h.Next() {
- if !h.NextArg() {
- return nil, h.ArgErr()
- }
- field := h.Val()
+// request_header [<matcher>] [[+|-]<field> [<value>] [<replace>]]
+func parseReqHdrCaddyfile(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
+ h.Next() // consume directive name
+ matcherSet, err := h.ExtractMatcherSet()
+ if err != nil {
+ return nil, err
+ }
+ h.Next() // consume the directive name again (matcher parsing resets)
- // sometimes it is habitual for users to suffix a field name with a colon,
- // as if they were writing a curl command or something; see
- // https://caddy.community/t/v2-reverse-proxy-please-add-cors-example-to-the-docs/7349
- field = strings.TrimSuffix(field, ":")
+ configValues := []httpcaddyfile.ConfigValue{}
- var value, replacement string
- if h.NextArg() {
- value = h.Val()
- }
- if h.NextArg() {
- replacement = h.Val()
- if h.NextArg() {
- return nil, h.ArgErr()
- }
- }
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ field := h.Val()
- if hdr.Request == nil {
- hdr.Request = new(HeaderOps)
- }
- CaddyfileHeaderOp(hdr.Request, field, value, replacement)
+ hdr := Handler{
+ Request: &HeaderOps{},
+ }
+ // sometimes it is habitual for users to suffix a field name with a colon,
+ // as if they were writing a curl command or something; see
+ // https://caddy.community/t/v2-reverse-proxy-please-add-cors-example-to-the-docs/7349/19
+ field = strings.TrimSuffix(field, ":")
+
+ var value string
+ var replacement *string
+ if h.NextArg() {
+ value = h.Val()
+ }
+ if h.NextArg() {
+ arg := h.Val()
+ replacement = &arg
if h.NextArg() {
return nil, h.ArgErr()
}
}
- return hdr, nil
+
+ if hdr.Request == nil {
+ hdr.Request = new(HeaderOps)
+ }
+ if err := CaddyfileHeaderOp(hdr.Request, field, value, replacement); err != nil {
+ return nil, h.Err(err.Error())
+ }
+
+ configValues = append(configValues, h.NewRoute(matcherSet, hdr)...)
+
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ return configValues, nil
}
// CaddyfileHeaderOp applies a new header operation according to
// field, value, and replacement. The field can be prefixed with
// "+" or "-" to specify adding or removing; otherwise, the value
// will be set (overriding any previous value). If replacement is
-// non-empty, value will be treated as a regular expression which
+// non-nil, value will be treated as a regular expression which
// will be used to search and then replacement will be used to
// complete the substring replacement; in that case, any + or -
// prefix to field will be ignored.
-func CaddyfileHeaderOp(ops *HeaderOps, field, value, replacement string) {
- if strings.HasPrefix(field, "+") {
+func CaddyfileHeaderOp(ops *HeaderOps, field, value string, replacement *string) error {
+ return applyHeaderOp(ops, nil, field, value, replacement)
+}
+
+func applyHeaderOp(ops *HeaderOps, respHeaderOps *RespHeaderOps, field, value string, replacement *string) error {
+ switch {
+ case strings.HasPrefix(field, "+"): // append
if ops.Add == nil {
ops.Add = make(http.Header)
}
- ops.Add.Set(field[1:], value)
- } else if strings.HasPrefix(field, "-") {
+ ops.Add.Add(field[1:], value)
+
+ case strings.HasPrefix(field, "-"): // delete
ops.Delete = append(ops.Delete, field[1:])
- } else {
- if replacement == "" {
- if ops.Set == nil {
- ops.Set = make(http.Header)
- }
- ops.Set.Set(field, value)
- } else {
- if ops.Replace == nil {
- ops.Replace = make(map[string][]Replacement)
- }
- field = strings.TrimLeft(field, "+-")
- ops.Replace[field] = append(
- ops.Replace[field],
- Replacement{
- SearchRegexp: value,
- Replace: replacement,
- },
- )
+ if respHeaderOps != nil {
+ respHeaderOps.Deferred = true
}
+
+ case strings.HasPrefix(field, "?"): // default (conditional on not existing) - response headers only
+ if respHeaderOps == nil {
+ return fmt.Errorf("%v: the default header modifier ('?') can only be used on response headers; for conditional manipulation of request headers, use matchers", field)
+ }
+ if respHeaderOps.Require == nil {
+ respHeaderOps.Require = &caddyhttp.ResponseMatcher{
+ Headers: make(http.Header),
+ }
+ }
+ field = strings.TrimPrefix(field, "?")
+ respHeaderOps.Require.Headers[field] = nil
+ if respHeaderOps.Set == nil {
+ respHeaderOps.Set = make(http.Header)
+ }
+ respHeaderOps.Set.Set(field, value)
+
+ case replacement != nil: // replace
+ // allow defer shortcut for replace syntax
+ if strings.HasPrefix(field, ">") && respHeaderOps != nil {
+ respHeaderOps.Deferred = true
+ }
+ if ops.Replace == nil {
+ ops.Replace = make(map[string][]Replacement)
+ }
+ field = strings.TrimLeft(field, "+-?>")
+ ops.Replace[field] = append(
+ ops.Replace[field],
+ Replacement{
+ SearchRegexp: value,
+ Replace: *replacement,
+ },
+ )
+
+ case strings.HasPrefix(field, ">"): // set (overwrite) with defer
+ if ops.Set == nil {
+ ops.Set = make(http.Header)
+ }
+ ops.Set.Set(field[1:], value)
+ if respHeaderOps != nil {
+ respHeaderOps.Deferred = true
+ }
+
+ default: // set (overwrite)
+ if ops.Set == nil {
+ ops.Set = make(http.Header)
+ }
+ ops.Set.Set(field, value)
}
+
+ return nil
}
diff --git a/modules/caddyhttp/headers/headers.go b/modules/caddyhttp/headers/headers.go
index 681c21f7..c66bd414 100644
--- a/modules/caddyhttp/headers/headers.go
+++ b/modules/caddyhttp/headers/headers.go
@@ -54,15 +54,15 @@ func (Handler) CaddyModule() caddy.ModuleInfo {
}
// Provision sets up h's configuration.
-func (h *Handler) Provision(_ caddy.Context) error {
+func (h *Handler) Provision(ctx caddy.Context) error {
if h.Request != nil {
- err := h.Request.provision()
+ err := h.Request.Provision(ctx)
if err != nil {
return err
}
}
if h.Response != nil {
- err := h.Response.provision()
+ err := h.Response.Provision(ctx)
if err != nil {
return err
}
@@ -118,23 +118,31 @@ type HeaderOps struct {
// Sets HTTP headers; replaces existing header fields.
Set http.Header `json:"set,omitempty"`
- // Names of HTTP header fields to delete.
+ // Names of HTTP header fields to delete. Basic wildcards are supported:
+ //
+ // - Start with `*` for all field names with the given suffix;
+ // - End with `*` for all field names with the given prefix;
+ // - Start and end with `*` for all field names containing a substring.
Delete []string `json:"delete,omitempty"`
- // Performs substring replacements of HTTP headers in-situ.
+ // Performs in-situ substring replacements of HTTP headers.
+ // Keys are the field names on which to perform the associated replacements.
+ // If the field name is `*`, the replacements are performed on all header fields.
Replace map[string][]Replacement `json:"replace,omitempty"`
}
-func (ops *HeaderOps) provision() error {
+// Provision sets up the header operations.
+func (ops *HeaderOps) Provision(_ caddy.Context) error {
for fieldName, replacements := range ops.Replace {
for i, r := range replacements {
- if r.SearchRegexp != "" {
- re, err := regexp.Compile(r.SearchRegexp)
- if err != nil {
- return fmt.Errorf("replacement %d for header field '%s': %v", i, fieldName, err)
- }
- replacements[i].re = re
+ if r.SearchRegexp == "" {
+ continue
}
+ re, err := regexp.Compile(r.SearchRegexp)
+ if err != nil {
+ return fmt.Errorf("replacement %d for header field '%s': %v", i, fieldName, err)
+ }
+ replacements[i].re = re
}
}
return nil
@@ -177,7 +185,7 @@ type RespHeaderOps struct {
Require *caddyhttp.ResponseMatcher `json:"require,omitempty"`
// If true, header operations will be deferred until
- // they are written out. Superceded if Require is set.
+ // they are written out. Superseded if Require is set.
// Usually you will need to set this to true if any
// fields are being deleted.
Deferred bool `json:"deferred,omitempty"`
@@ -185,40 +193,76 @@ type RespHeaderOps struct {
// ApplyTo applies ops to hdr using repl.
func (ops HeaderOps) ApplyTo(hdr http.Header, repl *caddy.Replacer) {
+ // before manipulating headers in other ways, check if there
+ // is configuration to delete all headers, and do that first
+ // because if a header is to be added, we don't want to delete
+ // it also
+ for _, fieldName := range ops.Delete {
+ fieldName = repl.ReplaceKnown(fieldName, "")
+ if fieldName == "*" {
+ clear(hdr)
+ }
+ }
+
// add
for fieldName, vals := range ops.Add {
- fieldName = repl.ReplaceAll(fieldName, "")
+ fieldName = repl.ReplaceKnown(fieldName, "")
for _, v := range vals {
- hdr.Add(fieldName, repl.ReplaceAll(v, ""))
+ hdr.Add(fieldName, repl.ReplaceKnown(v, ""))
}
}
// set
for fieldName, vals := range ops.Set {
- fieldName = repl.ReplaceAll(fieldName, "")
+ fieldName = repl.ReplaceKnown(fieldName, "")
var newVals []string
for i := range vals {
// append to new slice so we don't overwrite
// the original values in ops.Set
- newVals = append(newVals, repl.ReplaceAll(vals[i], ""))
+ newVals = append(newVals, repl.ReplaceKnown(vals[i], ""))
}
hdr.Set(fieldName, strings.Join(newVals, ","))
}
// delete
for _, fieldName := range ops.Delete {
- hdr.Del(repl.ReplaceAll(fieldName, ""))
+ fieldName = strings.ToLower(repl.ReplaceKnown(fieldName, ""))
+ if fieldName == "*" {
+ continue // handled above
+ }
+ switch {
+ case strings.HasPrefix(fieldName, "*") && strings.HasSuffix(fieldName, "*"):
+ for existingField := range hdr {
+ if strings.Contains(strings.ToLower(existingField), fieldName[1:len(fieldName)-1]) {
+ delete(hdr, existingField)
+ }
+ }
+ case strings.HasPrefix(fieldName, "*"):
+ for existingField := range hdr {
+ if strings.HasSuffix(strings.ToLower(existingField), fieldName[1:]) {
+ delete(hdr, existingField)
+ }
+ }
+ case strings.HasSuffix(fieldName, "*"):
+ for existingField := range hdr {
+ if strings.HasPrefix(strings.ToLower(existingField), fieldName[:len(fieldName)-1]) {
+ delete(hdr, existingField)
+ }
+ }
+ default:
+ hdr.Del(fieldName)
+ }
}
// replace
for fieldName, replacements := range ops.Replace {
- fieldName = repl.ReplaceAll(fieldName, "")
+ fieldName = http.CanonicalHeaderKey(repl.ReplaceKnown(fieldName, ""))
// all fields...
if fieldName == "*" {
for _, r := range replacements {
- search := repl.ReplaceAll(r.Search, "")
- replace := repl.ReplaceAll(r.Replace, "")
+ search := repl.ReplaceKnown(r.Search, "")
+ replace := repl.ReplaceKnown(r.Replace, "")
for fieldName, vals := range hdr {
for i := range vals {
if r.re != nil {
@@ -234,13 +278,19 @@ func (ops HeaderOps) ApplyTo(hdr http.Header, repl *caddy.Replacer) {
// ...or only with the named field
for _, r := range replacements {
- search := repl.ReplaceAll(r.Search, "")
- replace := repl.ReplaceAll(r.Replace, "")
- for i := range hdr[fieldName] {
- if r.re != nil {
- hdr[fieldName][i] = r.re.ReplaceAllString(hdr[fieldName][i], replace)
- } else {
- hdr[fieldName][i] = strings.ReplaceAll(hdr[fieldName][i], search, replace)
+ search := repl.ReplaceKnown(r.Search, "")
+ replace := repl.ReplaceKnown(r.Replace, "")
+ for hdrFieldName, vals := range hdr {
+ // see issue #4330 for why we don't simply use hdr[fieldName]
+ if http.CanonicalHeaderKey(hdrFieldName) != fieldName {
+ continue
+ }
+ for i := range vals {
+ if r.re != nil {
+ hdr[hdrFieldName][i] = r.re.ReplaceAllString(hdr[hdrFieldName][i], replace)
+ } else {
+ hdr[hdrFieldName][i] = strings.ReplaceAll(hdr[hdrFieldName][i], search, replace)
+ }
}
}
}
@@ -297,7 +347,10 @@ func (rww *responseWriterWrapper) WriteHeader(status int) {
if rww.wroteHeader {
return
}
- rww.wroteHeader = true
+ // 1xx responses aren't final; just informational
+ if status < 100 || status > 199 {
+ rww.wroteHeader = true
+ }
if rww.require == nil || rww.require.Match(status, rww.ResponseWriterWrapper.Header()) {
if rww.headerOps != nil {
rww.headerOps.ApplyTo(rww.ResponseWriterWrapper.Header(), rww.replacer)
@@ -317,5 +370,5 @@ func (rww *responseWriterWrapper) Write(d []byte) (int, error) {
var (
_ caddy.Provisioner = (*Handler)(nil)
_ caddyhttp.MiddlewareHandler = (*Handler)(nil)
- _ caddyhttp.HTTPInterfaces = (*responseWriterWrapper)(nil)
+ _ http.ResponseWriter = (*responseWriterWrapper)(nil)
)
diff --git a/modules/caddyhttp/headers/headers_test.go b/modules/caddyhttp/headers/headers_test.go
index e4f03adc..d74e6fc3 100644
--- a/modules/caddyhttp/headers/headers_test.go
+++ b/modules/caddyhttp/headers/headers_test.go
@@ -14,8 +14,239 @@
package headers
-import "testing"
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
-func TestReqHeaders(t *testing.T) {
- // TODO: write tests
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+func TestHandler(t *testing.T) {
+ for i, tc := range []struct {
+ handler Handler
+ reqHeader http.Header
+ respHeader http.Header
+ respStatusCode int
+ expectedReqHeader http.Header
+ expectedRespHeader http.Header
+ }{
+ {
+ handler: Handler{
+ Request: &HeaderOps{
+ Add: http.Header{
+ "Expose-Secrets": []string{"always"},
+ },
+ },
+ },
+ reqHeader: http.Header{
+ "Expose-Secrets": []string{"i'm serious"},
+ },
+ expectedReqHeader: http.Header{
+ "Expose-Secrets": []string{"i'm serious", "always"},
+ },
+ },
+ {
+ handler: Handler{
+ Request: &HeaderOps{
+ Set: http.Header{
+ "Who-Wins": []string{"batman"},
+ },
+ },
+ },
+ reqHeader: http.Header{
+ "Who-Wins": []string{"joker"},
+ },
+ expectedReqHeader: http.Header{
+ "Who-Wins": []string{"batman"},
+ },
+ },
+ {
+ handler: Handler{
+ Request: &HeaderOps{
+ Delete: []string{"Kick-Me"},
+ },
+ },
+ reqHeader: http.Header{
+ "Kick-Me": []string{"if you can"},
+ "Keep-Me": []string{"i swear i'm innocent"},
+ },
+ expectedReqHeader: http.Header{
+ "Keep-Me": []string{"i swear i'm innocent"},
+ },
+ },
+ {
+ handler: Handler{
+ Request: &HeaderOps{
+ Delete: []string{
+ "*-suffix",
+ "prefix-*",
+ "*_*",
+ },
+ },
+ },
+ reqHeader: http.Header{
+ "Header-Suffix": []string{"lalala"},
+ "Prefix-Test": []string{"asdf"},
+ "Host_Header": []string{"silly django... sigh"}, // see issue #4830
+ "Keep-Me": []string{"foofoofoo"},
+ },
+ expectedReqHeader: http.Header{
+ "Keep-Me": []string{"foofoofoo"},
+ },
+ },
+ {
+ handler: Handler{
+ Request: &HeaderOps{
+ Replace: map[string][]Replacement{
+ "Best-Server": {
+ Replacement{
+ Search: "NGINX",
+ Replace: "the Caddy web server",
+ },
+ Replacement{
+ SearchRegexp: `Apache(\d+)`,
+ Replace: "Caddy",
+ },
+ },
+ },
+ },
+ },
+ reqHeader: http.Header{
+ "Best-Server": []string{"it's NGINX, undoubtedly", "I love Apache2"},
+ },
+ expectedReqHeader: http.Header{
+ "Best-Server": []string{"it's the Caddy web server, undoubtedly", "I love Caddy"},
+ },
+ },
+ {
+ handler: Handler{
+ Response: &RespHeaderOps{
+ Require: &caddyhttp.ResponseMatcher{
+ Headers: http.Header{
+ "Cache-Control": nil,
+ },
+ },
+ HeaderOps: &HeaderOps{
+ Add: http.Header{
+ "Cache-Control": []string{"no-cache"},
+ },
+ },
+ },
+ },
+ respHeader: http.Header{},
+ expectedRespHeader: http.Header{
+ "Cache-Control": []string{"no-cache"},
+ },
+ },
+ {
+ handler: Handler{
+ Response: &RespHeaderOps{
+ Require: &caddyhttp.ResponseMatcher{
+ Headers: http.Header{
+ "Cache-Control": []string{"no-cache"},
+ },
+ },
+ HeaderOps: &HeaderOps{
+ Delete: []string{"Cache-Control"},
+ },
+ },
+ },
+ respHeader: http.Header{
+ "Cache-Control": []string{"no-cache"},
+ },
+ expectedRespHeader: http.Header{},
+ },
+ {
+ handler: Handler{
+ Response: &RespHeaderOps{
+ Require: &caddyhttp.ResponseMatcher{
+ StatusCode: []int{5},
+ },
+ HeaderOps: &HeaderOps{
+ Add: http.Header{
+ "Fail-5xx": []string{"true"},
+ },
+ },
+ },
+ },
+ respStatusCode: 503,
+ respHeader: http.Header{},
+ expectedRespHeader: http.Header{
+ "Fail-5xx": []string{"true"},
+ },
+ },
+ {
+ handler: Handler{
+ Request: &HeaderOps{
+ Replace: map[string][]Replacement{
+ "Case-Insensitive": {
+ Replacement{
+ Search: "issue4330",
+ Replace: "issue #4330",
+ },
+ },
+ },
+ },
+ },
+ reqHeader: http.Header{
+ "case-insensitive": []string{"issue4330"},
+ "Other-Header": []string{"issue4330"},
+ },
+ expectedReqHeader: http.Header{
+ "case-insensitive": []string{"issue #4330"},
+ "Other-Header": []string{"issue4330"},
+ },
+ },
+ } {
+ rr := httptest.NewRecorder()
+
+ req := &http.Request{Header: tc.reqHeader}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ tc.handler.Provision(caddy.Context{})
+
+ next := nextHandler(func(w http.ResponseWriter, r *http.Request) error {
+ for k, hdrs := range tc.respHeader {
+ for _, v := range hdrs {
+ w.Header().Add(k, v)
+ }
+ }
+
+ status := 200
+ if tc.respStatusCode != 0 {
+ status = tc.respStatusCode
+ }
+ w.WriteHeader(status)
+
+ if tc.expectedReqHeader != nil && !reflect.DeepEqual(r.Header, tc.expectedReqHeader) {
+ return fmt.Errorf("expected request header %v, got %v", tc.expectedReqHeader, r.Header)
+ }
+
+ return nil
+ })
+
+ if err := tc.handler.ServeHTTP(rr, req, next); err != nil {
+ t.Errorf("Test %d: %v", i, err)
+ continue
+ }
+
+ actual := rr.Header()
+ if tc.expectedRespHeader != nil && !reflect.DeepEqual(actual, tc.expectedRespHeader) {
+ t.Errorf("Test %d: expected response header %v, got %v", i, tc.expectedRespHeader, actual)
+ continue
+ }
+ }
+}
+
+type nextHandler func(http.ResponseWriter, *http.Request) error
+
+func (f nextHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) error {
+ return f(w, r)
}
diff --git a/modules/caddyhttp/http2listener.go b/modules/caddyhttp/http2listener.go
new file mode 100644
index 00000000..51b356a7
--- /dev/null
+++ b/modules/caddyhttp/http2listener.go
@@ -0,0 +1,102 @@
+package caddyhttp
+
+import (
+ "context"
+ "crypto/tls"
+ weakrand "math/rand"
+ "net"
+ "net/http"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http2"
+)
+
+// http2Listener wraps the listener to solve the following problems:
+// 1. serve h2 natively without the h2c hack when the listener handles the TLS connection but
+// doesn't return a *tls.Conn
+// 2. graceful shutdown. The shutdown logic is copied from the stdlib http.Server; it's an extra maintenance burden, but
+// the shutdown logic may later be extracted for use with h2c graceful shutdown. http2.Server supports graceful shutdown by
+// sending a GOAWAY frame to connected clients, but it doesn't track connection status; that requires an explicit call to http2.ConfigureServer
+type http2Listener struct {
+ cnt uint64
+ net.Listener
+ server *http.Server
+ h2server *http2.Server
+}
+
+type connectionStateConn interface {
+ net.Conn
+ ConnectionState() tls.ConnectionState
+}
+
+func (h *http2Listener) Accept() (net.Conn, error) {
+ for {
+ conn, err := h.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+
+ if csc, ok := conn.(connectionStateConn); ok {
+ // *tls.Conn will return empty string because it's only populated after handshake is complete
+ if csc.ConnectionState().NegotiatedProtocol == http2.NextProtoTLS {
+ go h.serveHttp2(csc)
+ continue
+ }
+ }
+
+ return conn, nil
+ }
+}
+
+func (h *http2Listener) serveHttp2(csc connectionStateConn) {
+ atomic.AddUint64(&h.cnt, 1)
+ h.runHook(csc, http.StateNew)
+ defer func() {
+ csc.Close()
+ atomic.AddUint64(&h.cnt, ^uint64(0))
+ h.runHook(csc, http.StateClosed)
+ }()
+ h.h2server.ServeConn(csc, &http2.ServeConnOpts{
+ Context: h.server.ConnContext(context.Background(), csc),
+ BaseConfig: h.server,
+ Handler: h.server.Handler,
+ })
+}
+
+const shutdownPollIntervalMax = 500 * time.Millisecond
+
+func (h *http2Listener) Shutdown(ctx context.Context) error {
+ pollIntervalBase := time.Millisecond
+ nextPollInterval := func() time.Duration {
+ // Add 10% jitter.
+ //nolint:gosec
+ interval := pollIntervalBase + time.Duration(weakrand.Intn(int(pollIntervalBase/10)))
+ // Double and clamp for next time.
+ pollIntervalBase *= 2
+ if pollIntervalBase > shutdownPollIntervalMax {
+ pollIntervalBase = shutdownPollIntervalMax
+ }
+ return interval
+ }
+
+ timer := time.NewTimer(nextPollInterval())
+ defer timer.Stop()
+ for {
+ if atomic.LoadUint64(&h.cnt) == 0 {
+ return nil
+ }
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-timer.C:
+ timer.Reset(nextPollInterval())
+ }
+ }
+}
+
+func (h *http2Listener) runHook(conn net.Conn, state http.ConnState) {
+ if h.server.ConnState != nil {
+ h.server.ConnState(conn, state)
+ }
+}
diff --git a/modules/caddyhttp/httpredirectlistener.go b/modules/caddyhttp/httpredirectlistener.go
new file mode 100644
index 00000000..ce9ac030
--- /dev/null
+++ b/modules/caddyhttp/httpredirectlistener.go
@@ -0,0 +1,174 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+)
+
+func init() {
+ caddy.RegisterModule(HTTPRedirectListenerWrapper{})
+}
+
+// HTTPRedirectListenerWrapper provides HTTP->HTTPS redirects for
+// connections that come on the TLS port as an HTTP request,
+// by detecting using the first few bytes that it's not a TLS
+// handshake, but instead an HTTP request.
+//
+// This is especially useful when using a non-standard HTTPS port.
+// A user may simply type the address in their browser without the
+// https:// scheme, which would cause the browser to attempt the
+// connection over HTTP, but this would cause a "Client sent an
+// HTTP request to an HTTPS server" error response.
+//
+// This listener wrapper must be placed BEFORE the "tls" listener
+// wrapper, for it to work properly.
+type HTTPRedirectListenerWrapper struct {
+ // MaxHeaderBytes is the maximum size to parse from a client's
+ // HTTP request headers. Default: 1 MB
+ MaxHeaderBytes int64 `json:"max_header_bytes,omitempty"`
+}
+
+func (HTTPRedirectListenerWrapper) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "caddy.listeners.http_redirect",
+ New: func() caddy.Module { return new(HTTPRedirectListenerWrapper) },
+ }
+}
+
+func (h *HTTPRedirectListenerWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ return nil
+}
+
+func (h *HTTPRedirectListenerWrapper) WrapListener(l net.Listener) net.Listener {
+ return &httpRedirectListener{l, h.MaxHeaderBytes}
+}
+
+// httpRedirectListener is listener that checks the first few bytes
+// of the request when the server is intended to accept HTTPS requests,
+// to respond to an HTTP request with a redirect.
+type httpRedirectListener struct {
+ net.Listener
+ maxHeaderBytes int64
+}
+
+// Accept waits for and returns the next connection to the listener,
+// wrapping it with a httpRedirectConn.
+func (l *httpRedirectListener) Accept() (net.Conn, error) {
+ c, err := l.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+
+ maxHeaderBytes := l.maxHeaderBytes
+ if maxHeaderBytes == 0 {
+ maxHeaderBytes = 1024 * 1024
+ }
+
+ return &httpRedirectConn{
+ Conn: c,
+ limit: maxHeaderBytes,
+ r: bufio.NewReader(c),
+ }, nil
+}
+
+type httpRedirectConn struct {
+ net.Conn
+ once bool
+ limit int64
+ r *bufio.Reader
+}
+
+// Read tries to peek at the first few bytes of the request, and if we get
+// an error reading the headers, and that error was due to the bytes looking
+// like an HTTP request, then we perform a HTTP->HTTPS redirect on the same
+// port as the original connection.
+func (c *httpRedirectConn) Read(p []byte) (int, error) {
+ if c.once {
+ return c.r.Read(p)
+ }
+ // no need to use sync.Once - net.Conn is not read from concurrently.
+ c.once = true
+
+ firstBytes, err := c.r.Peek(5)
+ if err != nil {
+ return 0, err
+ }
+
+ // If the request doesn't look like HTTP, then it's probably
+ // TLS bytes, and we don't need to do anything.
+ if !firstBytesLookLikeHTTP(firstBytes) {
+ return c.r.Read(p)
+ }
+
+	// From now on, we can be almost certain the request is HTTP.
+	// The returned error will be non-nil, and callers are expected
+	// to close the connection.
+
+ // Set the read limit, io.MultiReader is needed because
+ // when resetting, *bufio.Reader discards buffered data.
+ buffered, _ := c.r.Peek(c.r.Buffered())
+ mr := io.MultiReader(bytes.NewReader(buffered), c.Conn)
+ c.r.Reset(io.LimitReader(mr, c.limit))
+
+ // Parse the HTTP request, so we can get the Host and URL to redirect to.
+ req, err := http.ReadRequest(c.r)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't read HTTP request")
+ }
+
+ // Build the redirect response, using the same Host and URL,
+ // but replacing the scheme with https.
+ headers := make(http.Header)
+ headers.Add("Location", "https://"+req.Host+req.URL.String())
+ resp := &http.Response{
+ Proto: "HTTP/1.0",
+ Status: "308 Permanent Redirect",
+ StatusCode: 308,
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Header: headers,
+ }
+
+ err = resp.Write(c.Conn)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't write HTTP->HTTPS redirect")
+ }
+
+ return 0, fmt.Errorf("redirected HTTP request on HTTPS port")
+}
+
+// firstBytesLookLikeHTTP reports whether a TLS record header
+// looks like it might've been a misdirected plaintext HTTP request.
+func firstBytesLookLikeHTTP(hdr []byte) bool {
+ switch string(hdr[:5]) {
+ case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
+ return true
+ }
+ return false
+}
+
+var (
+ _ caddy.ListenerWrapper = (*HTTPRedirectListenerWrapper)(nil)
+ _ caddyfile.Unmarshaler = (*HTTPRedirectListenerWrapper)(nil)
+)
diff --git a/modules/caddyhttp/intercept/intercept.go b/modules/caddyhttp/intercept/intercept.go
new file mode 100644
index 00000000..29889dcc
--- /dev/null
+++ b/modules/caddyhttp/intercept/intercept.go
@@ -0,0 +1,352 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package intercept
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+func init() {
+ caddy.RegisterModule(Intercept{})
+ httpcaddyfile.RegisterHandlerDirective("intercept", parseCaddyfile)
+}
+
+// Intercept is a middleware that intercepts then replaces or modifies the original response.
+// It can, for instance, be used to implement X-Sendfile/X-Accel-Redirect-like features
+// when using modules like FrankenPHP or Caddy Snake.
+//
+// EXPERIMENTAL: Subject to change or removal.
+type Intercept struct {
+ // List of handlers and their associated matchers to evaluate
+ // after successful response generation.
+ // The first handler that matches the original response will
+ // be invoked. The original response body will not be
+ // written to the client;
+ // it is up to the handler to finish handling the response.
+ //
+	// Two new placeholders are available in this handler chain:
+ // - `{http.intercept.status_code}` The status code from the response
+ // - `{http.intercept.header.*}` The headers from the response
+ HandleResponse []caddyhttp.ResponseHandler `json:"handle_response,omitempty"`
+
+ // Holds the named response matchers from the Caddyfile while adapting
+ responseMatchers map[string]caddyhttp.ResponseMatcher
+
+ // Holds the handle_response Caddyfile tokens while adapting
+ handleResponseSegments []*caddyfile.Dispenser
+
+ logger *zap.Logger
+}
+
+// CaddyModule returns the Caddy module information.
+//
+// EXPERIMENTAL: Subject to change or removal.
+func (Intercept) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.handlers.intercept",
+ New: func() caddy.Module { return new(Intercept) },
+ }
+}
+
+// Provision ensures that i is set up properly before use.
+//
+// EXPERIMENTAL: Subject to change or removal.
+func (irh *Intercept) Provision(ctx caddy.Context) error {
+ // set up any response routes
+ for i, rh := range irh.HandleResponse {
+ err := rh.Provision(ctx)
+ if err != nil {
+ return fmt.Errorf("provisioning response handler %d: %w", i, err)
+ }
+ }
+
+ irh.logger = ctx.Logger()
+
+ return nil
+}
+
+var bufPool = sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+}
+
+// TODO: handle status code replacement
+//
+// EXPERIMENTAL: Subject to change or removal.
+type interceptedResponseHandler struct {
+ caddyhttp.ResponseRecorder
+ replacer *caddy.Replacer
+ handler caddyhttp.ResponseHandler
+ handlerIndex int
+ statusCode int
+}
+
+// EXPERIMENTAL: Subject to change or removal.
+func (irh interceptedResponseHandler) WriteHeader(statusCode int) {
+ if irh.statusCode != 0 && (statusCode < 100 || statusCode >= 200) {
+ irh.ResponseRecorder.WriteHeader(irh.statusCode)
+
+ return
+ }
+
+ irh.ResponseRecorder.WriteHeader(statusCode)
+}
+
+// EXPERIMENTAL: Subject to change or removal.
+func (ir Intercept) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+
+ repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+ rec := interceptedResponseHandler{replacer: repl}
+ rec.ResponseRecorder = caddyhttp.NewResponseRecorder(w, buf, func(status int, header http.Header) bool {
+ // see if any response handler is configured for this original response
+ for i, rh := range ir.HandleResponse {
+ if rh.Match != nil && !rh.Match.Match(status, header) {
+ continue
+ }
+ rec.handler = rh
+ rec.handlerIndex = i
+
+ // if configured to only change the status code,
+ // do that then stream
+ if statusCodeStr := rh.StatusCode.String(); statusCodeStr != "" {
+ sc, err := strconv.Atoi(repl.ReplaceAll(statusCodeStr, ""))
+ if err != nil {
+ rec.statusCode = http.StatusInternalServerError
+ } else {
+ rec.statusCode = sc
+ }
+ }
+
+ return rec.statusCode == 0
+ }
+
+ return false
+ })
+
+ if err := next.ServeHTTP(rec, r); err != nil {
+ return err
+ }
+ if !rec.Buffered() {
+ return nil
+ }
+
+ // set up the replacer so that parts of the original response can be
+ // used for routing decisions
+ for field, value := range rec.Header() {
+ repl.Set("http.intercept.header."+field, strings.Join(value, ","))
+ }
+ repl.Set("http.intercept.status_code", rec.Status())
+
+ if c := ir.logger.Check(zapcore.DebugLevel, "handling response"); c != nil {
+ c.Write(zap.Int("handler", rec.handlerIndex))
+ }
+
+ // pass the request through the response handler routes
+ return rec.handler.Routes.Compile(next).ServeHTTP(w, r)
+}
+
+// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
+//
+//	intercept [<matcher>] {
+//	    # intercept original responses
+//	    @name {
+//	        status <code...>
+//	        header [<field> [<value>]]
+//	    }
+//	    replace_status [@name] <status_code>
+//	    handle_response [@name] {
+//	        <directives...>
+//	    }
+//	}
+//
+// The FinalizeUnmarshalCaddyfile method should be called after this
+// to finalize parsing of "handle_response" blocks, if possible.
+//
+// EXPERIMENTAL: Subject to change or removal.
+func (i *Intercept) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // collect the response matchers defined as subdirectives
+ // prefixed with "@" for use with "handle_response" blocks
+ i.responseMatchers = make(map[string]caddyhttp.ResponseMatcher)
+
+ d.Next() // consume the directive name
+ for d.NextBlock(0) {
+ // if the subdirective has an "@" prefix then we
+ // parse it as a response matcher for use with "handle_response"
+ if strings.HasPrefix(d.Val(), matcherPrefix) {
+ err := caddyhttp.ParseNamedResponseMatcher(d.NewFromNextSegment(), i.responseMatchers)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ switch d.Val() {
+ case "handle_response":
+ // delegate the parsing of handle_response to the caller,
+ // since we need the httpcaddyfile.Helper to parse subroutes.
+ // See h.FinalizeUnmarshalCaddyfile
+ i.handleResponseSegments = append(i.handleResponseSegments, d.NewFromNextSegment())
+
+ case "replace_status":
+ args := d.RemainingArgs()
+ if len(args) != 1 && len(args) != 2 {
+ return d.Errf("must have one or two arguments: an optional response matcher, and a status code")
+ }
+
+ responseHandler := caddyhttp.ResponseHandler{}
+
+ if len(args) == 2 {
+ if !strings.HasPrefix(args[0], matcherPrefix) {
+ return d.Errf("must use a named response matcher, starting with '@'")
+ }
+ foundMatcher, ok := i.responseMatchers[args[0]]
+ if !ok {
+ return d.Errf("no named response matcher defined with name '%s'", args[0][1:])
+ }
+ responseHandler.Match = &foundMatcher
+ responseHandler.StatusCode = caddyhttp.WeakString(args[1])
+ } else if len(args) == 1 {
+ responseHandler.StatusCode = caddyhttp.WeakString(args[0])
+ }
+
+ // make sure there's no block, cause it doesn't make sense
+ if nesting := d.Nesting(); d.NextBlock(nesting) {
+ return d.Errf("cannot define routes for 'replace_status', use 'handle_response' instead.")
+ }
+
+ i.HandleResponse = append(
+ i.HandleResponse,
+ responseHandler,
+ )
+
+ default:
+ return d.Errf("unrecognized subdirective %s", d.Val())
+ }
+ }
+
+ return nil
+}
+
+// FinalizeUnmarshalCaddyfile finalizes the Caddyfile parsing which
+// requires having an httpcaddyfile.Helper to function, to parse subroutes.
+//
+// EXPERIMENTAL: Subject to change or removal.
+func (i *Intercept) FinalizeUnmarshalCaddyfile(helper httpcaddyfile.Helper) error {
+ for _, d := range i.handleResponseSegments {
+ // consume the "handle_response" token
+ d.Next()
+ args := d.RemainingArgs()
+
+ // TODO: Remove this check at some point in the future
+ if len(args) == 2 {
+ return d.Errf("configuring 'handle_response' for status code replacement is no longer supported. Use 'replace_status' instead.")
+ }
+
+ if len(args) > 1 {
+ return d.Errf("too many arguments for 'handle_response': %s", args)
+ }
+
+ var matcher *caddyhttp.ResponseMatcher
+ if len(args) == 1 {
+ // the first arg should always be a matcher.
+ if !strings.HasPrefix(args[0], matcherPrefix) {
+ return d.Errf("must use a named response matcher, starting with '@'")
+ }
+
+ foundMatcher, ok := i.responseMatchers[args[0]]
+ if !ok {
+ return d.Errf("no named response matcher defined with name '%s'", args[0][1:])
+ }
+ matcher = &foundMatcher
+ }
+
+ // parse the block as routes
+ handler, err := httpcaddyfile.ParseSegmentAsSubroute(helper.WithDispenser(d.NewFromNextSegment()))
+ if err != nil {
+ return err
+ }
+ subroute, ok := handler.(*caddyhttp.Subroute)
+ if !ok {
+ return helper.Errf("segment was not parsed as a subroute")
+ }
+ i.HandleResponse = append(
+ i.HandleResponse,
+ caddyhttp.ResponseHandler{
+ Match: matcher,
+ Routes: subroute.Routes,
+ },
+ )
+ }
+
+ // move the handle_response entries without a matcher to the end.
+ // we can't use sort.SliceStable because it will reorder the rest of the
+ // entries which may be undesirable because we don't have a good
+ // heuristic to use for sorting.
+ withoutMatchers := []caddyhttp.ResponseHandler{}
+ withMatchers := []caddyhttp.ResponseHandler{}
+ for _, hr := range i.HandleResponse {
+ if hr.Match == nil {
+ withoutMatchers = append(withoutMatchers, hr)
+ } else {
+ withMatchers = append(withMatchers, hr)
+ }
+ }
+ i.HandleResponse = append(withMatchers, withoutMatchers...)
+
+ // clean up the bits we only needed for adapting
+ i.handleResponseSegments = nil
+ i.responseMatchers = nil
+
+ return nil
+}
+
+const matcherPrefix = "@"
+
+func parseCaddyfile(helper httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
+ var ir Intercept
+ if err := ir.UnmarshalCaddyfile(helper.Dispenser); err != nil {
+ return nil, err
+ }
+
+ if err := ir.FinalizeUnmarshalCaddyfile(helper); err != nil {
+ return nil, err
+ }
+
+ return ir, nil
+}
+
+// Interface guards
+var (
+ _ caddy.Provisioner = (*Intercept)(nil)
+ _ caddyfile.Unmarshaler = (*Intercept)(nil)
+ _ caddyhttp.MiddlewareHandler = (*Intercept)(nil)
+)
diff --git a/modules/caddyhttp/invoke.go b/modules/caddyhttp/invoke.go
new file mode 100644
index 00000000..97fd1cc3
--- /dev/null
+++ b/modules/caddyhttp/invoke.go
@@ -0,0 +1,56 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+func init() {
+ caddy.RegisterModule(Invoke{})
+}
+
+// Invoke implements a handler that compiles and executes a
+// named route that was defined on the server.
+//
+// EXPERIMENTAL: Subject to change or removal.
+type Invoke struct {
+ // Name is the key of the named route to execute
+ Name string `json:"name,omitempty"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (Invoke) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.handlers.invoke",
+ New: func() caddy.Module { return new(Invoke) },
+ }
+}
+
+func (invoke *Invoke) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
+ server := r.Context().Value(ServerCtxKey).(*Server)
+ if route, ok := server.NamedRoutes[invoke.Name]; ok {
+ return route.Compile(next).ServeHTTP(w, r)
+ }
+ return fmt.Errorf("invoke: route '%s' not found", invoke.Name)
+}
+
+// Interface guards
+var (
+ _ MiddlewareHandler = (*Invoke)(nil)
+)
diff --git a/modules/caddyhttp/ip_matchers.go b/modules/caddyhttp/ip_matchers.go
new file mode 100644
index 00000000..5e0b356e
--- /dev/null
+++ b/modules/caddyhttp/ip_matchers.go
@@ -0,0 +1,366 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/netip"
+ "reflect"
+ "strings"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common/types/ref"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/internal"
+)
+
// MatchRemoteIP matches requests by the remote IP address,
// i.e. the IP address of the direct connection to Caddy.
type MatchRemoteIP struct {
	// The IPs or CIDR ranges to match.
	Ranges []string `json:"ranges,omitempty"`

	// cidrs and zones are always kept aligned in length and
	// order: zones[i] is the zone filter for cidrs[i], so the
	// two can be matched up by index later.
	cidrs  []*netip.Prefix
	zones  []string
	logger *zap.Logger
}

// MatchClientIP matches requests by the client IP address,
// i.e. the resolved address, considering trusted proxies.
type MatchClientIP struct {
	// The IPs or CIDR ranges to match.
	Ranges []string `json:"ranges,omitempty"`

	// cidrs and zones are always kept aligned in length and
	// order: zones[i] is the zone filter for cidrs[i], so the
	// two can be matched up by index later.
	cidrs  []*netip.Prefix
	zones  []string
	logger *zap.Logger
}

// init registers both IP matcher modules with Caddy at load time.
func init() {
	caddy.RegisterModule(MatchRemoteIP{})
	caddy.RegisterModule(MatchClientIP{})
}

// CaddyModule returns the Caddy module information.
func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.matchers.remote_ip",
		New: func() caddy.Module { return new(MatchRemoteIP) },
	}
}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (m *MatchRemoteIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
+ for d.Next() {
+ for d.NextArg() {
+ if d.Val() == "forwarded" {
+ return d.Err("the 'forwarded' option is no longer supported; use the 'client_ip' matcher instead")
+ }
+ if d.Val() == "private_ranges" {
+ m.Ranges = append(m.Ranges, internal.PrivateRangesCIDR()...)
+ continue
+ }
+ m.Ranges = append(m.Ranges, d.Val())
+ }
+ if d.NextBlock(0) {
+ return d.Err("malformed remote_ip matcher: blocks are not supported")
+ }
+ }
+ return nil
+}
+
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
//	expression remote_ip('192.168.0.0/16', '172.16.0.0/12', '10.0.0.0/8')
func (MatchRemoteIP) CELLibrary(ctx caddy.Context) (cel.Library, error) {
	return CELMatcherImpl(
		// name of the macro, this is the function name that users see when writing expressions.
		"remote_ip",
		// name of the function that the macro will be rewritten to call.
		"remote_ip_match_request_list",
		// internal data type of the MatchRemoteIP value.
		[]*cel.Type{cel.ListType(cel.StringType)},
		// function to convert a constant list of strings to a MatchRemoteIP instance.
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := reflect.TypeOf([]string{})
			strList, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}

			m := MatchRemoteIP{}

			for _, input := range strList.([]string) {
				// reject the removed 'forwarded' option, same as the Caddyfile path
				if input == "forwarded" {
					return nil, errors.New("the 'forwarded' option is no longer supported; use the 'client_ip' matcher instead")
				}
				m.Ranges = append(m.Ranges, input)
			}

			err = m.Provision(ctx)
			return m, err
		},
	)
}
+
// Provision parses m's IP ranges, either from IP or CIDR expressions.
func (m *MatchRemoteIP) Provision(ctx caddy.Context) error {
	m.logger = ctx.Logger()
	parsedCIDRs, parsedZones, err := provisionCidrsZonesFromRanges(m.Ranges)
	if err != nil {
		return err
	}
	m.cidrs, m.zones = parsedCIDRs, parsedZones
	return nil
}

// Match returns true if r matches m.
func (m MatchRemoteIP) Match(r *http.Request) bool {
	matched, err := m.MatchWithError(r)
	if err != nil {
		// surface the error through the vars map for later inspection
		SetVar(r.Context(), MatcherErrorVarKey, err)
	}
	return matched
}
+
// MatchWithError returns true if r matches m.
func (m MatchRemoteIP) MatchWithError(r *http.Request) (bool, error) {
	// if handshake is not finished, we infer 0-RTT that has
	// not verified remote IP; could be spoofed, so we throw
	// HTTP 425 status to tell the client to try again after
	// the handshake is complete
	if r.TLS != nil && !r.TLS.HandshakeComplete {
		return false, Error(http.StatusTooEarly, errors.New("TLS handshake not complete, remote IP cannot be verified"))
	}

	address := r.RemoteAddr
	clientIP, zoneID, err := parseIPZoneFromString(address)
	if err != nil {
		// fix: the original log message was truncated ("getting remote ")
		if c := m.logger.Check(zapcore.ErrorLevel, "getting remote IP"); c != nil {
			c.Write(zap.Error(err))
		}

		// an unparsable remote address simply doesn't match
		return false, nil
	}
	matches, zoneFilter := matchIPByCidrZones(clientIP, zoneID, m.cidrs, m.zones)
	if !matches && !zoneFilter {
		if c := m.logger.Check(zapcore.DebugLevel, "zone ID from remote IP did not match"); c != nil {
			c.Write(zap.String("zone", zoneID))
		}
	}
	return matches, nil
}
+
// CaddyModule returns the Caddy module information.
func (MatchClientIP) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.matchers.client_ip",
		New: func() caddy.Module { return new(MatchClientIP) },
	}
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
// The special token 'private_ranges' expands to the predefined
// list of private network ranges (internal.PrivateRangesCIDR).
func (m *MatchClientIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	// iterate to merge multiple matchers into one
	for d.Next() {
		for d.NextArg() {
			if d.Val() == "private_ranges" {
				m.Ranges = append(m.Ranges, internal.PrivateRangesCIDR()...)
				continue
			}
			m.Ranges = append(m.Ranges, d.Val())
		}
		if d.NextBlock(0) {
			return d.Err("malformed client_ip matcher: blocks are not supported")
		}
	}
	return nil
}
+
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
//	expression client_ip('192.168.0.0/16', '172.16.0.0/12', '10.0.0.0/8')
func (MatchClientIP) CELLibrary(ctx caddy.Context) (cel.Library, error) {
	return CELMatcherImpl(
		// name of the macro, this is the function name that users see when writing expressions.
		"client_ip",
		// name of the function that the macro will be rewritten to call.
		"client_ip_match_request_list",
		// internal data type of the MatchClientIP value.
		[]*cel.Type{cel.ListType(cel.StringType)},
		// function to convert a constant list of strings to a MatchClientIP instance.
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := reflect.TypeOf([]string{})
			strList, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}

			m := MatchClientIP{
				Ranges: strList.([]string),
			}

			err = m.Provision(ctx)
			return m, err
		},
	)
}
+
+// Provision parses m's IP ranges, either from IP or CIDR expressions.
+func (m *MatchClientIP) Provision(ctx caddy.Context) error {
+ m.logger = ctx.Logger()
+ cidrs, zones, err := provisionCidrsZonesFromRanges(m.Ranges)
+ if err != nil {
+ return err
+ }
+ m.cidrs = cidrs
+ m.zones = zones
+ return nil
+}
+
+// Match returns true if r matches m.
+func (m MatchClientIP) Match(r *http.Request) bool {
+ match, err := m.MatchWithError(r)
+ if err != nil {
+ SetVar(r.Context(), MatcherErrorVarKey, err)
+ }
+ return match
+}
+
// MatchWithError returns true if r matches m.
func (m MatchClientIP) MatchWithError(r *http.Request) (bool, error) {
	// if handshake is not finished, we infer 0-RTT that has
	// not verified remote IP; could be spoofed, so we throw
	// HTTP 425 status to tell the client to try again after
	// the handshake is complete
	if r.TLS != nil && !r.TLS.HandshakeComplete {
		return false, Error(http.StatusTooEarly, errors.New("TLS handshake not complete, remote IP cannot be verified"))
	}

	// the resolved client IP is stored in the vars map by the server
	address := GetVar(r.Context(), ClientIPVarKey).(string)
	clientIP, zoneID, err := parseIPZoneFromString(address)
	if err != nil {
		// use the Check/Write pattern for consistency with
		// MatchRemoteIP.MatchWithError, so log fields are only
		// built when the level is actually enabled
		if c := m.logger.Check(zapcore.ErrorLevel, "getting client IP"); c != nil {
			c.Write(zap.Error(err))
		}
		return false, nil
	}
	matches, zoneFilter := matchIPByCidrZones(clientIP, zoneID, m.cidrs, m.zones)
	if !matches && !zoneFilter {
		if c := m.logger.Check(zapcore.DebugLevel, "zone ID from client IP did not match"); c != nil {
			c.Write(zap.String("zone", zoneID))
		}
	}
	return matches, nil
}
+
// provisionCidrsZonesFromRanges parses a list of IP/CIDR range expressions
// (optionally suffixed with a "%zone" identifier) into parallel slices of
// prefixes and zone filters. The two returned slices always have the same
// length; zones[i] is the zone filter for cidrs[i] ("" means no filter).
func provisionCidrsZonesFromRanges(ranges []string) ([]*netip.Prefix, []string, error) {
	// the number of entries is known up front, so pre-size both slices
	cidrs := make([]*netip.Prefix, 0, len(ranges))
	zones := make([]string, 0, len(ranges))
	repl := caddy.NewReplacer()
	for _, str := range ranges {
		str = repl.ReplaceAll(str, "")
		// Exclude the zone_id from the IP; write zone identifiers
		// at the same index as the prefix for matching later
		ipPart, zone, _ := strings.Cut(str, "%")
		str = ipPart
		zones = append(zones, zone)
		if strings.Contains(str, "/") {
			ipNet, err := netip.ParsePrefix(str)
			if err != nil {
				return nil, nil, fmt.Errorf("parsing CIDR expression '%s': %v", str, err)
			}
			cidrs = append(cidrs, &ipNet)
		} else {
			// a bare address becomes a single-address prefix
			ipAddr, err := netip.ParseAddr(str)
			if err != nil {
				return nil, nil, fmt.Errorf("invalid IP address: '%s': %v", str, err)
			}
			ipNew := netip.PrefixFrom(ipAddr, ipAddr.BitLen())
			cidrs = append(cidrs, &ipNew)
		}
	}
	return cidrs, zones, nil
}
+
// parseIPZoneFromString parses an IP address out of address, which may
// optionally carry a port and/or an IPv6 zone identifier. It returns the
// parsed address and the zone identifier (empty if there was none).
func parseIPZoneFromString(address string) (netip.Addr, string, error) {
	ipStr, _, err := net.SplitHostPort(address)
	if err != nil {
		ipStr = address // OK; probably didn't have a port
	}

	// Some IPv6 addresses can contain zone identifiers at the end,
	// which are separated from the address with "%"
	ip, zoneID, _ := strings.Cut(ipStr, "%")

	ipAddr, err := netip.ParseAddr(ip)
	if err != nil {
		// the unspecified IPv4 address is only a placeholder;
		// callers must check err before using the address
		return netip.IPv4Unspecified(), "", err
	}

	return ipAddr, zoneID, nil
}
+
// matchIPByCidrZones reports whether clientIP falls within any of the given
// cidrs while also satisfying the zone filter at the same index. The second
// return value reports whether no zone filter rejected an otherwise-matching
// range (callers use it to decide whether to log a zone mismatch).
func matchIPByCidrZones(clientIP netip.Addr, zoneID string, cidrs []*netip.Prefix, zones []string) (bool, bool) {
	zoneFilter := true
	for i := range cidrs {
		if !cidrs[i].Contains(clientIP) {
			continue
		}
		// The range matched; check the zone filter at the same index.
		if zones[i] == "" || zones[i] == zoneID {
			return true, false
		}
		zoneFilter = false
	}
	return false, zoneFilter
}
+
// Interface guards: compile-time assertions that both matchers
// implement the interfaces callers rely on.
var (
	_ RequestMatcherWithError = (*MatchRemoteIP)(nil)
	_ caddy.Provisioner       = (*MatchRemoteIP)(nil)
	_ caddyfile.Unmarshaler   = (*MatchRemoteIP)(nil)
	_ CELLibraryProducer      = (*MatchRemoteIP)(nil)

	_ RequestMatcherWithError = (*MatchClientIP)(nil)
	_ caddy.Provisioner       = (*MatchClientIP)(nil)
	_ caddyfile.Unmarshaler   = (*MatchClientIP)(nil)
	_ CELLibraryProducer      = (*MatchClientIP)(nil)
)
diff --git a/modules/caddyhttp/ip_range.go b/modules/caddyhttp/ip_range.go
new file mode 100644
index 00000000..bfd76c14
--- /dev/null
+++ b/modules/caddyhttp/ip_range.go
@@ -0,0 +1,137 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "fmt"
+ "net/http"
+ "net/netip"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/internal"
+)
+
// init registers the static IP range source module with Caddy at load time.
func init() {
	caddy.RegisterModule(StaticIPRange{})
}

// IPRangeSource gets a list of IP ranges.
//
// The request is passed as an argument to allow plugin implementations
// to have more flexibility. But, a plugin MUST NOT modify the request.
// The caller will have read the `r.RemoteAddr` before getting IP ranges.
//
// This should be a very fast function -- instant if possible.
// The list of IP ranges should be sourced as soon as possible if loaded
// from an external source (i.e. initially loaded during Provisioning),
// so that it's ready to be used when requests start getting handled.
// A read lock should probably be used to get the cached value if the
// ranges can change at runtime (e.g. periodically refreshed).
// Using a `caddy.UsagePool` may be a good idea to avoid having to
// refetch the values when a config reload occurs, which would waste time.
//
// If the list of IP ranges cannot be sourced, then provisioning SHOULD
// fail. Getting the IP ranges at runtime MUST NOT fail, because it would
// cancel incoming requests. If refreshing the list fails, then the
// previous list of IP ranges should continue to be returned so that the
// server can continue to operate normally.
type IPRangeSource interface {
	GetIPRanges(*http.Request) []netip.Prefix
}

// StaticIPRange provides a static range of IP address prefixes (CIDRs).
type StaticIPRange struct {
	// A static list of IP ranges (supports CIDR notation).
	Ranges []string `json:"ranges,omitempty"`

	// Holds the parsed CIDR ranges from Ranges.
	ranges []netip.Prefix
}

// CaddyModule returns the Caddy module information.
func (StaticIPRange) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.ip_sources.static",
		New: func() caddy.Module { return new(StaticIPRange) },
	}
}
+
// Provision parses the configured Ranges into netip.Prefix values
// once, so requests do not have to re-parse them.
func (s *StaticIPRange) Provision(ctx caddy.Context) error {
	// the count is known up front, so allocate exactly once;
	// assigning (rather than appending) also keeps Provision idempotent
	s.ranges = make([]netip.Prefix, 0, len(s.Ranges))
	for _, str := range s.Ranges {
		prefix, err := CIDRExpressionToPrefix(str)
		if err != nil {
			return err
		}
		s.ranges = append(s.ranges, prefix)
	}

	return nil
}

// GetIPRanges returns the parsed prefixes; the request is unused.
func (s *StaticIPRange) GetIPRanges(_ *http.Request) []netip.Prefix {
	return s.ranges
}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (m *StaticIPRange) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ if !d.Next() {
+ return nil
+ }
+ for d.NextArg() {
+ if d.Val() == "private_ranges" {
+ m.Ranges = append(m.Ranges, internal.PrivateRangesCIDR()...)
+ continue
+ }
+ m.Ranges = append(m.Ranges, d.Val())
+ }
+ return nil
+}
+
// CIDRExpressionToPrefix takes a string which could be either a
// CIDR expression or a single IP address, and returns a netip.Prefix.
// A single address is returned as a prefix spanning exactly that address.
func CIDRExpressionToPrefix(expr string) (netip.Prefix, error) {
	// A slash means it should be a CIDR expression
	if strings.Contains(expr, "/") {
		prefix, err := netip.ParsePrefix(expr)
		if err != nil {
			return netip.Prefix{}, fmt.Errorf("parsing CIDR expression: '%s': %v", expr, err)
		}
		return prefix, nil
	}

	// No slash, so it's likely a single IP address
	addr, err := netip.ParseAddr(expr)
	if err != nil {
		return netip.Prefix{}, fmt.Errorf("invalid IP address: '%s': %v", expr, err)
	}
	return netip.PrefixFrom(addr, addr.BitLen()), nil
}
+
// Interface guards: compile-time assertions that StaticIPRange
// implements the interfaces callers rely on.
var (
	_ caddy.Provisioner     = (*StaticIPRange)(nil)
	_ caddyfile.Unmarshaler = (*StaticIPRange)(nil)
	_ IPRangeSource         = (*StaticIPRange)(nil)
)

// PrivateRangesCIDR returns a list of private CIDR range
// strings, which can be used as a configuration shortcut.
// It simply re-exports the internal helper.
// Note: this function is used at least by mholt/caddy-l4.
func PrivateRangesCIDR() []string {
	return internal.PrivateRangesCIDR()
}
diff --git a/modules/caddyhttp/logging.go b/modules/caddyhttp/logging.go
new file mode 100644
index 00000000..0a389fe1
--- /dev/null
+++ b/modules/caddyhttp/logging.go
@@ -0,0 +1,251 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "encoding/json"
+ "errors"
+ "net"
+ "net/http"
+ "strings"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
// ServerLogConfig describes a server's logging configuration. If
// enabled without customization, all requests to this server are
// logged to the default logger; logger destinations may be
// customized per-request-host.
type ServerLogConfig struct {
	// The default logger name for all logs emitted by this server for
	// hostnames that are not in the logger_names map.
	DefaultLoggerName string `json:"default_logger_name,omitempty"`

	// LoggerNames maps request hostnames to one or more custom logger
	// names. For example, a mapping of `"example.com": ["example"]` would
	// cause access logs from requests with a Host of example.com to be
	// emitted by a logger named "http.log.access.example". If there are
	// multiple logger names, then the log will be emitted to all of them.
	// If the logger name is empty, the default logger is used, i.e.
	// the logger "http.log.access".
	//
	// Keys must be hostnames (without ports), and may contain wildcards
	// to match subdomains. The value is an array of logger names.
	//
	// For backwards compatibility, if the value is a string, it is treated
	// as a single-element array.
	LoggerNames map[string]StringArray `json:"logger_names,omitempty"`

	// By default, all requests to this server will be logged if
	// access logging is enabled. This field lists the request
	// hosts for which access logging should be disabled.
	SkipHosts []string `json:"skip_hosts,omitempty"`

	// If true, requests to any host not appearing in the
	// logger_names map will not be logged.
	SkipUnmappedHosts bool `json:"skip_unmapped_hosts,omitempty"`

	// If true, credentials that are otherwise omitted, will be logged.
	// The definition of credentials is defined by https://fetch.spec.whatwg.org/#credentials,
	// and this includes some request and response headers, i.e. `Cookie`,
	// `Set-Cookie`, `Authorization`, and `Proxy-Authorization`.
	ShouldLogCredentials bool `json:"should_log_credentials,omitempty"`

	// Log each individual handler that is invoked.
	// Requires that the log emit at DEBUG level.
	//
	// NOTE: This may log the configuration of your
	// HTTP handler modules; do not enable this in
	// insecure contexts when there is sensitive
	// data in the configuration.
	//
	// EXPERIMENTAL: Subject to change or removal.
	Trace bool `json:"trace,omitempty"`
}
+
// wrapLogger wraps logger in one or more loggers named
// according to user preferences for the given request's host.
// A per-request override (set via the `log_name` directive or the
// `access_logger_names` variable) takes precedence over the
// host-to-logger mapping in the config.
func (slc ServerLogConfig) wrapLogger(logger *zap.Logger, req *http.Request) []*zap.Logger {
	// using the `log_name` directive or the `access_logger_names` variable,
	// the logger names can be overridden for the current request
	if names := GetVar(req.Context(), AccessLoggerNameVarKey); names != nil {
		if namesSlice, ok := names.([]any); ok {
			loggers := make([]*zap.Logger, 0, len(namesSlice))
			for _, loggerName := range namesSlice {
				// no name, use the default logger
				if loggerName == "" {
					loggers = append(loggers, logger)
					continue
				}
				// make a logger with the given name
				loggers = append(loggers, logger.Named(loggerName.(string)))
			}
			return loggers
		}
	}

	// get the hostname from the request, with the port number stripped
	host, _, err := net.SplitHostPort(req.Host)
	if err != nil {
		// no port present; use the host as-is
		host = req.Host
	}

	// get the logger names for this host from the config
	hosts := slc.getLoggerHosts(host)

	// make a list of named loggers, or the default logger
	loggers := make([]*zap.Logger, 0, len(hosts))
	for _, loggerName := range hosts {
		// no name, use the default logger
		if loggerName == "" {
			loggers = append(loggers, logger)
			continue
		}
		// make a logger with the given name
		loggers = append(loggers, logger.Named(loggerName))
	}
	return loggers
}
+
// getLoggerHosts returns the logger names configured for host,
// trying an exact match first, then wildcard entries (each label
// replaced with "*" in turn), and finally falling back to the
// default logger name.
func (slc ServerLogConfig) getLoggerHosts(host string) []string {
	// exact hostname takes precedence
	if names, ok := slc.LoggerNames[host]; ok {
		return names
	}

	// substitute a wildcard for each non-empty label, left to
	// right, and look for a matching wildcard entry
	labels := strings.Split(host, ".")
	for i, label := range labels {
		if label == "" {
			continue
		}
		labels[i] = "*"
		if names, ok := slc.LoggerNames[strings.Join(labels, ".")]; ok {
			return names
		}
	}

	// nothing configured for this host; use the default
	return []string{slc.DefaultLoggerName}
}
+
+func (slc *ServerLogConfig) clone() *ServerLogConfig {
+ clone := &ServerLogConfig{
+ DefaultLoggerName: slc.DefaultLoggerName,
+ LoggerNames: make(map[string]StringArray),
+ SkipHosts: append([]string{}, slc.SkipHosts...),
+ SkipUnmappedHosts: slc.SkipUnmappedHosts,
+ ShouldLogCredentials: slc.ShouldLogCredentials,
+ }
+ for k, v := range slc.LoggerNames {
+ clone.LoggerNames[k] = append([]string{}, v...)
+ }
+ return clone
+}
+
+// StringArray is a slices of strings, but also accepts
+// a single string as a value when JSON unmarshaling,
+// converting it to a slice of one string.
+type StringArray []string
+
+// UnmarshalJSON satisfies json.Unmarshaler.
+func (sa *StringArray) UnmarshalJSON(b []byte) error {
+ var jsonObj any
+ err := json.Unmarshal(b, &jsonObj)
+ if err != nil {
+ return err
+ }
+ switch obj := jsonObj.(type) {
+ case string:
+ *sa = StringArray([]string{obj})
+ return nil
+ case []any:
+ s := make([]string, 0, len(obj))
+ for _, v := range obj {
+ value, ok := v.(string)
+ if !ok {
+ return errors.New("unsupported type")
+ }
+ s = append(s, value)
+ }
+ *sa = StringArray(s)
+ return nil
+ }
+ return errors.New("unsupported type")
+}
+
// errLogValues inspects err and returns the status code
// to use, the error log message, and any extra fields.
// If err is a HandlerError, the returned values will
// have richer information.
func errLogValues(err error) (status int, msg string, fields func() []zapcore.Field) {
	var handlerErr HandlerError
	if errors.As(err, &handlerErr) {
		status = handlerErr.StatusCode
		// prefer the message of the wrapped error, if there is one
		if handlerErr.Err == nil {
			msg = err.Error()
		} else {
			msg = handlerErr.Err.Error()
		}
		// fields are built lazily, only if the log is actually emitted
		fields = func() []zapcore.Field {
			return []zapcore.Field{
				zap.Int("status", handlerErr.StatusCode),
				zap.String("err_id", handlerErr.ID),
				zap.String("err_trace", handlerErr.Trace),
			}
		}
		return
	}
	// any other error is reported as an internal server error
	status = http.StatusInternalServerError
	msg = err.Error()
	return
}
+
+// ExtraLogFields is a list of extra fields to log with every request.
+type ExtraLogFields struct {
+ fields []zapcore.Field
+}
+
+// Add adds a field to the list of extra fields to log.
+func (e *ExtraLogFields) Add(field zap.Field) {
+ e.fields = append(e.fields, field)
+}
+
+// Set sets a field in the list of extra fields to log.
+// If the field already exists, it is replaced.
+func (e *ExtraLogFields) Set(field zap.Field) {
+ for i := range e.fields {
+ if e.fields[i].Key == field.Key {
+ e.fields[i] = field
+ return
+ }
+ }
+ e.fields = append(e.fields, field)
+}
+
+const (
+ // Variable name used to indicate that this request
+ // should be omitted from the access logs
+ LogSkipVar string = "log_skip"
+
+ // For adding additional fields to the access logs
+ ExtraLogFieldsCtxKey caddy.CtxKey = "extra_log_fields"
+
+ // Variable name used to indicate the logger to be used
+ AccessLoggerNameVarKey string = "access_logger_names"
+)
diff --git a/modules/caddyhttp/logging/caddyfile.go b/modules/caddyhttp/logging/caddyfile.go
new file mode 100644
index 00000000..010b4891
--- /dev/null
+++ b/modules/caddyhttp/logging/caddyfile.go
@@ -0,0 +1,53 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
// init registers the log_append Caddyfile directive at load time.
func init() {
	httpcaddyfile.RegisterHandlerDirective("log_append", parseCaddyfile)
}

// parseCaddyfile sets up the log_append handler from Caddyfile tokens. Syntax:
//
//	log_append [<matcher>] <key> <value>
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	handler := new(LogAppend)
	err := handler.UnmarshalCaddyfile(h.Dispenser)
	return handler, err
}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (h *LogAppend) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ d.Next() // consume directive name
+ if !d.NextArg() {
+ return d.ArgErr()
+ }
+ h.Key = d.Val()
+ if !d.NextArg() {
+ return d.ArgErr()
+ }
+ h.Value = d.Val()
+ return nil
+}
+
+// Interface guards
+var (
+ _ caddyfile.Unmarshaler = (*LogAppend)(nil)
+)
diff --git a/modules/caddyhttp/logging/logadd.go b/modules/caddyhttp/logging/logadd.go
new file mode 100644
index 00000000..3b554367
--- /dev/null
+++ b/modules/caddyhttp/logging/logadd.go
@@ -0,0 +1,94 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+ "net/http"
+ "strings"
+
+ "go.uber.org/zap"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
// init registers the log_append handler module with Caddy at load time.
func init() {
	caddy.RegisterModule(LogAppend{})
}

// LogAppend implements a middleware that takes a key and value, where
// the key is the name of a log field and the value is a placeholder,
// or variable key, or constant value to use for that field.
type LogAppend struct {
	// Key is the name of the log field.
	Key string `json:"key,omitempty"`

	// Value is the value to use for the log field.
	// If it is a placeholder (with surrounding `{}`),
	// it will be evaluated when the log is written.
	// If the value is a key that exists in the `vars`
	// map, the value of that key will be used. Otherwise
	// the value will be used as-is as a constant string.
	Value string `json:"value,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (LogAppend) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.log_append",
		New: func() caddy.Module { return new(LogAppend) },
	}
}
+
// ServeHTTP lets the rest of the handler chain run first, then records
// the configured key/value as an extra access-log field on the way back
// up the chain, even if a downstream handler errored.
func (h LogAppend) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	// Run the next handler in the chain first.
	// If an error occurs, we still want to add
	// any extra log fields that we can, so we
	// hold onto the error and return it later.
	handlerErr := next.ServeHTTP(w, r)

	ctx := r.Context()
	vars := ctx.Value(caddyhttp.VarsCtxKey).(map[string]any)
	repl := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	extra := ctx.Value(caddyhttp.ExtraLogFieldsCtxKey).(*caddyhttp.ExtraLogFields)

	// Resolve the configured value: a placeholder is evaluated now,
	// a vars key is looked up, and anything else is a constant.
	var fieldValue any
	switch {
	case strings.HasPrefix(h.Value, "{") &&
		strings.HasSuffix(h.Value, "}") &&
		strings.Count(h.Value, "{") == 1:
		fieldValue, _ = repl.Get(strings.Trim(h.Value, "{}"))
	default:
		if val, ok := vars[h.Value]; ok {
			fieldValue = val
		} else {
			fieldValue = h.Value
		}
	}

	// zap.Any reflects to the correct field type for us
	extra.Add(zap.Any(h.Key, fieldValue))

	return handlerErr
}

// Interface guard
var _ caddyhttp.MiddlewareHandler = (*LogAppend)(nil)
diff --git a/modules/caddyhttp/map/caddyfile.go b/modules/caddyhttp/map/caddyfile.go
new file mode 100644
index 00000000..8f7b5d34
--- /dev/null
+++ b/modules/caddyhttp/map/caddyfile.go
@@ -0,0 +1,114 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package maphandler
+
+import (
+ "strings"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
// init registers the map Caddyfile directive at load time.
func init() {
	httpcaddyfile.RegisterHandlerDirective("map", parseCaddyfile)
}

// parseCaddyfile sets up the map handler from Caddyfile tokens. Syntax:
//
//	map <source> <destinations...> {
//		[~]<input> <outputs...>
//		default    <defaults...>
//	}
//
// If the input value is prefixed with a tilde (~), then the input will be parsed as a
// regular expression.
//
// The Caddyfile adapter treats outputs that are a literal hyphen (-) as a null/nil
// value. This is useful if you want to fall back to default for that particular output.
//
// The number of outputs for each mapping must not be more than the number of destinations.
// However, for convenience, there may be fewer outputs than destinations and any missing
// outputs will be filled in implicitly.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive name

	var handler Handler

	// source
	if !h.NextArg() {
		return nil, h.ArgErr()
	}
	handler.Source = h.Val()

	// destinations
	handler.Destinations = h.RemainingArgs()
	if len(handler.Destinations) == 0 {
		return nil, h.Err("missing destination argument(s)")
	}
	for _, dest := range handler.Destinations {
		if shorthand := httpcaddyfile.WasReplacedPlaceholderShorthand(dest); shorthand != "" {
			return nil, h.Errf("destination %s conflicts with a Caddyfile placeholder shorthand", shorthand)
		}
	}

	// mappings
	for h.NextBlock(0) {
		// defaults are a special case
		if h.Val() == "default" {
			if len(handler.Defaults) > 0 {
				return nil, h.Err("defaults already defined")
			}
			handler.Defaults = h.RemainingArgs()
			// pad the defaults out to one per destination
			for len(handler.Defaults) < len(handler.Destinations) {
				handler.Defaults = append(handler.Defaults, "")
			}
			continue
		}

		// every line maps an input value to one or more outputs
		in := h.Val()
		var outs []any
		for h.NextArg() {
			val := h.ScalarVal()
			if val == "-" {
				// a literal hyphen means null/nil (fall back to default)
				outs = append(outs, nil)
			} else {
				outs = append(outs, val)
			}
		}

		// cannot have more outputs than destinations
		if len(outs) > len(handler.Destinations) {
			return nil, h.Err("too many outputs")
		}

		// for convenience, can have fewer outputs than destinations, but the
		// underlying handler won't accept that, so we fill in nil values
		for len(outs) < len(handler.Destinations) {
			outs = append(outs, nil)
		}

		// create the mapping
		mapping := Mapping{Outputs: outs}
		if strings.HasPrefix(in, "~") {
			// tilde prefix marks the input as a regular expression
			mapping.InputRegexp = in[1:]
		} else {
			mapping.Input = in
		}

		handler.Mappings = append(handler.Mappings, mapping)
	}
	return handler, nil
}
diff --git a/modules/caddyhttp/map/map.go b/modules/caddyhttp/map/map.go
new file mode 100644
index 00000000..d02085e7
--- /dev/null
+++ b/modules/caddyhttp/map/map.go
@@ -0,0 +1,196 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package maphandler
+
+import (
+ "fmt"
+ "net/http"
+ "regexp"
+ "slices"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+func init() {
+ caddy.RegisterModule(Handler{})
+}
+
+// Handler implements a middleware that maps inputs to outputs. Specifically, it
+// compares a source value against the map inputs, and for one that matches, it
+// applies the output values to each destination. Destinations become placeholder
+// names.
+//
+// Mapped placeholders are not evaluated until they are used, so even for very
+// large mappings, this handler is quite efficient.
+type Handler struct {
+ // Source is the placeholder from which to get the input value.
+ Source string `json:"source,omitempty"`
+
+ // Destinations are the names of placeholders in which to store the outputs.
+ // Destination values should be wrapped in braces, for example, {my_placeholder}.
+ Destinations []string `json:"destinations,omitempty"`
+
+ // Mappings from source values (inputs) to destination values (outputs).
+ // The first matching, non-nil mapping will be applied.
+ Mappings []Mapping `json:"mappings,omitempty"`
+
+ // If no mappings match or if the mapped output is null/nil, the associated
+ // default output will be applied (optional).
+ Defaults []string `json:"defaults,omitempty"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (Handler) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.handlers.map",
+ New: func() caddy.Module { return new(Handler) },
+ }
+}
+
+// Provision sets up h.
+func (h *Handler) Provision(_ caddy.Context) error {
+ for j, dest := range h.Destinations {
+ if strings.Count(dest, "{") != 1 || !strings.HasPrefix(dest, "{") {
+ return fmt.Errorf("destination must be a placeholder and only a placeholder")
+ }
+ h.Destinations[j] = strings.Trim(dest, "{}")
+ }
+
+ for i, m := range h.Mappings {
+ if m.InputRegexp == "" {
+ continue
+ }
+ var err error
+ h.Mappings[i].re, err = regexp.Compile(m.InputRegexp)
+ if err != nil {
+ return fmt.Errorf("compiling regexp for mapping %d: %v", i, err)
+ }
+ }
+
+ // TODO: improve efficiency even further by using an actual map type
+ // for the non-regexp mappings, OR sort them and do a binary search
+
+ return nil
+}
+
+// Validate ensures that h is configured properly.
+func (h *Handler) Validate() error {
+ nDest, nDef := len(h.Destinations), len(h.Defaults)
+ if nDef > 0 && nDef != nDest {
+ return fmt.Errorf("%d destinations != %d defaults", nDest, nDef)
+ }
+
+ seen := make(map[string]int)
+ for i, m := range h.Mappings {
+ // prevent confusing/ambiguous mappings
+ if m.Input != "" && m.InputRegexp != "" {
+ return fmt.Errorf("mapping %d has both input and input_regexp fields specified, which is confusing", i)
+ }
+
+ // prevent duplicate mappings
+ input := m.Input
+ if m.InputRegexp != "" {
+ input = m.InputRegexp
+ }
+ if prev, ok := seen[input]; ok {
+ return fmt.Errorf("mapping %d has a duplicate input '%s' previously used with mapping %d", i, input, prev)
+ }
+ seen[input] = i
+
+ // ensure mappings have 1:1 output-to-destination correspondence
+ nOut := len(m.Outputs)
+ if nOut != nDest {
+ return fmt.Errorf("mapping %d has %d outputs but there are %d destinations defined", i, nOut, nDest)
+ }
+ }
+
+ return nil
+}
+
+func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
+ repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+
+ // defer work until a variable is actually evaluated by using replacer's Map callback
+ repl.Map(func(key string) (any, bool) {
+ // return early if the variable is not even a configured destination
+ destIdx := slices.Index(h.Destinations, key)
+ if destIdx < 0 {
+ return nil, false
+ }
+
+ input := repl.ReplaceAll(h.Source, "")
+
+ // find the first mapping matching the input and return
+ // the requested destination/output value
+ for _, m := range h.Mappings {
+ output := m.Outputs[destIdx]
+ if output == nil {
+ continue
+ }
+ outputStr := caddy.ToString(output)
+
+ // evaluate regular expression if configured
+ if m.re != nil {
+ var result []byte
+ matches := m.re.FindStringSubmatchIndex(input)
+ if matches == nil {
+ continue
+ }
+ result = m.re.ExpandString(result, outputStr, input, matches)
+ return string(result), true
+ }
+
+ // otherwise simple string comparison
+ if input == m.Input {
+ return repl.ReplaceAll(outputStr, ""), true
+ }
+ }
+
+ // fall back to default if no match or if matched nil value
+ if len(h.Defaults) > destIdx {
+ return repl.ReplaceAll(h.Defaults[destIdx], ""), true
+ }
+
+ return nil, true
+ })
+
+ return next.ServeHTTP(w, r)
+}
+
+// Mapping describes a mapping from input to outputs.
+type Mapping struct {
+ // The input value to match. Must be distinct from other mappings.
+ // Mutually exclusive to input_regexp.
+ Input string `json:"input,omitempty"`
+
+ // The input regular expression to match. Mutually exclusive to input.
+ InputRegexp string `json:"input_regexp,omitempty"`
+
+ // Upon a match with the input, each output is positionally correlated
+ // with each destination of the parent handler. An output that is null
+ // (nil) will be treated as if it was not mapped at all.
+ Outputs []any `json:"outputs,omitempty"`
+
+ re *regexp.Regexp
+}
+
+// Interface guards
+var (
+ _ caddy.Provisioner = (*Handler)(nil)
+ _ caddy.Validator = (*Handler)(nil)
+ _ caddyhttp.MiddlewareHandler = (*Handler)(nil)
+)
diff --git a/modules/caddyhttp/map/map_test.go b/modules/caddyhttp/map/map_test.go
new file mode 100644
index 00000000..3ff5e711
--- /dev/null
+++ b/modules/caddyhttp/map/map_test.go
@@ -0,0 +1,152 @@
+package maphandler
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+func TestHandler(t *testing.T) {
+ for i, tc := range []struct {
+ handler Handler
+ reqURI string
+ expect map[string]any
+ }{
+ {
+ reqURI: "/foo",
+ handler: Handler{
+ Source: "{http.request.uri.path}",
+ Destinations: []string{"{output}"},
+ Mappings: []Mapping{
+ {
+ Input: "/foo",
+ Outputs: []any{"FOO"},
+ },
+ },
+ },
+ expect: map[string]any{
+ "output": "FOO",
+ },
+ },
+ {
+ reqURI: "/abcdef",
+ handler: Handler{
+ Source: "{http.request.uri.path}",
+ Destinations: []string{"{output}"},
+ Mappings: []Mapping{
+ {
+ InputRegexp: "(/abc)",
+ Outputs: []any{"ABC"},
+ },
+ },
+ },
+ expect: map[string]any{
+ "output": "ABC",
+ },
+ },
+ {
+ reqURI: "/ABCxyzDEF",
+ handler: Handler{
+ Source: "{http.request.uri.path}",
+ Destinations: []string{"{output}"},
+ Mappings: []Mapping{
+ {
+ InputRegexp: "(xyz)",
+ Outputs: []any{"...${1}..."},
+ },
+ },
+ },
+ expect: map[string]any{
+ "output": "...xyz...",
+ },
+ },
+ {
+ // Test case from https://caddy.community/t/map-directive-and-regular-expressions/13866/14?u=matt
+ reqURI: "/?s=0%27+AND+%28SELECT+0+FROM+%28SELECT+count%28%2A%29%2C+CONCAT%28%28SELECT+%40%40version%29%2C+0x23%2C+FLOOR%28RAND%280%29%2A2%29%29+AS+x+FROM+information_schema.columns+GROUP+BY+x%29+y%29+-+-+%27",
+ handler: Handler{
+ Source: "{http.request.uri}",
+ Destinations: []string{"{output}"},
+ Mappings: []Mapping{
+ {
+ InputRegexp: "(?i)(\\^|`|<|>|%|\\\\|\\{|\\}|\\|)",
+ Outputs: []any{"3"},
+ },
+ },
+ },
+ expect: map[string]any{
+ "output": "3",
+ },
+ },
+ {
+ reqURI: "/foo",
+ handler: Handler{
+ Source: "{http.request.uri.path}",
+ Destinations: []string{"{output}"},
+ Mappings: []Mapping{
+ {
+ Input: "/foo",
+ Outputs: []any{"{testvar}"},
+ },
+ },
+ },
+ expect: map[string]any{
+ "output": "testing",
+ },
+ },
+ {
+ reqURI: "/foo",
+ handler: Handler{
+ Source: "{http.request.uri.path}",
+ Destinations: []string{"{output}"},
+ Defaults: []string{"default"},
+ },
+ expect: map[string]any{
+ "output": "default",
+ },
+ },
+ {
+ reqURI: "/foo",
+ handler: Handler{
+ Source: "{http.request.uri.path}",
+ Destinations: []string{"{output}"},
+ Defaults: []string{"{testvar}"},
+ },
+ expect: map[string]any{
+ "output": "testing",
+ },
+ },
+ } {
+ if err := tc.handler.Provision(caddy.Context{}); err != nil {
+ t.Fatalf("Test %d: Provisioning handler: %v", i, err)
+ }
+
+ req, err := http.NewRequest(http.MethodGet, tc.reqURI, nil)
+ if err != nil {
+ t.Fatalf("Test %d: Creating request: %v", i, err)
+ }
+ repl := caddyhttp.NewTestReplacer(req)
+ repl.Set("testvar", "testing")
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ rr := httptest.NewRecorder()
+ noop := caddyhttp.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) error { return nil })
+
+ if err := tc.handler.ServeHTTP(rr, req, noop); err != nil {
+ t.Errorf("Test %d: Handler returned error: %v", i, err)
+ continue
+ }
+
+ for key, expected := range tc.expect {
+ actual, _ := repl.Get(key)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Errorf("Test %d: Expected %#v but got %#v for {%s}", i, expected, actual, key)
+ }
+ }
+ }
+}
diff --git a/modules/caddyhttp/marshalers.go b/modules/caddyhttp/marshalers.go
index e35a73ed..c985bb92 100644
--- a/modules/caddyhttp/marshalers.go
+++ b/modules/caddyhttp/marshalers.go
@@ -16,22 +16,41 @@ package caddyhttp
import (
"crypto/tls"
+ "net"
"net/http"
+ "strings"
"go.uber.org/zap/zapcore"
)
// LoggableHTTPRequest makes an HTTP request loggable with zap.Object().
-type LoggableHTTPRequest struct{ *http.Request }
+type LoggableHTTPRequest struct {
+ *http.Request
+
+ ShouldLogCredentials bool
+}
// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
func (r LoggableHTTPRequest) MarshalLogObject(enc zapcore.ObjectEncoder) error {
- enc.AddString("method", r.Method)
- enc.AddString("uri", r.RequestURI)
+ ip, port, err := net.SplitHostPort(r.RemoteAddr)
+ if err != nil {
+ ip = r.RemoteAddr
+ port = ""
+ }
+
+ enc.AddString("remote_ip", ip)
+ enc.AddString("remote_port", port)
+ if ip, ok := GetVar(r.Context(), ClientIPVarKey).(string); ok {
+ enc.AddString("client_ip", ip)
+ }
enc.AddString("proto", r.Proto)
- enc.AddString("remote_addr", r.RemoteAddr)
+ enc.AddString("method", r.Method)
enc.AddString("host", r.Host)
- enc.AddObject("headers", LoggableHTTPHeader(r.Header))
+ enc.AddString("uri", r.RequestURI)
+ enc.AddObject("headers", LoggableHTTPHeader{
+ Header: r.Header,
+ ShouldLogCredentials: r.ShouldLogCredentials,
+ })
if r.TLS != nil {
enc.AddObject("tls", LoggableTLSConnState(*r.TLS))
}
@@ -39,14 +58,26 @@ func (r LoggableHTTPRequest) MarshalLogObject(enc zapcore.ObjectEncoder) error {
}
// LoggableHTTPHeader makes an HTTP header loggable with zap.Object().
-type LoggableHTTPHeader http.Header
+// Headers with potentially sensitive information (Cookie, Set-Cookie,
+// Authorization, and Proxy-Authorization) are logged with empty values.
+type LoggableHTTPHeader struct {
+ http.Header
+
+ ShouldLogCredentials bool
+}
// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
func (h LoggableHTTPHeader) MarshalLogObject(enc zapcore.ObjectEncoder) error {
- if h == nil {
+ if h.Header == nil {
return nil
}
- for key, val := range h {
+ for key, val := range h.Header {
+ if !h.ShouldLogCredentials {
+ switch strings.ToLower(key) {
+ case "cookie", "set-cookie", "authorization", "proxy-authorization":
+ val = []string{"REDACTED"} // see #5669. I still think ▒▒▒▒ would be cool.
+ }
+ }
enc.AddArray(key, LoggableStringArray(val))
}
return nil
@@ -73,10 +104,13 @@ type LoggableTLSConnState tls.ConnectionState
func (t LoggableTLSConnState) MarshalLogObject(enc zapcore.ObjectEncoder) error {
enc.AddBool("resumed", t.DidResume)
enc.AddUint16("version", t.Version)
- enc.AddUint16("ciphersuite", t.CipherSuite)
+ enc.AddUint16("cipher_suite", t.CipherSuite)
enc.AddString("proto", t.NegotiatedProtocol)
- enc.AddBool("proto_mutual", t.NegotiatedProtocolIsMutual)
enc.AddString("server_name", t.ServerName)
+ if len(t.PeerCertificates) > 0 {
+ enc.AddString("client_common_name", t.PeerCertificates[0].Subject.CommonName)
+ enc.AddString("client_serial", t.PeerCertificates[0].SerialNumber.String())
+ }
return nil
}
diff --git a/modules/caddyhttp/matchers.go b/modules/caddyhttp/matchers.go
index 95100d58..25fdc1fe 100644
--- a/modules/caddyhttp/matchers.go
+++ b/modules/caddyhttp/matchers.go
@@ -16,16 +16,26 @@ package caddyhttp
import (
"encoding/json"
+ "errors"
"fmt"
- "log"
"net"
"net/http"
"net/textproto"
"net/url"
- "path/filepath"
+ "path"
+ "reflect"
"regexp"
+ "runtime"
+ "slices"
+ "sort"
+ "strconv"
"strings"
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "golang.org/x/net/idna"
+
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
@@ -51,22 +61,55 @@ type (
//
// The wildcard can be useful for matching all subdomains, for example:
// `*.example.com` matches `foo.example.com` but not `foo.bar.example.com`.
+ //
+ // Duplicate entries will return an error.
MatchHost []string
- // MatchPath matches requests by the URI's path (case-insensitive). Path
- // matches are exact, but wildcards may be used:
+ // MatchPath case-insensitively matches requests by the URI's path. Path
+ // matching is exact, not prefix-based, giving you more control and clarity
+ // over matching. Wildcards (`*`) may be used:
//
- // - At the end, for a prefix match (`/prefix/*`)
- // - At the beginning, for a suffix match (`*.suffix`)
- // - On both sides, for a substring match (`*/contains/*`)
+ // - At the end only, for a prefix match (`/prefix/*`)
+ // - At the beginning only, for a suffix match (`*.suffix`)
+ // - On both sides only, for a substring match (`*/contains/*`)
// - In the middle, for a globular match (`/accounts/*/info`)
//
+ // Slashes are significant; i.e. `/foo*` matches `/foo`, `/foo/`, `/foo/bar`,
+ // and `/foobar`; but `/foo/*` does not match `/foo` or `/foobar`. Valid
+ // paths start with a slash `/`.
+ //
+ // Because there are, in general, multiple possible escaped forms of any
+ // path, path matchers operate in unescaped space; that is, path matchers
+ // should be written in their unescaped form to prevent ambiguities and
+ // possible security issues, as all request paths will be normalized to
+ // their unescaped forms before matcher evaluation.
+ //
+ // However, escape sequences in a match pattern are supported; they are
+ // compared with the request's raw/escaped path for those bytes only.
+ // In other words, a matcher of `/foo%2Fbar` will match a request path
+ // of precisely `/foo%2Fbar`, but not `/foo/bar`. It follows that matching
+ // the literal percent sign (%) in normalized space can be done using the
+ // escaped form, `%25`.
+ //
+ // Even though wildcards (`*`) operate in the normalized space, the special
+ // escaped wildcard (`%*`), which is not a valid escape sequence, may be
+ // used in place of a span that should NOT be decoded; that is, `/bands/%*`
+ // will match `/bands/AC%2fDC` whereas `/bands/*` will not.
+ //
+ // Even though path matching is done in normalized space, the special
+ // wildcard `%*` may be used in place of a span that should NOT be decoded;
+ // that is, `/bands/%*/` will match `/bands/AC%2fDC/` whereas `/bands/*/`
+ // will not.
+ //
// This matcher is fast, so it does not support regular expressions or
// capture groups. For slower but more powerful matching, use the
- // path_regexp matcher.
+ // path_regexp matcher. (Note that due to the special treatment of
+ // escape sequences in matcher patterns, they may perform slightly slower
+ // in high-traffic environments.)
MatchPath []string
// MatchPathRE matches requests by a regular expression on the URI's path.
+ // Path matching is performed in the unescaped (decoded) form of the path.
//
// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
// where `name` is the regular expression's name, and `capture_group` is either
@@ -78,15 +121,46 @@ type (
// MatchMethod matches requests by the method.
MatchMethod []string
- // MatchQuery matches requests by URI's query string.
+ // MatchQuery matches requests by the URI's query string. It takes a JSON object
+ // keyed by the query keys, with an array of string values to match for that key.
+ // Query key matches are exact, but wildcards may be used for value matches. Both
+ // keys and values may be placeholders.
+ //
+ // An example of the structure to match `?key=value&topic=api&query=something` is:
+ //
+ // ```json
+ // {
+ // "key": ["value"],
+ // "topic": ["api"],
+ // "query": ["*"]
+ // }
+ // ```
+ //
+ // Invalid query strings, including those with bad escapings or illegal characters
+ // like semicolons, will fail to parse and thus fail to match.
+ //
+ // **NOTE:** Notice that query string values are arrays, not singular values. This is
+ // because repeated keys are valid in query strings, and each one may have a
+ // different value. This matcher will match for a key if any one of its configured
+ // values is assigned in the query string. Backend applications relying on query
+ // strings MUST take into consideration that query string values are arrays and can
+ // have multiple values.
MatchQuery url.Values
- // MatchHeader matches requests by header fields. It performs fast,
+ // MatchHeader matches requests by header fields. The key is the field
+ // name and the array is the list of field values. It performs fast,
// exact string comparisons of the field values. Fast prefix, suffix,
// and substring matches can also be done by suffixing, prefixing, or
// surrounding the value with the wildcard `*` character, respectively.
// If a list is null, the header must not exist. If the list is empty,
// the field must simply exist, regardless of its value.
+ //
+ // **NOTE:** Notice that header values are arrays, not singular values. This is
+ // because repeated fields are valid in headers, and each one may have a
+ // different value. This matcher will match for a field if any one of its configured
+ // values matches in the header. Backend applications relying on headers MUST take
+ // into consideration that header field values are arrays and can have multiple
+ // values.
MatchHeader http.Header
// MatchHeaderRE matches requests by a regular expression on header fields.
@@ -98,14 +172,26 @@ type (
// (potentially leading to collisions).
MatchHeaderRE map[string]*MatchRegexp
- // MatchProtocol matches requests by protocol.
+ // MatchProtocol matches requests by protocol. Recognized values are
+ // "http", "https", and "grpc" for broad protocol matches, or specific
+ // HTTP versions can be specified like so: "http/1", "http/1.1",
+ // "http/2", "http/3", or minimum versions: "http/2+", etc.
MatchProtocol string
- // MatchRemoteIP matches requests by client IP (or CIDR range).
- MatchRemoteIP struct {
- Ranges []string `json:"ranges,omitempty"`
-
- cidrs []*net.IPNet
+ // MatchTLS matches HTTP requests based on the underlying
+ // TLS connection state. If this matcher is specified but
+ // the request did not come over TLS, it will never match.
+ // If this matcher is specified but is empty and the request
+ // did come in over TLS, it will always match.
+ MatchTLS struct {
+ // Matches if the TLS handshake has completed. QUIC 0-RTT early
+ // data may arrive before the handshake completes. Generally, it
+ // is unsafe to replay these requests if they are not idempotent;
+ // additionally, the remote IP of early data packets can more
+ // easily be spoofed. It is conventional to respond with HTTP 425
+ // Too Early if the request cannot risk being processed in this
+ // state.
+ HandshakeComplete *bool `json:"handshake_complete,omitempty"`
}
// MatchNot matches requests by negating the results of its matcher
@@ -115,9 +201,9 @@ type (
// matchers within a set work the same (i.e. different matchers in
// the same set are AND'ed).
//
- // Note that the generated docs which describe the structure of
- // this module are wrong because of how this type unmarshals JSON
- // in a custom way. The correct structure is:
+ // NOTE: The generated docs which describe the structure of this
+ // module are wrong because of how this type unmarshals JSON in a
+ // custom way. The correct structure is:
//
// ```json
// [
@@ -143,7 +229,7 @@ func init() {
caddy.RegisterModule(MatchHeader{})
caddy.RegisterModule(MatchHeaderRE{})
caddy.RegisterModule(new(MatchProtocol))
- caddy.RegisterModule(MatchRemoteIP{})
+ caddy.RegisterModule(MatchTLS{})
caddy.RegisterModule(MatchNot{})
}
@@ -157,6 +243,7 @@ func (MatchHost) CaddyModule() caddy.ModuleInfo {
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchHost) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
for d.Next() {
*m = append(*m, d.RemainingArgs()...)
if d.NextBlock(0) {
@@ -166,8 +253,55 @@ func (m *MatchHost) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
return nil
}
+// Provision sets up and validates m, including making it more efficient for large lists.
+func (m MatchHost) Provision(_ caddy.Context) error {
+ // check for duplicates; they are nonsensical and reduce efficiency
+ // (we could just remove them, but the user should know their config is erroneous)
+ seen := make(map[string]int, len(m))
+ for i, host := range m {
+ asciiHost, err := idna.ToASCII(host)
+ if err != nil {
+ return fmt.Errorf("converting hostname '%s' to ASCII: %v", host, err)
+ }
+ if asciiHost != host {
+ m[i] = asciiHost
+ }
+ normalizedHost := strings.ToLower(asciiHost)
+ if firstI, ok := seen[normalizedHost]; ok {
+ return fmt.Errorf("host at index %d is repeated at index %d: %s", firstI, i, host)
+ }
+ seen[normalizedHost] = i
+ }
+
+ if m.large() {
+ // sort the slice lexicographically, grouping "fuzzy" entries (wildcards and placeholders)
+ // at the front of the list; this allows us to use binary search for exact matches, which
+ // we have seen from experience is the most common kind of value in large lists; and any
+ // other kinds of values (wildcards and placeholders) are grouped in front so the linear
+ // search should find a match fairly quickly
+ sort.Slice(m, func(i, j int) bool {
+ iInexact, jInexact := m.fuzzy(m[i]), m.fuzzy(m[j])
+ if iInexact && !jInexact {
+ return true
+ }
+ if !iInexact && jInexact {
+ return false
+ }
+ return m[i] < m[j]
+ })
+ }
+
+ return nil
+}
+
// Match returns true if r matches m.
func (m MatchHost) Match(r *http.Request) bool {
+ match, _ := m.MatchWithError(r)
+ return match
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchHost) MatchWithError(r *http.Request) (bool, error) {
reqHost, _, err := net.SplitHostPort(r.Host)
if err != nil {
// OK; probably didn't have a port
@@ -178,10 +312,31 @@ func (m MatchHost) Match(r *http.Request) bool {
reqHost = strings.TrimSuffix(reqHost, "]")
}
+ if m.large() {
+ // fast path: locate exact match using binary search (about 100-1000x faster for large lists)
+ pos := sort.Search(len(m), func(i int) bool {
+ if m.fuzzy(m[i]) {
+ return false
+ }
+ return m[i] >= reqHost
+ })
+ if pos < len(m) && m[pos] == reqHost {
+ return true, nil
+ }
+ }
+
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
outer:
for _, host := range m {
+ // fast path: if matcher is large, we already know we don't have an exact
+ // match, so we're only looking for fuzzy match now, which should be at the
+ // front of the list; if we have reached a value that is not fuzzy, there
+ // will be no match and we can short-circuit for efficiency
+ if m.large() && !m.fuzzy(host) {
+ break
+ }
+
host = repl.ReplaceAll(host, "")
if strings.Contains(host, "*") {
patternParts := strings.Split(host, ".")
@@ -197,15 +352,48 @@ outer:
continue outer
}
}
- return true
+ return true, nil
} else if strings.EqualFold(reqHost, host) {
- return true
+ return true, nil
}
}
- return false
+ return false, nil
}
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+// expression host('localhost')
+func (MatchHost) CELLibrary(ctx caddy.Context) (cel.Library, error) {
+ return CELMatcherImpl(
+ "host",
+ "host_match_request_list",
+ []*cel.Type{cel.ListType(cel.StringType)},
+ func(data ref.Val) (RequestMatcherWithError, error) {
+ refStringList := reflect.TypeOf([]string{})
+ strList, err := data.ConvertToNative(refStringList)
+ if err != nil {
+ return nil, err
+ }
+ matcher := MatchHost(strList.([]string))
+ err = matcher.Provision(ctx)
+ return matcher, err
+ },
+ )
+}
+
+// fuzzy returns true if the given hostname h is not a specific
+// hostname, e.g. has placeholders or wildcards.
+func (MatchHost) fuzzy(h string) bool { return strings.ContainsAny(h, "{*") }
+
+// large returns true if m is considered to be large. Optimizing
+// the matcher for smaller lists has diminishing returns.
+// See related benchmark function in test file to conduct experiments.
+func (m MatchHost) large() bool { return len(m) > 100 }
+
// CaddyModule returns the Caddy module information.
func (MatchPath) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
@@ -217,6 +405,11 @@ func (MatchPath) CaddyModule() caddy.ModuleInfo {
// Provision lower-cases the paths in m to ensure case-insensitive matching.
func (m MatchPath) Provision(_ caddy.Context) error {
for i := range m {
+ if m[i] == "*" && i > 0 {
+ // will always match, so just put it first
+ m[0] = m[i]
+ break
+ }
m[i] = strings.ToLower(m[i])
}
return nil
@@ -224,68 +417,259 @@ func (m MatchPath) Provision(_ caddy.Context) error {
// Match returns true if r matches m.
func (m MatchPath) Match(r *http.Request) bool {
- lowerPath := strings.ToLower(r.URL.Path)
+ match, _ := m.MatchWithError(r)
+ return match
+}
- // see #2917; Windows ignores trailing dots and spaces
+// MatchWithError returns true if r matches m.
+func (m MatchPath) MatchWithError(r *http.Request) (bool, error) {
+ // Even though RFC 9110 says that path matching is case-sensitive
+ // (https://www.rfc-editor.org/rfc/rfc9110.html#section-4.2.3),
+ // we do case-insensitive matching to mitigate security issues
+ // related to differences between operating systems, applications,
+ // etc; if case-sensitive matching is needed, the regex matcher
+ // can be used instead.
+ reqPath := strings.ToLower(r.URL.Path)
+
+ // See #2917; Windows ignores trailing dots and spaces
// when accessing files (sigh), potentially causing a
// security risk (cry) if PHP files end up being served
// as static files, exposing the source code, instead of
- // being matched by *.php to be treated as PHP scripts
- lowerPath = strings.TrimRight(lowerPath, ". ")
+ // being matched by *.php to be treated as PHP scripts.
+ if runtime.GOOS == "windows" { // issue #5613
+ reqPath = strings.TrimRight(reqPath, ". ")
+ }
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- for _, matchPath := range m {
- matchPath = repl.ReplaceAll(matchPath, "")
+ for _, matchPattern := range m {
+ matchPattern = repl.ReplaceAll(matchPattern, "")
// special case: whole path is wildcard; this is unnecessary
// as it matches all requests, which is the same as no matcher
- if matchPath == "*" {
- return true
+ if matchPattern == "*" {
+ return true, nil
}
+ // Clean the path, merge doubled slashes, etc.
+ // This ensures maliciously crafted requests can't bypass
+ // the path matcher. See #4407. Good security posture
+ // requires that we should do all we can to reduce any
+ // funny-looking paths into "normalized" forms such that
+ // weird variants can't sneak by.
+ //
+ // How we clean the path depends on the kind of pattern:
+ // we either merge slashes or we don't. If the pattern
+ // has double slashes, we preserve them in the path.
+ //
+ // TODO: Despite the fact that the *vast* majority of path
+ // matchers have only 1 pattern, a possible optimization is
+ // to remember the cleaned form of the path for future
+ // iterations; it's just that the way we clean depends on
+ // the kind of pattern.
+
+ mergeSlashes := !strings.Contains(matchPattern, "//")
+
+ // if '%' appears in the match pattern, we interpret that to mean
+ // the intent is to compare that part of the path in raw/escaped
+ // space; i.e. "%40"=="%40", not "@", and "%2F"=="%2F", not "/"
+ if strings.Contains(matchPattern, "%") {
+ reqPathForPattern := CleanPath(r.URL.EscapedPath(), mergeSlashes)
+ if m.matchPatternWithEscapeSequence(reqPathForPattern, matchPattern) {
+ return true, nil
+ }
+
+ // doing prefix/suffix/substring matches doesn't make sense
+ continue
+ }
+
+ reqPathForPattern := CleanPath(reqPath, mergeSlashes)
+
+ // for substring, prefix, and suffix matching, only perform those
+ // special, fast matches if they are the only wildcards in the pattern;
+ // otherwise we assume a globular match if any * appears in the middle
+
// special case: first and last characters are wildcard,
// treat it as a fast substring match
- if len(matchPath) > 1 &&
- strings.HasPrefix(matchPath, "*") &&
- strings.HasSuffix(matchPath, "*") {
- if strings.Contains(lowerPath, matchPath[1:len(matchPath)-1]) {
- return true
+ if strings.Count(matchPattern, "*") == 2 &&
+ strings.HasPrefix(matchPattern, "*") &&
+ strings.HasSuffix(matchPattern, "*") {
+ if strings.Contains(reqPathForPattern, matchPattern[1:len(matchPattern)-1]) {
+ return true, nil
}
continue
}
- // special case: first character is a wildcard,
- // treat it as a fast suffix match
- if strings.HasPrefix(matchPath, "*") {
- if strings.HasSuffix(lowerPath, matchPath[1:]) {
- return true
+ // only perform prefix/suffix match if it is the only wildcard...
+ // this is more likely the intended behavior most of the time
+ if strings.Count(matchPattern, "*") == 1 {
+ // special case: first character is a wildcard,
+ // treat it as a fast suffix match
+ if strings.HasPrefix(matchPattern, "*") {
+ if strings.HasSuffix(reqPathForPattern, matchPattern[1:]) {
+ return true, nil
+ }
+ continue
+ }
+
+ // special case: last character is a wildcard,
+ // treat it as a fast prefix match
+ if strings.HasSuffix(matchPattern, "*") {
+ if strings.HasPrefix(reqPathForPattern, matchPattern[:len(matchPattern)-1]) {
+ return true, nil
+ }
+ continue
}
- continue
}
- // special case: last character is a wildcard,
- // treat it as a fast prefix match
- if strings.HasSuffix(matchPath, "*") {
- if strings.HasPrefix(lowerPath, matchPath[:len(matchPath)-1]) {
- return true
- }
- continue
- }
-
- // for everything else, try globular matching, which also
- // is exact matching if there are no glob/wildcard chars;
- // can ignore error here because we can't handle it anyway
- matches, _ := filepath.Match(matchPath, lowerPath)
+ // at last, use globular matching, which also is exact matching
+ // if there are no glob/wildcard chars; we ignore the error here
+ // because we can't handle it anyway
+ matches, _ := path.Match(matchPattern, reqPathForPattern)
if matches {
- return true
+ return true, nil
}
}
- return false
+ return false, nil
+}
+
+func (MatchPath) matchPatternWithEscapeSequence(escapedPath, matchPath string) bool {
+ // We would just compare the pattern against r.URL.Path,
+ // but the pattern contains %, indicating that we should
+ // compare at least some part of the path in raw/escaped
+ // space, not normalized space; so we build the string we
+ // will compare against by adding the normalized parts
+ // of the path, then switching to the escaped parts where
+ // the pattern hints to us wherever % is present.
+ var sb strings.Builder
+
+ // iterate the pattern and escaped path in lock-step;
+ // increment iPattern every time we consume a char from the pattern,
+ // increment iPath every time we consume a char from the path;
+ // iPattern and iPath are our cursors/iterator positions for each string
+ var iPattern, iPath int
+ for {
+ if iPattern >= len(matchPath) || iPath >= len(escapedPath) {
+ break
+ }
+
+ // get the next character from the request path
+
+ pathCh := string(escapedPath[iPath])
+ var escapedPathCh string
+
+ // normalize (decode) escape sequences
+ if pathCh == "%" && len(escapedPath) >= iPath+3 {
+ // hold onto this in case we find out the intent is to match in escaped space here;
+ // we lowercase it even though technically the spec says: "For consistency, URI
+ // producers and normalizers should use uppercase hexadecimal digits for all percent-
+ // encodings" (RFC 3986 section 2.1) - we lowercased the matcher pattern earlier in
+ // provisioning so we do the same here to gain case-insensitivity in equivalence;
+ // besides, this string is never shown visibly
+ escapedPathCh = strings.ToLower(escapedPath[iPath : iPath+3])
+
+ var err error
+ pathCh, err = url.PathUnescape(escapedPathCh)
+ if err != nil {
+ // should be impossible unless EscapedPath() is giving us an invalid sequence!
+ return false
+ }
+ iPath += 2 // escape sequence is 2 bytes longer than normal char
+ }
+
+ // now get the next character from the pattern
+
+ normalize := true
+ switch matchPath[iPattern] {
+ case '%':
+ // escape sequence
+
+ // if not a wildcard ("%*"), compare literally; consume next two bytes of pattern
+ if len(matchPath) >= iPattern+3 && matchPath[iPattern+1] != '*' {
+ sb.WriteString(escapedPathCh)
+ iPath++
+ iPattern += 2
+ break
+ }
+
+ // escaped wildcard sequence; consume next byte only ('*')
+ iPattern++
+ normalize = false
+
+ fallthrough
+ case '*':
+ // wildcard, so consume until next matching character
+ remaining := escapedPath[iPath:]
+ until := len(escapedPath) - iPath // go until end of string...
+ if iPattern < len(matchPath)-1 { // ...unless the * is not at the end
+ nextCh := matchPath[iPattern+1]
+ until = strings.IndexByte(remaining, nextCh)
+ if until == -1 {
+ // terminating char of wildcard span not found, so definitely no match
+ return false
+ }
+ }
+ if until == 0 {
+ // empty span; nothing to add on this iteration
+ break
+ }
+ next := remaining[:until]
+ if normalize {
+ var err error
+ next, err = url.PathUnescape(next)
+ if err != nil {
+ return false // should be impossible anyway
+ }
+ }
+ sb.WriteString(next)
+ iPath += until
+ default:
+ sb.WriteString(pathCh)
+ iPath++
+ }
+
+ iPattern++
+ }
+
+ // we can now treat rawpath globs (%*) as regular globs (*)
+ matchPath = strings.ReplaceAll(matchPath, "%*", "*")
+
+ // ignore error here because we can't handle it anyway
+ matches, _ := path.Match(matchPath, sb.String())
+ return matches
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+// expression path('*substring*', '*suffix')
+func (MatchPath) CELLibrary(ctx caddy.Context) (cel.Library, error) {
+ return CELMatcherImpl(
+ // name of the macro, this is the function name that users see when writing expressions.
+ "path",
+ // name of the function that the macro will be rewritten to call.
+ "path_match_request_list",
+ // internal data type of the MatchPath value.
+ []*cel.Type{cel.ListType(cel.StringType)},
+ // function to convert a constant list of strings to a MatchPath instance.
+ func(data ref.Val) (RequestMatcherWithError, error) {
+ refStringList := reflect.TypeOf([]string{})
+ strList, err := data.ConvertToNative(refStringList)
+ if err != nil {
+ return nil, err
+ }
+ matcher := MatchPath(strList.([]string))
+ err = matcher.Provision(ctx)
+ return matcher, err
+ },
+ )
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchPath) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
for d.Next() {
*m = append(*m, d.RemainingArgs()...)
if d.NextBlock(0) {
@@ -305,8 +689,75 @@ func (MatchPathRE) CaddyModule() caddy.ModuleInfo {
// Match returns true if r matches m.
func (m MatchPathRE) Match(r *http.Request) bool {
+ match, _ := m.MatchWithError(r)
+ return match
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchPathRE) MatchWithError(r *http.Request) (bool, error) {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
- return m.MatchRegexp.Match(r.URL.Path, repl)
+
+ // Clean the path, merging doubled slashes, etc.
+ // This ensures maliciously crafted requests can't bypass
+ // the path matcher. See #4407
+ cleanedPath := cleanPath(r.URL.Path)
+
+ return m.MatchRegexp.Match(cleanedPath, repl), nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+// expression path_regexp('^/bar')
+func (MatchPathRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
+ unnamedPattern, err := CELMatcherImpl(
+ "path_regexp",
+ "path_regexp_request_string",
+ []*cel.Type{cel.StringType},
+ func(data ref.Val) (RequestMatcherWithError, error) {
+ pattern := data.(types.String)
+ matcher := MatchPathRE{MatchRegexp{
+ Name: ctx.Value(MatcherNameCtxKey).(string),
+ Pattern: string(pattern),
+ }}
+ err := matcher.Provision(ctx)
+ return matcher, err
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+ namedPattern, err := CELMatcherImpl(
+ "path_regexp",
+ "path_regexp_request_string_string",
+ []*cel.Type{cel.StringType, cel.StringType},
+ func(data ref.Val) (RequestMatcherWithError, error) {
+ refStringList := reflect.TypeOf([]string{})
+ params, err := data.ConvertToNative(refStringList)
+ if err != nil {
+ return nil, err
+ }
+ strParams := params.([]string)
+ name := strParams[0]
+ if name == "" {
+ name = ctx.Value(MatcherNameCtxKey).(string)
+ }
+ matcher := MatchPathRE{MatchRegexp{
+ Name: name,
+ Pattern: strParams[1],
+ }}
+ err = matcher.Provision(ctx)
+ return matcher, err
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+ envOpts := append(unnamedPattern.CompileOptions(), namedPattern.CompileOptions()...)
+ prgOpts := append(unnamedPattern.ProgramOptions(), namedPattern.ProgramOptions()...)
+ return NewMatcherCELLibrary(envOpts, prgOpts), nil
}
// CaddyModule returns the Caddy module information.
@@ -319,6 +770,7 @@ func (MatchMethod) CaddyModule() caddy.ModuleInfo {
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchMethod) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
for d.Next() {
*m = append(*m, d.RemainingArgs()...)
if d.NextBlock(0) {
@@ -330,12 +782,35 @@ func (m *MatchMethod) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// Match returns true if r matches m.
func (m MatchMethod) Match(r *http.Request) bool {
- for _, method := range m {
- if r.Method == method {
- return true
- }
- }
- return false
+ match, _ := m.MatchWithError(r)
+ return match
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchMethod) MatchWithError(r *http.Request) (bool, error) {
+ return slices.Contains(m, r.Method), nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+// expression method('PUT', 'POST')
+func (MatchMethod) CELLibrary(_ caddy.Context) (cel.Library, error) {
+ return CELMatcherImpl(
+ "method",
+ "method_request_list",
+ []*cel.Type{cel.ListType(cel.StringType)},
+ func(data ref.Val) (RequestMatcherWithError, error) {
+ refStringList := reflect.TypeOf([]string{})
+ strList, err := data.ConvertToNative(refStringList)
+ if err != nil {
+ return nil, err
+ }
+ return MatchMethod(strList.([]string)), nil
+ },
+ )
}
// CaddyModule returns the Caddy module information.
@@ -351,17 +826,18 @@ func (m *MatchQuery) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if *m == nil {
*m = make(map[string][]string)
}
-
+ // iterate to merge multiple matchers into one
for d.Next() {
- var query string
- if !d.Args(&query) {
- return d.ArgErr()
+ for _, query := range d.RemainingArgs() {
+ if query == "" {
+ continue
+ }
+ before, after, found := strings.Cut(query, "=")
+ if !found {
+ return d.Errf("malformed query matcher token: %s; must be in param=val format", d.Val())
+ }
+ url.Values(*m).Add(before, after)
}
- parts := strings.SplitN(query, "=", 2)
- if len(parts) != 2 {
- return d.Errf("malformed query matcher token: %s; must be in param=val format", d.Val())
- }
- url.Values(*m).Set(parts[0], parts[1])
if d.NextBlock(0) {
return d.Err("malformed query matcher: blocks are not supported")
}
@@ -369,19 +845,78 @@ func (m *MatchQuery) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
return nil
}
-// Match returns true if r matches m.
+// Match returns true if r matches m. An empty m matches an empty query string.
func (m MatchQuery) Match(r *http.Request) bool {
+ match, _ := m.MatchWithError(r)
+ return match
+}
+
+// MatchWithError returns true if r matches m.
+// An empty m matches an empty query string.
+func (m MatchQuery) MatchWithError(r *http.Request) (bool, error) {
+ // If no query keys are configured, this only
+ // matches an empty query string.
+ if len(m) == 0 {
+ return len(r.URL.Query()) == 0, nil
+ }
+
+ repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+
+ // parse query string just once, for efficiency
+ parsed, err := url.ParseQuery(r.URL.RawQuery)
+ if err != nil {
+ // Illegal query string. Likely bad escape sequence or unescaped literals.
+ // Note that semicolons in query string have a controversial history. Summaries:
+ // - https://github.com/golang/go/issues/50034
+ // - https://github.com/golang/go/issues/25192
+ // Despite the URL WHATWG spec mandating the use of & separators for query strings,
+ // every URL parser implementation is different, and Filippo Valsorda rightly wrote:
+ // "Relying on parser alignment for security is doomed." Overall conclusion is that
+ // splitting on & and rejecting ; in key=value pairs is safer than accepting raw ;.
+ // We regard the Go team's decision as sound and thus reject malformed query strings.
+ return false, nil
+ }
+
+ // Count the amount of matched keys, to ensure we AND
+ // between all configured query keys; all keys must
+ // match at least one value.
+ matchedKeys := 0
for param, vals := range m {
- paramVal, found := r.URL.Query()[param]
- if found {
- for _, v := range vals {
- if paramVal[0] == v || v == "*" {
- return true
- }
+ param = repl.ReplaceAll(param, "")
+ paramVal, found := parsed[param]
+ if !found {
+ return false, nil
+ }
+ for _, v := range vals {
+ v = repl.ReplaceAll(v, "")
+ if slices.Contains(paramVal, v) || v == "*" {
+ matchedKeys++
+ break
}
}
}
- return false
+ return matchedKeys == len(m), nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+// expression query({'sort': 'asc'}) || query({'foo': ['*bar*', 'baz']})
+func (MatchQuery) CELLibrary(_ caddy.Context) (cel.Library, error) {
+ return CELMatcherImpl(
+ "query",
+ "query_matcher_request_map",
+ []*cel.Type{CELTypeJSON},
+ func(data ref.Val) (RequestMatcherWithError, error) {
+ mapStrListStr, err := CELValueToMapStrList(data)
+ if err != nil {
+ return nil, err
+ }
+ return MatchQuery(url.Values(mapStrListStr)), nil
+ },
+ )
}
// CaddyModule returns the Caddy module information.
@@ -397,12 +932,36 @@ func (m *MatchHeader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if *m == nil {
*m = make(map[string][]string)
}
+ // iterate to merge multiple matchers into one
for d.Next() {
var field, val string
- if !d.Args(&field, &val) {
- return d.Errf("malformed header matcher: expected both field and value")
+ if !d.Args(&field) {
+ return d.Errf("malformed header matcher: expected field")
}
- http.Header(*m).Set(field, val)
+
+ if strings.HasPrefix(field, "!") {
+ if len(field) == 1 {
+ return d.Errf("malformed header matcher: must have field name following ! character")
+ }
+
+ field = field[1:]
+ headers := *m
+ headers[field] = nil
+ m = &headers
+ if d.NextArg() {
+ return d.Errf("malformed header matcher: null matching headers cannot have a field value")
+ }
+ } else {
+ if !d.NextArg() {
+ return d.Errf("malformed header matcher: expected both field and value")
+ }
+
+ // If multiple header matchers with the same header field are defined,
+ // we want to add the existing to the list of headers (will be OR'ed)
+ val = d.Val()
+ http.Header(*m).Add(field, val)
+ }
+
if d.NextBlock(0) {
return d.Err("malformed header matcher: blocks are not supported")
}
@@ -410,32 +969,73 @@ func (m *MatchHeader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
return nil
}
-// Like req.Header.Get(), but that works with Host header.
-// go's http module swallows "Host" header.
-func getHeader(r *http.Request, field string) []string {
- field = textproto.CanonicalMIMEHeaderKey(field)
-
- if field == "Host" {
- return []string{r.Host}
- }
-
- return r.Header[field]
-}
-
// Match returns true if r matches m.
func (m MatchHeader) Match(r *http.Request) bool {
- for field, allowedFieldVals := range m {
- actualFieldVals := getHeader(r, field)
+ match, _ := m.MatchWithError(r)
+ return match
+}
+// MatchWithError returns true if r matches m.
+func (m MatchHeader) MatchWithError(r *http.Request) (bool, error) {
+ repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+ return matchHeaders(r.Header, http.Header(m), r.Host, repl), nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+// expression header({'content-type': 'image/png'})
+// expression header({'foo': ['bar', 'baz']}) // match bar or baz
+func (MatchHeader) CELLibrary(_ caddy.Context) (cel.Library, error) {
+ return CELMatcherImpl(
+ "header",
+ "header_matcher_request_map",
+ []*cel.Type{CELTypeJSON},
+ func(data ref.Val) (RequestMatcherWithError, error) {
+ mapStrListStr, err := CELValueToMapStrList(data)
+ if err != nil {
+ return nil, err
+ }
+ return MatchHeader(http.Header(mapStrListStr)), nil
+ },
+ )
+}
+
+// getHeaderFieldVals returns the field values for the given fieldName from input.
+// The host parameter should be obtained from the http.Request.Host field since
+// net/http removes it from the header map.
+func getHeaderFieldVals(input http.Header, fieldName, host string) []string {
+ fieldName = textproto.CanonicalMIMEHeaderKey(fieldName)
+ if fieldName == "Host" && host != "" {
+ return []string{host}
+ }
+ return input[fieldName]
+}
+
+// matchHeaders returns true if input matches the criteria in against without regex.
+// The host parameter should be obtained from the http.Request.Host field since
+// net/http removes it from the header map.
+func matchHeaders(input, against http.Header, host string, repl *caddy.Replacer) bool {
+ for field, allowedFieldVals := range against {
+ actualFieldVals := getHeaderFieldVals(input, field, host)
if allowedFieldVals != nil && len(allowedFieldVals) == 0 && actualFieldVals != nil {
// a non-nil but empty list of allowed values means
// match if the header field exists at all
continue
}
+ if allowedFieldVals == nil && actualFieldVals == nil {
+ // a nil list means match if the header does not exist at all
+ continue
+ }
var match bool
fieldVals:
for _, actualFieldVal := range actualFieldVals {
for _, allowedFieldVal := range allowedFieldVals {
+ if repl != nil {
+ allowedFieldVal = repl.ReplaceAll(allowedFieldVal, "")
+ }
switch {
case allowedFieldVal == "*":
match = true
@@ -473,6 +1073,7 @@ func (m *MatchHeaderRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if *m == nil {
*m = make(map[string]*MatchRegexp)
}
+ // iterate to merge multiple matchers into one
for d.Next() {
var first, second, third string
if !d.Args(&first, &second) {
@@ -489,6 +1090,17 @@ func (m *MatchHeaderRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
val = second
}
+ // Default to the named matcher's name, if no regexp name is provided
+ if name == "" {
+ name = d.GetContextString(caddyfile.MatcherNameCtxKey)
+ }
+
+ // If there's already a pattern for this field
+ // then we would end up overwriting the old one
+ if (*m)[field] != nil {
+ return d.Errf("header_regexp matcher can only be used once per named matcher, per header field: %s", field)
+ }
+
(*m)[field] = &MatchRegexp{Pattern: val, Name: name}
if d.NextBlock(0) {
@@ -500,9 +1112,14 @@ func (m *MatchHeaderRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// Match returns true if r matches m.
func (m MatchHeaderRE) Match(r *http.Request) bool {
- for field, rm := range m {
- actualFieldVals := getHeader(r, field)
+ match, _ := m.MatchWithError(r)
+ return match
+}
+// MatchWithError returns true if r matches m.
+func (m MatchHeaderRE) MatchWithError(r *http.Request) (bool, error) {
+ for field, rm := range m {
+ actualFieldVals := getHeaderFieldVals(r.Header, field, r.Host)
match := false
fieldVal:
for _, actualFieldVal := range actualFieldVals {
@@ -513,10 +1130,10 @@ func (m MatchHeaderRE) Match(r *http.Request) bool {
}
}
if !match {
- return false
+ return false, nil
}
}
- return true
+ return true, nil
}
// Provision compiles m's regular expressions.
@@ -541,6 +1158,68 @@ func (m MatchHeaderRE) Validate() error {
return nil
}
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+// expression header_regexp('foo', 'Field', 'fo+')
+func (MatchHeaderRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
+ unnamedPattern, err := CELMatcherImpl(
+ "header_regexp",
+ "header_regexp_request_string_string",
+ []*cel.Type{cel.StringType, cel.StringType},
+ func(data ref.Val) (RequestMatcherWithError, error) {
+ refStringList := reflect.TypeOf([]string{})
+ params, err := data.ConvertToNative(refStringList)
+ if err != nil {
+ return nil, err
+ }
+ strParams := params.([]string)
+ matcher := MatchHeaderRE{}
+ matcher[strParams[0]] = &MatchRegexp{
+ Pattern: strParams[1],
+ Name: ctx.Value(MatcherNameCtxKey).(string),
+ }
+ err = matcher.Provision(ctx)
+ return matcher, err
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+ namedPattern, err := CELMatcherImpl(
+ "header_regexp",
+ "header_regexp_request_string_string_string",
+ []*cel.Type{cel.StringType, cel.StringType, cel.StringType},
+ func(data ref.Val) (RequestMatcherWithError, error) {
+ refStringList := reflect.TypeOf([]string{})
+ params, err := data.ConvertToNative(refStringList)
+ if err != nil {
+ return nil, err
+ }
+ strParams := params.([]string)
+ name := strParams[0]
+ if name == "" {
+ name = ctx.Value(MatcherNameCtxKey).(string)
+ }
+ matcher := MatchHeaderRE{}
+ matcher[strParams[1]] = &MatchRegexp{
+ Pattern: strParams[2],
+ Name: name,
+ }
+ err = matcher.Provision(ctx)
+ return matcher, err
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+ envOpts := append(unnamedPattern.CompileOptions(), namedPattern.CompileOptions()...)
+ prgOpts := append(unnamedPattern.ProgramOptions(), namedPattern.ProgramOptions()...)
+ return NewMatcherCELLibrary(envOpts, prgOpts), nil
+}
+
// CaddyModule returns the Caddy module information.
func (MatchProtocol) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
@@ -551,19 +1230,42 @@ func (MatchProtocol) CaddyModule() caddy.ModuleInfo {
// Match returns true if r matches m.
func (m MatchProtocol) Match(r *http.Request) bool {
+ match, _ := m.MatchWithError(r)
+ return match
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchProtocol) MatchWithError(r *http.Request) (bool, error) {
switch string(m) {
case "grpc":
- return r.Header.Get("content-type") == "application/grpc"
+ return strings.HasPrefix(r.Header.Get("content-type"), "application/grpc"), nil
case "https":
- return r.TLS != nil
+ return r.TLS != nil, nil
case "http":
- return r.TLS == nil
+ return r.TLS == nil, nil
+ case "http/1.0":
+ return r.ProtoMajor == 1 && r.ProtoMinor == 0, nil
+ case "http/1.0+":
+ return r.ProtoAtLeast(1, 0), nil
+ case "http/1.1":
+ return r.ProtoMajor == 1 && r.ProtoMinor == 1, nil
+ case "http/1.1+":
+ return r.ProtoAtLeast(1, 1), nil
+ case "http/2":
+ return r.ProtoMajor == 2, nil
+ case "http/2+":
+ return r.ProtoAtLeast(2, 0), nil
+ case "http/3":
+ return r.ProtoMajor == 3, nil
+ case "http/3+":
+ return r.ProtoAtLeast(3, 0), nil
}
- return false
+ return false, nil
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchProtocol) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
for d.Next() {
var proto string
if !d.Args(&proto) {
@@ -574,6 +1276,80 @@ func (m *MatchProtocol) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
return nil
}
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+// expression protocol('https')
+func (MatchProtocol) CELLibrary(_ caddy.Context) (cel.Library, error) {
+ return CELMatcherImpl(
+ "protocol",
+ "protocol_request_string",
+ []*cel.Type{cel.StringType},
+ func(data ref.Val) (RequestMatcherWithError, error) {
+ protocolStr, ok := data.(types.String)
+ if !ok {
+ return nil, errors.New("protocol argument was not a string")
+ }
+ return MatchProtocol(strings.ToLower(string(protocolStr))), nil
+ },
+ )
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchTLS) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.matchers.tls",
+ New: func() caddy.Module { return new(MatchTLS) },
+ }
+}
+
+// Match returns true if r matches m.
+func (m MatchTLS) Match(r *http.Request) bool {
+ match, _ := m.MatchWithError(r)
+ return match
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchTLS) MatchWithError(r *http.Request) (bool, error) {
+ if r.TLS == nil {
+ return false, nil
+ }
+ if m.HandshakeComplete != nil {
+ if (!*m.HandshakeComplete && r.TLS.HandshakeComplete) ||
+ (*m.HandshakeComplete && !r.TLS.HandshakeComplete) {
+ return false, nil
+ }
+ }
+ return true, nil
+}
+
+// UnmarshalCaddyfile parses Caddyfile tokens for this matcher. Syntax:
+//
+// ... tls [early_data]
+//
+// EXPERIMENTAL SYNTAX: Subject to change.
+func (m *MatchTLS) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
+ for d.Next() {
+ if d.NextArg() {
+ switch d.Val() {
+ case "early_data":
+ var false bool
+ m.HandshakeComplete = &false
+ }
+ }
+ if d.NextArg() {
+ return d.ArgErr()
+ }
+ if d.NextBlock(0) {
+ return d.Err("malformed tls matcher: blocks are not supported yet")
+ }
+ }
+ return nil
+}
+
// CaddyModule returns the Caddy module information.
func (MatchNot) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
@@ -584,45 +1360,13 @@ func (MatchNot) CaddyModule() caddy.ModuleInfo {
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchNot) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- // first, unmarshal each matcher in the set from its tokens
- type matcherPair struct {
- raw caddy.ModuleMap
- decoded MatcherSet
- }
+ // iterate to merge multiple matchers into one
for d.Next() {
- var mp matcherPair
- matcherMap := make(map[string]RequestMatcher)
- for d.NextArg() || d.NextBlock(0) {
- matcherName := d.Val()
- mod, err := caddy.GetModule("http.matchers." + matcherName)
- if err != nil {
- return d.Errf("getting matcher module '%s': %v", matcherName, err)
- }
- unm, ok := mod.New().(caddyfile.Unmarshaler)
- if !ok {
- return d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
- }
- err = unm.UnmarshalCaddyfile(d.NewFromNextSegment())
- if err != nil {
- return err
- }
- rm := unm.(RequestMatcher)
- matcherMap[matcherName] = rm
- mp.decoded = append(mp.decoded, rm)
+ matcherSet, err := ParseCaddyfileNestedMatcherSet(d)
+ if err != nil {
+ return err
}
-
- // we should now have a functional 'not' matcher, but we also
- // need to be able to marshal as JSON, otherwise config
- // adaptation will be missing the matchers!
- mp.raw = make(caddy.ModuleMap)
- for name, matcher := range matcherMap {
- jsonBytes, err := json.Marshal(matcher)
- if err != nil {
- return fmt.Errorf("marshaling %T matcher: %v", matcher, err)
- }
- mp.raw[name] = jsonBytes
- }
- m.MatcherSetsRaw = append(m.MatcherSetsRaw, mp.raw)
+ m.MatcherSetsRaw = append(m.MatcherSetsRaw, matcherSet)
}
return nil
}
@@ -645,10 +1389,18 @@ func (m *MatchNot) Provision(ctx caddy.Context) error {
if err != nil {
return fmt.Errorf("loading matcher sets: %v", err)
}
- for _, modMap := range matcherSets.([]map[string]interface{}) {
+ for _, modMap := range matcherSets.([]map[string]any) {
var ms MatcherSet
for _, modIface := range modMap {
- ms = append(ms, modIface.(RequestMatcher))
+ if mod, ok := modIface.(RequestMatcherWithError); ok {
+ ms = append(ms, mod)
+ continue
+ }
+ if mod, ok := modIface.(RequestMatcher); ok {
+ ms = append(ms, mod)
+ continue
+ }
+ return fmt.Errorf("module is not a request matcher: %T", modIface)
}
m.MatcherSets = append(m.MatcherSets, ms)
}
@@ -659,92 +1411,24 @@ func (m *MatchNot) Provision(ctx caddy.Context) error {
// the embedded matchers, false is returned if any of its matcher
// sets return true.
func (m MatchNot) Match(r *http.Request) bool {
+ match, _ := m.MatchWithError(r)
+ return match
+}
+
+// MatchWithError returns true if r matches m. Since this matcher
+// negates the embedded matchers, false is returned if any of its
+// matcher sets return true.
+func (m MatchNot) MatchWithError(r *http.Request) (bool, error) {
for _, ms := range m.MatcherSets {
- if ms.Match(r) {
- return false
+ matches, err := ms.MatchWithError(r)
+ if err != nil {
+ return false, err
+ }
+ if matches {
+ return false, nil
}
}
- return true
-}
-
-// CaddyModule returns the Caddy module information.
-func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.matchers.remote_ip",
- New: func() caddy.Module { return new(MatchRemoteIP) },
- }
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchRemoteIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- m.Ranges = append(m.Ranges, d.RemainingArgs()...)
- if d.NextBlock(0) {
- return d.Err("malformed remote_ip matcher: blocks are not supported")
- }
- }
- return nil
-}
-
-// Provision parses m's IP ranges, either from IP or CIDR expressions.
-func (m *MatchRemoteIP) Provision(ctx caddy.Context) error {
- for _, str := range m.Ranges {
- if strings.Contains(str, "/") {
- _, ipNet, err := net.ParseCIDR(str)
- if err != nil {
- return fmt.Errorf("parsing CIDR expression: %v", err)
- }
- m.cidrs = append(m.cidrs, ipNet)
- } else {
- ip := net.ParseIP(str)
- if ip == nil {
- return fmt.Errorf("invalid IP address: %s", str)
- }
- mask := len(ip) * 8
- m.cidrs = append(m.cidrs, &net.IPNet{
- IP: ip,
- Mask: net.CIDRMask(mask, mask),
- })
- }
- }
- return nil
-}
-
-func (m MatchRemoteIP) getClientIP(r *http.Request) (net.IP, error) {
- var remote string
- if fwdFor := r.Header.Get("X-Forwarded-For"); fwdFor != "" {
- remote = strings.TrimSpace(strings.Split(fwdFor, ",")[0])
- }
- if remote == "" {
- remote = r.RemoteAddr
- }
-
- ipStr, _, err := net.SplitHostPort(remote)
- if err != nil {
- ipStr = remote // OK; probably didn't have a port
- }
-
- ip := net.ParseIP(ipStr)
- if ip == nil {
- return nil, fmt.Errorf("invalid client IP address: %s", ipStr)
- }
-
- return ip, nil
-}
-
-// Match returns true if r matches m.
-func (m MatchRemoteIP) Match(r *http.Request) bool {
- clientIP, err := m.getClientIP(r)
- if err != nil {
- log.Printf("[ERROR] remote_ip matcher: %v", err)
- return false
- }
- for _, ipRange := range m.cidrs {
- if ipRange.Contains(clientIP) {
- return true
- }
- }
- return false
+ return true, nil
}
// MatchRegexp is an embedable type for matching
@@ -767,7 +1451,6 @@ type MatchRegexp struct {
Pattern string `json:"pattern"`
compiled *regexp.Regexp
- phPrefix string
}
// Provision compiles the regular expression.
@@ -777,10 +1460,6 @@ func (mre *MatchRegexp) Provision(caddy.Context) error {
return fmt.Errorf("compiling matcher regexp %s: %v", mre.Pattern, err)
}
mre.compiled = re
- mre.phPrefix = regexpPlaceholderPrefix
- if mre.Name != "" {
- mre.phPrefix += "." + mre.Name
- }
return nil
}
@@ -804,16 +1483,25 @@ func (mre *MatchRegexp) Match(input string, repl *caddy.Replacer) bool {
// save all capture groups, first by index
for i, match := range matches {
- key := fmt.Sprintf("%s.%d", mre.phPrefix, i)
- repl.Set(key, match)
+ keySuffix := "." + strconv.Itoa(i)
+ if mre.Name != "" {
+ repl.Set(regexpPlaceholderPrefix+"."+mre.Name+keySuffix, match)
+ }
+ repl.Set(regexpPlaceholderPrefix+keySuffix, match)
}
// then by name
for i, name := range mre.compiled.SubexpNames() {
- if i != 0 && name != "" {
- key := fmt.Sprintf("%s.%s", mre.phPrefix, name)
- repl.Set(key, matches[i])
+ // skip the first element (the full match), and empty names
+ if i == 0 || name == "" {
+ continue
}
+
+ keySuffix := "." + name
+ if mre.Name != "" {
+ repl.Set(regexpPlaceholderPrefix+"."+mre.Name+keySuffix, matches[i])
+ }
+ repl.Set(regexpPlaceholderPrefix+keySuffix, matches[i])
}
return true
@@ -821,7 +1509,15 @@ func (mre *MatchRegexp) Match(input string, repl *caddy.Replacer) bool {
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
for d.Next() {
+ // If this is the second iteration of the loop
+ // then there's more than one path_regexp matcher
+ // and we would end up overwriting the old one
+ if mre.Pattern != "" {
+ return d.Err("regular expression can only be used once per named matcher")
+ }
+
args := d.RemainingArgs()
switch len(args) {
case 1:
@@ -832,6 +1528,12 @@ func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
default:
return d.ArgErr()
}
+
+ // Default to the named matcher's name, if no regexp name is provided
+ if mre.Name == "" {
+ mre.Name = d.GetContextString(caddyfile.MatcherNameCtxKey)
+ }
+
if d.NextBlock(0) {
return d.Err("malformed path_regexp matcher: blocks are not supported")
}
@@ -839,84 +1541,90 @@ func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
return nil
}
-// ResponseMatcher is a type which can determine if an
-// HTTP response matches some criteria.
-type ResponseMatcher struct {
- // If set, one of these status codes would be required.
- // A one-digit status can be used to represent all codes
- // in that class (e.g. 3 for all 3xx codes).
- StatusCode []int `json:"status_code,omitempty"`
+// ParseCaddyfileNestedMatcherSet parses the Caddyfile tokens for a nested
+// matcher set, and returns its raw module map value.
+func ParseCaddyfileNestedMatcherSet(d *caddyfile.Dispenser) (caddy.ModuleMap, error) {
+ matcherMap := make(map[string]any)
- // If set, each header specified must be one of the specified values.
- Headers http.Header `json:"headers,omitempty"`
-}
-
-// Match returns true if the given statusCode and hdr match rm.
-func (rm ResponseMatcher) Match(statusCode int, hdr http.Header) bool {
- if !rm.matchStatusCode(statusCode) {
- return false
+ // in case there are multiple instances of the same matcher, concatenate
+ // their tokens (we expect that UnmarshalCaddyfile should be able to
+ // handle more than one segment); otherwise, we'd overwrite other
+ // instances of the matcher in this set
+ tokensByMatcherName := make(map[string][]caddyfile.Token)
+ for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
+ matcherName := d.Val()
+ tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
}
- return rm.matchHeaders(hdr)
-}
-func (rm ResponseMatcher) matchStatusCode(statusCode int) bool {
- if rm.StatusCode == nil {
- return true
- }
- for _, code := range rm.StatusCode {
- if StatusCodeMatches(statusCode, code) {
- return true
+ for matcherName, tokens := range tokensByMatcherName {
+ mod, err := caddy.GetModule("http.matchers." + matcherName)
+ if err != nil {
+ return nil, d.Errf("getting matcher module '%s': %v", matcherName, err)
}
- }
- return false
-}
-
-func (rm ResponseMatcher) matchHeaders(hdr http.Header) bool {
- for field, allowedFieldVals := range rm.Headers {
- actualFieldVals, fieldExists := hdr[textproto.CanonicalMIMEHeaderKey(field)]
- if allowedFieldVals != nil && len(allowedFieldVals) == 0 && fieldExists {
- // a non-nil but empty list of allowed values means
- // match if the header field exists at all
+ unm, ok := mod.New().(caddyfile.Unmarshaler)
+ if !ok {
+ return nil, d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
+ }
+ err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
+ if err != nil {
+ return nil, err
+ }
+ if rm, ok := unm.(RequestMatcherWithError); ok {
+ matcherMap[matcherName] = rm
continue
}
- var match bool
- fieldVals:
- for _, actualFieldVal := range actualFieldVals {
- for _, allowedFieldVal := range allowedFieldVals {
- if actualFieldVal == allowedFieldVal {
- match = true
- break fieldVals
- }
- }
- }
- if !match {
- return false
+ if rm, ok := unm.(RequestMatcher); ok {
+ matcherMap[matcherName] = rm
+ continue
}
+ return nil, fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
}
- return true
+
+ // we should now have a functional matcher, but we also
+ // need to be able to marshal as JSON, otherwise config
+ // adaptation will be missing the matchers!
+ matcherSet := make(caddy.ModuleMap)
+ for name, matcher := range matcherMap {
+ jsonBytes, err := json.Marshal(matcher)
+ if err != nil {
+ return nil, fmt.Errorf("marshaling %T matcher: %v", matcher, err)
+ }
+ matcherSet[name] = jsonBytes
+ }
+
+ return matcherSet, nil
}
var wordRE = regexp.MustCompile(`\w+`)
const regexpPlaceholderPrefix = "http.regexp"
+// MatcherErrorVarKey is the key used for the variable that
+// holds an optional error emitted from a request matcher,
+// to short-circuit the handler chain, since matchers cannot
+// return errors via the RequestMatcher interface.
+//
+// Deprecated: Matchers should implement RequestMatcherWithError
+// which can return an error directly, instead of smuggling it
+// through the vars map.
+const MatcherErrorVarKey = "matchers.error"
+
// Interface guards
var (
- _ RequestMatcher = (*MatchHost)(nil)
- _ RequestMatcher = (*MatchPath)(nil)
- _ RequestMatcher = (*MatchPathRE)(nil)
- _ caddy.Provisioner = (*MatchPathRE)(nil)
- _ RequestMatcher = (*MatchMethod)(nil)
- _ RequestMatcher = (*MatchQuery)(nil)
- _ RequestMatcher = (*MatchHeader)(nil)
- _ RequestMatcher = (*MatchHeaderRE)(nil)
- _ caddy.Provisioner = (*MatchHeaderRE)(nil)
- _ RequestMatcher = (*MatchProtocol)(nil)
- _ RequestMatcher = (*MatchRemoteIP)(nil)
- _ caddy.Provisioner = (*MatchRemoteIP)(nil)
- _ RequestMatcher = (*MatchNot)(nil)
- _ caddy.Provisioner = (*MatchNot)(nil)
- _ caddy.Provisioner = (*MatchRegexp)(nil)
+ _ RequestMatcherWithError = (*MatchHost)(nil)
+ _ caddy.Provisioner = (*MatchHost)(nil)
+ _ RequestMatcherWithError = (*MatchPath)(nil)
+ _ RequestMatcherWithError = (*MatchPathRE)(nil)
+ _ caddy.Provisioner = (*MatchPathRE)(nil)
+ _ RequestMatcherWithError = (*MatchMethod)(nil)
+ _ RequestMatcherWithError = (*MatchQuery)(nil)
+ _ RequestMatcherWithError = (*MatchHeader)(nil)
+ _ RequestMatcherWithError = (*MatchHeaderRE)(nil)
+ _ caddy.Provisioner = (*MatchHeaderRE)(nil)
+ _ RequestMatcherWithError = (*MatchProtocol)(nil)
+ _ RequestMatcherWithError = (*MatchNot)(nil)
+ _ caddy.Provisioner = (*MatchNot)(nil)
+ _ caddy.Provisioner = (*MatchRegexp)(nil)
_ caddyfile.Unmarshaler = (*MatchHost)(nil)
_ caddyfile.Unmarshaler = (*MatchPath)(nil)
@@ -926,7 +1634,19 @@ var (
_ caddyfile.Unmarshaler = (*MatchHeader)(nil)
_ caddyfile.Unmarshaler = (*MatchHeaderRE)(nil)
_ caddyfile.Unmarshaler = (*MatchProtocol)(nil)
- _ caddyfile.Unmarshaler = (*MatchRemoteIP)(nil)
+ _ caddyfile.Unmarshaler = (*VarsMatcher)(nil)
+ _ caddyfile.Unmarshaler = (*MatchVarsRE)(nil)
+
+ _ CELLibraryProducer = (*MatchHost)(nil)
+ _ CELLibraryProducer = (*MatchPath)(nil)
+ _ CELLibraryProducer = (*MatchPathRE)(nil)
+ _ CELLibraryProducer = (*MatchMethod)(nil)
+ _ CELLibraryProducer = (*MatchQuery)(nil)
+ _ CELLibraryProducer = (*MatchHeader)(nil)
+ _ CELLibraryProducer = (*MatchHeaderRE)(nil)
+ _ CELLibraryProducer = (*MatchProtocol)(nil)
+ _ CELLibraryProducer = (*VarsMatcher)(nil)
+ _ CELLibraryProducer = (*MatchVarsRE)(nil)
_ json.Marshaler = (*MatchNot)(nil)
_ json.Unmarshaler = (*MatchNot)(nil)
diff --git a/modules/caddyhttp/matchers_test.go b/modules/caddyhttp/matchers_test.go
index 021bb983..f7be6909 100644
--- a/modules/caddyhttp/matchers_test.go
+++ b/modules/caddyhttp/matchers_test.go
@@ -21,6 +21,7 @@ import (
"net/http/httptest"
"net/url"
"os"
+ "runtime"
"testing"
"github.com/caddyserver/caddy/v2"
@@ -77,6 +78,11 @@ func TestHostMatcher(t *testing.T) {
input: "bar.example.com",
expect: false,
},
+ {
+ match: MatchHost{"éxàmplê.com"},
+ input: "xn--xmpl-0na6cm.com",
+ expect: true,
+ },
{
match: MatchHost{"*.example.com"},
input: "example.com",
@@ -122,6 +128,11 @@ func TestHostMatcher(t *testing.T) {
input: "sub.foo.example.net",
expect: false,
},
+ {
+ match: MatchHost{"www.*.*"},
+ input: "www.example.com",
+ expect: true,
+ },
{
match: MatchHost{"example.com"},
input: "example.com:5555",
@@ -143,7 +154,14 @@ func TestHostMatcher(t *testing.T) {
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
req = req.WithContext(ctx)
- actual := tc.match.Match(req)
+ if err := tc.match.Provision(caddy.Context{}); err != nil {
+ t.Errorf("Test %d %v: provisioning failed: %v", i, tc.match, err)
+ }
+
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
if actual != tc.expect {
t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input)
continue
@@ -153,9 +171,10 @@ func TestHostMatcher(t *testing.T) {
func TestPathMatcher(t *testing.T) {
for i, tc := range []struct {
- match MatchPath
- input string
- expect bool
+ match MatchPath // not URI-encoded because not parsing from a URI
+ input string // should be valid URI encoding (escaped) since it will become part of a request
+ expect bool
+ provisionErr bool
}{
{
match: MatchPath{},
@@ -252,6 +271,71 @@ func TestPathMatcher(t *testing.T) {
input: "/foo/BAR.txt",
expect: true,
},
+ {
+ match: MatchPath{"/foo*"},
+ input: "//foo/bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo"},
+ input: "//foo",
+ expect: true,
+ },
+ {
+ match: MatchPath{"//foo"},
+ input: "/foo",
+ expect: false,
+ },
+ {
+ match: MatchPath{"//foo"},
+ input: "//foo",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo//*"},
+ input: "/foo//bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo//*"},
+ input: "/foo/%2Fbar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/%2F*"},
+ input: "/foo/%2Fbar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/%2F*"},
+ input: "/foo//bar",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/foo//bar"},
+ input: "/foo//bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/*//bar"},
+ input: "/foo///bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/%*//bar"},
+ input: "/foo///bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/%*//bar"},
+ input: "/foo//%2Fbar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo*"},
+ input: "/%2F/foo",
+ expect: true,
+ },
{
match: MatchPath{"*"},
input: "/",
@@ -272,13 +356,87 @@ func TestPathMatcher(t *testing.T) {
input: "/foo/bar",
expect: true,
},
+ // notice these next three test cases are the same normalized path but are written differently
+ {
+ match: MatchPath{"/%25@.txt"},
+ input: "/%25@.txt",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/%25@.txt"},
+ input: "/%25%40.txt",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/%25%40.txt"},
+ input: "/%25%40.txt",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/bands/*/*"},
+ input: "/bands/AC%2FDC/T.N.T",
+ expect: false, // because * operates in normalized space
+ },
+ {
+ match: MatchPath{"/bands/%*/%*"},
+ input: "/bands/AC%2FDC/T.N.T",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/bands/%*/%*"},
+ input: "/bands/AC/DC/T.N.T",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/bands/%*"},
+ input: "/bands/AC/DC",
+ expect: false, // not a suffix match
+ },
+ {
+ match: MatchPath{"/bands/%*"},
+ input: "/bands/AC%2FDC",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo%2fbar/baz"},
+ input: "/foo%2Fbar/baz",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo%2fbar/baz"},
+ input: "/foo/bar/baz",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/foo/bar/baz"},
+ input: "/foo%2fbar/baz",
+ expect: true,
+ },
} {
- req := &http.Request{URL: &url.URL{Path: tc.input}}
+ err := tc.match.Provision(caddy.Context{})
+ if err == nil && tc.provisionErr {
+ t.Errorf("Test %d %v: Expected error provisioning, but there was no error", i, tc.match)
+ }
+ if err != nil && !tc.provisionErr {
+ t.Errorf("Test %d %v: Expected no error provisioning, but there was an error: %v", i, tc.match, err)
+ }
+ if tc.provisionErr {
+ continue // if it's not supposed to provision properly, pointless to test it
+ }
+
+ u, err := url.ParseRequestURI(tc.input)
+ if err != nil {
+ t.Fatalf("Test %d (%v): Invalid request URI (should be rejected by Go's HTTP server): %v", i, tc.input, err)
+ }
+ req := &http.Request{URL: u}
repl := caddy.NewReplacer()
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
req = req.WithContext(ctx)
- actual := tc.match.Match(req)
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
if actual != tc.expect {
t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input)
continue
@@ -288,8 +446,10 @@ func TestPathMatcher(t *testing.T) {
func TestPathMatcherWindows(t *testing.T) {
// only Windows has this bug where it will ignore
- // trailing dots and spaces in a filename, but we
- // test for it on all platforms to be more consistent
+ // trailing dots and spaces in a filename
+ if runtime.GOOS != "windows" {
+ return
+ }
req := &http.Request{URL: &url.URL{Path: "/index.php . . .."}}
repl := caddy.NewReplacer()
@@ -297,7 +457,10 @@ func TestPathMatcherWindows(t *testing.T) {
req = req.WithContext(ctx)
match := MatchPath{"*.php"}
- matched := match.Match(req)
+ matched, err := match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Expected no error, but got: %v", err)
+ }
if !matched {
t.Errorf("Expected to match; should ignore trailing dots and spaces")
}
@@ -321,15 +484,30 @@ func TestPathREMatcher(t *testing.T) {
expect: true,
},
{
- match: MatchPathRE{MatchRegexp{Pattern: "/foo"}},
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}},
input: "/foo",
expect: true,
},
{
- match: MatchPathRE{MatchRegexp{Pattern: "/foo"}},
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}},
input: "/foo/",
expect: true,
},
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}},
+ input: "//foo",
+ expect: true,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}},
+ input: "//foo/",
+ expect: true,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}},
+ input: "/%2F/foo/",
+ expect: true,
+ },
{
match: MatchPathRE{MatchRegexp{Pattern: "/bar"}},
input: "/foo/",
@@ -352,6 +530,16 @@ func TestPathREMatcher(t *testing.T) {
expect: true,
expectRepl: map[string]string{"name.myparam": "bar"},
},
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/%@.txt"}},
+ input: "/%25@.txt",
+ expect: true,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/%25@.txt"}},
+ input: "/%25@.txt",
+ expect: false,
+ },
} {
// compile the regexp and validate its name
err := tc.match.Provision(caddy.Context{})
@@ -366,13 +554,20 @@ func TestPathREMatcher(t *testing.T) {
}
// set up the fake request and its Replacer
- req := &http.Request{URL: &url.URL{Path: tc.input}}
+ u, err := url.ParseRequestURI(tc.input)
+ if err != nil {
+ t.Fatalf("Test %d: Bad input URI: %v", i, err)
+ }
+ req := &http.Request{URL: u}
repl := caddy.NewReplacer()
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
req = req.WithContext(ctx)
addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
- actual := tc.match.Match(req)
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
if actual != tc.expect {
t.Errorf("Test %d [%v]: Expected %t, got %t for input '%s'",
i, tc.match.Pattern, tc.expect, actual, tc.input)
@@ -392,6 +587,9 @@ func TestPathREMatcher(t *testing.T) {
}
func TestHeaderMatcher(t *testing.T) {
+ repl := caddy.NewReplacer()
+ repl.Set("a", "foobar")
+
for i, tc := range []struct {
match MatchHeader
input http.Header // make sure these are canonical cased (std lib will do that in a real request)
@@ -448,6 +646,21 @@ func TestHeaderMatcher(t *testing.T) {
input: http.Header{"Field2": []string{"foo"}},
expect: false,
},
+ {
+ match: MatchHeader{"Field1": []string{"foo*"}},
+ input: http.Header{"Field1": []string{"foo"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"Field1": []string{"foo*"}},
+ input: http.Header{"Field1": []string{"asdf", "foobar"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"Field1": []string{"*bar"}},
+ input: http.Header{"Field1": []string{"asdf", "foobar"}},
+ expect: true,
+ },
{
match: MatchHeader{"host": []string{"localhost"}},
input: http.Header{},
@@ -460,9 +673,40 @@ func TestHeaderMatcher(t *testing.T) {
host: "caddyserver.com",
expect: false,
},
+ {
+ match: MatchHeader{"Must-Not-Exist": nil},
+ input: http.Header{},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"Must-Not-Exist": nil},
+ input: http.Header{"Must-Not-Exist": []string{"do not match"}},
+ expect: false,
+ },
+ {
+ match: MatchHeader{"Foo": []string{"{a}"}},
+ input: http.Header{"Foo": []string{"foobar"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"Foo": []string{"{a}"}},
+ input: http.Header{"Foo": []string{"asdf"}},
+ expect: false,
+ },
+ {
+ match: MatchHeader{"Foo": []string{"{a}*"}},
+ input: http.Header{"Foo": []string{"foobar-baz"}},
+ expect: true,
+ },
} {
req := &http.Request{Header: tc.input, Host: tc.host}
- actual := tc.match.Match(req)
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
if actual != tc.expect {
t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input)
continue
@@ -513,12 +757,86 @@ func TestQueryMatcher(t *testing.T) {
input: "/?someparam",
expect: false,
},
+ {
+ scenario: "empty matcher value should match empty query",
+ match: MatchQuery{},
+ input: "/?",
+ expect: true,
+ },
+ {
+ scenario: "nil matcher value should NOT match a non-empty query",
+ match: MatchQuery{},
+ input: "/?foo=bar",
+ expect: false,
+ },
+ {
+ scenario: "non-nil matcher should NOT match an empty query",
+ match: MatchQuery{"": nil},
+ input: "/?",
+ expect: false,
+ },
+ {
+ scenario: "match against a placeholder value",
+ match: MatchQuery{"debug": []string{"{http.vars.debug}"}},
+ input: "/?debug=1",
+ expect: true,
+ },
+ {
+ scenario: "match against a placeholder key",
+ match: MatchQuery{"{http.vars.key}": []string{"1"}},
+ input: "/?somekey=1",
+ expect: true,
+ },
+ {
+ scenario: "do not match when not all query params are present",
+ match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}},
+ input: "/?debug=1",
+ expect: false,
+ },
+ {
+ scenario: "match when all query params are present",
+ match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}},
+ input: "/?debug=1&foo=bar",
+ expect: true,
+ },
+ {
+ scenario: "do not match when the value of a query param does not match",
+ match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}},
+ input: "/?debug=2&foo=bar",
+ expect: false,
+ },
+ {
+ scenario: "do not match when all the values the query params do not match",
+ match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}},
+ input: "/?debug=2&foo=baz",
+ expect: false,
+ },
+ {
+ scenario: "match against two values for the same key",
+ match: MatchQuery{"debug": []string{"1"}},
+ input: "/?debug=1&debug=2",
+ expect: true,
+ },
+ {
+ scenario: "match against two values for the same key",
+ match: MatchQuery{"debug": []string{"2", "1"}},
+ input: "/?debug=2&debug=1",
+ expect: true,
+ },
} {
u, _ := url.Parse(tc.input)
req := &http.Request{URL: u}
- actual := tc.match.Match(req)
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ repl.Set("http.vars.debug", "1")
+ repl.Set("http.vars.key", "somekey")
+ req = req.WithContext(ctx)
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
if actual != tc.expect {
t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input)
continue
@@ -587,7 +905,10 @@ func TestHeaderREMatcher(t *testing.T) {
req = req.WithContext(ctx)
addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
- actual := tc.match.Match(req)
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
if actual != tc.expect {
t.Errorf("Test %d [%v]: Expected %t, got %t for input '%s'",
i, tc.match, tc.expect, actual, tc.input)
@@ -606,6 +927,31 @@ func TestHeaderREMatcher(t *testing.T) {
}
}
+func BenchmarkHeaderREMatcher(b *testing.B) {
+ i := 0
+ match := MatchHeaderRE{"Field": &MatchRegexp{Pattern: "^foo(.*)$", Name: "name"}}
+ input := http.Header{"Field": []string{"foobar"}}
+ var host string
+ err := match.Provision(caddy.Context{})
+ if err != nil {
+ b.Errorf("Test %d %v: Provisioning: %v", i, match, err)
+ }
+ err = match.Validate()
+ if err != nil {
+ b.Errorf("Test %d %v: Validating: %v", i, match, err)
+ }
+
+ // set up the fake request and its Replacer
+ req := &http.Request{Header: input, URL: new(url.URL), Host: host}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+ addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
+ for run := 0; run < b.N; run++ {
+ match.MatchWithError(req)
+ }
+}
+
func TestVarREMatcher(t *testing.T) {
for i, tc := range []struct {
desc string
@@ -646,6 +992,7 @@ func TestVarREMatcher(t *testing.T) {
expect: true,
},
} {
+ i := i // capture range value
tc := tc // capture range value
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
@@ -665,14 +1012,17 @@ func TestVarREMatcher(t *testing.T) {
req := &http.Request{URL: new(url.URL), Method: http.MethodGet}
repl := caddy.NewReplacer()
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
- ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]interface{}))
+ ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]any))
req = req.WithContext(ctx)
addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
tc.input.ServeHTTP(httptest.NewRecorder(), req, emptyHandler)
- actual := tc.match.Match(req)
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
if actual != tc.expect {
t.Errorf("Test %d [%v]: Expected %t, got %t for input '%s'",
i, tc.match, tc.expect, actual, tc.input)
@@ -692,137 +1042,6 @@ func TestVarREMatcher(t *testing.T) {
}
}
-func TestResponseMatcher(t *testing.T) {
- for i, tc := range []struct {
- require ResponseMatcher
- status int
- hdr http.Header // make sure these are canonical cased (std lib will do that in a real request)
- expect bool
- }{
- {
- require: ResponseMatcher{},
- status: 200,
- expect: true,
- },
- {
- require: ResponseMatcher{
- StatusCode: []int{200},
- },
- status: 200,
- expect: true,
- },
- {
- require: ResponseMatcher{
- StatusCode: []int{2},
- },
- status: 200,
- expect: true,
- },
- {
- require: ResponseMatcher{
- StatusCode: []int{201},
- },
- status: 200,
- expect: false,
- },
- {
- require: ResponseMatcher{
- StatusCode: []int{2},
- },
- status: 301,
- expect: false,
- },
- {
- require: ResponseMatcher{
- StatusCode: []int{3},
- },
- status: 301,
- expect: true,
- },
- {
- require: ResponseMatcher{
- StatusCode: []int{3},
- },
- status: 399,
- expect: true,
- },
- {
- require: ResponseMatcher{
- StatusCode: []int{3},
- },
- status: 400,
- expect: false,
- },
- {
- require: ResponseMatcher{
- StatusCode: []int{3, 4},
- },
- status: 400,
- expect: true,
- },
- {
- require: ResponseMatcher{
- StatusCode: []int{3, 401},
- },
- status: 401,
- expect: true,
- },
- {
- require: ResponseMatcher{
- Headers: http.Header{
- "Foo": []string{"bar"},
- },
- },
- hdr: http.Header{"Foo": []string{"bar"}},
- expect: true,
- },
- {
- require: ResponseMatcher{
- Headers: http.Header{
- "Foo2": []string{"bar"},
- },
- },
- hdr: http.Header{"Foo": []string{"bar"}},
- expect: false,
- },
- {
- require: ResponseMatcher{
- Headers: http.Header{
- "Foo": []string{"bar", "baz"},
- },
- },
- hdr: http.Header{"Foo": []string{"baz"}},
- expect: true,
- },
- {
- require: ResponseMatcher{
- Headers: http.Header{
- "Foo": []string{"bar"},
- "Foo2": []string{"baz"},
- },
- },
- hdr: http.Header{"Foo": []string{"baz"}},
- expect: false,
- },
- {
- require: ResponseMatcher{
- Headers: http.Header{
- "Foo": []string{"bar"},
- "Foo2": []string{"baz"},
- },
- },
- hdr: http.Header{"Foo": []string{"bar"}, "Foo2": []string{"baz"}},
- expect: true,
- },
- } {
- actual := tc.require.Match(tc.status, tc.hdr)
- if actual != tc.expect {
- t.Errorf("Test %d %v: Expected %t, got %t for HTTP %d %v", i, tc.require, tc.expect, actual, tc.status, tc.hdr)
- continue
- }
- }
-}
-
func TestNotMatcher(t *testing.T) {
for i, tc := range []struct {
host, path string
@@ -928,7 +1147,10 @@ func TestNotMatcher(t *testing.T) {
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
req = req.WithContext(ctx)
- actual := tc.match.Match(req)
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
if actual != tc.expect {
t.Errorf("Test %d %+v: Expected %t, got %t for: host=%s path=%s'", i, tc.match, tc.expect, actual, tc.host, tc.path)
continue
@@ -936,6 +1158,34 @@ func TestNotMatcher(t *testing.T) {
}
}
+func BenchmarkLargeHostMatcher(b *testing.B) {
+ // this benchmark simulates a large host matcher (thousands of entries) where each
+ // value is an exact hostname (not a placeholder or wildcard) - compare the results
+ // of this with and without the binary search (comment out the various fast path
+ // sections in Match) to conduct experiments
+
+ const n = 10000
+ lastHost := fmt.Sprintf("%d.example.com", n-1)
+ req := &http.Request{Host: lastHost}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ matcher := make(MatchHost, n)
+ for i := 0; i < n; i++ {
+ matcher[i] = fmt.Sprintf("%d.example.com", i)
+ }
+ err := matcher.Provision(caddy.Context{})
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ matcher.MatchWithError(req)
+ }
+}
+
func BenchmarkHostMatcherWithoutPlaceholder(b *testing.B) {
req := &http.Request{Host: "localhost"}
repl := caddy.NewReplacer()
@@ -946,7 +1196,7 @@ func BenchmarkHostMatcherWithoutPlaceholder(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
- match.Match(req)
+ match.MatchWithError(req)
}
}
@@ -964,6 +1214,6 @@ func BenchmarkHostMatcherWithPlaceholder(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
- match.Match(req)
+ match.MatchWithError(req)
}
}
diff --git a/modules/caddyhttp/metrics.go b/modules/caddyhttp/metrics.go
new file mode 100644
index 00000000..9bb97e0b
--- /dev/null
+++ b/modules/caddyhttp/metrics.go
@@ -0,0 +1,214 @@
+package caddyhttp
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/internal/metrics"
+)
+
+// Metrics configures metrics observations.
+// EXPERIMENTAL and subject to change or removal.
+type Metrics struct {
+ // Enable per-host metrics. Enabling this option may
+ // incur high-memory consumption, depending on the number of hosts
+ // managed by Caddy.
+ PerHost bool `json:"per_host,omitempty"`
+
+ init sync.Once
+ httpMetrics *httpMetrics `json:"-"`
+}
+
+type httpMetrics struct {
+ requestInFlight *prometheus.GaugeVec
+ requestCount *prometheus.CounterVec
+ requestErrors *prometheus.CounterVec
+ requestDuration *prometheus.HistogramVec
+ requestSize *prometheus.HistogramVec
+ responseSize *prometheus.HistogramVec
+ responseDuration *prometheus.HistogramVec
+}
+
+func initHTTPMetrics(ctx caddy.Context, metrics *Metrics) {
+ const ns, sub = "caddy", "http"
+ registry := ctx.GetMetricsRegistry()
+ basicLabels := []string{"server", "handler"}
+ if metrics.PerHost {
+ basicLabels = append(basicLabels, "host")
+ }
+ metrics.httpMetrics.requestInFlight = promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: ns,
+ Subsystem: sub,
+ Name: "requests_in_flight",
+ Help: "Number of requests currently handled by this server.",
+ }, basicLabels)
+ metrics.httpMetrics.requestErrors = promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
+ Namespace: ns,
+ Subsystem: sub,
+ Name: "request_errors_total",
+ Help: "Number of requests resulting in middleware errors.",
+ }, basicLabels)
+ metrics.httpMetrics.requestCount = promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
+ Namespace: ns,
+ Subsystem: sub,
+ Name: "requests_total",
+ Help: "Counter of HTTP(S) requests made.",
+ }, basicLabels)
+
+ // TODO: allow these to be customized in the config
+ durationBuckets := prometheus.DefBuckets
+ sizeBuckets := prometheus.ExponentialBuckets(256, 4, 8)
+
+ httpLabels := []string{"server", "handler", "code", "method"}
+ if metrics.PerHost {
+ httpLabels = append(httpLabels, "host")
+ }
+ metrics.httpMetrics.requestDuration = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: ns,
+ Subsystem: sub,
+ Name: "request_duration_seconds",
+ Help: "Histogram of round-trip request durations.",
+ Buckets: durationBuckets,
+ }, httpLabels)
+ metrics.httpMetrics.requestSize = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: ns,
+ Subsystem: sub,
+ Name: "request_size_bytes",
+ Help: "Total size of the request. Includes body",
+ Buckets: sizeBuckets,
+ }, httpLabels)
+ metrics.httpMetrics.responseSize = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: ns,
+ Subsystem: sub,
+ Name: "response_size_bytes",
+ Help: "Size of the returned response.",
+ Buckets: sizeBuckets,
+ }, httpLabels)
+ metrics.httpMetrics.responseDuration = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: ns,
+ Subsystem: sub,
+ Name: "response_duration_seconds",
+ Help: "Histogram of times to first byte in response bodies.",
+ Buckets: durationBuckets,
+ }, httpLabels)
+}
+
+// serverNameFromContext extracts the current server name from the context.
+// Returns "UNKNOWN" if none is available (should probably never happen).
+func serverNameFromContext(ctx context.Context) string {
+ srv, ok := ctx.Value(ServerCtxKey).(*Server)
+ if !ok || srv == nil || srv.name == "" {
+ return "UNKNOWN"
+ }
+ return srv.name
+}
+
+type metricsInstrumentedHandler struct {
+ handler string
+ mh MiddlewareHandler
+ metrics *Metrics
+}
+
+func newMetricsInstrumentedHandler(ctx caddy.Context, handler string, mh MiddlewareHandler, metrics *Metrics) *metricsInstrumentedHandler {
+ metrics.init.Do(func() {
+ initHTTPMetrics(ctx, metrics)
+ })
+
+ return &metricsInstrumentedHandler{handler, mh, metrics}
+}
+
+func (h *metricsInstrumentedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
+ server := serverNameFromContext(r.Context())
+ labels := prometheus.Labels{"server": server, "handler": h.handler}
+ method := metrics.SanitizeMethod(r.Method)
+ // the "code" value is set later, but initialized here to eliminate the possibility
+ // of a panic
+ statusLabels := prometheus.Labels{"server": server, "handler": h.handler, "method": method, "code": ""}
+
+ if h.metrics.PerHost {
+ labels["host"] = strings.ToLower(r.Host)
+ statusLabels["host"] = strings.ToLower(r.Host)
+ }
+
+ inFlight := h.metrics.httpMetrics.requestInFlight.With(labels)
+ inFlight.Inc()
+ defer inFlight.Dec()
+
+ start := time.Now()
+
+ // This is a _bit_ of a hack - it depends on the ShouldBufferFunc always
+ // being called when the headers are written.
+ // Effectively the same behaviour as promhttp.InstrumentHandlerTimeToWriteHeader.
+ writeHeaderRecorder := ShouldBufferFunc(func(status int, header http.Header) bool {
+ statusLabels["code"] = metrics.SanitizeCode(status)
+ ttfb := time.Since(start).Seconds()
+ h.metrics.httpMetrics.responseDuration.With(statusLabels).Observe(ttfb)
+ return false
+ })
+ wrec := NewResponseRecorder(w, nil, writeHeaderRecorder)
+ err := h.mh.ServeHTTP(wrec, r, next)
+ dur := time.Since(start).Seconds()
+ h.metrics.httpMetrics.requestCount.With(labels).Inc()
+
+ observeRequest := func(status int) {
+ // If the code hasn't been set yet, and we didn't encounter an error, we're
+ // probably falling through with an empty handler.
+ if statusLabels["code"] == "" {
+ // we still sanitize it, even though it's likely to be 0. A 200 is
+ // returned on fallthrough so we want to reflect that.
+ statusLabels["code"] = metrics.SanitizeCode(status)
+ }
+
+ h.metrics.httpMetrics.requestDuration.With(statusLabels).Observe(dur)
+ h.metrics.httpMetrics.requestSize.With(statusLabels).Observe(float64(computeApproximateRequestSize(r)))
+ h.metrics.httpMetrics.responseSize.With(statusLabels).Observe(float64(wrec.Size()))
+ }
+
+ if err != nil {
+ var handlerErr HandlerError
+ if errors.As(err, &handlerErr) {
+ observeRequest(handlerErr.StatusCode)
+ }
+
+ h.metrics.httpMetrics.requestErrors.With(labels).Inc()
+
+ return err
+ }
+
+ observeRequest(wrec.Status())
+
+ return nil
+}
+
+// taken from https://github.com/prometheus/client_golang/blob/6007b2b5cae01203111de55f753e76d8dac1f529/prometheus/promhttp/instrument_server.go#L298
+func computeApproximateRequestSize(r *http.Request) int {
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ return s
+}
diff --git a/modules/caddyhttp/metrics_test.go b/modules/caddyhttp/metrics_test.go
new file mode 100644
index 00000000..4a0519b8
--- /dev/null
+++ b/modules/caddyhttp/metrics_test.go
@@ -0,0 +1,385 @@
+package caddyhttp
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/prometheus/client_golang/prometheus/testutil"
+)
+
+func TestServerNameFromContext(t *testing.T) {
+ ctx := context.Background()
+ expected := "UNKNOWN"
+ if actual := serverNameFromContext(ctx); actual != expected {
+ t.Errorf("Not equal: expected %q, but got %q", expected, actual)
+ }
+
+ in := "foo"
+ ctx = context.WithValue(ctx, ServerCtxKey, &Server{name: in})
+ if actual := serverNameFromContext(ctx); actual != in {
+ t.Errorf("Not equal: expected %q, but got %q", in, actual)
+ }
+}
+
+// TestMetricsInstrumentedHandler exercises newMetricsInstrumentedHandler:
+// the in-flight gauge while a handler runs, propagation of handler errors,
+// the fallthrough (empty handler) case, and the exact metric families
+// gathered from the context's registry afterwards.
+func TestMetricsInstrumentedHandler(t *testing.T) {
+	ctx, _ := caddy.NewContext(caddy.Context{Context: context.Background()})
+	metrics := &Metrics{
+		init:        sync.Once{},
+		httpMetrics: &httpMetrics{},
+	}
+	// handlerErr is reset to nil below to switch from the error path
+	// to the success path.
+	handlerErr := errors.New("oh noes")
+	response := []byte("hello world!")
+	h := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
+		// the request must be counted as in-flight while the handler runs
+		if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 1.0 {
+			t.Errorf("Not same: expected %#v, but got %#v", 1.0, actual)
+		}
+		if handlerErr == nil {
+			w.Write(response)
+		}
+		return handlerErr
+	})
+
+	mh := middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return h.ServeHTTP(w, r)
+	})
+
+	ih := newMetricsInstrumentedHandler(ctx, "bar", mh, metrics)
+
+	r := httptest.NewRequest("GET", "/", nil)
+	w := httptest.NewRecorder()
+
+	// the handler's error must be returned unchanged...
+	if actual := ih.ServeHTTP(w, r, h); actual != handlerErr {
+		t.Errorf("Not same: expected %#v, but got %#v", handlerErr, actual)
+	}
+	// ...and the in-flight gauge must drop back to zero afterwards
+	if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 0.0 {
+		t.Errorf("Not same: expected %#v, but got %#v", 0.0, actual)
+	}
+
+	handlerErr = nil
+	if err := ih.ServeHTTP(w, r, h); err != nil {
+		t.Errorf("Received unexpected error: %v", err)
+	}
+
+	// an empty handler - no errors, no header written
+	mh = middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return nil
+	})
+	ih = newMetricsInstrumentedHandler(ctx, "empty", mh, metrics)
+	r = httptest.NewRequest("GET", "/", nil)
+	w = httptest.NewRecorder()
+
+	if err := ih.ServeHTTP(w, r, h); err != nil {
+		t.Errorf("Received unexpected error: %v", err)
+	}
+	// fallthrough must be reported as a 200 with no headers written
+	if actual := w.Result().StatusCode; actual != 200 {
+		t.Errorf("Not same: expected status code %#v, but got %#v", 200, actual)
+	}
+	if actual := w.Result().Header; len(actual) != 0 {
+		t.Errorf("Not empty: expected headers to be empty, but got %#v", actual)
+	}
+
+	// handler returning an error with an HTTP status
+	mh = middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return Error(http.StatusTooManyRequests, nil)
+	})
+
+	ih = newMetricsInstrumentedHandler(ctx, "foo", mh, metrics)
+
+	r = httptest.NewRequest("GET", "/", nil)
+	w = httptest.NewRecorder()
+
+	if err := ih.ServeHTTP(w, r, nil); err == nil {
+		t.Errorf("expected error to be propagated")
+	}
+
+	// the exact metric families expected after the requests above; the
+	// handler label distinguishes the three instrumented handlers.
+	expected := `
+	# HELP caddy_http_request_duration_seconds Histogram of round-trip request durations.
+	# TYPE caddy_http_request_duration_seconds histogram
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.005"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.01"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.025"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.05"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.1"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.25"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="2.5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="10"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_duration_seconds_count{code="429",handler="foo",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_request_size_bytes Total size of the request. Includes body
+	# TYPE caddy_http_request_size_bytes histogram
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="200",handler="bar",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="200",handler="bar",method="GET",server="UNKNOWN"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="200",handler="empty",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="200",handler="empty",method="GET",server="UNKNOWN"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="429",handler="foo",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="429",handler="foo",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_response_size_bytes Size of the returned response.
+	# TYPE caddy_http_response_size_bytes histogram
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="200",handler="bar",method="GET",server="UNKNOWN"} 12
+	caddy_http_response_size_bytes_count{code="200",handler="bar",method="GET",server="UNKNOWN"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="200",handler="empty",method="GET",server="UNKNOWN"} 0
+	caddy_http_response_size_bytes_count{code="200",handler="empty",method="GET",server="UNKNOWN"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="429",handler="foo",method="GET",server="UNKNOWN"} 0
+	caddy_http_response_size_bytes_count{code="429",handler="foo",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_request_errors_total Number of requests resulting in middleware errors.
+	# TYPE caddy_http_request_errors_total counter
+	caddy_http_request_errors_total{handler="bar",server="UNKNOWN"} 1
+	caddy_http_request_errors_total{handler="foo",server="UNKNOWN"} 1
+	`
+	if err := testutil.GatherAndCompare(ctx.GetMetricsRegistry(), strings.NewReader(expected),
+		"caddy_http_request_size_bytes",
+		"caddy_http_response_size_bytes",
+		// caddy_http_request_duration_seconds_sum will vary based on how long the test took to run,
+		// so we check just the _bucket and _count metrics
+		"caddy_http_request_duration_seconds_bucket",
+		"caddy_http_request_duration_seconds_count",
+		"caddy_http_request_errors_total",
+	); err != nil {
+		t.Errorf("received unexpected error: %s", err)
+	}
+}
+
+// TestMetricsInstrumentedHandlerPerHost repeats the scenarios of
+// TestMetricsInstrumentedHandler with Metrics.PerHost enabled, so every
+// gathered metric additionally carries a host label.
+func TestMetricsInstrumentedHandlerPerHost(t *testing.T) {
+	ctx, _ := caddy.NewContext(caddy.Context{Context: context.Background()})
+	metrics := &Metrics{
+		PerHost:     true,
+		init:        sync.Once{},
+		httpMetrics: &httpMetrics{},
+	}
+	// handlerErr is reset to nil below to switch from the error path
+	// to the success path.
+	handlerErr := errors.New("oh noes")
+	response := []byte("hello world!")
+	h := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
+		// the request must be counted as in-flight while the handler runs
+		if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 1.0 {
+			t.Errorf("Not same: expected %#v, but got %#v", 1.0, actual)
+		}
+		if handlerErr == nil {
+			w.Write(response)
+		}
+		return handlerErr
+	})
+
+	mh := middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return h.ServeHTTP(w, r)
+	})
+
+	ih := newMetricsInstrumentedHandler(ctx, "bar", mh, metrics)
+
+	r := httptest.NewRequest("GET", "/", nil)
+	w := httptest.NewRecorder()
+
+	// the handler's error must be returned unchanged...
+	if actual := ih.ServeHTTP(w, r, h); actual != handlerErr {
+		t.Errorf("Not same: expected %#v, but got %#v", handlerErr, actual)
+	}
+	// ...and the in-flight gauge must drop back to zero afterwards
+	if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 0.0 {
+		t.Errorf("Not same: expected %#v, but got %#v", 0.0, actual)
+	}
+
+	handlerErr = nil
+	if err := ih.ServeHTTP(w, r, h); err != nil {
+		t.Errorf("Received unexpected error: %v", err)
+	}
+
+	// an empty handler - no errors, no header written
+	mh = middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return nil
+	})
+	ih = newMetricsInstrumentedHandler(ctx, "empty", mh, metrics)
+	r = httptest.NewRequest("GET", "/", nil)
+	w = httptest.NewRecorder()
+
+	if err := ih.ServeHTTP(w, r, h); err != nil {
+		t.Errorf("Received unexpected error: %v", err)
+	}
+	// fallthrough must be reported as a 200 with no headers written
+	if actual := w.Result().StatusCode; actual != 200 {
+		t.Errorf("Not same: expected status code %#v, but got %#v", 200, actual)
+	}
+	if actual := w.Result().Header; len(actual) != 0 {
+		t.Errorf("Not empty: expected headers to be empty, but got %#v", actual)
+	}
+
+	// handler returning an error with an HTTP status
+	mh = middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return Error(http.StatusTooManyRequests, nil)
+	})
+
+	ih = newMetricsInstrumentedHandler(ctx, "foo", mh, metrics)
+
+	r = httptest.NewRequest("GET", "/", nil)
+	w = httptest.NewRecorder()
+
+	if err := ih.ServeHTTP(w, r, nil); err == nil {
+		t.Errorf("expected error to be propagated")
+	}
+
+	// the exact metric families expected after the requests above; note the
+	// host="example.com" label on every series, added because PerHost is set.
+	expected := `
+	# HELP caddy_http_request_duration_seconds Histogram of round-trip request durations.
+	# TYPE caddy_http_request_duration_seconds histogram
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.005"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.01"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.025"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.05"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.1"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.25"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="2.5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="10"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_duration_seconds_count{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_request_size_bytes Total size of the request. Includes body
+	# TYPE caddy_http_request_size_bytes histogram
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_response_size_bytes Size of the returned response.
+	# TYPE caddy_http_response_size_bytes histogram
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 12
+	caddy_http_response_size_bytes_count{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 0
+	caddy_http_response_size_bytes_count{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 0
+	caddy_http_response_size_bytes_count{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_request_errors_total Number of requests resulting in middleware errors.
+	# TYPE caddy_http_request_errors_total counter
+	caddy_http_request_errors_total{handler="bar",host="example.com",server="UNKNOWN"} 1
+	caddy_http_request_errors_total{handler="foo",host="example.com",server="UNKNOWN"} 1
+	`
+	if err := testutil.GatherAndCompare(ctx.GetMetricsRegistry(), strings.NewReader(expected),
+		"caddy_http_request_size_bytes",
+		"caddy_http_response_size_bytes",
+		// caddy_http_request_duration_seconds_sum will vary based on how long the test took to run,
+		// so we check just the _bucket and _count metrics
+		"caddy_http_request_duration_seconds_bucket",
+		"caddy_http_request_duration_seconds_count",
+		"caddy_http_request_errors_total",
+	); err != nil {
+		t.Errorf("received unexpected error: %s", err)
+	}
+}
+
+// middlewareHandlerFunc is an adapter that lets an ordinary function be used
+// as a middleware handler in these tests.
+type middlewareHandlerFunc func(http.ResponseWriter, *http.Request, Handler) error
+
+// ServeHTTP satisfies the middleware handler interface by calling f itself.
+func (f middlewareHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request, h Handler) error {
+	return f(w, r, h)
+}
diff --git a/modules/caddyhttp/proxyprotocol/listenerwrapper.go b/modules/caddyhttp/proxyprotocol/listenerwrapper.go
new file mode 100644
index 00000000..f1d170c3
--- /dev/null
+++ b/modules/caddyhttp/proxyprotocol/listenerwrapper.go
@@ -0,0 +1,144 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proxyprotocol
+
+import (
+ "net"
+ "net/netip"
+ "time"
+
+ goproxy "github.com/pires/go-proxyproto"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+// ListenerWrapper provides PROXY protocol support to Caddy by implementing
+// the caddy.ListenerWrapper interface. If a connection is received via Unix
+// socket, it's trusted. Otherwise, it's checked against the Allow/Deny lists,
+// then it's handled by the FallbackPolicy.
+//
+// It must be loaded before the `tls` listener because the PROXY protocol
+// encapsulates the TLS data.
+//
+// Credit goes to https://github.com/mastercactapus/caddy2-proxyprotocol for having
+// initially implemented this as a plugin.
+type ListenerWrapper struct {
+	// Timeout specifies an optional maximum time for
+	// the PROXY header to be received.
+	// If zero, timeout is disabled. Default is 5s.
+	Timeout caddy.Duration `json:"timeout,omitempty"`
+
+	// Allow is an optional list of CIDR ranges to
+	// allow/require PROXY headers from.
+	Allow []string `json:"allow,omitempty"`
+	// allow holds the parsed form of Allow, populated by Provision.
+	allow []netip.Prefix
+
+	// Deny is an optional list of CIDR ranges to
+	// deny PROXY headers from.
+	Deny []string `json:"deny,omitempty"`
+	// deny holds the parsed form of Deny, populated by Provision.
+	deny []netip.Prefix
+
+	// FallbackPolicy specifies the policy to use if the downstream
+	// IP address is not in the Allow list nor is in the Deny list.
+	//
+	// NOTE: The generated docs which describe the value of this
+	// field is wrong because of how this type unmarshals JSON in a
+	// custom way. The field expects a string, not a number.
+	//
+	// Accepted values are: IGNORE, USE, REJECT, REQUIRE, SKIP
+	//
+	// - IGNORE: address from PROXY header, but accept connection
+	//
+	// - USE: address from PROXY header
+	//
+	// - REJECT: connection when PROXY header is sent
+	// Note: even though the first read on the connection returns an error if
+	// a PROXY header is present, subsequent reads do not. It is the task of
+	// the code using the connection to handle that case properly.
+	//
+	// - REQUIRE: connection to send PROXY header, reject if not present
+	// Note: even though the first read on the connection returns an error if
+	// a PROXY header is not present, subsequent reads do not. It is the task
+	// of the code using the connection to handle that case properly.
+	//
+	// - SKIP: accepts a connection without requiring the PROXY header.
+	// Note: an example usage can be found in the SkipProxyHeaderForCIDR
+	// function.
+	//
+	// Default: IGNORE
+	//
+	// Policy definitions are here: https://pkg.go.dev/github.com/pires/go-proxyproto@v0.7.0#Policy
+	FallbackPolicy Policy `json:"fallback_policy,omitempty"`
+
+	// policy is the per-connection policy callback built by Provision.
+	policy goproxy.ConnPolicyFunc
+}
+
+// Provision sets up the listener wrapper: it parses the configured
+// Allow/Deny CIDR ranges and builds the per-connection policy callback
+// that WrapListener installs on the PROXY protocol listener.
+func (pp *ListenerWrapper) Provision(ctx caddy.Context) error {
+	// parse the CIDR ranges up front so the callback below doesn't have to
+	for _, cidr := range pp.Allow {
+		prefix, err := netip.ParsePrefix(cidr)
+		if err != nil {
+			return err
+		}
+		pp.allow = append(pp.allow, prefix)
+	}
+	for _, cidr := range pp.Deny {
+		prefix, err := netip.ParsePrefix(cidr)
+		if err != nil {
+			return err
+		}
+		pp.deny = append(pp.deny, prefix)
+	}
+
+	pp.policy = func(options goproxy.ConnPolicyOptions) (goproxy.Policy, error) {
+		// trust unix sockets
+		network := options.Upstream.Network()
+		if caddy.IsUnixNetwork(network) || caddy.IsFdNetwork(network) {
+			return goproxy.USE, nil
+		}
+
+		host, _, err := net.SplitHostPort(options.Upstream.String())
+		if err != nil {
+			return goproxy.REJECT, err
+		}
+		ip, err := netip.ParseAddr(host)
+		if err != nil {
+			return goproxy.REJECT, err
+		}
+
+		// deny takes precedence over allow
+		for _, prefix := range pp.deny {
+			if prefix.Contains(ip) {
+				return goproxy.REJECT, nil
+			}
+		}
+		policy := pp.FallbackPolicy
+		for _, prefix := range pp.allow {
+			if prefix.Contains(ip) {
+				policy = PolicyUSE
+				break
+			}
+		}
+		return policyToGoProxyPolicy[policy], nil
+	}
+	return nil
+}
+
+// WrapListener adds PROXY protocol support to the listener by wrapping it
+// in a go-proxyproto listener configured with the provisioned policy and
+// the configured header-read timeout.
+func (pp *ListenerWrapper) WrapListener(l net.Listener) net.Listener {
+	return &goproxy.Listener{
+		Listener:          l,
+		ReadHeaderTimeout: time.Duration(pp.Timeout),
+		ConnPolicy:        pp.policy,
+	}
+}
diff --git a/modules/caddyhttp/proxyprotocol/module.go b/modules/caddyhttp/proxyprotocol/module.go
new file mode 100644
index 00000000..75a156a2
--- /dev/null
+++ b/modules/caddyhttp/proxyprotocol/module.go
@@ -0,0 +1,87 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proxyprotocol
+
+import (
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+)
+
+func init() {
+	// Register this listener wrapper with Caddy under the module ID
+	// returned by CaddyModule (caddy.listeners.proxy_protocol).
+	caddy.RegisterModule(ListenerWrapper{})
+}
+
+// CaddyModule returns the Caddy module information.
+func (ListenerWrapper) CaddyModule() caddy.ModuleInfo {
+	return caddy.ModuleInfo{
+		ID:  "caddy.listeners.proxy_protocol",
+		New: func() caddy.Module { return new(ListenerWrapper) },
+	}
+}
+
+// UnmarshalCaddyfile sets up the ListenerWrapper from Caddyfile tokens. Syntax:
+//
+//	proxy_protocol {
+//		timeout <duration>
+//		allow <cidrs...>
+//		deny <cidrs...>
+//		fallback_policy <policy>
+//	}
+func (w *ListenerWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	d.Next() // consume wrapper name
+
+	// The wrapper accepts no arguments on the same line as its name.
+	if d.NextArg() {
+		return d.ArgErr()
+	}
+
+	for d.NextBlock(0) {
+		option := d.Val()
+		switch option {
+		case "timeout":
+			if !d.NextArg() {
+				return d.ArgErr()
+			}
+			dur, err := caddy.ParseDuration(d.Val())
+			if err != nil {
+				return d.Errf("parsing proxy_protocol timeout duration: %v", err)
+			}
+			w.Timeout = caddy.Duration(dur)
+
+		case "allow":
+			// zero or more CIDRs may follow on the same line
+			w.Allow = append(w.Allow, d.RemainingArgs()...)
+
+		case "deny":
+			// zero or more CIDRs may follow on the same line
+			w.Deny = append(w.Deny, d.RemainingArgs()...)
+
+		case "fallback_policy":
+			if !d.NextArg() {
+				return d.ArgErr()
+			}
+			p, err := parsePolicy(d.Val())
+			if err != nil {
+				return d.WrapErr(err)
+			}
+			w.FallbackPolicy = p
+
+		default:
+			return d.ArgErr()
+		}
+	}
+	return nil
+}
+
+// Interface guards: fail the build if ListenerWrapper stops satisfying
+// any of the interfaces Caddy expects of a listener-wrapper module.
+var (
+	_ caddy.Provisioner     = (*ListenerWrapper)(nil)
+	_ caddy.Module          = (*ListenerWrapper)(nil)
+	_ caddy.ListenerWrapper = (*ListenerWrapper)(nil)
+	_ caddyfile.Unmarshaler = (*ListenerWrapper)(nil)
+)
diff --git a/modules/caddyhttp/proxyprotocol/policy.go b/modules/caddyhttp/proxyprotocol/policy.go
new file mode 100644
index 00000000..6dc8beb4
--- /dev/null
+++ b/modules/caddyhttp/proxyprotocol/policy.go
@@ -0,0 +1,82 @@
+package proxyprotocol
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ goproxy "github.com/pires/go-proxyproto"
+)
+
+// Policy determines how the PROXY protocol listener wrapper treats an
+// incoming connection's PROXY header. The values mirror those of
+// github.com/pires/go-proxyproto; see policyToGoProxyPolicy for the mapping.
+type Policy int
+
+// as defined in: https://pkg.go.dev/github.com/pires/go-proxyproto@v0.7.0#Policy
+const (
+	// IGNORE address from PROXY header, but accept connection
+	PolicyIGNORE Policy = iota
+	// USE address from PROXY header
+	PolicyUSE
+	// REJECT connection when PROXY header is sent
+	// Note: even though the first read on the connection returns an error if
+	// a PROXY header is present, subsequent reads do not. It is the task of
+	// the code using the connection to handle that case properly.
+	PolicyREJECT
+	// REQUIRE connection to send PROXY header, reject if not present
+	// Note: even though the first read on the connection returns an error if
+	// a PROXY header is not present, subsequent reads do not. It is the task
+	// of the code using the connection to handle that case properly.
+	PolicyREQUIRE
+	// SKIP accepts a connection without requiring the PROXY header
+	// Note: an example usage can be found in the SkipProxyHeaderForCIDR
+	// function.
+	PolicySKIP
+)
+
+// policyToGoProxyPolicy translates our Policy values into the
+// equivalent go-proxyproto policies.
+var policyToGoProxyPolicy = map[Policy]goproxy.Policy{
+	PolicyUSE:     goproxy.USE,
+	PolicyIGNORE:  goproxy.IGNORE,
+	PolicyREJECT:  goproxy.REJECT,
+	PolicyREQUIRE: goproxy.REQUIRE,
+	PolicySKIP:    goproxy.SKIP,
+}
+
+// policyMap gives the canonical upper-case name of each Policy,
+// used when marshaling a Policy to text.
+var policyMap = map[Policy]string{
+	PolicyUSE:     "USE",
+	PolicyIGNORE:  "IGNORE",
+	PolicyREJECT:  "REJECT",
+	PolicyREQUIRE: "REQUIRE",
+	PolicySKIP:    "SKIP",
+}
+
+// policyMapRev is the inverse of policyMap, used when parsing policy
+// names from config; lookups are performed on upper-cased input.
+var policyMapRev = map[string]Policy{
+	"USE":     PolicyUSE,
+	"IGNORE":  PolicyIGNORE,
+	"REJECT":  PolicyREJECT,
+	"REQUIRE": PolicyREQUIRE,
+	"SKIP":    PolicySKIP,
+}
+
+// MarshalText implements the encoding.TextMarshaler interface. It
+// returns the canonical upper-case name of the policy, or an error if
+// the value is not one of the defined Policy constants (previously an
+// unknown value silently marshaled to the empty string).
+func (x Policy) MarshalText() ([]byte, error) {
+	name, ok := policyMap[x]
+	if !ok {
+		return nil, fmt.Errorf("%d is %w", int(x), errInvalidPolicy)
+	}
+	return []byte(name), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface,
+// accepting policy names case-insensitively (via parsePolicy).
+func (x *Policy) UnmarshalText(text []byte) error {
+	p, err := parsePolicy(string(text))
+	if err != nil {
+		return err
+	}
+	*x = p
+	return nil
+}
+
+// parsePolicy resolves a policy name, case-insensitively, to its
+// Policy value; unknown names yield an error wrapping errInvalidPolicy.
+func parsePolicy(name string) (Policy, error) {
+	x, ok := policyMapRev[strings.ToUpper(name)]
+	if !ok {
+		return Policy(0), fmt.Errorf("%s is %w", name, errInvalidPolicy)
+	}
+	return x, nil
+}
+
+var errInvalidPolicy = errors.New("invalid policy")
diff --git a/modules/caddyhttp/push/caddyfile.go b/modules/caddyhttp/push/caddyfile.go
new file mode 100644
index 00000000..f56db81f
--- /dev/null
+++ b/modules/caddyhttp/push/caddyfile.go
@@ -0,0 +1,106 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package push
+
+import (
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
+)
+
+func init() {
+	// Register the Caddyfile directive "push" with its parser so the
+	// HTTP Caddyfile adapter can produce the push handler config.
+	httpcaddyfile.RegisterHandlerDirective("push", parseCaddyfile)
+}
+
+// parseCaddyfile sets up the push handler. Syntax:
+//
+//	push [<matcher>] [<resource>] {
+//	    [GET|HEAD]
+//	    headers {
+//	        [+]<field> [<value-or-regexp> [<replacement>]]
+//	        -<field>