// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package httpcaddyfile

import (
	"encoding/json"
	"fmt"
	"reflect"
	"sort"
	"strconv"
	"strings"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
)

func init() {
	caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}})
}

// ServerType can set up a config from an HTTP Caddyfile.
type ServerType struct {
}

// Setup makes a config from the tokens.
func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock,
	options map[string]interface{}) (*caddy.Config, []caddyconfig.Warning, error) {
	var warnings []caddyconfig.Warning
	gc := counter{new(int)}
	state := make(map[string]interface{})

	// load all the server blocks and associate them with a "pile"
	// of config values; also prohibit duplicate keys because they
	// can make a config confusing if more than one server block is
	// chosen to handle a request - we actually will make each
	// server block's route terminal so that only one will run
	sbKeys := make(map[string]struct{})
	originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks))
	for i, sblock := range inputServerBlocks {
		for j, k := range sblock.Keys {
			if _, ok := sbKeys[k]; ok {
				return nil, warnings, fmt.Errorf("duplicate site address not allowed: '%s' in %v (site block %d, key %d)", k, sblock.Keys, i, j)
			}
			sbKeys[k] = struct{}{}
		}
		originalServerBlocks = append(originalServerBlocks, serverBlock{
			block: sblock,
			pile:  make(map[string][]ConfigValue),
		})
	}

	// apply any global options
	var err error
	originalServerBlocks, err = st.evaluateGlobalOptionsBlock(originalServerBlocks, options)
	if err != nil {
		return nil, warnings, err
	}

	for _, sb := range originalServerBlocks {
		// replace shorthand placeholders (which are
		// convenient when writing a Caddyfile) with
		// their actual placeholder identifiers or
		// variable names
		replacer := strings.NewReplacer(
			"{dir}", "{http.request.uri.path.dir}",
			"{file}", "{http.request.uri.path.file}",
			"{host}", "{http.request.host}",
			"{hostport}", "{http.request.hostport}",
			"{method}", "{http.request.method}",
			"{path}", "{http.request.uri.path}",
			"{query}", "{http.request.uri.query}",
			"{remote}", "{http.request.remote}",
			"{remote_host}", "{http.request.remote.host}",
			"{remote_port}", "{http.request.remote.port}",
			"{scheme}", "{http.request.scheme}",
			"{uri}", "{http.request.uri}",
			"{tls_cipher}", "{http.request.tls.cipher_suite}",
			"{tls_version}", "{http.request.tls.version}",
			"{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
			"{tls_client_issuer}", "{http.request.tls.client.issuer}",
			"{tls_client_serial}", "{http.request.tls.client.serial}",
			"{tls_client_subject}", "{http.request.tls.client.subject}",
		)
		for _, segment := range sb.block.Segments {
			for i := 0; i < len(segment); i++ {
				segment[i].Text = replacer.Replace(segment[i].Text)
			}
		}
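		// e.g. a shorthand like "{host}{uri}" in a directive's arguments
		// is rewritten to "{http.request.host}{http.request.uri}" by the
		// replacer above, before any directive parses its tokens
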
		if len(sb.block.Keys) == 0 {
			return nil, warnings, fmt.Errorf("server block without any key is global configuration, and if used, it must be first")
		}

		// extract matcher definitions
		matcherDefs := make(map[string]caddy.ModuleMap)
		for _, segment := range sb.block.Segments {
			if dir := segment.Directive(); strings.HasPrefix(dir, matcherPrefix) {
				d := sb.block.DispenseDirective(dir)
				err := parseMatcherDefinitions(d, matcherDefs)
				if err != nil {
					return nil, warnings, err
				}
			}
		}

		// evaluate each directive ("segment") in this block
		for _, segment := range sb.block.Segments {
			dir := segment.Directive()

			if strings.HasPrefix(dir, matcherPrefix) {
				// matcher definitions were pre-processed
				continue
			}

			dirFunc, ok := registeredDirectives[dir]
			if !ok {
				tkn := segment[0]
				return nil, warnings, fmt.Errorf("%s:%d: unrecognized directive: %s", tkn.File, tkn.Line, dir)
			}

			h := Helper{
				Dispenser:    caddyfile.NewDispenser(segment),
				options:      options,
				warnings:     &warnings,
				matcherDefs:  matcherDefs,
				parentBlock:  sb.block,
				groupCounter: gc,
				State:        state,
			}

			results, err := dirFunc(h)
			if err != nil {
				return nil, warnings, fmt.Errorf("parsing caddyfile tokens for '%s': %v", dir, err)
			}
			for _, result := range results {
				result.directive = dir
				sb.pile[result.Class] = append(sb.pile[result.Class], result)
			}
		}
	}

	// map
	sbmap, err := st.mapAddressToServerBlocks(originalServerBlocks, options)
	if err != nil {
		return nil, warnings, err
	}

	// reduce
	pairings := st.consolidateAddrMappings(sbmap)

	// each pairing of listener addresses to list of server
	// blocks is basically a server definition
	servers, err := st.serversFromPairings(pairings, options, &warnings, gc)
	if err != nil {
		return nil, warnings, err
	}

	// now that each server is configured, make the HTTP app
	httpApp := caddyhttp.App{
		HTTPPort:  tryInt(options["http_port"], &warnings),
		HTTPSPort: tryInt(options["https_port"], &warnings),
		Servers:   servers,
	}

	// then make the TLS app
	tlsApp, warnings, err := st.buildTLSApp(pairings, options, warnings)
	if err != nil {
		return nil, warnings, err
	}

	// if experimental HTTP/3 is enabled, enable it on each server
	if enableH3, ok := options["experimental_http3"].(bool); ok && enableH3 {
		for _, srv := range httpApp.Servers {
			srv.ExperimentalHTTP3 = true
		}
	}

	// extract any custom logs, and enforce configured levels
	var customLogs []namedCustomLog
	var hasDefaultLog bool
	for _, p := range pairings {
		for _, sb := range p.serverBlocks {
			for _, clVal := range sb.pile["custom_log"] {
				ncl := clVal.Value.(namedCustomLog)
				if ncl.name == "" {
					continue
				}
				if ncl.name == "default" {
					hasDefaultLog = true
				}
				if _, ok := options["debug"]; ok && ncl.log.Level == "" {
					ncl.log.Level = "DEBUG"
				}
				customLogs = append(customLogs, ncl)
			}
		}
	}

	if !hasDefaultLog {
		// if the default log was not customized, ensure we
		// configure it with any applicable options
		if _, ok := options["debug"]; ok {
			customLogs = append(customLogs, namedCustomLog{
				name: "default",
				log:  &caddy.CustomLog{Level: "DEBUG"},
			})
		}
	}

	// annnd the top-level config, then we're done!
	cfg := &caddy.Config{AppsRaw: make(caddy.ModuleMap)}
	if len(httpApp.Servers) > 0 {
		cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings)
	}
	if !reflect.DeepEqual(tlsApp, &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}) {
		cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings)
	}
	if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok {
		cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr,
			"module",
			storageCvtr.(caddy.Module).CaddyModule().ID.Name(),
			&warnings)
	}
	if adminConfig, ok := options["admin"].(string); ok && adminConfig != "" {
		if adminConfig == "off" {
			cfg.Admin = &caddy.AdminConfig{Disabled: true}
		} else {
			cfg.Admin = &caddy.AdminConfig{Listen: adminConfig}
		}
	}
	if len(customLogs) > 0 {
		if cfg.Logging == nil {
			cfg.Logging = &caddy.Logging{
				Logs: make(map[string]*caddy.CustomLog),
			}
		}
		for _, ncl := range customLogs {
			if ncl.name != "" {
				cfg.Logging.Logs[ncl.name] = ncl.log
			}
			// most users seem to prefer not writing access logs
			// to the default log when they are directed to a
			// file or have any other special customization
			if len(ncl.log.Include) > 0 {
				defaultLog, ok := cfg.Logging.Logs["default"]
				if !ok {
					defaultLog = new(caddy.CustomLog)
					cfg.Logging.Logs["default"] = defaultLog
				}
				defaultLog.Exclude = append(defaultLog.Exclude, ncl.log.Include...)
			}
		}
	}

	return cfg, warnings, nil
}

// evaluateGlobalOptionsBlock evaluates the global options block,
// which is expected to be the first server block if it has zero
// keys. It returns the updated list of server blocks with the
// global options block removed, and updates options accordingly.
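//
// As an illustrative example (option names taken from the switch below,
// values hypothetical), such a key-less block looks like:
//
//	{
//	    debug
//	    http_port  8080
//	    https_port 8443
//	    email      admin@example.com
//	}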
func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]interface{}) ([]serverBlock, error) {
	if len(serverBlocks) == 0 || len(serverBlocks[0].block.Keys) > 0 {
		return serverBlocks, nil
	}

	for _, segment := range serverBlocks[0].block.Segments {
		dir := segment.Directive()
		var val interface{}
		var err error
		disp := caddyfile.NewDispenser(segment)
		switch dir {
		case "debug":
			val = true
		case "http_port":
			val, err = parseOptHTTPPort(disp)
		case "https_port":
			val, err = parseOptHTTPSPort(disp)
		case "default_sni":
			val, err = parseOptSingleString(disp)
		case "order":
			val, err = parseOptOrder(disp)
		case "experimental_http3":
			val, err = parseOptExperimentalHTTP3(disp)
		case "storage":
			val, err = parseOptStorage(disp)
		case "acme_ca", "acme_dns", "acme_ca_root":
			val, err = parseOptSingleString(disp)
		case "email":
			val, err = parseOptSingleString(disp)
		case "admin":
			val, err = parseOptAdmin(disp)
		case "on_demand_tls":
			val, err = parseOptOnDemand(disp)
		case "local_certs":
			val = true
		case "key_type":
			val, err = parseOptSingleString(disp)
		default:
			return nil, fmt.Errorf("unrecognized parameter name: %s", dir)
		}
		if err != nil {
			return nil, fmt.Errorf("%s: %v", dir, err)
		}
		options[dir] = val
	}

	return serverBlocks[1:], nil
}

// serversFromPairings creates the servers for each pairing of addresses
// to server blocks. Each pairing is essentially a server definition.
func (st *ServerType) serversFromPairings(
	pairings []sbAddrAssociation,
	options map[string]interface{},
	warnings *[]caddyconfig.Warning,
	groupCounter counter,
) (map[string]*caddyhttp.Server, error) {
	servers := make(map[string]*caddyhttp.Server)
	defaultSNI := tryString(options["default_sni"], warnings)

	httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
	if hp, ok := options["http_port"].(int); ok {
		httpPort = strconv.Itoa(hp)
	}
	httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
	if hsp, ok := options["https_port"].(int); ok {
		httpsPort = strconv.Itoa(hsp)
	}

	for i, p := range pairings {
		srv := &caddyhttp.Server{
			Listen: p.addresses,
		}

		// sort server blocks by their keys; this is important because
		// only the first matching site should be evaluated, and we should
		// attempt to match most specific site first (host and path), in
		// case their matchers overlap; we do this somewhat naively by
		// descending sort by length of host then path
		sort.SliceStable(p.serverBlocks, func(i, j int) bool {
			// TODO: we could pre-process the specificities for efficiency,
			// but I don't expect many blocks will have THAT many keys...
			var iLongestPath, jLongestPath string
			var iLongestHost, jLongestHost string
			for _, addr := range p.serverBlocks[i].keys {
				if specificity(addr.Host) > specificity(iLongestHost) {
					iLongestHost = addr.Host
				}
				if specificity(addr.Path) > specificity(iLongestPath) {
					iLongestPath = addr.Path
				}
			}
			for _, addr := range p.serverBlocks[j].keys {
				if specificity(addr.Host) > specificity(jLongestHost) {
					jLongestHost = addr.Host
				}
				if specificity(addr.Path) > specificity(jLongestPath) {
					jLongestPath = addr.Path
				}
			}
			if specificity(iLongestHost) == specificity(jLongestHost) {
				return len(iLongestPath) > len(jLongestPath)
			}
			return specificity(iLongestHost) > specificity(jLongestHost)
		})
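		// (for example, a site block keyed by "foo.example.com" is placed
		// before one keyed only by "example.com", since its host is more
		// specific and should get the first chance to match a request)
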
		var hasCatchAllTLSConnPolicy, addressQualifiesForTLS bool
		autoHTTPSWillAddConnPolicy := true

		// create a subroute for each site in the server block
		for _, sblock := range p.serverBlocks {
			matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock)
			if err != nil {
				return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.block.Keys, err)
			}

			hosts := sblock.hostsFromKeys(false)

			// tls: connection policies
			if cpVals, ok := sblock.pile["tls.connection_policy"]; ok {
				// tls connection policies
				for _, cpVal := range cpVals {
					cp := cpVal.Value.(*caddytls.ConnectionPolicy)

					// make sure the policy covers all hostnames from the block
					for _, h := range hosts {
						if h == defaultSNI {
							hosts = append(hosts, "")
							cp.DefaultSNI = defaultSNI
							break
						}
					}

					if len(hosts) > 0 {
						cp.MatchersRaw = caddy.ModuleMap{
							"sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
						}
					} else {
						cp.DefaultSNI = defaultSNI
						hasCatchAllTLSConnPolicy = true
					}

					srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
				}
			}

			for _, addr := range sblock.keys {
				// exclude any hosts that were defined explicitly with "http://"
				// in the key from automated cert management (issue #2998)
				if addr.Scheme == "http" && addr.Host != "" {
					if srv.AutoHTTPS == nil {
						srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
					}
					if !sliceContains(srv.AutoHTTPS.Skip, addr.Host) {
						srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, addr.Host)
					}
				}
				// we'll need to remember if the address qualifies for auto-HTTPS, so we
				// can add a TLS conn policy if necessary
				if addr.Scheme == "https" ||
					(addr.Scheme != "http" && addr.Host != "" && addr.Port != httpPort) {
					addressQualifiesForTLS = true
				}
				// predict whether auto-HTTPS will add the conn policy for us; if so, we
				// may not need to add one for this server
				autoHTTPSWillAddConnPolicy = autoHTTPSWillAddConnPolicy &&
					(addr.Port == httpsPort || (addr.Port != httpPort && addr.Host != ""))
			}

			// set up each handler directive, making sure to honor directive order
			dirRoutes := sblock.pile["route"]
			siteSubroute, err := buildSubroute(dirRoutes, groupCounter)
			if err != nil {
				return nil, err
			}

			// add the site block's route(s) to the server
			srv.Routes = appendSubrouteToRouteList(srv.Routes, siteSubroute, matcherSetsEnc, p, warnings)

			// if error routes are defined, add those too
			if errorSubrouteVals, ok := sblock.pile["error_route"]; ok {
				if srv.Errors == nil {
					srv.Errors = new(caddyhttp.HTTPErrorConfig)
				}
				for _, val := range errorSubrouteVals {
					sr := val.Value.(*caddyhttp.Subroute)
					srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, sr, matcherSetsEnc, p, warnings)
				}
			}

			// add log associations
			// see https://github.com/caddyserver/caddy/issues/3310
			sblockLogHosts := sblock.hostsFromKeys(true)
			for _, cval := range sblock.pile["custom_log"] {
				ncl := cval.Value.(namedCustomLog)
				if srv.Logs == nil {
					srv.Logs = new(caddyhttp.ServerLogConfig)
				}
				if sblock.hasHostCatchAllKey() {
					// all requests for hosts not able to be listed should use
					// this log because it's a catch-all-hosts server block
					srv.Logs.DefaultLoggerName = ncl.name
				} else {
					// map each host to the user's desired logger name
					for _, h := range sblockLogHosts {
						// if the custom logger name is non-empty, add it to
						// the map; otherwise, only map to an empty logger
						// name if the server block has a catch-all host (in
						// which case only requests with mapped hostnames will
						// be access-logged, so it'll be necessary to add them
						// to the map even if they use default logger)
						if ncl.name != "" || len(hosts) == 0 {
							if srv.Logs.LoggerNames == nil {
								srv.Logs.LoggerNames = make(map[string]string)
							}
							srv.Logs.LoggerNames[h] = ncl.name
						}
					}
				}
			}
			if srv.Logs != nil && len(sblock.pile["custom_log"]) == 0 {
				// server has access logs enabled, but this server block does not
				// enable access logs; therefore, all hosts of this server block
				// should not be access-logged
				if len(hosts) == 0 {
					// if the server block has a catch-all-hosts key, then we should
					// not log reqs to any host unless it appears in the map
					srv.Logs.SkipUnmappedHosts = true
				}
				srv.Logs.SkipHosts = append(srv.Logs.SkipHosts, sblockLogHosts...)
			}
		}

		// a server cannot (natively) serve both HTTP and HTTPS at the
		// same time, so make sure the configuration isn't in conflict
		err := detectConflictingSchemes(srv, p.serverBlocks, options)
		if err != nil {
			return nil, err
		}

		// a catch-all TLS conn policy is necessary to ensure TLS can
		// be offered to all hostnames of the server; even though only
		// one policy is needed to enable TLS for the server, that
		// policy might apply to only certain TLS handshakes; but when
		// using the Caddyfile, user would expect all handshakes to at
		// least have a matching connection policy, so here we append a
		// catch-all/default policy if there isn't one already (it's
		// important that it goes at the end) - see issue #3004:
		// https://github.com/caddyserver/caddy/issues/3004
		// TODO: maybe a smarter way to handle this might be to just make the
		// auto-HTTPS logic at provision-time detect if there is any connection
		// policy missing for any HTTPS-enabled hosts, if so, add it... maybe?
		if addressQualifiesForTLS &&
			!hasCatchAllTLSConnPolicy &&
			(len(srv.TLSConnPolicies) > 0 || !autoHTTPSWillAddConnPolicy || defaultSNI != "") {
			srv.TLSConnPolicies = append(srv.TLSConnPolicies, &caddytls.ConnectionPolicy{DefaultSNI: defaultSNI})
		}

		// tidy things up a bit
		srv.TLSConnPolicies, err = consolidateConnPolicies(srv.TLSConnPolicies)
		if err != nil {
			return nil, fmt.Errorf("consolidating TLS connection policies for server %d: %v", i, err)
		}
		srv.Routes = consolidateRoutes(srv.Routes)

		servers[fmt.Sprintf("srv%d", i)] = srv
	}

	return servers, nil
}

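// detectConflictingSchemes reports an error if the given server would have
// to speak both HTTP and HTTPS on the same listener. A hypothetical example:
// a Caddyfile defining both http://a.example.com:8080 and
// https://b.example.com:8080 places both sites on the same listener, which
// can serve only one of the two schemes.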
func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock, options map[string]interface{}) error {
	httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
	if hp, ok := options["http_port"].(int); ok {
		httpPort = strconv.Itoa(hp)
	}
	httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
	if hsp, ok := options["https_port"].(int); ok {
		httpsPort = strconv.Itoa(hsp)
	}

	var httpOrHTTPS string
	checkAndSetHTTP := func(addr Address) error {
		if httpOrHTTPS == "HTTPS" {
			errMsg := fmt.Errorf("server listening on %v is configured for HTTPS and cannot natively multiplex HTTP and HTTPS: %s",
				srv.Listen, addr.Original)
			if addr.Scheme == "" && addr.Host == "" {
				errMsg = fmt.Errorf("%s (try specifying https:// in the address)", errMsg)
			}
			return errMsg
		}
		if len(srv.TLSConnPolicies) > 0 {
			// any connection policies created for an HTTP server
			// is a logical conflict, as it would enable HTTPS
			return fmt.Errorf("server listening on %v is HTTP, but attempts to configure TLS connection policies", srv.Listen)
		}
		httpOrHTTPS = "HTTP"
		return nil
	}
	checkAndSetHTTPS := func(addr Address) error {
		if httpOrHTTPS == "HTTP" {
			return fmt.Errorf("server listening on %v is configured for HTTP and cannot natively multiplex HTTP and HTTPS: %s",
				srv.Listen, addr.Original)
		}
		httpOrHTTPS = "HTTPS"
		return nil
	}

	for _, sblock := range serverBlocks {
		for _, addr := range sblock.keys {
			if addr.Scheme == "http" || addr.Port == httpPort {
				if err := checkAndSetHTTP(addr); err != nil {
					return err
				}
			} else if addr.Scheme == "https" || addr.Port == httpsPort || len(srv.TLSConnPolicies) > 0 {
				if err := checkAndSetHTTPS(addr); err != nil {
					return err
				}
			} else if addr.Host == "" {
				if err := checkAndSetHTTP(addr); err != nil {
					return err
				}
			}
		}
	}

	return nil
}

// consolidateConnPolicies removes empty TLS connection policies and combines
// equivalent ones for a cleaner overall output.
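//
// For example, when a site block loads two certificates manually, the
// Caddyfile logic produces two connection policies with the same matchers,
// each selecting a different certificate tag; those are merged here into a
// single policy whose certificate selection accepts any of the tags
// (AnyTag). Policies with the same matchers but conflicting, non-mergeable
// settings are reported as an error instead.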
func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.ConnectionPolicies, error) {
	for i := 0; i < len(cps); i++ {
		// compare it to the others
		for j := 0; j < len(cps); j++ {
			if j == i {
				continue
			}

			// if they're exactly equal in every way, just keep one of them
			if reflect.DeepEqual(cps[i], cps[j]) {
				cps = append(cps[:j], cps[j+1:]...)
				i--
				break
			}

			// if they have the same matcher, try to reconcile each field: either they must
			// be identical, or we have to be able to combine them safely
			if reflect.DeepEqual(cps[i].MatchersRaw, cps[j].MatchersRaw) {
				if len(cps[i].ALPN) > 0 &&
					len(cps[j].ALPN) > 0 &&
					!reflect.DeepEqual(cps[i].ALPN, cps[j].ALPN) {
					return nil, fmt.Errorf("two policies with same match criteria have conflicting ALPN: %v vs. %v",
						cps[i].ALPN, cps[j].ALPN)
				}
				if len(cps[i].CipherSuites) > 0 &&
					len(cps[j].CipherSuites) > 0 &&
					!reflect.DeepEqual(cps[i].CipherSuites, cps[j].CipherSuites) {
					return nil, fmt.Errorf("two policies with same match criteria have conflicting cipher suites: %v vs. %v",
						cps[i].CipherSuites, cps[j].CipherSuites)
				}
				if cps[i].ClientAuthentication == nil &&
					cps[j].ClientAuthentication != nil &&
					!reflect.DeepEqual(cps[i].ClientAuthentication, cps[j].ClientAuthentication) {
					return nil, fmt.Errorf("two policies with same match criteria have conflicting client auth configuration: %+v vs. %+v",
						cps[i].ClientAuthentication, cps[j].ClientAuthentication)
				}
				if len(cps[i].Curves) > 0 &&
					len(cps[j].Curves) > 0 &&
					!reflect.DeepEqual(cps[i].Curves, cps[j].Curves) {
					return nil, fmt.Errorf("two policies with same match criteria have conflicting curves: %v vs. %v",
						cps[i].Curves, cps[j].Curves)
				}
				if cps[i].DefaultSNI != "" &&
					cps[j].DefaultSNI != "" &&
					cps[i].DefaultSNI != cps[j].DefaultSNI {
					return nil, fmt.Errorf("two policies with same match criteria have conflicting default SNI: %s vs. %s",
						cps[i].DefaultSNI, cps[j].DefaultSNI)
				}
				if cps[i].ProtocolMin != "" &&
					cps[j].ProtocolMin != "" &&
					cps[i].ProtocolMin != cps[j].ProtocolMin {
					return nil, fmt.Errorf("two policies with same match criteria have conflicting min protocol: %s vs. %s",
						cps[i].ProtocolMin, cps[j].ProtocolMin)
				}
				if cps[i].ProtocolMax != "" &&
					cps[j].ProtocolMax != "" &&
					cps[i].ProtocolMax != cps[j].ProtocolMax {
					return nil, fmt.Errorf("two policies with same match criteria have conflicting max protocol: %s vs. %s",
						cps[i].ProtocolMax, cps[j].ProtocolMax)
				}
				if cps[i].CertSelection != nil && cps[j].CertSelection != nil {
					// merging fields other than AnyTag is not implemented
					if !reflect.DeepEqual(cps[i].CertSelection.SerialNumber, cps[j].CertSelection.SerialNumber) ||
						!reflect.DeepEqual(cps[i].CertSelection.SubjectOrganization, cps[j].CertSelection.SubjectOrganization) ||
						cps[i].CertSelection.PublicKeyAlgorithm != cps[j].CertSelection.PublicKeyAlgorithm ||
						!reflect.DeepEqual(cps[i].CertSelection.AllTags, cps[j].CertSelection.AllTags) {
						return nil, fmt.Errorf("two policies with same match criteria have conflicting cert selections: %+v vs. %+v",
							cps[i].CertSelection, cps[j].CertSelection)
					}
				}

				// by now we've decided that we can merge the two -- we'll keep i and drop j

				if len(cps[i].ALPN) == 0 && len(cps[j].ALPN) > 0 {
					cps[i].ALPN = cps[j].ALPN
				}
				if len(cps[i].CipherSuites) == 0 && len(cps[j].CipherSuites) > 0 {
					cps[i].CipherSuites = cps[j].CipherSuites
				}
				if cps[i].ClientAuthentication == nil && cps[j].ClientAuthentication != nil {
					cps[i].ClientAuthentication = cps[j].ClientAuthentication
				}
				if len(cps[i].Curves) == 0 && len(cps[j].Curves) > 0 {
					cps[i].Curves = cps[j].Curves
				}
				if cps[i].DefaultSNI == "" && cps[j].DefaultSNI != "" {
					cps[i].DefaultSNI = cps[j].DefaultSNI
				}
				if cps[i].ProtocolMin == "" && cps[j].ProtocolMin != "" {
					cps[i].ProtocolMin = cps[j].ProtocolMin
				}
				if cps[i].ProtocolMax == "" && cps[j].ProtocolMax != "" {
					cps[i].ProtocolMax = cps[j].ProtocolMax
				}

				if cps[i].CertSelection == nil && cps[j].CertSelection != nil {
					// if j is the only one with a policy, move it over to i
					cps[i].CertSelection = cps[j].CertSelection
				} else if cps[i].CertSelection != nil && cps[j].CertSelection != nil {
					// if both have one, then combine AnyTag
					for _, tag := range cps[j].CertSelection.AnyTag {
						if !sliceContains(cps[i].CertSelection.AnyTag, tag) {
							cps[i].CertSelection.AnyTag = append(cps[i].CertSelection.AnyTag, tag)
						}
					}
				}

				cps = append(cps[:j], cps[j+1:]...)
				i--
				break
			}
		}
	}
	return cps, nil
}

// appendSubrouteToRouteList appends the routes in subroute
// to the routeList, optionally qualified by matchers.
func appendSubrouteToRouteList(routeList caddyhttp.RouteList,
	subroute *caddyhttp.Subroute,
	matcherSetsEnc []caddy.ModuleMap,
	p sbAddrAssociation,
	warnings *[]caddyconfig.Warning) caddyhttp.RouteList {

	// nothing to do if... there's nothing to do
	if len(matcherSetsEnc) == 0 && len(subroute.Routes) == 0 && subroute.Errors == nil {
		return routeList
	}

	if len(matcherSetsEnc) == 0 && len(p.serverBlocks) == 1 {
		// no need to wrap the handlers in a subroute if this is
		// the only server block and there is no matcher for it
		routeList = append(routeList, subroute.Routes...)
	} else {
		route := caddyhttp.Route{
			// the semantics of a site block in the Caddyfile dictate
			// that only the first matching one is evaluated, since
			// site blocks do not cascade nor inherit
			Terminal: true,
		}
		if len(matcherSetsEnc) > 0 {
			route.MatcherSetsRaw = matcherSetsEnc
		}
		if len(subroute.Routes) > 0 || subroute.Errors != nil {
			route.HandlersRaw = []json.RawMessage{
				caddyconfig.JSONModuleObject(subroute, "handler", "subroute", warnings),
			}
		}
		if len(route.MatcherSetsRaw) > 0 || len(route.HandlersRaw) > 0 {
			routeList = append(routeList, route)
		}
	}
	return routeList
}

// buildSubroute turns the config values, which are expected to be routes
// into a clean and orderly subroute that has all the routes within it.
func buildSubroute(routes []ConfigValue, groupCounter counter) (*caddyhttp.Subroute, error) {
	for _, val := range routes {
		if !directiveIsOrdered(val.directive) {
			return nil, fmt.Errorf("directive '%s' is not ordered, so it cannot be used here", val.directive)
		}
	}

	sortRoutes(routes)

	subroute := new(caddyhttp.Subroute)

	// some directives are mutually exclusive (only first matching
	// instance should be evaluated); this is done by putting their
	// routes in the same group
	mutuallyExclusiveDirs := map[string]*struct {
		count     int
		groupName string
	}{
		// as a special case, group rewrite directives so that they are mutually exclusive;
		// this means that only the first matching rewrite will be evaluated, and that's
		// probably a good thing, since there should never be a need to do more than one
		// rewrite (I think?), and cascading rewrites smell bad... imagine these rewrites:
		//     rewrite /docs/json/* /docs/json/index.html
		//     rewrite /docs/* /docs/index.html
		// (We use this on the Caddy website, or at least we did once.) The first rewrite's
		// result is also matched by the second rewrite, making the first rewrite pointless.
		// See issue #2959.
		"rewrite": {},

		// handle blocks are also mutually exclusive by definition
		"handle": {},

		// root just sets a variable, so if it was not mutually exclusive, intersecting
		// root directives would overwrite previously-matched ones; they should not cascade
		"root": {},
	}
	for meDir, info := range mutuallyExclusiveDirs {
		// see how many instances of the directive there are
		for _, r := range routes {
			if r.directive == meDir {
				info.count++
				if info.count > 1 {
					break
				}
			}
		}
		// if there is more than one, put them in a group
		// (special case: "rewrite" directive must always be in
		// its own group--even if there is only one--because we
		// do not want a rewrite to be consolidated into other
		// adjacent routes that happen to have the same matcher,
		// see caddyserver/caddy#3108 - because the implied
		// intent of rewrite is to do an internal redirect,
		// we can't assume that the request will continue to
		// match the same matcher; anyway, giving a route a
		// unique group name should keep it from consolidating)
		if info.count > 1 || meDir == "rewrite" {
			info.groupName = groupCounter.nextGroup()
		}
	}

	// add all the routes piled in from directives
	for _, r := range routes {
		// put this route into a group if it is mutually exclusive
		if info, ok := mutuallyExclusiveDirs[r.directive]; ok {
			route := r.Value.(caddyhttp.Route)
			route.Group = info.groupName
			r.Value = route
		}

		switch route := r.Value.(type) {
		case caddyhttp.Subroute:
			// if a route-class config value is actually a Subroute handler
			// with nothing but a list of routes, then it is the intention
			// of the directive to keep these handlers together and in this
			// same order, but not necessarily in a subroute (if it wanted
			// to keep them in a subroute, the directive would have returned
			// a route with a Subroute as its handler); this is useful to
			// keep multiple handlers/routes together and in the same order
			// so that the sorting procedure we did above doesn't reorder them
			if route.Errors != nil {
				// if error handlers are also set, this is confusing; it's
				// probably supposed to be wrapped in a Route and encoded
				// as a regular handler route... programmer error.
				panic("found subroute with more than just routes; perhaps it should have been wrapped in a route?")
			}
			subroute.Routes = append(subroute.Routes, route.Routes...)
		case caddyhttp.Route:
			subroute.Routes = append(subroute.Routes, route)
		}
	}

	subroute.Routes = consolidateRoutes(subroute.Routes)

	return subroute, nil
}

// consolidateRoutes combines routes with the same properties
// (same matchers, same Terminal and Group settings) for a
// cleaner overall output.
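//
// For instance, two adjacent routes that share an identical matcher set
// (hypothetically, ones produced by two directives applying to the same
// path) end up as one route whose handler list contains both handlers,
// in their original order.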
func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList {
	for i := 0; i < len(routes)-1; i++ {
		if reflect.DeepEqual(routes[i].MatcherSetsRaw, routes[i+1].MatcherSetsRaw) &&
			routes[i].Terminal == routes[i+1].Terminal &&
			routes[i].Group == routes[i+1].Group {
			// keep the handlers in the same order, then splice out repetitive route
			routes[i].HandlersRaw = append(routes[i].HandlersRaw, routes[i+1].HandlersRaw...)
			routes = append(routes[:i+1], routes[i+2:]...)
			i--
		}
	}
	return routes
}

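// matcherSetFromMatcherToken interprets a single matcher token from a
// directive's arguments. Three forms are recognized below: "*" (match all
// requests), a token beginning with "/" (shorthand for a single path
// matcher), and a reference to a named matcher definition (a token
// beginning with matcherPrefix, written like "@name" in the Caddyfile).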
func matcherSetFromMatcherToken(
	tkn caddyfile.Token,
	matcherDefs map[string]caddy.ModuleMap,
	warnings *[]caddyconfig.Warning,
) (caddy.ModuleMap, bool, error) {
	// matcher tokens can be wildcards, simple path matchers,
	// or refer to a pre-defined matcher by some name
	if tkn.Text == "*" {
		// match all requests == no matchers, so nothing to do
		return nil, true, nil
	} else if strings.HasPrefix(tkn.Text, "/") {
		// convenient way to specify a single path match
		return caddy.ModuleMap{
			"path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings),
		}, true, nil
	} else if strings.HasPrefix(tkn.Text, matcherPrefix) {
		// pre-defined matcher
		m, ok := matcherDefs[tkn.Text]
		if !ok {
			return nil, false, fmt.Errorf("unrecognized matcher name: %+v", tkn.Text)
		}
		return m, true, nil
	}
	return nil, false, nil
}

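// compileEncodedMatcherSets derives the host and path matcher sets for a
// site block from its address keys and returns them JSON-encoded. As a
// rough illustration, a block keyed by "example.com/api/*" would yield a
// matcher set along the lines of {"host": ["example.com"], "path": ["/api/*"]}.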
func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.ModuleMap, error) {
|
2019-08-09 21:05:47 +03:00
|
|
|
type hostPathPair struct {
|
|
|
|
hostm caddyhttp.MatchHost
|
|
|
|
pathm caddyhttp.MatchPath
|
|
|
|
}
|
|
|
|
|
|
|
|
// keep routes with common host and path matchers together
|
|
|
|
var matcherPairs []*hostPathPair
|
|
|
|
|
2020-03-18 06:00:45 +03:00
|
|
|
var catchAllHosts bool
|
2020-04-02 23:20:30 +03:00
|
|
|
for _, addr := range sblock.keys {
|
2019-08-09 21:05:47 +03:00
|
|
|
// choose a matcher pair that should be shared by this
|
|
|
|
// server block; if none exists yet, create one
|
|
|
|
var chosenMatcherPair *hostPathPair
|
|
|
|
for _, mp := range matcherPairs {
|
|
|
|
if (len(mp.pathm) == 0 && addr.Path == "") ||
|
|
|
|
(len(mp.pathm) == 1 && mp.pathm[0] == addr.Path) {
|
|
|
|
chosenMatcherPair = mp
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if chosenMatcherPair == nil {
|
|
|
|
chosenMatcherPair = new(hostPathPair)
|
|
|
|
if addr.Path != "" {
|
|
|
|
chosenMatcherPair.pathm = []string{addr.Path}
|
|
|
|
}
|
|
|
|
matcherPairs = append(matcherPairs, chosenMatcherPair)
|
|
|
|
}
|
|
|
|
|
2020-03-18 06:00:45 +03:00
|
|
|
// if one of the keys has no host (i.e. is a catch-all for
|
|
|
|
// any hostname), then we need to null out the host matcher
|
|
|
|
// entirely so that it matches all hosts
|
|
|
|
if addr.Host == "" && !catchAllHosts {
|
|
|
|
chosenMatcherPair.hostm = nil
|
|
|
|
catchAllHosts = true
|
|
|
|
}
|
|
|
|
if catchAllHosts {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-08-09 21:05:47 +03:00
|
|
|
// add this server block's keys to the matcher
|
|
|
|
// pair if it doesn't already exist
|
|
|
|
if addr.Host != "" {
|
|
|
|
var found bool
|
|
|
|
for _, h := range chosenMatcherPair.hostm {
|
|
|
|
if h == addr.Host {
|
|
|
|
found = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !found {
|
|
|
|
chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// iterate each pairing of host and path matchers and
|
|
|
|
// put them into a map for JSON encoding
|
|
|
|
var matcherSets []map[string]caddyhttp.RequestMatcher
|
|
|
|
for _, mp := range matcherPairs {
|
|
|
|
matcherSet := make(map[string]caddyhttp.RequestMatcher)
|
|
|
|
if len(mp.hostm) > 0 {
|
|
|
|
matcherSet["host"] = mp.hostm
|
|
|
|
}
|
|
|
|
if len(mp.pathm) > 0 {
|
|
|
|
matcherSet["path"] = mp.pathm
|
|
|
|
}
|
|
|
|
if len(matcherSet) > 0 {
|
|
|
|
matcherSets = append(matcherSets, matcherSet)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// finally, encode each of the matcher sets
|
2020-04-09 00:31:51 +03:00
|
|
|
matcherSetsEnc := make([]caddy.ModuleMap, 0, len(matcherSets))
|
2019-08-09 21:05:47 +03:00
|
|
|
for _, ms := range matcherSets {
|
|
|
|
msEncoded, err := encodeMatcherSet(ms)
|
|
|
|
if err != nil {
|
2020-04-02 23:20:30 +03:00
|
|
|
return nil, fmt.Errorf("server block %v: %v", sblock.block.Keys, err)
|
2019-08-09 21:05:47 +03:00
|
|
|
}
|
|
|
|
matcherSetsEnc = append(matcherSetsEnc, msEncoded)
|
|
|
|
}
|
|
|
|
|
|
|
|
return matcherSetsEnc, nil
|
|
|
|
}
|
|
|
|
|
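// As a rough illustration of the pairing above: a server block with the keys
// "a.example.com", "b.example.com", and "example.com/api" would produce two
// encoded matcher sets, approximately
//
//	{"host": ["a.example.com", "b.example.com"]}
//	{"host": ["example.com"], "path": ["/api"]}
//
// while a key with no hostname (such as ":8080") clears the host matcher of
// its pair so that requests for any hostname are matched.
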
// parseMatcherDefinitions parses all named matcher definitions
// from d into the matchers map.
func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
	for d.Next() {
		definitionName := d.Val()

		if _, ok := matchers[definitionName]; ok {
			return fmt.Errorf("matcher is defined more than once: %s", definitionName)
		}
		matchers[definitionName] = make(caddy.ModuleMap)

		// in case there are multiple instances of the same matcher, concatenate
		// their tokens (we expect that UnmarshalCaddyfile should be able to
		// handle more than one segment); otherwise, we'd overwrite other
		// instances of the matcher in this set
		tokensByMatcherName := make(map[string][]caddyfile.Token)
		for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
			matcherName := d.Val()
			tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
		}
		for matcherName, tokens := range tokensByMatcherName {
			mod, err := caddy.GetModule("http.matchers." + matcherName)
			if err != nil {
				return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
			}
			unm, ok := mod.New().(caddyfile.Unmarshaler)
			if !ok {
				return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
			}
			err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
			if err != nil {
				return err
			}
			rm, ok := unm.(caddyhttp.RequestMatcher)
			if !ok {
				return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
			}
			matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
		}
	}
	return nil
}

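// For illustration, a hypothetical named matcher definition such as
//
//	@api {
//	    path /api/*
//	    method GET POST
//	}
//
// would populate matchers["@api"] with one JSON-encoded entry per matcher
// module used, roughly {"path": ["/api/*"], "method": ["GET", "POST"]}.
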
// encodeMatcherSet JSON-encodes each matcher in the set,
// keyed by matcher name.
func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (caddy.ModuleMap, error) {
	msEncoded := make(caddy.ModuleMap)
	for matcherName, val := range matchers {
		jsonBytes, err := json.Marshal(val)
		if err != nil {
			return nil, fmt.Errorf("marshaling matcher set %#v: %v", matchers, err)
		}
		msEncoded[matcherName] = jsonBytes
	}
	return msEncoded, nil
}

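// For example, a matcher set containing {"host": caddyhttp.MatchHost{"example.com"}}
// is encoded as a caddy.ModuleMap whose "host" entry holds the raw JSON
// ["example.com"], since MatchHost marshals as a plain array of strings.
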
// tryInt tries to convert val to an integer. If it fails,
// it downgrades the error to a warning and returns 0.
func tryInt(val interface{}, warnings *[]caddyconfig.Warning) int {
	intVal, ok := val.(int)
	if val != nil && !ok && warnings != nil {
		*warnings = append(*warnings, caddyconfig.Warning{Message: "not an integer type"})
	}
	return intVal
}

// tryString tries to convert val to a string. If it fails,
// it downgrades the error to a warning and returns "".
func tryString(val interface{}, warnings *[]caddyconfig.Warning) string {
	stringVal, ok := val.(string)
	if val != nil && !ok && warnings != nil {
		*warnings = append(*warnings, caddyconfig.Warning{Message: "not a string type"})
	}
	return stringVal
}

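// A hypothetical use of the two helpers above: option values parsed from the
// Caddyfile arrive as interface{} values, so conversions like
//
//	port := tryInt(options["http_port"], &warnings)
//	email := tryString(options["email"], &warnings)
//
// yield a zero value plus a warning, rather than a panic, when an option was
// given with the wrong type. (The option names here are only examples.)
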
// sliceContains returns true if needle is in haystack.
func sliceContains(haystack []string, needle string) bool {
	for _, s := range haystack {
		if s == needle {
			return true
		}
	}
	return false
}

// specificity returns len(s) minus any wildcards (*) and
// placeholders ({...}). Basically, it's a length count
// that penalizes the use of wildcards and placeholders.
// This is useful for comparing hostnames and paths.
// However, wildcards in paths are not a sure answer to
// the question of specificity. For example,
// '*.example.com' is clearly less specific than
// 'a.example.com', but is '/a' more or less specific
// than '/a*'?
func specificity(s string) int {
	l := len(s) - strings.Count(s, "*")
	for len(s) > 0 {
		start := strings.Index(s, "{")
		if start < 0 {
			return l
		}
		end := strings.Index(s[start:], "}") + start + 1
		if end <= start {
			return l
		}
		l -= end - start
		s = s[end:]
	}
	return l
}

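// Worked examples for specificity: "*.example.com" has length 13 and one
// wildcard, so it scores 12, while "a.example.com" scores 13 and is therefore
// considered more specific. A placeholder counts against the whole braced
// span: "/foo/{bar}" has length 10 and the 5-character "{bar}" is subtracted,
// giving a score of 5.
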
// counter is a simple shared counter; copies of a counter
// share the same underlying count.
type counter struct {
	n *int
}

// nextGroup returns a new, unique group name and
// increments the counter.
func (c counter) nextGroup() string {
	name := fmt.Sprintf("group%d", *c.n)
	*c.n++
	return name
}

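// For illustration: successive calls to nextGroup on the same counter (or on
// copies of it, which share the same *int) return "group0", "group1",
// "group2", and so on, so generated group names never repeat.
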
// namedCustomLog pairs the name of a custom log with its configuration.
type namedCustomLog struct {
	name string
	log  *caddy.CustomLog
}

// sbAddrAssociation is a mapping from a list of
// addresses to a list of server blocks that are
// served on those addresses.
type sbAddrAssociation struct {
	addresses    []string
	serverBlocks []serverBlock
}

// matcherPrefix is the prefix that identifies tokens
// referring to named matchers.
const matcherPrefix = "@"

// Interface guard
var _ caddyfile.ServerType = (*ServerType)(nil)