mirror of https://github.com/caddyserver/caddy.git (synced 2024-12-27 22:23:48 +03:00)
reverseproxy: Implement retry count, alternative to try_duration (#4756)

* reverseproxy: Implement retry count, alternative to try_duration
* Add Caddyfile support for `retry_match`
* Refactor to deduplicate matcher parsing logic
* Fix lint

parent 04a14ee37a · commit 7d1f7771c9
4 changed files with 189 additions and 69 deletions
@ -0,0 +1,64 @@ (new file: Caddyfile adapt test)
:8884

reverse_proxy 127.0.0.1:65535 {
	lb_policy first
	lb_retries 5
	lb_try_duration 10s
	lb_try_interval 500ms
	lb_retry_match {
		path /foo*
		method POST
	}
	lb_retry_match path /bar*
}
----------
{
	"apps": {
		"http": {
			"servers": {
				"srv0": {
					"listen": [
						":8884"
					],
					"routes": [
						{
							"handle": [
								{
									"handler": "reverse_proxy",
									"load_balancing": {
										"retries": 5,
										"retry_match": [
											{
												"method": [
													"POST"
												],
												"path": [
													"/foo*"
												]
											},
											{
												"path": [
													"/bar*"
												]
											}
										],
										"selection_policy": {
											"policy": "first"
										},
										"try_duration": 10000000000,
										"try_interval": 500000000
									},
									"upstreams": [
										{
											"dial": "127.0.0.1:65535"
										}
									]
								}
							]
						}
					]
				}
			}
		}
	}
}
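
Note how the adapter turns the human-readable durations into plain integers: caddy.Duration is an int64 nanosecond count, so `10s` adapts to `10000000000` and `500ms` to `500000000`. A quick standalone check of that arithmetic (a sketch using only the standard library, not part of the commit):

package main

import (
	"fmt"
	"time"
)

func main() {
	tryDuration, _ := time.ParseDuration("10s")
	tryInterval, _ := time.ParseDuration("500ms")
	// caddy.Duration marshals to JSON as a nanosecond count
	fmt.Println(tryDuration.Nanoseconds()) // 10000000000
	fmt.Println(tryInterval.Nanoseconds()) // 500000000
}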
@ -1003,57 +1003,12 @@ func (MatchNot) CaddyModule() caddy.ModuleInfo {
 
 // UnmarshalCaddyfile implements caddyfile.Unmarshaler.
 func (m *MatchNot) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
-	// first, unmarshal each matcher in the set from its tokens
-	type matcherPair struct {
-		raw     caddy.ModuleMap
-		decoded MatcherSet
-	}
 	for d.Next() {
-		var mp matcherPair
-		matcherMap := make(map[string]RequestMatcher)
-
-		// in case there are multiple instances of the same matcher, concatenate
-		// their tokens (we expect that UnmarshalCaddyfile should be able to
-		// handle more than one segment); otherwise, we'd overwrite other
-		// instances of the matcher in this set
-		tokensByMatcherName := make(map[string][]caddyfile.Token)
-		for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
-			matcherName := d.Val()
-			tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
-		}
-		for matcherName, tokens := range tokensByMatcherName {
-			mod, err := caddy.GetModule("http.matchers." + matcherName)
-			if err != nil {
-				return d.Errf("getting matcher module '%s': %v", matcherName, err)
-			}
-			unm, ok := mod.New().(caddyfile.Unmarshaler)
-			if !ok {
-				return d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
-			}
-			err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
-			if err != nil {
-				return err
-			}
-			rm, ok := unm.(RequestMatcher)
-			if !ok {
-				return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
-			}
-			matcherMap[matcherName] = rm
-			mp.decoded = append(mp.decoded, rm)
-		}
-
-		// we should now have a functional 'not' matcher, but we also
-		// need to be able to marshal as JSON, otherwise config
-		// adaptation will be missing the matchers!
-		mp.raw = make(caddy.ModuleMap)
-		for name, matcher := range matcherMap {
-			jsonBytes, err := json.Marshal(matcher)
-			if err != nil {
-				return fmt.Errorf("marshaling %T matcher: %v", matcher, err)
-			}
-			mp.raw[name] = jsonBytes
-		}
-		m.MatcherSetsRaw = append(m.MatcherSetsRaw, mp.raw)
+		matcherSet, err := ParseCaddyfileNestedMatcherSet(d)
+		if err != nil {
+			return err
+		}
+		m.MatcherSetsRaw = append(m.MatcherSetsRaw, matcherSet)
 	}
 	return nil
 }
@ -1352,6 +1307,56 @@ func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 	return nil
 }
 
+// ParseCaddyfileNestedMatcherSet parses the Caddyfile tokens for a nested
+// matcher set, and returns its raw module map value.
+func ParseCaddyfileNestedMatcherSet(d *caddyfile.Dispenser) (caddy.ModuleMap, error) {
+	matcherMap := make(map[string]RequestMatcher)
+
+	// in case there are multiple instances of the same matcher, concatenate
+	// their tokens (we expect that UnmarshalCaddyfile should be able to
+	// handle more than one segment); otherwise, we'd overwrite other
+	// instances of the matcher in this set
+	tokensByMatcherName := make(map[string][]caddyfile.Token)
+	for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
+		matcherName := d.Val()
+		tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
+	}
+
+	for matcherName, tokens := range tokensByMatcherName {
+		mod, err := caddy.GetModule("http.matchers." + matcherName)
+		if err != nil {
+			return nil, d.Errf("getting matcher module '%s': %v", matcherName, err)
+		}
+		unm, ok := mod.New().(caddyfile.Unmarshaler)
+		if !ok {
+			return nil, d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
+		}
+		err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
+		if err != nil {
+			return nil, err
+		}
+		rm, ok := unm.(RequestMatcher)
+		if !ok {
+			return nil, fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
+		}
+		matcherMap[matcherName] = rm
+	}
+
+	// we should now have a functional matcher, but we also
+	// need to be able to marshal as JSON, otherwise config
+	// adaptation will be missing the matchers!
+	matcherSet := make(caddy.ModuleMap)
+	for name, matcher := range matcherMap {
+		jsonBytes, err := json.Marshal(matcher)
+		if err != nil {
+			return nil, fmt.Errorf("marshaling %T matcher: %v", matcher, err)
+		}
+		matcherSet[name] = jsonBytes
+	}
+
+	return matcherSet, nil
+}
+
 var (
 	wordRE = regexp.MustCompile(`\w+`)
 )
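
Since the helper is now exported, other modules (like the reverse proxy's lb_retry_match handling, below) can delegate to it. A minimal test-style sketch of the inline form, assuming the package's caddyfile.NewTestDispenser test helper; this test is not part of the commit:

package caddyhttp_test

import (
	"testing"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func TestParseCaddyfileNestedMatcherSet(t *testing.T) {
	// position the dispenser on a directive token, the way a
	// caller such as lb_retry_match would before delegating
	d := caddyfile.NewTestDispenser("lb_retry_match path /foo*")
	d.Next()

	set, err := caddyhttp.ParseCaddyfileNestedMatcherSet(d)
	if err != nil {
		t.Fatal(err)
	}

	// the raw module map should have one entry per matcher name
	raw, ok := set["path"]
	if !ok {
		t.Fatal("expected a 'path' matcher in the set")
	}
	if string(raw) != `["/foo*"]` {
		t.Errorf("unexpected raw matcher JSON: %s", raw)
	}
}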
@ -59,8 +59,10 @@ func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error)
 //
 //     # load balancing
 //     lb_policy <name> [<options...>]
+//     lb_retries <retries>
 //     lb_try_duration <duration>
 //     lb_try_interval <interval>
+//     lb_retry_match <request-matcher>
 //
 //     # active health checking
 //     health_uri <uri>
@ -247,6 +249,19 @@ func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 			}
 			h.LoadBalancing.SelectionPolicyRaw = caddyconfig.JSONModuleObject(sel, "policy", name, nil)
 
+		case "lb_retries":
+			if !d.NextArg() {
+				return d.ArgErr()
+			}
+			tries, err := strconv.Atoi(d.Val())
+			if err != nil {
+				return d.Errf("bad lb_retries number '%s': %v", d.Val(), err)
+			}
+			if h.LoadBalancing == nil {
+				h.LoadBalancing = new(LoadBalancing)
+			}
+			h.LoadBalancing.Retries = tries
+
 		case "lb_try_duration":
 			if !d.NextArg() {
 				return d.ArgErr()
@ -273,6 +288,16 @@ func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 			}
 			h.LoadBalancing.TryInterval = caddy.Duration(dur)
 
+		case "lb_retry_match":
+			matcherSet, err := caddyhttp.ParseCaddyfileNestedMatcherSet(d)
+			if err != nil {
+				return d.Errf("failed to parse lb_retry_match: %v", err)
+			}
+			if h.LoadBalancing == nil {
+				h.LoadBalancing = new(LoadBalancing)
+			}
+			h.LoadBalancing.RetryMatchRaw = append(h.LoadBalancing.RetryMatchRaw, matcherSet)
+
 		case "health_uri":
 			if !d.NextArg() {
 				return d.ArgErr()
@ -430,12 +430,14 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyht
 	// and because we may retry some number of times, carry over the error
 	// from previous tries because of the nuances of load balancing & retries
 	var proxyErr error
+	var retries int
 	for {
 		var done bool
-		done, proxyErr = h.proxyLoopIteration(clonedReq, r, w, proxyErr, start, repl, reqHeader, reqHost, next)
+		done, proxyErr = h.proxyLoopIteration(clonedReq, r, w, proxyErr, start, retries, repl, reqHeader, reqHost, next)
 		if done {
 			break
 		}
+		retries++
 	}
 
 	if proxyErr != nil {
@ -449,7 +451,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyht
 // that has to be passed in, we brought this into its own method so that we could run defer more easily.
 // It returns true when the loop is done and should break; false otherwise. The error value returned should
 // be assigned to the proxyErr value for the next iteration of the loop (or the error handled after break).
-func (h *Handler) proxyLoopIteration(r *http.Request, origReq *http.Request, w http.ResponseWriter, proxyErr error, start time.Time,
+func (h *Handler) proxyLoopIteration(r *http.Request, origReq *http.Request, w http.ResponseWriter, proxyErr error, start time.Time, retries int,
 	repl *caddy.Replacer, reqHeader http.Header, reqHost string, next caddyhttp.Handler) (bool, error) {
 	// get the updated list of upstreams
 	upstreams := h.Upstreams
@ -479,7 +481,7 @@ func (h *Handler) proxyLoopIteration(r *http.Request, origReq *http.Request, w h
 		if proxyErr == nil {
 			proxyErr = caddyhttp.Error(http.StatusServiceUnavailable, fmt.Errorf("no upstreams available"))
 		}
-		if !h.LoadBalancing.tryAgain(h.ctx, start, proxyErr, r) {
+		if !h.LoadBalancing.tryAgain(h.ctx, start, retries, proxyErr, r) {
 			return true, proxyErr
 		}
 		return false, proxyErr
@ -542,7 +544,7 @@ func (h *Handler) proxyLoopIteration(r *http.Request, origReq *http.Request, w h
 		h.countFailure(upstream)
 
 		// if we've tried long enough, break
-		if !h.LoadBalancing.tryAgain(h.ctx, start, proxyErr, r) {
+		if !h.LoadBalancing.tryAgain(h.ctx, start, retries, proxyErr, r) {
 			return true, proxyErr
 		}
@ -944,16 +946,26 @@ func (h Handler) finalizeResponse(
 	return nil
 }
 
-// tryAgain takes the time that the handler was initially invoked
-// as well as any error currently obtained, and the request being
-// tried, and returns true if another attempt should be made at
-// proxying the request. If true is returned, it has already blocked
-// long enough before the next retry (i.e. no more sleeping is
-// needed). If false is returned, the handler should stop trying to
-// proxy the request.
-func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, proxyErr error, req *http.Request) bool {
+// tryAgain takes the time that the handler was initially invoked,
+// the amount of retries already performed, as well as any error
+// currently obtained, and the request being tried, and returns
+// true if another attempt should be made at proxying the request.
+// If true is returned, it has already blocked long enough before
+// the next retry (i.e. no more sleeping is needed). If false is
+// returned, the handler should stop trying to proxy the request.
+func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, retries int, proxyErr error, req *http.Request) bool {
+	// no retries are configured
+	if lb.TryDuration == 0 && lb.Retries == 0 {
+		return false
+	}
+
 	// if we've tried long enough, break
-	if time.Since(start) >= time.Duration(lb.TryDuration) {
+	if lb.TryDuration > 0 && time.Since(start) >= time.Duration(lb.TryDuration) {
+		return false
+	}
+
+	// if we've reached the retry limit, break
+	if lb.Retries > 0 && retries >= lb.Retries {
 		return false
 	}
 
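
The new guards make the retry budget explicit: retrying is off unless at least one limit is configured, and whichever configured limit is hit first ends the loop. A distilled, runnable sketch of just that decision (a hypothetical helper, not the handler's actual code):

package main

import (
	"fmt"
	"time"
)

// withinRetryBudget mirrors the guard clauses added to tryAgain:
// no configured budget means no retries, and either configured
// limit (duration or count) stops the retry loop once reached.
func withinRetryBudget(start time.Time, tryDuration time.Duration, maxRetries, retries int) bool {
	if tryDuration == 0 && maxRetries == 0 {
		return false // retries are not configured at all
	}
	if tryDuration > 0 && time.Since(start) >= tryDuration {
		return false // time budget exhausted
	}
	if maxRetries > 0 && retries >= maxRetries {
		return false // retry count exhausted
	}
	return true
}

func main() {
	start := time.Now()
	fmt.Println(withinRetryBudget(start, 0, 0, 0))             // false: disabled
	fmt.Println(withinRetryBudget(start, 0, 5, 4))             // true: count-only, under limit
	fmt.Println(withinRetryBudget(start, 0, 5, 5))             // false: count reached
	fmt.Println(withinRetryBudget(start, time.Second, 0, 999)) // true: duration-only ignores count
}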
@ -976,6 +988,11 @@ func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, proxyErr er
 		}
 	}
 
+	// fast path; if the interval is zero, we don't need to wait
+	if lb.TryInterval == 0 {
+		return true
+	}
+
 	// otherwise, wait and try the next available host
 	timer := time.NewTimer(time.Duration(lb.TryInterval))
 	select {
|
||||||
// The default policy is random selection.
|
// The default policy is random selection.
|
||||||
SelectionPolicyRaw json.RawMessage `json:"selection_policy,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
|
SelectionPolicyRaw json.RawMessage `json:"selection_policy,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
|
||||||
|
|
||||||
|
// How many times to retry selecting available backends for each
|
||||||
|
// request if the next available host is down. If try_duration is
|
||||||
|
// also configured, then retries may stop early if the duration
|
||||||
|
// is reached. By default, retries are disabled (zero).
|
||||||
|
Retries int `json:"retries,omitempty"`
|
||||||
|
|
||||||
// How long to try selecting available backends for each request
|
// How long to try selecting available backends for each request
|
||||||
// if the next available host is down. By default, this retry is
|
// if the next available host is down. Clients will wait for up
|
||||||
// disabled. Clients will wait for up to this long while the load
|
// to this long while the load balancer tries to find an available
|
||||||
// balancer tries to find an available upstream host.
|
// upstream host. If retries is also configured, tries may stop
|
||||||
|
// early if the maximum retries is reached. By default, retries
|
||||||
|
// are disabled (zero duration).
|
||||||
TryDuration caddy.Duration `json:"try_duration,omitempty"`
|
TryDuration caddy.Duration `json:"try_duration,omitempty"`
|
||||||
|
|
||||||
// How long to wait between selecting the next host from the pool. Default
|
// How long to wait between selecting the next host from the pool.
|
||||||
// is 250ms. Only relevant when a request to an upstream host fails. Be
|
// Default is 250ms if try_duration is enabled, otherwise zero. Only
|
||||||
// aware that setting this to 0 with a non-zero try_duration can cause the
|
// relevant when a request to an upstream host fails. Be aware that
|
||||||
// CPU to spin if all backends are down and latency is very low.
|
// setting this to 0 with a non-zero try_duration can cause the CPU
|
||||||
|
// to spin if all backends are down and latency is very low.
|
||||||
TryInterval caddy.Duration `json:"try_interval,omitempty"`
|
TryInterval caddy.Duration `json:"try_interval,omitempty"`
|
||||||
|
|
||||||
// A list of matcher sets that restricts with which requests retries are
|
// A list of matcher sets that restricts with which requests retries are
|
||||||
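
For programmatic (non-Caddyfile) configuration, the new field sits next to the existing knobs. A hedged Go sketch of wiring up the same limits as the adapt test at the top of this commit, minus the retry matchers (a sketch, not from the commit):

package main

import (
	"time"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
)

func main() {
	// equivalent of: lb_retries 5, lb_try_duration 10s, lb_try_interval 500ms
	_ = reverseproxy.LoadBalancing{
		Retries:     5,
		TryDuration: caddy.Duration(10 * time.Second),
		TryInterval: caddy.Duration(500 * time.Millisecond),
	}
}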