* reverseproxy: Don't buffer chunked requests (fix #5366)
Mostly reverts 845bc4d50b (#5289)
Adds warning for unsafe config.
Deprecates unsafe properties in favor of simpler, safer designed ones.
* Update modules/caddyhttp/reverseproxy/caddyfile.go
Co-authored-by: Y.Horie <u5.horie@gmail.com>
* Update modules/caddyhttp/reverseproxy/reverseproxy.go
Co-authored-by: Y.Horie <u5.horie@gmail.com>
* Update modules/caddyhttp/reverseproxy/reverseproxy.go
Co-authored-by: Y.Horie <u5.horie@gmail.com>
* Remove unused code
---------
Co-authored-by: Y.Horie <u5.horie@gmail.com>
parent 90798f3eea
commit 4b119a475f
2 changed files with 86 additions and 47 deletions
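
Before the diff, a quick sketch of the configuration change this commit encourages. The directive names come from the diff below; the upstream address and the 4MiB cap are arbitrary example values, so treat this as an illustrative Caddyfile fragment rather than recommended settings:

# Deprecated spelling: still accepted, but now logs a warning
reverse_proxy localhost:9000 {
    buffer_requests
    buffer_responses
    max_buffer_size 4MiB
}

# Preferred replacement: buffering is opt-in and always capped
reverse_proxy localhost:9000 {
    request_buffers 4MiB
    response_buffers 4MiB
}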
modules/caddyhttp/reverseproxy/caddyfile.go

@@ -521,19 +521,8 @@ func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
                 h.FlushInterval = caddy.Duration(dur)
             }
 
-            case "buffer_requests":
-                if d.NextArg() {
-                    return d.ArgErr()
-                }
-                h.BufferRequests = true
-
-            case "buffer_responses":
-                if d.NextArg() {
-                    return d.ArgErr()
-                }
-                h.BufferResponses = true
-
-            case "max_buffer_size":
+            case "request_buffers", "response_buffers":
+                subdir := d.Val()
                 if !d.NextArg() {
                     return d.ArgErr()
                 }
@@ -544,7 +533,39 @@ func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
                 if d.NextArg() {
                     return d.ArgErr()
                 }
-                h.MaxBufferSize = int64(size)
+                if subdir == "request_buffers" {
+                    h.RequestBuffers = int64(size)
+                } else if subdir == "response_buffers" {
+                    h.ResponseBuffers = int64(size)
+                }
+
+            // TODO: These three properties are deprecated; remove them sometime after v2.6.4
+            case "buffer_requests": // TODO: deprecated
+                if d.NextArg() {
+                    return d.ArgErr()
+                }
+                caddy.Log().Named("config.adapter.caddyfile").Warn("DEPRECATED: buffer_requests: use request_buffers instead (with a maximum buffer size)")
+                h.DeprecatedBufferRequests = true
+            case "buffer_responses": // TODO: deprecated
+                if d.NextArg() {
+                    return d.ArgErr()
+                }
+                caddy.Log().Named("config.adapter.caddyfile").Warn("DEPRECATED: buffer_responses: use response_buffers instead (with a maximum buffer size)")
+                h.DeprecatedBufferResponses = true
+            case "max_buffer_size": // TODO: deprecated
+                if !d.NextArg() {
+                    return d.ArgErr()
+                }
+                size, err := humanize.ParseBytes(d.Val())
+                if err != nil {
+                    return d.Errf("invalid byte size '%s': %v", d.Val(), err)
+                }
+                if d.NextArg() {
+                    return d.ArgErr()
+                }
+                caddy.Log().Named("config.adapter.caddyfile").Warn("DEPRECATED: max_buffer_size: use request_buffers and/or response_buffers instead (with maximum buffer sizes)")
+                h.DeprecatedMaxBufferSize = int64(size)
 
             case "trusted_proxies":
                 for d.NextArg() {
modules/caddyhttp/reverseproxy/reverseproxy.go

@@ -136,21 +136,27 @@ type Handler struct {
     // are also set implicitly.
     Headers *headers.Handler `json:"headers,omitempty"`
 
-    // If true, the entire request body will be read and buffered
-    // in memory before being proxied to the backend. This should
-    // be avoided if at all possible for performance reasons, but
-    // could be useful if the backend is intolerant of read latency.
-    BufferRequests bool `json:"buffer_requests,omitempty"`
-
-    // If true, the entire response body will be read and buffered
-    // in memory before being proxied to the client. This should
-    // be avoided if at all possible for performance reasons, but
+    // DEPRECATED: Do not use; will be removed. See request_buffers instead.
+    DeprecatedBufferRequests bool `json:"buffer_requests,omitempty"`
+
+    // DEPRECATED: Do not use; will be removed. See response_buffers instead.
+    DeprecatedBufferResponses bool `json:"buffer_responses,omitempty"`
+
+    // DEPRECATED: Do not use; will be removed. See request_buffers and response_buffers instead.
+    DeprecatedMaxBufferSize int64 `json:"max_buffer_size,omitempty"`
+
+    // If nonzero, the entire request body up to this size will be read
+    // and buffered in memory before being proxied to the backend. This
+    // should be avoided if at all possible for performance reasons, but
+    // could be useful if the backend is intolerant of read latency or
+    // chunked encodings.
+    RequestBuffers int64 `json:"request_buffers,omitempty"`
+
+    // If nonzero, the entire response body up to this size will be read
+    // and buffered in memory before being proxied to the client. This
+    // should be avoided if at all possible for performance reasons, but
     // could be useful if the backend has tighter memory constraints.
-    BufferResponses bool `json:"buffer_responses,omitempty"`
+    ResponseBuffers int64 `json:"response_buffers,omitempty"`
 
-    // If body buffering is enabled, the maximum size of the buffers
-    // used for the requests and responses (in bytes).
-    MaxBufferSize int64 `json:"max_buffer_size,omitempty"`
-
     // If configured, rewrites the copy of the upstream request.
     // Allows changing the request method and URI (path and query).
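
The renamed struct fields map directly onto the handler's JSON config through their json tags. A hedged sketch of a reverse_proxy handler object using the new fields (the dial address and the 1 MiB sizes are arbitrary example values; the sizes are plain byte counts):

{
    "handler": "reverse_proxy",
    "upstreams": [{"dial": "localhost:9000"}],
    "request_buffers": 1048576,
    "response_buffers": 1048576
}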
@@ -221,11 +227,28 @@ func (h *Handler) Provision(ctx caddy.Context) error {
     h.connections = make(map[io.ReadWriteCloser]openConnection)
     h.connectionsMu = new(sync.Mutex)
 
+    // TODO: remove deprecated fields sometime after v2.6.4
+    if h.DeprecatedBufferRequests {
+        h.logger.Warn("DEPRECATED: buffer_requests: this property will be removed soon; use request_buffers instead (and set a maximum buffer size)")
+    }
+    if h.DeprecatedBufferResponses {
+        h.logger.Warn("DEPRECATED: buffer_responses: this property will be removed soon; use response_buffers instead (and set a maximum buffer size)")
+    }
+    if h.DeprecatedMaxBufferSize != 0 {
+        h.logger.Warn("DEPRECATED: max_buffer_size: this property will be removed soon; use request_buffers and/or response_buffers instead (and set maximum buffer sizes)")
+    }
+
+    // warn about unsafe buffering config
+    if h.RequestBuffers == -1 || h.ResponseBuffers == -1 {
+        h.logger.Warn("UNLIMITED BUFFERING: buffering is enabled without any cap on buffer size, which can result in OOM crashes")
+    }
+
     // verify SRV compatibility - TODO: LookupSRV deprecated; will be removed
     for i, v := range h.Upstreams {
         if v.LookupSRV == "" {
             continue
         }
+        h.logger.Warn("DEPRECATED: lookup_srv: will be removed in a near-future version of Caddy; use the http.reverse_proxy.upstreams.srv module instead")
         if h.HealthChecks != nil && h.HealthChecks.Active != nil {
             return fmt.Errorf(`upstream: lookup_srv is incompatible with active health checks: %d: {"dial": %q, "lookup_srv": %q}`, i, v.Dial, v.LookupSRV)
         }
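
A value of -1 is treated as unlimited buffering (see the updated bufferedBody comment later in the diff), and Provision now calls that out explicitly. A hedged JSON sketch of a handler config that would trigger the new UNLIMITED BUFFERING warning (the dial address is an example value):

{
    "handler": "reverse_proxy",
    "upstreams": [{"dial": "localhost:9000"}],
    "request_buffers": -1
}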
@@ -622,9 +645,8 @@ func (h Handler) prepareRequest(req *http.Request, repl *caddy.Replacer) (*http.
     // attacks, so it is strongly recommended to only use this
     // feature if absolutely required, if read timeouts are
     // set, and if body size is limited
-    if (h.BufferRequests || isChunkedRequest(req)) && req.Body != nil {
-        req.Body, req.ContentLength = h.bufferedBody(req.Body)
-        req.Header.Set("Content-Length", strconv.FormatInt(req.ContentLength, 10))
+    if h.RequestBuffers != 0 && req.Body != nil {
+        req.Body, _ = h.bufferedBody(req.Body, h.RequestBuffers)
     }
 
     if req.ContentLength == 0 {
@@ -852,8 +874,8 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, origRe
     }
 
     // if enabled, buffer the response body
-    if h.BufferResponses {
-        res.Body, _ = h.bufferedBody(res.Body)
+    if h.ResponseBuffers != 0 {
+        res.Body, _ = h.bufferedBody(res.Body, h.ResponseBuffers)
     }
 
     // see if any response handler is configured for this response from the backend
@@ -1122,15 +1144,20 @@ func (h Handler) provisionUpstream(upstream *Upstream) {
     }
 }
 
-// bufferedBody reads originalBody into a buffer, then returns a reader for the buffer.
-// Always close the return value when done with it, just like if it was the original body!
-func (h Handler) bufferedBody(originalBody io.ReadCloser) (io.ReadCloser, int64) {
+// bufferedBody reads originalBody into a buffer with maximum size of limit (-1 for unlimited),
+// then returns a reader for the buffer along with how many bytes were buffered. Always close
+// the return value when done with it, just like if it was the original body! If limit is 0
+// (which it shouldn't be), this function returns its input; i.e. is a no-op, for safety.
+func (h Handler) bufferedBody(originalBody io.ReadCloser, limit int64) (io.ReadCloser, int64) {
+    if limit == 0 {
+        return originalBody, 0
+    }
     var written int64
     buf := bufPool.Get().(*bytes.Buffer)
     buf.Reset()
-    if h.MaxBufferSize > 0 {
-        n, err := io.CopyN(buf, originalBody, h.MaxBufferSize)
-        if err != nil || n == h.MaxBufferSize {
+    if limit > 0 {
+        n, err := io.CopyN(buf, originalBody, limit)
+        if err != nil || n == limit {
             return bodyReadCloser{
                 Reader: io.MultiReader(buf, originalBody),
                 buf:    buf,
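
The reworked bufferedBody carries the core mechanism of the patch: copy at most limit bytes into a pooled buffer, and if the body is longer than the cap, chain the buffered prefix and the unread remainder with io.MultiReader so nothing is lost. Below is a hedged, standalone Go sketch of that technique under simplified assumptions; the function name bufferUpTo is invented for the example, and it omits Caddy's sync.Pool reuse and bodyReadCloser bookkeeping shown in the diff.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// bufferUpTo copies at most limit bytes of body into an in-memory buffer and
// returns a reader that yields the buffered bytes followed by whatever was not
// buffered. limit == 0 disables buffering entirely; limit < 0 buffers the whole
// body (the "unlimited" case the patch now warns about).
func bufferUpTo(body io.Reader, limit int64) (io.Reader, int64) {
	if limit == 0 {
		return body, 0 // no-op, mirroring the new guard in bufferedBody
	}
	buf := new(bytes.Buffer)
	if limit > 0 {
		// CopyN stops at the cap or at EOF, whichever comes first.
		n, _ := io.CopyN(buf, body, limit)
		// Serve the buffered prefix first, then stream any remainder untouched.
		return io.MultiReader(buf, body), n
	}
	// Unlimited: read everything into memory.
	n, _ := io.Copy(buf, body)
	return buf, n
}

func main() {
	r, n := bufferUpTo(strings.NewReader("hello, capped world"), 5)
	rest, _ := io.ReadAll(r)
	fmt.Printf("buffered %d bytes up front, full body still reads as %q\n", n, rest)
}

Run as-is, the sketch buffers only the first 5 bytes yet the returned reader still yields the complete body; in the handler, the limit comes from request_buffers or response_buffers and the reader is the proxied body.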
@@ -1147,15 +1174,6 @@ func (h Handler) bufferedBody(originalBody io.ReadCloser) (io.ReadCloser, int64)
     }, written
 }
 
-func isChunkedRequest(req *http.Request) bool {
-    for _, transferEncoding := range req.TransferEncoding {
-        if transferEncoding == "chunked" {
-            return true
-        }
-    }
-    return false
-}
-
 // cloneRequest makes a semi-deep clone of origReq.
 //
 // Most of this code is borrowed from the Go stdlib reverse proxy,