diff --git a/.gitignore b/.gitignore index d0ebd4c..345e216 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ *.swp -/vendor/ \ No newline at end of file +/vendor/ +/.idea/ diff --git a/accepts.go b/accepts.go index ccb8d4e..d0c0282 100644 --- a/accepts.go +++ b/accepts.go @@ -53,7 +53,7 @@ func parseEncodings(vv []string) codings { return c } -// parseCoding parses a single conding (content-coding with an optional qvalue), +// parseCoding parses a single coding (content-coding with an optional qvalue), // as might appear in an Accept-Encoding header. It attempts to forgive minor // formatting errors. func parseCoding(s string) (coding string, qvalue float64) { diff --git a/adapter.go b/adapter.go index 19c10c7..c3d760a 100644 --- a/adapter.go +++ b/adapter.go @@ -1,16 +1,8 @@ -package httpcompression // import "github.com/CAFxX/httpcompression" +package httpcompression import ( - "compress/gzip" - "fmt" "net/http" "strings" - "sync" - - "github.com/CAFxX/httpcompression/contrib/andybalholm/brotli" - cgzip "github.com/CAFxX/httpcompression/contrib/compress/gzip" - "github.com/CAFxX/httpcompression/contrib/compress/zlib" - "github.com/CAFxX/httpcompression/contrib/klauspost/zstd" ) const ( @@ -23,21 +15,7 @@ const ( _range = "Range" ) -type codings map[string]float64 - -const ( - // DefaultMinSize is the default minimum response body size for which we enable compression. - // - // 200 is a somewhat arbitrary number; in experiments compressing short text/markup-like sequences - // with different compressors we saw that sequences shorter that ~180 the output generated by the - // compressor would sometime be larger than the input. - // This default may change between versions. - // In general there can be no one-size-fits-all value: you will want to measure if a different - // minimum size improves end-to-end performance for your workloads. - DefaultMinSize = 200 -) - -// Adapter returns a HTTP handler wrapping function (a.k.a. 
middleware) +// Adapter returns an HTTP handler wrapping function (a.k.a. middleware) // which can be used to wrap an HTTP handler to transparently compress the response // body if the client supports it (via the Accept-Encoding header). // It is possible to pass one or more options to modify the middleware configuration. @@ -45,18 +23,15 @@ const ( // is a no-op. // An error will be returned if invalid options are given. func Adapter(opts ...Option) (func(http.Handler) http.Handler, error) { - c := config{ - prefer: PreferServer, - compressor: comps{}, - } - for _, o := range opts { - err := o(&c) - if err != nil { - return nil, err - } + f, err := NewResponseWriterFactory(opts...) + if err != nil { + return nil, err } + return adapter(f) +} - if len(c.compressor) == 0 { +func adapter(f *ResponseWriterFactoryFactory) (func(http.Handler) http.Handler, error) { + if f.AmountOfCompressors() == 0 { // No compressors have been configured, so there is no useful work // that this adapter can do. return func(h http.Handler) http.Handler { @@ -64,61 +39,21 @@ func Adapter(opts ...Option) (func(http.Handler) http.Handler, error) { }, nil } - bufPool := &sync.Pool{} - writerPool := &sync.Pool{} - return func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - addVaryHeader(w.Header(), acceptEncoding) - - accept := parseEncodings(r.Header.Values(acceptEncoding)) - common := acceptedCompression(accept, c.compressor) - if len(common) == 0 { - h.ServeHTTP(w, r) + return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + ww, finalizer, err := f.Create(rw, req) + if err != nil { + f.config.handleError(rw, req, err) return } - // We do not handle range requests when compression is used, as the - // range specified applies to the compressed data, not to the uncompressed one. 
- // So we would need to (1) ensure that compressors are deterministic and (2) - // generate the whole uncompressed response anyway, compress it, and then discard - // the bits outside of the range. - // Let's keep it simple, and simply ignore completely the range header. - // We also need to remove the Accept: Range header from any response that is - // compressed; this is done in the ResponseWriter. - // See https://github.com/nytimes/gziphandler/issues/83. - r.Header.Del(_range) - - gw, _ := writerPool.Get().(*compressWriter) - if gw == nil { - gw = &compressWriter{} - } - *gw = compressWriter{ - ResponseWriter: w, - config: c, - accept: accept, - common: common, - pool: bufPool, - } defer func() { - // Important: gw.Close() must be called *always*, as this will - // in turn Close() the compressor. This is important because - // it is guaranteed by the CompressorProvider interface, and - // because some compressors may be implemented via cgo, and they - // may rely on Close() being called to release memory resources. - // TODO: expose the error - _ = gw.Close() // expose the error - *gw = compressWriter{} - writerPool.Put(gw) + if err := finalizer(); err != nil { + f.config.handleError(rw, req, err) + } }() - if _, ok := w.(http.CloseNotifier); ok { - w = compressWriterWithCloseNotify{gw} - } else { - w = gw - } - - h.ServeHTTP(w, r) + h.ServeHTTP(ww, req) }) }, nil } @@ -133,122 +68,15 @@ func addVaryHeader(h http.Header, value string) { } // DefaultAdapter is like Adapter, but it includes sane defaults for general usage. -// Currently the defaults enable gzip and brotli compression, and set a minimum body size +// Currently, the defaults enable gzip and brotli compression, and set a minimum body size // of 200 bytes. // The provided opts override the defaults. // The defaults are not guaranteed to remain constant over time: if you want to avoid this // use Adapter directly. 
func DefaultAdapter(opts ...Option) (func(http.Handler) http.Handler, error) { - defaults := []Option{ - DeflateCompressionLevel(zlib.DefaultCompression), - GzipCompressionLevel(gzip.DefaultCompression), - BrotliCompressionLevel(brotli.DefaultCompression), - defaultZstandardCompressor(), - MinSize(DefaultMinSize), - } - opts = append(defaults, opts...) - return Adapter(opts...) -} - -// Used for functional configuration. -type config struct { - minSize int // Specifies the minimum response size to gzip. If the response length is bigger than this value, it is compressed. - contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty. - blacklist bool - prefer PreferType - compressor comps -} - -type comps map[string]comp - -type comp struct { - comp CompressorProvider - priority int -} - -// Option can be passed to Handler to control its configuration. -type Option func(c *config) error - -// MinSize is an option that controls the minimum size of payloads that -// should be compressed. The default is DefaultMinSize. -func MinSize(size int) Option { - return func(c *config) error { - if size < 0 { - return fmt.Errorf("minimum size can not be negative: %d", size) - } - c.minSize = size - return nil - } -} - -// DeflateCompressionLevel is an option that controls the Deflate compression -// level to be used when compressing payloads. -// The default is flate.DefaultCompression. -func DeflateCompressionLevel(level int) Option { - c, err := zlib.New(zlib.Options{Level: level}) - if err != nil { - return errorOption(err) - } - return DeflateCompressor(c) -} - -// GzipCompressionLevel is an option that controls the Gzip compression -// level to be used when compressing payloads. -// The default is gzip.DefaultCompression. 
-func GzipCompressionLevel(level int) Option { - c, err := NewDefaultGzipCompressor(level) - if err != nil { - return errorOption(err) - } - return GzipCompressor(c) -} - -// BrotliCompressionLevel is an option that controls the Brotli compression -// level to be used when compressing payloads. -// The default is 3 (the same default used in the reference brotli C -// implementation). -func BrotliCompressionLevel(level int) Option { - c, err := brotli.New(brotli.Options{Quality: level}) + f, err := NewDefaultResponseWriterFactory(opts...) if err != nil { - return errorOption(err) - } - return BrotliCompressor(c) -} - -// DeflateCompressor is an option to specify a custom compressor factory for Deflate. -func DeflateCompressor(g CompressorProvider) Option { - return Compressor(zlib.Encoding, -300, g) -} - -// GzipCompressor is an option to specify a custom compressor factory for Gzip. -func GzipCompressor(g CompressorProvider) Option { - return Compressor(cgzip.Encoding, -200, g) -} - -// BrotliCompressor is an option to specify a custom compressor factory for Brotli. -func BrotliCompressor(b CompressorProvider) Option { - return Compressor(brotli.Encoding, -100, b) -} - -// ZstandardCompressor is an option to specify a custom compressor factory for Zstandard. 
-func ZstandardCompressor(b CompressorProvider) Option { - return Compressor(zstd.Encoding, -50, b) -} - -func NewDefaultGzipCompressor(level int) (CompressorProvider, error) { - return cgzip.New(cgzip.Options{Level: level}) -} - -func defaultZstandardCompressor() Option { - zstdComp, err := zstd.New() - if err != nil { - return errorOption(fmt.Errorf("initializing zstd compressor: %w", err)) - } - return ZstandardCompressor(zstdComp) -} - -func errorOption(err error) Option { - return func(_ *config) error { - return err + return nil, err } + return adapter(f) } diff --git a/adapter_test.go b/adapter_test.go index 1c67710..bd58349 100644 --- a/adapter_test.go +++ b/adapter_test.go @@ -4,6 +4,7 @@ import ( "bytes" "compress/gzip" "context" + "errors" "fmt" "io" "net" @@ -15,16 +16,10 @@ import ( "testing" "github.com/CAFxX/httpcompression/contrib/andybalholm/brotli" - "github.com/CAFxX/httpcompression/contrib/google/cbrotli" - kpgzip "github.com/CAFxX/httpcompression/contrib/klauspost/gzip" - "github.com/CAFxX/httpcompression/contrib/klauspost/zstd" - "github.com/CAFxX/httpcompression/contrib/valyala/gozstd" "github.com/stretchr/testify/assert" ibrotli "github.com/andybalholm/brotli" - gcbrotli "github.com/google/brotli/go/cbrotli" kpzstd "github.com/klauspost/compress/zstd" - vzstd "github.com/valyala/gozstd" ) const ( @@ -398,7 +393,7 @@ func TestNewGzipLevelHandler(t *testing.T) { }) for lvl := gzip.BestSpeed; lvl <= gzip.BestCompression; lvl++ { - wrapper, err := DefaultAdapter(GzipCompressionLevel(lvl)) + adapter, err := DefaultAdapter(GzipCompressionLevel(lvl)) if !assert.Nil(t, err, "NewGzipLevleHandler returned error for level:", lvl) { continue } @@ -406,7 +401,7 @@ func TestNewGzipLevelHandler(t *testing.T) { req, _ := http.NewRequest("GET", "/whatever", nil) req.Header.Set("Accept-Encoding", "gzip") resp := httptest.NewRecorder() - wrapper(handler).ServeHTTP(resp, req) + adapter(handler).ServeHTTP(resp, req) res := resp.Result() assert.Equal(t, 200, 
res.StatusCode) @@ -571,8 +566,8 @@ func TestGzipHandlerMinSize(t *testing.T) { responseLength := 0 b := []byte{'x'} - wrapper, _ := DefaultAdapter(MinSize(128)) - handler := wrapper(http.HandlerFunc( + adapter, _ := DefaultAdapter(MinSize(128)) + handler := adapter(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { // Write responses one byte at a time to ensure that the flush // mechanism, if used, is working properly. @@ -597,6 +592,7 @@ func TestGzipHandlerMinSize(t *testing.T) { // Long response is compressed responseLength = 128 + b = []byte{'y'} w = httptest.NewRecorder() handler.ServeHTTP(w, r) if w.Result().Header.Get(contentEncoding) != "gzip" { @@ -604,6 +600,71 @@ func TestGzipHandlerMinSize(t *testing.T) { } } +func TestGzipHandlerMinSizeRequestFunc(t *testing.T) { + t.Parallel() + + responseLength := 0 + b := []byte{'x'} + + adapter, _ := DefaultAdapter(MinSizeRequestFunc(func(req *http.Request) (int, error) { + return 128, nil + })) + handler := adapter(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + // Write responses one byte at a time to ensure that the flush + // mechanism, if used, is working properly. 
+ for i := 0; i < responseLength; i++ { + n, err := w.Write(b) + assert.Equal(t, 1, n) + assert.Nil(t, err) + } + }, + )) + + r, _ := http.NewRequest("GET", "/whatever", &bytes.Buffer{}) + r.Header.Add("Accept-Encoding", "gzip") + + // Short response is not compressed + responseLength = 127 + w := httptest.NewRecorder() + handler.ServeHTTP(w, r) + if w.Result().Header.Get(contentEncoding) == "gzip" { + t.Error("Expected uncompressed response, got compressed") + } + + // Long response is compressed + responseLength = 128 + w = httptest.NewRecorder() + handler.ServeHTTP(w, r) + if w.Result().Header.Get(contentEncoding) != "gzip" { + t.Error("Expected compressed response, got uncompressed") + } +} + +func TestFailGzipHandlerMinSizeRequestFunc(t *testing.T) { + t.Parallel() + + expectedError := errors.New("expected") + var actualError error + adapter, _ := DefaultAdapter( + MinSizeRequestFunc(func(req *http.Request) (int, error) { + return 0, expectedError + }), + ErrorHandler(func(_ http.ResponseWriter, _ *http.Request, err error) { + actualError = err + }), + ) + + handler := adapter(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})) + r, _ := http.NewRequest("GET", "/whatever", &bytes.Buffer{}) + r.Header.Add("Accept-Encoding", "gzip") + w := httptest.NewRecorder() + + handler.ServeHTTP(w, r) + + assert.ErrorIs(t, actualError, expectedError) +} + type panicOnSecondWriteHeaderWriter struct { http.ResponseWriter headerWritten bool @@ -628,7 +689,7 @@ func TestGzipHandlerDoubleWriteHeader(t *testing.T) { // Ensure that after a Write the header isn't triggered again on close w.Write(nil) })) - wrapper := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + adapter := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w = &panicOnSecondWriteHeaderWriter{ ResponseWriter: w, } @@ -647,7 +708,7 @@ func TestGzipHandlerDoubleWriteHeader(t *testing.T) { Header: make(http.Header), } req.Header.Set("Accept-Encoding", "gzip") - 
wrapper.ServeHTTP(rec, req) + adapter.ServeHTTP(rec, req) body, err := io.ReadAll(rec.Body) if err != nil { t.Fatalf("Unexpected error reading response body: %v", err) @@ -666,7 +727,7 @@ func TestGzipHandlerDoubleVary(t *testing.T) { handler := mw(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(testBody)) })) - wrapper := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + adapter := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Vary", "accept-encoding") w.Header().Add("Vary", "X-Something") handler.ServeHTTP(w, r) @@ -675,7 +736,7 @@ func TestGzipHandlerDoubleVary(t *testing.T) { rec := httptest.NewRecorder() req := httptest.NewRequest(http.MethodGet, "/", nil) req.Header.Set("Accept-Encoding", "gzip") - wrapper.ServeHTTP(rec, req) + adapter.ServeHTTP(rec, req) body, err := io.ReadAll(rec.Body) if err != nil { t.Fatalf("Unexpected error reading response body: %v", err) @@ -926,7 +987,7 @@ func TestContentTypes(t *testing.T) { io.WriteString(w, testBody) }) - wrapper, err := DefaultAdapter(ContentTypes(tt.acceptedContentTypes, false)) + adapter, err := DefaultAdapter(ContentTypes(tt.acceptedContentTypes, false)) if !assert.Nil(t, err, "NewGzipHandlerWithOpts returned error", tt.name) { continue } @@ -934,7 +995,7 @@ func TestContentTypes(t *testing.T) { req, _ := http.NewRequest("GET", "/whatever", nil) req.Header.Set("Accept-Encoding", "gzip") resp := httptest.NewRecorder() - wrapper(handler).ServeHTTP(resp, req) + adapter(handler).ServeHTTP(resp, req) res := resp.Result() assert.Equal(t, 200, res.StatusCode) @@ -1114,14 +1175,14 @@ func TestAcceptRanges(t *testing.T) { w.Write([]byte(c.body)) }) - wrapper, err := DefaultAdapter(ContentTypes([]string{"text/plain"}, false)) + adapter, err := DefaultAdapter(ContentTypes([]string{"text/plain"}, false)) assert.Nil(t, err, "DefaultAdapter returned error") req, _ := http.NewRequest("GET", "/", nil) req.Header.Set("Accept-Encoding", 
c.acceptEncoding) req.Header.Set("Range", c._range) resp := httptest.NewRecorder() - wrapper(handler).ServeHTTP(resp, req) + adapter(handler).ServeHTTP(resp, req) res := resp.Result() assert.Equal(t, 200, res.StatusCode) @@ -1143,13 +1204,13 @@ func TestShortFirstWrite(t *testing.T) { assert.Nil(t, err) }) - wrapper, err := DefaultAdapter() + adapter, err := DefaultAdapter() assert.Nil(t, err, "DefaultAdapter returned error") req, _ := http.NewRequest("GET", "/", nil) req.Header.Set("Accept-Encoding", "gzip") resp := httptest.NewRecorder() - wrapper(handler).ServeHTTP(resp, req) + adapter(handler).ServeHTTP(resp, req) res := resp.Result() assert.Equal(t, 200, res.StatusCode) @@ -1160,6 +1221,26 @@ func TestShortFirstWrite(t *testing.T) { assert.Equal(t, testBody, string(buf)) } +func TestErrorHandler(t *testing.T) { + t.Parallel() + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte{'x'}) + }) + expectedError := errors.New("expected") + var recordedError error + adapter, err := DefaultAdapter(ErrorHandler(func(_ http.ResponseWriter, _ *http.Request, err error) { + recordedError = err + })) + assert.Nil(t, err, "DefaultAdapter returned error") + + req, _ := http.NewRequest("GET", "/", nil) + req.Header.Set("Accept-Encoding", "gzip") + resp := &errorThrowingResponseWriter{expectedError} + adapter(handler).ServeHTTP(resp, req) + assert.Errorf(t, recordedError, "httpcompression: write to regular responseWriter at close gets error: %v", expectedError) +} + // -------------------------------------------------------------------- const ( @@ -1167,13 +1248,12 @@ const ( googleCbrotli = "google-cbrotli" andybalholmBrotli = "andybalholm-brotli" klauspostGzip = "klauspost-gzip" - klauspostPgzip = "klauspost-pgzip" klauspostZstd = "klauspost-zstd" valyalaGozstd = "valyala-gozstd" ) func BenchmarkAdapter(b *testing.B) { - comps := map[string]int{stdlibGzip: 9, klauspostGzip: 9, andybalholmBrotli: 11, googleCbrotli: 11, 
klauspostZstd: 4, valyalaGozstd: 22} + comps := benchMarkComps sizes := []int{100, 1000, 10000, 100000} if testing.Short() { comps = map[string]int{stdlibGzip: 9, andybalholmBrotli: 11} @@ -1235,21 +1315,7 @@ func benchmark(b *testing.B, parallel bool, size int, ae string, d int) { b.Fatal(err) } - var enc CompressorProvider - switch ae { - case stdlibGzip: - enc, err = NewDefaultGzipCompressor(d) - case klauspostGzip: - enc, err = kpgzip.New(kpgzip.Options{Level: d}) - case andybalholmBrotli: - enc, err = brotli.New(brotli.Options{Quality: d}) - case googleCbrotli: - enc, err = cbrotli.New(gcbrotli.WriterOptions{Quality: d}) - case klauspostZstd: - enc, err = zstd.New(kpzstd.WithEncoderLevel(kpzstd.EncoderLevel(d))) - case valyalaGozstd: - enc, err = gozstd.New(vzstd.WriterParams{CompressionLevel: d}) - } + enc, err := benchmarkCompressorProvider(ae, d) if err != nil { b.Fatal(err) } @@ -1347,3 +1413,17 @@ func decodeGzip(i io.Reader) ([]byte, error) { } return io.ReadAll(r) } + +type errorThrowingResponseWriter struct { + errorToThrow error +} + +func (w *errorThrowingResponseWriter) Header() http.Header { + return map[string][]string{} +} + +func (w *errorThrowingResponseWriter) Write([]byte) (int, error) { + return 0, w.errorToThrow +} + +func (w *errorThrowingResponseWriter) WriteHeader(int) {} diff --git a/adapter_w_cgo_test.go b/adapter_w_cgo_test.go new file mode 100644 index 0000000..e16709c --- /dev/null +++ b/adapter_w_cgo_test.go @@ -0,0 +1,38 @@ +//go:build cgo + +package httpcompression + +import ( + "fmt" + "github.com/CAFxX/httpcompression/contrib/andybalholm/brotli" + "github.com/CAFxX/httpcompression/contrib/google/cbrotli" + kpgzip "github.com/CAFxX/httpcompression/contrib/klauspost/gzip" + "github.com/CAFxX/httpcompression/contrib/klauspost/zstd" + "github.com/CAFxX/httpcompression/contrib/valyala/gozstd" + gcbrotli "github.com/google/brotli/go/cbrotli" + kpzstd "github.com/klauspost/compress/zstd" + vzstd "github.com/valyala/gozstd" +) + +var ( 
+ benchMarkComps = map[string]int{stdlibGzip: 9, klauspostGzip: 9, andybalholmBrotli: 11, googleCbrotli: 11, klauspostZstd: 4, valyalaGozstd: 22} +) + +func benchmarkCompressorProvider(ae string, d int) (CompressorProvider, error) { + switch ae { + case stdlibGzip: + return NewDefaultGzipCompressor(d) + case klauspostGzip: + return kpgzip.New(kpgzip.Options{Level: d}) + case andybalholmBrotli: + return brotli.New(brotli.Options{Quality: d}) + case googleCbrotli: + return cbrotli.New(gcbrotli.WriterOptions{Quality: d}) + case klauspostZstd: + return zstd.New(kpzstd.WithEncoderLevel(kpzstd.EncoderLevel(d))) + case valyalaGozstd: + return gozstd.New(vzstd.WriterParams{CompressionLevel: d}) + default: + return nil, fmt.Errorf("unknown compressor provider: %s", ae) + } +} diff --git a/adapter_wo_cgo_test.go b/adapter_wo_cgo_test.go new file mode 100644 index 0000000..b6f90e2 --- /dev/null +++ b/adapter_wo_cgo_test.go @@ -0,0 +1,30 @@ +//go:build !cgo + +package httpcompression + +import ( + "fmt" + "github.com/CAFxX/httpcompression/contrib/andybalholm/brotli" + kpgzip "github.com/CAFxX/httpcompression/contrib/klauspost/gzip" + "github.com/CAFxX/httpcompression/contrib/klauspost/zstd" + kpzstd "github.com/klauspost/compress/zstd" +) + +var ( + benchMarkComps = map[string]int{stdlibGzip: 9, klauspostGzip: 9, andybalholmBrotli: 11, klauspostZstd: 4} +) + +func benchmarkCompressorProvider(ae string, d int) (CompressorProvider, error) { + switch ae { + case stdlibGzip: + return NewDefaultGzipCompressor(d) + case klauspostGzip: + return kpgzip.New(kpgzip.Options{Level: d}) + case andybalholmBrotli: + return brotli.New(brotli.Options{Quality: d}) + case klauspostZstd: + return zstd.New(kpzstd.WithEncoderLevel(kpzstd.EncoderLevel(d))) + default: + return nil, fmt.Errorf("unknown compressor provider: %s", ae) + } +} diff --git a/config.go b/config.go new file mode 100644 index 0000000..98edd9b --- /dev/null +++ b/config.go @@ -0,0 +1,165 @@ +package 
httpcompression + +import ( + "fmt" + "github.com/CAFxX/httpcompression/contrib/andybalholm/brotli" + cgzip "github.com/CAFxX/httpcompression/contrib/compress/gzip" + "github.com/CAFxX/httpcompression/contrib/compress/zlib" + "github.com/CAFxX/httpcompression/contrib/klauspost/zstd" + "log" + "net/http" +) + +const ( + // DefaultMinSize is the default minimum response body size for which we enable compression. + // + // 200 is a somewhat arbitrary number; in experiments compressing short text/markup-like sequences + // with different compressors we saw that sequences shorter that ~180 the output generated by the + // compressor would sometime be larger than the input. + // This default may change between versions. + // In general there can be no one-size-fits-all value: you will want to measure if a different + // minimum size improves end-to-end performance for your workloads. + DefaultMinSize = 200 +) + +// Option can be passed to Handler to control its configuration. +type Option func(c *config) error + +// MinSize is an option that controls the minimum size of payloads that +// should be compressed. The default is DefaultMinSize. +func MinSize(size int) Option { + return func(c *config) error { + if size < 0 { + return fmt.Errorf("minimum size can not be negative: %d", size) + } + if c.minSizeFunc != nil { + return fmt.Errorf("cannot use MinSize and MinSizeRequestFunc together") + } + c.minSize = size + return nil + } +} + +// MinSizeRequestFunc is an option that controls the minimum size of payloads that +// should be compressed. The provided func can select this minimum based on +// the provided http.Request. The default is DefaultMinSize. 
+func MinSizeRequestFunc(f func(*http.Request) (int, error)) Option { + return func(c *config) error { + if f == nil { + return fmt.Errorf("there was no minSizeFunc provided") + } + if c.minSize > 0 { + return fmt.Errorf("cannot use MinSize and MinSizeRequestFunc together") + } + c.minSizeFunc = f + return nil + } +} + +// DeflateCompressionLevel is an option that controls the Deflate compression +// level to be used when compressing payloads. +// The default is flate.DefaultCompression. +func DeflateCompressionLevel(level int) Option { + c, err := zlib.New(zlib.Options{Level: level}) + if err != nil { + return errorOption(err) + } + return DeflateCompressor(c) +} + +// GzipCompressionLevel is an option that controls the Gzip compression +// level to be used when compressing payloads. +// The default is gzip.DefaultCompression. +func GzipCompressionLevel(level int) Option { + c, err := NewDefaultGzipCompressor(level) + if err != nil { + return errorOption(err) + } + return GzipCompressor(c) +} + +// BrotliCompressionLevel is an option that controls the Brotli compression +// level to be used when compressing payloads. +// The default is 3 (the same default used in the reference brotli C +// implementation). +func BrotliCompressionLevel(level int) Option { + c, err := brotli.New(brotli.Options{Quality: level}) + if err != nil { + return errorOption(err) + } + return BrotliCompressor(c) +} + +// DeflateCompressor is an option to specify a custom compressor factory for Deflate. +func DeflateCompressor(g CompressorProvider) Option { + return Compressor(zlib.Encoding, -300, g) +} + +// GzipCompressor is an option to specify a custom compressor factory for Gzip. +func GzipCompressor(g CompressorProvider) Option { + return Compressor(cgzip.Encoding, -200, g) +} + +// BrotliCompressor is an option to specify a custom compressor factory for Brotli. 
+func BrotliCompressor(b CompressorProvider) Option { + return Compressor(brotli.Encoding, -100, b) +} + +// ZstandardCompressor is an option to specify a custom compressor factory for Zstandard. +func ZstandardCompressor(b CompressorProvider) Option { + return Compressor(zstd.Encoding, -50, b) +} + +func NewDefaultGzipCompressor(level int) (CompressorProvider, error) { + return cgzip.New(cgzip.Options{Level: level}) +} + +func defaultZstandardCompressor() Option { + zstdComp, err := zstd.New() + if err != nil { + return errorOption(fmt.Errorf("initializing zstd compressor: %w", err)) + } + return ZstandardCompressor(zstdComp) +} + +func errorOption(err error) Option { + return func(_ *config) error { + return err + } +} + +// ErrorHandler defines what should happen if an unexpected error happens +// within the httpcompression execution chain. +func ErrorHandler(handler func(w http.ResponseWriter, r *http.Request, err error)) Option { + return func(c *config) error { + c.errorHandler = handler + return nil + } +} + +// Used for functional configuration. +type config struct { + minSize int // Specifies the minimum response size to gzip. If the response length is bigger than this value, it is compressed. + minSizeFunc func(r *http.Request) (int, error) // Similar to minSize but selects the value based on given request. + contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty. 
+ blacklist bool + prefer PreferType + compressor comps + errorHandler func(w http.ResponseWriter, r *http.Request, err error) +} + +func (c *config) handleError(w http.ResponseWriter, r *http.Request, err error) { + if c.errorHandler != nil { + c.errorHandler(w, r, err) + } else { + http.Error(w, "500 Internal Server Error", http.StatusInternalServerError) + log.Printf("ERROR: %v", err) + } +} + +type comps map[string]comp + +type comp struct { + comp CompressorProvider + priority int +} diff --git a/contrib/andybalholm/brotli/brotli_race_test.go b/contrib/andybalholm/brotli/brotli_race_test.go index 6dcfe98..d23fe94 100644 --- a/contrib/andybalholm/brotli/brotli_race_test.go +++ b/contrib/andybalholm/brotli/brotli_race_test.go @@ -1,4 +1,4 @@ -// +build race +//go:build race package brotli_test diff --git a/contrib/compress/gzip/gzip_race_test.go b/contrib/compress/gzip/gzip_race_test.go index 5c4b32d..103ba2d 100644 --- a/contrib/compress/gzip/gzip_race_test.go +++ b/contrib/compress/gzip/gzip_race_test.go @@ -1,5 +1,4 @@ //go:build race -// +build race package gzip_test diff --git a/contrib/compress/zlib/deflate_race_test.go b/contrib/compress/zlib/deflate_race_test.go index 5e33f43..0df0f34 100644 --- a/contrib/compress/zlib/deflate_race_test.go +++ b/contrib/compress/zlib/deflate_race_test.go @@ -1,5 +1,4 @@ //go:build race -// +build race package zlib_test diff --git a/contrib/google/cbrotli/brotli.go b/contrib/google/cbrotli/brotli.go index fcbd47f..bf481fb 100644 --- a/contrib/google/cbrotli/brotli.go +++ b/contrib/google/cbrotli/brotli.go @@ -1,3 +1,5 @@ +//go:build cgo + package cbrotli import ( diff --git a/contrib/google/cbrotli/brotli_race_test.go b/contrib/google/cbrotli/brotli_race_test.go index 0885aa9..0431d84 100644 --- a/contrib/google/cbrotli/brotli_race_test.go +++ b/contrib/google/cbrotli/brotli_race_test.go @@ -1,5 +1,4 @@ -//go:build race -// +build race +//go:build race && cgo package cbrotli_test diff --git 
a/contrib/google/cbrotli/brotli_test.go b/contrib/google/cbrotli/brotli_test.go index 0ea025c..387ecc7 100644 --- a/contrib/google/cbrotli/brotli_test.go +++ b/contrib/google/cbrotli/brotli_test.go @@ -1,3 +1,5 @@ +//go:build cgo + package cbrotli_test import ( diff --git a/contrib/google/cbrotli/export_test.go b/contrib/google/cbrotli/export_test.go index 215376a..c4b032f 100644 --- a/contrib/google/cbrotli/export_test.go +++ b/contrib/google/cbrotli/export_test.go @@ -1,3 +1,5 @@ +//go:build cgo + package cbrotli type Compressor = compressor diff --git a/contrib/klauspost/gzip/gzip_race_test.go b/contrib/klauspost/gzip/gzip_race_test.go index 317c070..a884015 100644 --- a/contrib/klauspost/gzip/gzip_race_test.go +++ b/contrib/klauspost/gzip/gzip_race_test.go @@ -1,5 +1,4 @@ //go:build race -// +build race package gzip_test diff --git a/contrib/klauspost/pgzip/pgzip_race_test.go b/contrib/klauspost/pgzip/pgzip_race_test.go index 802e2df..36d2343 100644 --- a/contrib/klauspost/pgzip/pgzip_race_test.go +++ b/contrib/klauspost/pgzip/pgzip_race_test.go @@ -1,4 +1,4 @@ -// +build race +//go:build race package pgzip_test diff --git a/contrib/klauspost/zlib/deflate_race_test.go b/contrib/klauspost/zlib/deflate_race_test.go index 81cba27..4827c33 100644 --- a/contrib/klauspost/zlib/deflate_race_test.go +++ b/contrib/klauspost/zlib/deflate_race_test.go @@ -1,5 +1,4 @@ //go:build race -// +build race package zlib_test diff --git a/contrib/klauspost/zstd/zstd_race_test.go b/contrib/klauspost/zstd/zstd_race_test.go index 1d774d7..8ec7265 100644 --- a/contrib/klauspost/zstd/zstd_race_test.go +++ b/contrib/klauspost/zstd/zstd_race_test.go @@ -1,4 +1,4 @@ -// +build race +//go:build race package zstd_test diff --git a/contrib/pierrec/lz4/lz4_race_test.go b/contrib/pierrec/lz4/lz4_race_test.go index ea42bc6..def6254 100644 --- a/contrib/pierrec/lz4/lz4_race_test.go +++ b/contrib/pierrec/lz4/lz4_race_test.go @@ -1,5 +1,4 @@ //go:build race -// +build race package lz4_test diff 
--git a/contrib/ulikunitz/xz/xz_race_test.go b/contrib/ulikunitz/xz/xz_race_test.go index ab1822d..2f1064b 100644 --- a/contrib/ulikunitz/xz/xz_race_test.go +++ b/contrib/ulikunitz/xz/xz_race_test.go @@ -1,5 +1,4 @@ //go:build race -// +build race package xz_test diff --git a/contrib/valyala/gozstd/export_test.go b/contrib/valyala/gozstd/export_test.go index acb0026..0e60b89 100644 --- a/contrib/valyala/gozstd/export_test.go +++ b/contrib/valyala/gozstd/export_test.go @@ -1,3 +1,5 @@ +//go:build cgo + package gozstd type Compressor = compressor diff --git a/contrib/valyala/gozstd/zstd.go b/contrib/valyala/gozstd/zstd.go index 6fa8425..60fb6ab 100644 --- a/contrib/valyala/gozstd/zstd.go +++ b/contrib/valyala/gozstd/zstd.go @@ -1,3 +1,5 @@ +//go:build cgo + package gozstd import ( diff --git a/contrib/valyala/gozstd/zstd_race_test.go b/contrib/valyala/gozstd/zstd_race_test.go index 99004e6..b240992 100644 --- a/contrib/valyala/gozstd/zstd_race_test.go +++ b/contrib/valyala/gozstd/zstd_race_test.go @@ -1,5 +1,4 @@ -//go:build race -// +build race +//go:build race && cgo package gozstd_test diff --git a/contrib/valyala/gozstd/zstd_test.go b/contrib/valyala/gozstd/zstd_test.go index e4a51bb..516a22a 100644 --- a/contrib/valyala/gozstd/zstd_test.go +++ b/contrib/valyala/gozstd/zstd_test.go @@ -1,3 +1,5 @@ +//go:build cgo + package gozstd_test import ( diff --git a/go.mod b/go.mod index 1ae2974..ff085be 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/CAFxX/httpcompression -go 1.11 +go 1.17 require ( github.com/andybalholm/brotli v1.1.1 @@ -12,3 +12,9 @@ require ( github.com/ulikunitz/xz v0.5.12 github.com/valyala/gozstd v1.21.2 ) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum index 963fbde..aef5108 100644 --- a/go.sum +++ b/go.sum @@ -3,8 +3,8 @@ github.com/andybalholm/brotli v1.1.1/go.mod 
h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOL github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/brotli/go/cbrotli v0.0.0-20230829110029-ed738e842d2f h1:jopqB+UTSdJGEJT8tEqYyE29zN91fi2827oLET8tl7k= -github.com/google/brotli/go/cbrotli v0.0.0-20230829110029-ed738e842d2f/go.mod h1:nOPhAkwVliJdNTkj3gXpljmWhjc4wCaVqbMJcPKWP4s= +github.com/google/brotli/go/cbrotli v0.0.0-20231208153621-fef82ea10435 h1:YToLuFkOrNdDMLhU1FvD7HDyXoC8PCeuvTGihUNr/ok= +github.com/google/brotli/go/cbrotli v0.0.0-20231208153621-fef82ea10435/go.mod h1:nOPhAkwVliJdNTkj3gXpljmWhjc4wCaVqbMJcPKWP4s= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= diff --git a/prefer.go b/prefer.go index eddf559..3b773ee 100644 --- a/prefer.go +++ b/prefer.go @@ -28,13 +28,13 @@ type PreferType byte const ( // PreferServer prefers compressors in the order specified on the server. // If two or more compressors have the same priority on the server, the client preference is taken into consideration. - // If both server and client do no specify a preference between two or more compressors, the order is determined by the name of the encoding. + // If both server and client do not specify a preference between two or more compressors, the order is determined by the name of the encoding. // PreferServer is the default. PreferServer PreferType = iota // PreferClient prefers compressors in the order specified by the client. // If two or more compressors have the same priority according to the client, the server priority is taken into consideration. 
- // If both server and client do no specify a preference between two or more compressors, the order is determined by the name of the encoding. + // If both server and client do not specify a preference between two or more compressors, the order is determined by the name of the encoding. PreferClient ) diff --git a/response_writer.go b/response_writer.go index fbb3f00..b98f14e 100644 --- a/response_writer.go +++ b/response_writer.go @@ -10,17 +10,24 @@ import ( "sync" ) -// compressWriter provides an http.ResponseWriter interface, which gzips +var ( + // ErrHijackerNotSupported indicates that a provided http.ResponseWriter does not implement + // http.Hijacker and can be therefore not be called by wrapped responses. + ErrHijackerNotSupported = fmt.Errorf("http.Hijacker interface is not supported") +) + +// compressWriter provides a http.ResponseWriter interface, which gzips // bytes before writing them to the underlying response. This doesn't close the // writers, so don't forget to do that. // It can be configured to skip response smaller than minSize. type compressWriter struct { http.ResponseWriter - config config - accept codings - common []string - pool *sync.Pool // pool of buffers (buf []byte); max size of each buf is maxBuf + config *config + minSize int + accept codings + common []string + pool *sync.Pool // pool of buffers (buf []byte); max size of each buf is maxBuf w io.Writer enc string @@ -52,6 +59,27 @@ var ( const maxBuf = 1 << 16 // maximum size of recycled buffer +func (w *compressWriter) configure( + rw http.ResponseWriter, + minSize int, + accept codings, + common []string, +) { + w.ResponseWriter = rw + w.minSize = minSize + w.accept = accept + w.common = common + w.w = nil +} + +func (w *compressWriter) clean() { + w.ResponseWriter = nil + w.minSize = 0 + w.accept = nil + w.common = nil + w.w = nil +} + // Write compresses and appends the given byte slice to the underlying ResponseWriter. 
func (w *compressWriter) Write(b []byte) (int, error) { if w.w != nil { @@ -71,8 +99,8 @@ func (w *compressWriter) Write(b []byte) (int, error) { // Fast path: we have enough information to know whether we will compress // or not this response from the first write, so we don't need to buffer // writes to defer the decision until we have more data. - if w.buf == nil && (ct != "" || len(w.config.contentTypes) == 0) && (cl > 0 || len(b) >= w.config.minSize) { - if ce == "" && (cl >= w.config.minSize || len(b) >= w.config.minSize) && handleContentType(ct, w.config.contentTypes, w.config.blacklist) { + if w.buf == nil && (ct != "" || len(w.config.contentTypes) == 0) && (cl > 0 || len(b) >= w.minSize) { + if ce == "" && (cl >= w.minSize || len(b) >= w.minSize) && handleContentType(ct, w.config.contentTypes, w.config.blacklist) { enc := preferredEncoding(w.accept, w.config.compressor, w.common, w.config.prefer) if err := w.startCompress(enc, b); err != nil { return 0, err @@ -93,19 +121,19 @@ func (w *compressWriter) Write(b []byte) (int, error) { *w.buf = append(*w.buf, b...) // Only continue if they didn't already choose an encoding or a known unhandled content length or type. - if ce == "" && (cl == 0 || cl >= w.config.minSize) && (ct == "" || handleContentType(ct, w.config.contentTypes, w.config.blacklist)) { + if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(ct, w.config.contentTypes, w.config.blacklist)) { // If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data. - if len(*w.buf) < w.config.minSize && cl == 0 { + if len(*w.buf) < w.minSize && cl == 0 { return len(b), nil } // If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue. - if cl >= w.config.minSize || len(*w.buf) >= w.config.minSize { + if cl >= w.minSize || len(*w.buf) >= w.minSize { // If a Content-Type wasn't specified, infer it from the current buffer. 
if ct == "" { ct = http.DetectContentType(*w.buf) if ct != "" { // net/http by default performs content sniffing but this is disabled if content-encoding is set. - // Since we set content-encoding, if content-type was not set and we successfully sniffed it, + // Since we set content-encoding, if content-type was not set, and we successfully sniffed it, // set the content-type. w.Header().Set(contentType, ct) } @@ -210,9 +238,9 @@ func (w *compressWriter) startPlain(buf []byte) error { return nil } n, err := w.ResponseWriter.Write(buf) - // This should never happen (per io.Writer docs), but if the write didn't - // accept the entire buffer but returned no specific error, we have no clue - // what's going on, so abort just to be safe. + // This should never happen (per io.Writer docs), but if the write operation + // didn't accept the entire buffer but returned no specific error, we have no + // clue what's going on, so abort just to be safe. if err == nil && n < len(buf) { err = io.ErrShortWrite } @@ -251,8 +279,8 @@ func (w *compressWriter) Close() error { } // Flush flushes the underlying compressor Writer and then the underlying -// http.ResponseWriter if it is an http.Flusher. This makes compressWriter -// an http.Flusher. +// http.ResponseWriter if it is a http.Flusher. This makes compressWriter +// a http.Flusher. // Flush is a no-op until enough data has been written to decide whether the // response should be compressed or not (e.g. less than MinSize bytes have // been written). @@ -269,7 +297,7 @@ func (w *compressWriter) Flush() { // - in case we are bypassing compression, w.w is the parent ResponseWriter, and therefore we skip // this as the parent ResponseWriter does not implement Flusher. // - in case we are NOT bypassing compression, w.w is the compressor, and therefore we flush the - // compressor and then we flush the parent ResponseWriter. + // compressor, and then we flush the parent ResponseWriter. 
if fw, ok := w.w.(Flusher); ok { _ = fw.Flush() } @@ -281,21 +309,16 @@ func (w *compressWriter) Flush() { } // Hijack implements http.Hijacker. If the underlying ResponseWriter is a -// Hijacker, its Hijack method is returned. Otherwise an error is returned. +// Hijacker, its Hijack method is returned. Otherwise, an error is returned. func (w *compressWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { if hj, ok := w.ResponseWriter.(http.Hijacker); ok { return hj.Hijack() } - return nil, nil, fmt.Errorf("http.Hijacker interface is not supported") + return nil, nil, ErrHijackerNotSupported } func (w *compressWriter) getBuffer() *[]byte { - b := w.pool.Get() - if b == nil { - var s []byte - return &s - } - return b.(*[]byte) + return w.pool.Get().(*[]byte) } func (w *compressWriter) recycleBuffer() { diff --git a/response_writer_factory.go b/response_writer_factory.go new file mode 100644 index 0000000..b57a792 --- /dev/null +++ b/response_writer_factory.go @@ -0,0 +1,158 @@ +package httpcompression + +import ( + "compress/gzip" + "fmt" + "github.com/CAFxX/httpcompression/contrib/andybalholm/brotli" + "github.com/CAFxX/httpcompression/contrib/compress/zlib" + "net/http" + "sync" +) + +type codings map[string]float64 + +type ResponseWriterFactoryFactory struct { + config *config + bufPool *sync.Pool + writerPool *sync.Pool +} + +func NewResponseWriterFactory(opts ...Option) (*ResponseWriterFactoryFactory, error) { + f := ResponseWriterFactoryFactory{ + config: &config{ + prefer: PreferServer, + compressor: comps{}, + }, + bufPool: &sync.Pool{}, + writerPool: &sync.Pool{}, + } + + f.bufPool.New = func() interface{} { + return &[]byte{} + } + f.writerPool.New = func() interface{} { + return &compressWriter{ + config: f.config, + pool: f.bufPool, + } + } + + for _, o := range opts { + err := o(f.config) + if err != nil { + return nil, err + } + } + + if f.config.minSize == 0 && f.config.minSizeFunc == nil { + f.config.minSize = DefaultMinSize + } + + return &f, nil 
+} + +// NewDefaultResponseWriterFactory is like NewResponseWriterFactory, but it includes sane +// defaults for general usage. +// Currently, the defaults enable gzip and brotli compression, and set a minimum body size +// of 200 bytes. +// The provided opts override the defaults. +// The defaults are not guaranteed to remain constant over time: if you want to avoid this +// use NewResponseWriterFactory directly. +func NewDefaultResponseWriterFactory(opts ...Option) (*ResponseWriterFactoryFactory, error) { + defaults := []Option{ + DeflateCompressionLevel(zlib.DefaultCompression), + GzipCompressionLevel(gzip.DefaultCompression), + BrotliCompressionLevel(brotli.DefaultCompression), + defaultZstandardCompressor(), + } + opts = append(defaults, opts...) + return NewResponseWriterFactory(opts...) +} + +// Create wraps the given http.ResponseWriter into a new instance +// with is using compressor (if supported and requested). +// +// Important: Finalizer() must be called *always*, as this will +// in turn Close() the compressor. This is important because +// it is guaranteed by the CompressorProvider interface, and +// because some compressors may be implemented via cgo, and they +// may rely on Close() being called to release memory resources. +func (f *ResponseWriterFactoryFactory) Create(rw http.ResponseWriter, req *http.Request) (http.ResponseWriter, Finalizer, error) { + addVaryHeader(rw.Header(), acceptEncoding) + + accept := parseEncodings(req.Header.Values(acceptEncoding)) + common := acceptedCompression(accept, f.config.compressor) + if len(common) == 0 { + return rw, noopFinalizer, nil + } + + // We do not handle range requests when compression is used, as the + // range specified applies to the compressed data, not to the uncompressed one. + // So we would need to (1) ensure that compressors are deterministic and (2) + // generate the whole uncompressed response anyway, compress it, and then discard + // the bits outside the range. 
+ // Let's keep it simple, and simply ignore completely the range header. + // We also need to remove the Accept: Range header from any response that is + // compressed; this is done in the ResponseWriter. + // See https://github.com/nytimes/gziphandler/issues/83. + req.Header.Del(_range) + + minSize := f.config.minSize + if msf := f.config.minSizeFunc; msf != nil { + v, err := msf(req) + if err != nil { + return nil, nil, fmt.Errorf("cannot resolve minSize for request: %w", err) + } + minSize = v + } + + cw := f.writerPool.Get().(*compressWriter) + cw.configure(rw, minSize, accept, common) + + if _, ok := rw.(http.CloseNotifier); ok { + rw = compressWriterWithCloseNotify{cw} + } else { + rw = cw + } + + return rw, func() error { + defer f.writerPool.Put(cw) + defer cw.clean() + return cw.Close() + }, nil +} + +// AmountOfCompressors returns the amount of compressors configured at this ResponseWriterFactoryFactory. +func (f *ResponseWriterFactoryFactory) AmountOfCompressors() int { + return len(f.config.compressor) +} + +func (f *ResponseWriterFactoryFactory) getBuffer() *[]byte { + b := f.bufPool.Get() + if b == nil { + var s []byte + return &s + } + return b.(*[]byte) +} + +func (f *ResponseWriterFactoryFactory) recycleBuffer(target *[]byte) { + if target == nil { + return + } + if cap(*target) > maxBuf { + // If the buffer is too big, let's drop it to avoid + // keeping huge buffers alive in the pool. In this case + // we still recycle the pointer to the slice. + *target = nil + } + if len(*target) > 0 { + // Reset the buffer to zero length. + *target = (*target)[:0] + } + f.bufPool.Put(target) +} + +type Finalizer func() error + +func noopFinalizer() error { return nil }