Update dependencies

This commit is contained in:
2017-08-28 00:04:21 +01:00
parent b4adc35cac
commit 57a85b35e7
33 changed files with 542 additions and 976 deletions

View File

@@ -558,6 +558,7 @@ uintBuf := fasthttp.AppendUint(nil, 1234)
* [Iris](https://github.com/kataras/iris)
* [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing)
* [fasthttprouter](https://github.com/buaazp/fasthttprouter)
* [gramework](https://github.com/gramework/gramework)
* [lu](https://github.com/vincentLiuxiang/lu)
See also [this issue](https://github.com/valyala/fasthttp/issues/9) for more info.

View File

@@ -428,15 +428,15 @@ func (s *argsScanner) next(kv *argsKV) bool {
case '=':
if isKey {
isKey = false
kv.key = decodeArg(kv.key, s.b[:i], true)
kv.key = decodeArgAppend(kv.key[:0], s.b[:i])
k = i + 1
}
case '&':
if isKey {
kv.key = decodeArg(kv.key, s.b[:i], true)
kv.key = decodeArgAppend(kv.key[:0], s.b[:i])
kv.value = kv.value[:0]
} else {
kv.value = decodeArg(kv.value, s.b[k:i], true)
kv.value = decodeArgAppend(kv.value[:0], s.b[k:i])
}
s.b = s.b[i+1:]
return true
@@ -444,35 +444,37 @@ func (s *argsScanner) next(kv *argsKV) bool {
}
if isKey {
kv.key = decodeArg(kv.key, s.b, true)
kv.key = decodeArgAppend(kv.key[:0], s.b)
kv.value = kv.value[:0]
} else {
kv.value = decodeArg(kv.value, s.b[k:], true)
kv.value = decodeArgAppend(kv.value[:0], s.b[k:])
}
s.b = s.b[len(s.b):]
return true
}
func decodeArg(dst, src []byte, decodePlus bool) []byte {
return decodeArgAppend(dst[:0], src, decodePlus)
}
func decodeArgAppend(dst, src []byte) []byte {
if bytes.IndexByte(src, '%') < 0 && bytes.IndexByte(src, '+') < 0 {
// fast path: src doesn't contain encoded chars
return append(dst, src...)
}
func decodeArgAppend(dst, src []byte, decodePlus bool) []byte {
for i, n := 0, len(src); i < n; i++ {
// slow path
for i := 0; i < len(src); i++ {
c := src[i]
if c == '%' {
if i+2 >= n {
if i+2 >= len(src) {
return append(dst, src[i:]...)
}
x1 := hexbyte2int(src[i+1])
x2 := hexbyte2int(src[i+2])
if x1 < 0 || x2 < 0 {
dst = append(dst, c)
x2 := hex2intTable[src[i+2]]
x1 := hex2intTable[src[i+1]]
if x1 == 16 || x2 == 16 {
dst = append(dst, '%')
} else {
dst = append(dst, byte(x1<<4|x2))
dst = append(dst, x1<<4|x2)
i += 2
}
} else if decodePlus && c == '+' {
} else if c == '+' {
dst = append(dst, ' ')
} else {
dst = append(dst, c)
@@ -480,3 +482,36 @@ func decodeArgAppend(dst, src []byte, decodePlus bool) []byte {
}
return dst
}
// decodeArgAppendNoPlus is almost identical to decodeArgAppend, but it doesn't
// substitute '+' with ' '.
//
// The function is copy-pasted from decodeArgAppend due to the performance
// reasons only.
func decodeArgAppendNoPlus(dst, src []byte) []byte {
if bytes.IndexByte(src, '%') < 0 {
// fast path: src doesn't contain encoded chars
return append(dst, src...)
}
// slow path
for i := 0; i < len(src); i++ {
c := src[i]
if c == '%' {
if i+2 >= len(src) {
return append(dst, src[i:]...)
}
x2 := hex2intTable[src[i+2]]
x1 := hex2intTable[src[i+1]]
if x1 == 16 || x2 == 16 {
dst = append(dst, '%')
} else {
dst = append(dst, x1<<4|x2)
i += 2
}
} else {
dst = append(dst, c)
}
}
return dst
}

View File

@@ -9,6 +9,7 @@ import (
"math"
"net"
"reflect"
"strings"
"sync"
"time"
"unsafe"
@@ -16,6 +17,16 @@ import (
// AppendHTMLEscape appends html-escaped s to dst and returns the extended dst.
func AppendHTMLEscape(dst []byte, s string) []byte {
if strings.IndexByte(s, '<') < 0 &&
strings.IndexByte(s, '>') < 0 &&
strings.IndexByte(s, '"') < 0 &&
strings.IndexByte(s, '\'') < 0 {
// fast path - nothing to escape
return append(dst, s...)
}
// slow path
var prev int
var sub string
for i, n := 0, len(s); i < n; i++ {
@@ -254,8 +265,8 @@ func readHexInt(r *bufio.Reader) (int, error) {
}
return -1, err
}
k = hexbyte2int(c)
if k < 0 {
k = int(hex2intTable[c])
if k == 16 {
if i == 0 {
return -1, errEmptyHexNum
}
@@ -313,42 +324,49 @@ func hexCharUpper(c byte) byte {
var hex2intTable = func() []byte {
b := make([]byte, 255)
for i := byte(0); i < 255; i++ {
c := byte(0)
c := byte(16)
if i >= '0' && i <= '9' {
c = 1 + i - '0'
c = i - '0'
} else if i >= 'a' && i <= 'f' {
c = 1 + i - 'a' + 10
c = i - 'a' + 10
} else if i >= 'A' && i <= 'F' {
c = 1 + i - 'A' + 10
c = i - 'A' + 10
}
b[i] = c
}
return b
}()
func hexbyte2int(c byte) int {
return int(hex2intTable[c]) - 1
}
const toLower = 'a' - 'A'
func uppercaseByte(p *byte) {
c := *p
if c >= 'a' && c <= 'z' {
*p = c - toLower
var toLowerTable = func() [256]byte {
var a [256]byte
for i := 0; i < 256; i++ {
c := byte(i)
if c >= 'A' && c <= 'Z' {
c += toLower
}
a[i] = c
}
}
return a
}()
func lowercaseByte(p *byte) {
c := *p
if c >= 'A' && c <= 'Z' {
*p = c + toLower
var toUpperTable = func() [256]byte {
var a [256]byte
for i := 0; i < 256; i++ {
c := byte(i)
if c >= 'a' && c <= 'z' {
c -= toLower
}
a[i] = c
}
}
return a
}()
func lowercaseBytes(b []byte) {
for i, n := 0, len(b); i < n; i++ {
lowercaseByte(&b[i])
for i := 0; i < len(b); i++ {
p := &b[i]
*p = toLowerTable[*p]
}
}
@@ -375,6 +393,13 @@ func s2b(s string) []byte {
return *(*[]byte)(unsafe.Pointer(&bh))
}
// AppendUnquotedArg appends url-decoded src to dst and returns appended dst.
//
// dst may point to src. In this case src will be overwritten.
func AppendUnquotedArg(dst, src []byte) []byte {
return decodeArgAppend(dst, src)
}
// AppendQuotedArg appends url-encoded src to dst and returns appended dst.
func AppendQuotedArg(dst, src []byte) []byte {
for _, c := range src {

View File

@@ -1380,6 +1380,9 @@ func newClientTLSConfig(c *tls.Config, addr string) *tls.Config {
}
func tlsServerName(addr string) string {
if !strings.Contains(addr, ":") {
return addr
}
host, _, err := net.SplitHostPort(addr)
if err != nil {
return "*"

View File

@@ -1,6 +1,7 @@
package fasthttp
import (
"bytes"
"fmt"
"io"
"os"
@@ -9,6 +10,7 @@ import (
"github.com/klauspost/compress/flate"
"github.com/klauspost/compress/gzip"
"github.com/klauspost/compress/zlib"
"github.com/valyala/bytebufferpool"
"github.com/valyala/fasthttp/stackless"
)
@@ -17,7 +19,8 @@ const (
CompressNoCompression = flate.NoCompression
CompressBestSpeed = flate.BestSpeed
CompressBestCompression = flate.BestCompression
CompressDefaultCompression = flate.DefaultCompression
CompressDefaultCompression = 6 // flate.DefaultCompression
CompressHuffmanOnly = -2 // flate.HuffmanOnly
)
func acquireGzipReader(r io.Reader) (*gzip.Reader, error) {
@@ -70,51 +73,54 @@ func resetFlateReader(zr io.ReadCloser, r io.Reader) error {
var flateReaderPool sync.Pool
func acquireGzipWriter(w io.Writer, level int) *gzipWriter {
p := gzipWriterPoolMap[level]
if p == nil {
panic(fmt.Sprintf("BUG: unexpected compression level passed: %d. See compress/gzip for supported levels", level))
}
func acquireStacklessGzipWriter(w io.Writer, level int) stackless.Writer {
nLevel := normalizeCompressLevel(level)
p := stacklessGzipWriterPoolMap[nLevel]
v := p.Get()
if v == nil {
sw := stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
zw, err := gzip.NewWriterLevel(w, level)
if err != nil {
panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err))
}
return zw
return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
return acquireRealGzipWriter(w, level)
})
return &gzipWriter{
Writer: sw,
p: p,
}
}
zw := v.(*gzipWriter)
sw := v.(stackless.Writer)
sw.Reset(w)
return sw
}
func releaseStacklessGzipWriter(sw stackless.Writer, level int) {
sw.Close()
nLevel := normalizeCompressLevel(level)
p := stacklessGzipWriterPoolMap[nLevel]
p.Put(sw)
}
func acquireRealGzipWriter(w io.Writer, level int) *gzip.Writer {
nLevel := normalizeCompressLevel(level)
p := realGzipWriterPoolMap[nLevel]
v := p.Get()
if v == nil {
zw, err := gzip.NewWriterLevel(w, level)
if err != nil {
panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err))
}
return zw
}
zw := v.(*gzip.Writer)
zw.Reset(w)
return zw
}
func releaseGzipWriter(zw *gzipWriter) {
func releaseRealGzipWriter(zw *gzip.Writer, level int) {
zw.Close()
zw.p.Put(zw)
nLevel := normalizeCompressLevel(level)
p := realGzipWriterPoolMap[nLevel]
p.Put(zw)
}
type gzipWriter struct {
stackless.Writer
p *sync.Pool
}
var gzipWriterPoolMap = func() map[int]*sync.Pool {
// Initialize pools for all the compression levels defined
// in https://golang.org/pkg/compress/gzip/#pkg-constants .
m := make(map[int]*sync.Pool, 11)
m[-1] = &sync.Pool{}
for i := 0; i < 10; i++ {
m[i] = &sync.Pool{}
}
return m
}()
var (
stacklessGzipWriterPoolMap = newCompressWriterPoolMap()
realGzipWriterPoolMap = newCompressWriterPoolMap()
)
// AppendGzipBytesLevel appends gzipped src to dst using the given
// compression level and returns the resulting dst.
@@ -125,6 +131,7 @@ var gzipWriterPoolMap = func() map[int]*sync.Pool {
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
w := &byteSliceWriter{dst}
WriteGzipLevel(w, src, level)
@@ -140,11 +147,41 @@ func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) {
zw := acquireGzipWriter(w, level)
n, err := zw.Write(p)
releaseGzipWriter(zw)
return n, err
switch w.(type) {
case *byteSliceWriter,
*bytes.Buffer,
*ByteBuffer,
*bytebufferpool.ByteBuffer:
// These writers don't block, so we can just use stacklessWriteGzip
ctx := &compressCtx{
w: w,
p: p,
level: level,
}
stacklessWriteGzip(ctx)
return len(p), nil
default:
zw := acquireStacklessGzipWriter(w, level)
n, err := zw.Write(p)
releaseStacklessGzipWriter(zw, level)
return n, err
}
}
var stacklessWriteGzip = stackless.NewFunc(nonblockingWriteGzip)
func nonblockingWriteGzip(ctxv interface{}) {
ctx := ctxv.(*compressCtx)
zw := acquireRealGzipWriter(ctx.w, ctx.level)
_, err := zw.Write(ctx.p)
if err != nil {
panic(fmt.Sprintf("BUG: gzip.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
}
releaseRealGzipWriter(zw, ctx.level)
}
// WriteGzip writes gzipped p to w and returns the number of compressed
@@ -175,6 +212,92 @@ func WriteGunzip(w io.Writer, p []byte) (int, error) {
return nn, err
}
// AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst.
func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
w := &byteSliceWriter{dst}
_, err := WriteGunzip(w, src)
return w.b, err
}
// AppendDeflateBytesLevel appends deflated src to dst using the given
// compression level and returns the resulting dst.
//
// Supported compression levels are:
//
// * CompressNoCompression
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
func AppendDeflateBytesLevel(dst, src []byte, level int) []byte {
w := &byteSliceWriter{dst}
WriteDeflateLevel(w, src, level)
return w.b
}
// WriteDeflateLevel writes deflated p to w using the given compression level
// and returns the number of compressed bytes written to w.
//
// Supported compression levels are:
//
// * CompressNoCompression
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
func WriteDeflateLevel(w io.Writer, p []byte, level int) (int, error) {
switch w.(type) {
case *byteSliceWriter,
*bytes.Buffer,
*ByteBuffer,
*bytebufferpool.ByteBuffer:
// These writers don't block, so we can just use stacklessWriteDeflate
ctx := &compressCtx{
w: w,
p: p,
level: level,
}
stacklessWriteDeflate(ctx)
return len(p), nil
default:
zw := acquireStacklessDeflateWriter(w, level)
n, err := zw.Write(p)
releaseStacklessDeflateWriter(zw, level)
return n, err
}
}
var stacklessWriteDeflate = stackless.NewFunc(nonblockingWriteDeflate)
func nonblockingWriteDeflate(ctxv interface{}) {
ctx := ctxv.(*compressCtx)
zw := acquireRealDeflateWriter(ctx.w, ctx.level)
_, err := zw.Write(ctx.p)
if err != nil {
panic(fmt.Sprintf("BUG: zlib.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
}
releaseRealDeflateWriter(zw, ctx.level)
}
type compressCtx struct {
w io.Writer
p []byte
level int
}
// WriteDeflate writes deflated p to w and returns the number of compressed
// bytes written to w.
func WriteDeflate(w io.Writer, p []byte) (int, error) {
return WriteDeflateLevel(w, p, CompressDefaultCompression)
}
// AppendDeflateBytes appends deflated src to dst and returns the resulting dst.
func AppendDeflateBytes(dst, src []byte) []byte {
return AppendDeflateBytesLevel(dst, src, CompressDefaultCompression)
}
// WriteInflate writes inflated p to w and returns the number of uncompressed
// bytes written to w.
func WriteInflate(w io.Writer, p []byte) (int, error) {
@@ -192,10 +315,10 @@ func WriteInflate(w io.Writer, p []byte) (int, error) {
return nn, err
}
// AppendGunzipBytes append gunzipped src to dst and returns the resulting dst.
func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
// AppendInflateBytes appends inflated src to dst and returns the resulting dst.
func AppendInflateBytes(dst, src []byte) ([]byte, error) {
w := &byteSliceWriter{dst}
_, err := WriteGunzip(w, src)
_, err := WriteInflate(w, src)
return w.b, err
}
@@ -221,64 +344,79 @@ func (r *byteSliceReader) Read(p []byte) (int, error) {
return n, nil
}
func acquireFlateWriter(w io.Writer, level int) *flateWriter {
p := flateWriterPoolMap[level]
if p == nil {
panic(fmt.Sprintf("BUG: unexpected compression level passed: %d. See compress/flate for supported levels", level))
}
func acquireStacklessDeflateWriter(w io.Writer, level int) stackless.Writer {
nLevel := normalizeCompressLevel(level)
p := stacklessDeflateWriterPoolMap[nLevel]
v := p.Get()
if v == nil {
sw := stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
zw, err := zlib.NewWriterLevel(w, level)
if err != nil {
panic(fmt.Sprintf("BUG: unexpected error in zlib.NewWriterLevel(%d): %s", level, err))
}
return zw
return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
return acquireRealDeflateWriter(w, level)
})
return &flateWriter{
Writer: sw,
p: p,
}
}
zw := v.(*flateWriter)
sw := v.(stackless.Writer)
sw.Reset(w)
return sw
}
func releaseStacklessDeflateWriter(sw stackless.Writer, level int) {
sw.Close()
nLevel := normalizeCompressLevel(level)
p := stacklessDeflateWriterPoolMap[nLevel]
p.Put(sw)
}
func acquireRealDeflateWriter(w io.Writer, level int) *zlib.Writer {
nLevel := normalizeCompressLevel(level)
p := realDeflateWriterPoolMap[nLevel]
v := p.Get()
if v == nil {
zw, err := zlib.NewWriterLevel(w, level)
if err != nil {
panic(fmt.Sprintf("BUG: unexpected error from zlib.NewWriterLevel(%d): %s", level, err))
}
return zw
}
zw := v.(*zlib.Writer)
zw.Reset(w)
return zw
}
func releaseFlateWriter(zw *flateWriter) {
func releaseRealDeflateWriter(zw *zlib.Writer, level int) {
zw.Close()
zw.p.Put(zw)
nLevel := normalizeCompressLevel(level)
p := realDeflateWriterPoolMap[nLevel]
p.Put(zw)
}
type flateWriter struct {
stackless.Writer
p *sync.Pool
}
var (
stacklessDeflateWriterPoolMap = newCompressWriterPoolMap()
realDeflateWriterPoolMap = newCompressWriterPoolMap()
)
var flateWriterPoolMap = func() map[int]*sync.Pool {
func newCompressWriterPoolMap() []*sync.Pool {
// Initialize pools for all the compression levels defined
// in https://golang.org/pkg/compress/flate/#pkg-constants .
m := make(map[int]*sync.Pool, 11)
m[-1] = &sync.Pool{}
for i := 0; i < 10; i++ {
m[i] = &sync.Pool{}
// Compression levels are normalized with normalizeCompressLevel,
// so they fit [0..11].
var m []*sync.Pool
for i := 0; i < 12; i++ {
m = append(m, &sync.Pool{})
}
return m
}()
}
func isFileCompressible(f *os.File, minCompressRatio float64) bool {
// Try compressing the first 4kb of the file
// and see if it can be compressed by more than
// the given minCompressRatio.
b := AcquireByteBuffer()
zw := acquireGzipWriter(b, CompressDefaultCompression)
zw := acquireStacklessGzipWriter(b, CompressDefaultCompression)
lr := &io.LimitedReader{
R: f,
N: 4096,
}
_, err := copyZeroAlloc(zw, lr)
releaseGzipWriter(zw)
releaseStacklessGzipWriter(zw, CompressDefaultCompression)
f.Seek(0, 0)
if err != nil {
return false
@@ -289,3 +427,14 @@ func isFileCompressible(f *os.File, minCompressRatio float64) bool {
ReleaseByteBuffer(b)
return float64(zn) < float64(n)*minCompressRatio
}
// normalizes compression level into [0..11], so it could be used as an index
// in *PoolMap.
func normalizeCompressLevel(level int) int {
// -2 is the lowest compression level - CompressHuffmanOnly
// 9 is the highest compression level - CompressBestCompression
if level < -2 || level > 9 {
level = CompressDefaultCompression
}
return level + 2
}

View File

@@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIBpQbZ6a5jL1Yh4wdP6yZk4MKjYWArD/QOLENFw8vbELoAoGCCqGSM49
AwEHoUQDQgAEKQCZWgE2IBhb47ot8MIs1D4KSisHYlZ41IWyeutpjb0fjwwIhimh
pl1Qld1/d2j3Z3vVyfa5yD+ncV7qCFZuSg==
-----END EC PRIVATE KEY-----

View File

@@ -0,0 +1,10 @@
-----BEGIN CERTIFICATE-----
MIIBbTCCAROgAwIBAgIQPo718S+K+G7hc1SgTEU4QDAKBggqhkjOPQQDAjASMRAw
DgYDVQQKEwdBY21lIENvMB4XDTE3MDQyMDIxMDExNFoXDTE4MDQyMDIxMDExNFow
EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCkA
mVoBNiAYW+O6LfDCLNQ+CkorB2JWeNSFsnrraY29H48MCIYpoaZdUJXdf3do92d7
1cn2ucg/p3Fe6ghWbkqjSzBJMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggr
BgEFBQcDATAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDAKBggq
hkjOPQQDAgNIADBFAiEAoLAIQkvSuIcHUqyWroA6yWYw2fznlRH/uO9/hMCxUCEC
IClRYb/5O9eD/Eq/ozPnwNpsQHOeYefEhadJ/P82y0lG
-----END CERTIFICATE-----

View File

@@ -958,12 +958,7 @@ func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool)
if mustCompress {
var zbuf ByteBuffer
zw := acquireGzipWriter(&zbuf, CompressDefaultCompression)
_, err = zw.Write(w.B)
releaseGzipWriter(zw)
if err != nil {
return nil, fmt.Errorf("error when compressing automatically generated index for directory %q: %s", dirPath, err)
}
zbuf.B = AppendGzipBytesLevel(zbuf.B, w.B, CompressDefaultCompression)
w = &zbuf
}
@@ -1048,12 +1043,12 @@ func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePat
return nil, errNoCreatePermission
}
zw := acquireGzipWriter(zf, CompressDefaultCompression)
zw := acquireStacklessGzipWriter(zf, CompressDefaultCompression)
_, err = copyZeroAlloc(zw, f)
if err1 := zw.Flush(); err == nil {
err = err1
}
releaseGzipWriter(zw)
releaseStacklessGzipWriter(zw, CompressDefaultCompression)
zf.Close()
f.Close()
if err != nil {

View File

@@ -263,6 +263,12 @@ func (h *RequestHeader) SetContentLength(contentLength int) {
}
}
func (h *ResponseHeader) isCompressibleContentType() bool {
contentType := h.ContentType()
return bytes.HasPrefix(contentType, strTextSlash) ||
bytes.HasPrefix(contentType, strApplicationSlash)
}
// ContentType returns Content-Type header value.
func (h *ResponseHeader) ContentType() []byte {
contentType := h.contentType
@@ -2008,19 +2014,21 @@ func normalizeHeaderKey(b []byte, disableNormalizing bool) {
}
n := len(b)
up := true
for i := 0; i < n; i++ {
switch b[i] {
case '-':
up = true
default:
if up {
up = false
uppercaseByte(&b[i])
} else {
lowercaseByte(&b[i])
if n == 0 {
return
}
b[0] = toUpperTable[b[0]]
for i := 1; i < n; i++ {
p := &b[i]
if *p == '-' {
i++
if i < n {
b[i] = toUpperTable[b[i]]
}
continue
}
*p = toLowerTable[*p]
}
}

View File

@@ -1138,6 +1138,7 @@ func (resp *Response) WriteGzip(w *bufio.Writer) error {
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
//
// The method gzips response body and sets 'Content-Encoding: gzip'
// header before writing response to w.
@@ -1168,6 +1169,7 @@ func (resp *Response) WriteDeflate(w *bufio.Writer) error {
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
//
// The method deflates response body and sets 'Content-Encoding: deflate'
// header before writing response to w.
@@ -1187,30 +1189,42 @@ func (resp *Response) gzipBody(level int) error {
return nil
}
// Do not care about memory allocations here, since gzip is slow
// and allocates a lot of memory by itself.
if !resp.Header.isCompressibleContentType() {
// The content-type cannot be compressed.
return nil
}
if resp.bodyStream != nil {
// Reset Content-Length to -1, since it is impossible
// to determine body size beforehand of streamed compression.
// For https://github.com/valyala/fasthttp/issues/176 .
resp.Header.SetContentLength(-1)
// Do not care about memory allocations here, since gzip is slow
// and allocates a lot of memory by itself.
bs := resp.bodyStream
resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) {
zw := acquireGzipWriter(sw, level)
zw := acquireStacklessGzipWriter(sw, level)
fw := &flushWriter{
wf: zw,
bw: sw,
}
copyZeroAlloc(fw, bs)
releaseGzipWriter(zw)
releaseStacklessGzipWriter(zw, level)
if bsc, ok := bs.(io.Closer); ok {
bsc.Close()
}
})
} else {
w := responseBodyPool.Get()
zw := acquireGzipWriter(w, level)
_, err := zw.Write(resp.bodyBytes())
releaseGzipWriter(zw)
if err != nil {
return err
bodyBytes := resp.bodyBytes()
if len(bodyBytes) < minCompressLen {
// There is no sense in spending CPU time on small body compression,
// since there is a very high probability that the compressed
// body size will be bigger than the original body size.
return nil
}
w := responseBodyPool.Get()
w.B = AppendGzipBytesLevel(w.B, bodyBytes, level)
// Hack: swap resp.body with w.
if resp.body != nil {
@@ -1229,30 +1243,42 @@ func (resp *Response) deflateBody(level int) error {
return nil
}
// Do not care about memory allocations here, since flate is slow
// and allocates a lot of memory by itself.
if !resp.Header.isCompressibleContentType() {
// The content-type cannot be compressed.
return nil
}
if resp.bodyStream != nil {
// Reset Content-Length to -1, since it is impossible
// to determine body size beforehand of streamed compression.
// For https://github.com/valyala/fasthttp/issues/176 .
resp.Header.SetContentLength(-1)
// Do not care about memory allocations here, since flate is slow
// and allocates a lot of memory by itself.
bs := resp.bodyStream
resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) {
zw := acquireFlateWriter(sw, level)
zw := acquireStacklessDeflateWriter(sw, level)
fw := &flushWriter{
wf: zw,
bw: sw,
}
copyZeroAlloc(fw, bs)
releaseFlateWriter(zw)
releaseStacklessDeflateWriter(zw, level)
if bsc, ok := bs.(io.Closer); ok {
bsc.Close()
}
})
} else {
w := responseBodyPool.Get()
zw := acquireFlateWriter(w, level)
_, err := zw.Write(resp.bodyBytes())
releaseFlateWriter(zw)
if err != nil {
return err
bodyBytes := resp.bodyBytes()
if len(bodyBytes) < minCompressLen {
// There is no sense in spending CPU time on small body compression,
// since there is a very high probability that the compressed
// body size will be bigger than the original body size.
return nil
}
w := responseBodyPool.Get()
w.B = AppendDeflateBytesLevel(w.B, bodyBytes, level)
// Hack: swap resp.body with w.
if resp.body != nil {
@@ -1264,6 +1290,9 @@ func (resp *Response) deflateBody(level int) error {
return nil
}
// Bodies with sizes smaller than minCompressLen aren't compressed at all
const minCompressLen = 200
type writeFlusher interface {
io.Writer
Flush() error

View File

@@ -340,6 +340,7 @@ func CompressHandler(h RequestHandler) RequestHandler {
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
func CompressHandlerLevel(h RequestHandler, level int) RequestHandler {
return func(ctx *RequestCtx) {
h(ctx)

View File

@@ -68,4 +68,6 @@ var (
strMultipartFormData = []byte("multipart/form-data")
strBoundary = []byte("boundary")
strBytes = []byte("bytes")
strTextSlash = []byte("text/")
strApplicationSlash = []byte("application/")
)

View File

@@ -277,7 +277,7 @@ func (u *URI) parse(host, uri []byte, h *RequestHeader) {
func normalizePath(dst, src []byte) []byte {
dst = dst[:0]
dst = addLeadingSlash(dst, src)
dst = decodeArgAppend(dst, src, false)
dst = decodeArgAppendNoPlus(dst, src)
// remove duplicate slashes
b := dst