Mirror of https://github.com/jimeh/casecmp.git (synced 2026-02-19 02:16:40 +00:00)
Switch from fasthttp to net/http to simplify things
Makefile (3 changed lines)

@@ -7,7 +7,8 @@ DOCKERREPO = jimeh/casecmp
 BINDIR = $(shell dirname ${BINARY})
 SOURCES = $(shell find . -name '*.go' -o -name 'VERSION')
 VERSION = $(shell cat VERSION)
-OSARCH = "darwin/386 darwin/amd64 linux/386 linux/amd64 linux/arm"
+OSARCH = "darwin/386 darwin/amd64 linux/386 linux/amd64 linux/arm " \
+	"windows/386 windows/amd64"
 RELEASEDIR = releases
 
 $(BINARY): $(SOURCES)
main.go (63 changed lines)

@@ -2,19 +2,25 @@ package main
 
 import (
 	"fmt"
+	"io"
 	"log"
+	"net/http"
 	"os"
 	"strings"
 
-	"github.com/qiangxue/fasthttp-routing"
-	"github.com/valyala/fasthttp"
 	"gopkg.in/alecthomas/kingpin.v2"
 )
 
+// Name of application.
+var Name = "casecmp"
+
 // Version gets populated with version at build-time.
 var Version string
-var defaultPort = "8080"
 
+// DefaultPort that service runs on.
+var DefaultPort = "8080"
+
+// Argument parsing setup.
 var (
 	port = kingpin.Flag("port", "Port to listen to.").Short('p').
 		Default("").String()
@@ -24,53 +30,62 @@
 		Short('v').Bool()
 )
 
-func indexHandler(c *routing.Context) error {
-	c.Write([]byte(
+func indexHandler(w http.ResponseWriter, r *http.Request) {
+	resp := Name + " " + Version + "\n" +
+		"\n" +
 		"Case-insensitive string comparison, as an API. Because ¯\\_(ツ)_/¯\n" +
 		"\n" +
 		"Example:\n" +
 		"curl -X POST -F \"a=Foo Bar\" -F \"b=FOO BAR\" " +
-			"http://" + string(c.Host()) + "/",
-	))
-	return nil
+			"http://" + r.Host + "/\n" +
+		"curl -X POST http://" + r.Host + "/?a=Foo%%20Bar&b=FOO%%20BAR"
+
+	io.WriteString(w, resp)
 }
 
-func casecmpHandler(c *routing.Context) error {
-	a := c.FormValue("a")
-	b := c.FormValue("b")
+func casecmpHandler(w http.ResponseWriter, r *http.Request) {
+	a := r.FormValue("a")
+	b := r.FormValue("b")
 
 	resp := "0"
 	if strings.EqualFold(string(a), string(b)) {
 		resp = "1"
 	}
+	fmt.Fprintf(w, resp)
+}
 
-	c.Write([]byte(resp))
-	return nil
+func rootHandler(w http.ResponseWriter, r *http.Request) {
+	if r.URL.Path != "/" {
+		http.NotFound(w, r)
+		return
+	}
+
+	if r.Method == "GET" {
+		indexHandler(w, r)
+	} else {
+		casecmpHandler(w, r)
+	}
 }
 
 func printVersion() {
-	fmt.Println("casecmp " + Version)
+	fmt.Println(Name + " " + Version)
 }
 
 func startServer() {
-	r := routing.New()
-	r.Get("/", indexHandler)
-	r.Post("/", casecmpHandler)
-
-	server := fasthttp.Server{Handler: r.HandleRequest}
+	http.HandleFunc("/", rootHandler)
 
 	if *port == "" {
 		envPort := os.Getenv("PORT")
 		if envPort != "" {
 			*port = envPort
 		} else {
-			*port = defaultPort
+			*port = DefaultPort
 		}
 	}
 
 	address := *bind + ":" + *port
 	fmt.Println("Listening on " + address)
-	log.Fatal(server.ListenAndServe(address))
+	log.Fatal(http.ListenAndServe(address, nil))
 }
 
 func main() {
Deleted vendored dependencies (each file removed in full in this commit):

vendor/github.com/klauspost/compress/LICENSE (generated, vendored): 27 lines deleted; the Go Authors' BSD-style license text.
vendor/github.com/klauspost/compress/flate/copy.go (generated, vendored): 32 lines deleted; the forwardCopy helper for overlapping copies.
vendor/github.com/klauspost/compress/flate/crc32_amd64.go (generated, vendored): 41 lines deleted; declarations for the SSE 4.2 crc32sse, crc32sseAll, matchLenSSE4 and histogram assembly routines.
vendor/github.com/klauspost/compress/flate/crc32_amd64.s (generated, vendored): 213 lines deleted; the amd64 assembly implementations of those routines.
vendor/github.com/klauspost/compress/flate/crc32_noasm.go (generated, vendored): 35 lines deleted; the pure-Go fallbacks used when the assembly is unavailable.
vendor/github.com/klauspost/compress/flate/deflate.go (generated, vendored): 1353 lines deleted; file diff suppressed because it is too large.
vendor/github.com/klauspost/compress/flate/dict_decoder.go (generated, vendored): 184 lines deleted; the LZ77 sliding-window dictDecoder used during decompression.
vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go (generated, vendored): 701 lines deleted; the huffmanBitWriter used to emit DEFLATE blocks.
344
vendor/github.com/klauspost/compress/flate/huffman_code.go
generated
vendored
344
vendor/github.com/klauspost/compress/flate/huffman_code.go
generated
vendored
@@ -1,344 +0,0 @@
|
|||||||
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package flate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
// hcode is a huffman code with a bit code and bit length.
|
|
||||||
type hcode struct {
|
|
||||||
code, len uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
type huffmanEncoder struct {
|
|
||||||
codes []hcode
|
|
||||||
freqcache []literalNode
|
|
||||||
bitCount [17]int32
|
|
||||||
lns byLiteral // stored to avoid repeated allocation in generate
|
|
||||||
lfs byFreq // stored to avoid repeated allocation in generate
|
|
||||||
}
|
|
||||||
|
|
||||||
type literalNode struct {
|
|
||||||
literal uint16
|
|
||||||
freq int32
|
|
||||||
}
|
|
||||||
|
|
||||||
// A levelInfo describes the state of the constructed tree for a given depth.
|
|
||||||
type levelInfo struct {
|
|
||||||
// Our level. for better printing
|
|
||||||
level int32
|
|
||||||
|
|
||||||
// The frequency of the last node at this level
|
|
||||||
lastFreq int32
|
|
||||||
|
|
||||||
// The frequency of the next character to add to this level
|
|
||||||
nextCharFreq int32
|
|
||||||
|
|
||||||
// The frequency of the next pair (from level below) to add to this level.
|
|
||||||
// Only valid if the "needed" value of the next lower level is 0.
|
|
||||||
nextPairFreq int32
|
|
||||||
|
|
||||||
// The number of chains remaining to generate for this level before moving
|
|
||||||
// up to the next level
|
|
||||||
needed int32
|
|
||||||
}
|
|
||||||
|
|
||||||
// set sets the code and length of an hcode.
|
|
||||||
func (h *hcode) set(code uint16, length uint16) {
|
|
||||||
h.len = length
|
|
||||||
h.code = code
|
|
||||||
}
|
|
||||||
|
|
||||||
func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
|
|
||||||
|
|
||||||
func newHuffmanEncoder(size int) *huffmanEncoder {
|
|
||||||
return &huffmanEncoder{codes: make([]hcode, size)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generates a HuffmanCode corresponding to the fixed literal table
|
|
||||||
func generateFixedLiteralEncoding() *huffmanEncoder {
|
|
||||||
h := newHuffmanEncoder(maxNumLit)
|
|
||||||
codes := h.codes
|
|
||||||
var ch uint16
|
|
||||||
for ch = 0; ch < maxNumLit; ch++ {
|
|
||||||
var bits uint16
|
|
||||||
var size uint16
|
|
||||||
switch {
|
|
||||||
case ch < 144:
|
|
||||||
// size 8, 000110000 .. 10111111
|
|
||||||
bits = ch + 48
|
|
||||||
size = 8
|
|
||||||
break
|
|
||||||
case ch < 256:
|
|
||||||
// size 9, 110010000 .. 111111111
|
|
||||||
bits = ch + 400 - 144
|
|
||||||
size = 9
|
|
||||||
break
|
|
||||||
case ch < 280:
|
|
||||||
// size 7, 0000000 .. 0010111
|
|
||||||
bits = ch - 256
|
|
||||||
size = 7
|
|
||||||
break
|
|
||||||
default:
|
|
||||||
// size 8, 11000000 .. 11000111
|
|
||||||
bits = ch + 192 - 280
|
|
||||||
size = 8
|
|
||||||
}
|
|
||||||
codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateFixedOffsetEncoding() *huffmanEncoder {
|
|
||||||
h := newHuffmanEncoder(30)
|
|
||||||
codes := h.codes
|
|
||||||
for ch := range codes {
|
|
||||||
codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
|
|
||||||
var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()
|
|
||||||
|
|
||||||
func (h *huffmanEncoder) bitLength(freq []int32) int {
|
|
||||||
var total int
|
|
||||||
for i, f := range freq {
|
|
||||||
if f != 0 {
|
|
||||||
total += int(f) * int(h.codes[i].len)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return total
|
|
||||||
}
|
|
||||||
|
|
||||||
const maxBitsLimit = 16
|
|
||||||
|
|
||||||
// Return the number of literals assigned to each bit size in the Huffman encoding.
//
// This method is only called when list.length >= 3.
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list     An array of the literals with non-zero frequencies
//          and their associated frequencies. The array is in order of increasing
//          frequency, and has as its last element a special element with frequency
//          MaxInt32.
// maxBits  The maximum number of bits that should be used to encode any literal.
//          Must be less than 16.
// return   An integer array in which array[i] indicates the number of literals
//          that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
	if maxBits >= maxBitsLimit {
		panic("flate: maxBits too large")
	}
	n := int32(len(list))
	list = list[0 : n+1]
	list[n] = maxNode()

	// The tree can't have greater depth than n - 1, no matter what. This
	// saves a little bit of work in some small cases.
	if maxBits > n-1 {
		maxBits = n - 1
	}

	// Create information about each of the levels.
	// A bogus "Level 0" whose sole purpose is so that
	// level1.prev.needed == 0. This makes level1.nextPairFreq
	// be a legitimate value that never gets chosen.
	var levels [maxBitsLimit]levelInfo
	// leafCounts[i] counts the number of literals at the left
	// of ancestors of the rightmost node at level i.
	// leafCounts[i][j] is the number of literals at the left
	// of the level j ancestor.
	var leafCounts [maxBitsLimit][maxBitsLimit]int32

	for level := int32(1); level <= maxBits; level++ {
		// For every level, the first two items are the first two characters.
		// We initialize the levels as if we had already figured this out.
		levels[level] = levelInfo{
			level:        level,
			lastFreq:     list[1].freq,
			nextCharFreq: list[2].freq,
			nextPairFreq: list[0].freq + list[1].freq,
		}
		leafCounts[level][level] = 2
		if level == 1 {
			levels[level].nextPairFreq = math.MaxInt32
		}
	}

	// We need a total of 2*n - 2 items at the top level and have already generated 2.
	levels[maxBits].needed = 2*n - 4

	level := maxBits
	for {
		l := &levels[level]
		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
			// We've run out of both leaves and pairs.
			// End all calculations for this level.
			// To make sure we never come back to this level or any lower level,
			// set nextPairFreq impossibly large.
			l.needed = 0
			levels[level+1].nextPairFreq = math.MaxInt32
			level++
			continue
		}

		prevFreq := l.lastFreq
		if l.nextCharFreq < l.nextPairFreq {
			// The next item on this row is a leaf node.
			n := leafCounts[level][level] + 1
			l.lastFreq = l.nextCharFreq
			// Lower leafCounts are the same as for the previous node.
			leafCounts[level][level] = n
			l.nextCharFreq = list[n].freq
		} else {
			// The next item on this row is a pair from the previous row.
			// nextPairFreq isn't valid until we generate two
			// more values in the level below.
			l.lastFreq = l.nextPairFreq
			// Take leaf counts from the lower level, except counts[level] remains the same.
			copy(leafCounts[level][:level], leafCounts[level-1][:level])
			levels[l.level-1].needed = 2
		}

		if l.needed--; l.needed == 0 {
			// We've done everything we need to do for this level.
			// Continue calculating one level up. Fill in nextPairFreq
			// of that level with the sum of the two nodes we've just calculated on
			// this level.
			if l.level == maxBits {
				// All done!
				break
			}
			levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
			level++
		} else {
			// If we stole from below, move down temporarily to replenish it.
			for levels[level-1].needed > 0 {
				level--
			}
		}
	}

	// Something is wrong if, at the end, the top level is null or hasn't used
	// all of the leaves.
	if leafCounts[maxBits][maxBits] != n {
		panic("leafCounts[maxBits][maxBits] != n")
	}

	bitCount := h.bitCount[:maxBits+1]
	bits := 1
	counts := &leafCounts[maxBits]
	for level := maxBits; level > 0; level-- {
		// chain.leafCount gives the number of literals requiring at least "bits"
		// bits to encode.
		bitCount[bits] = counts[level] - counts[level-1]
		bits++
	}
	return bitCount
}
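A useful sanity check on the result: bitCount must describe a complete prefix code, that is, the codes of all lengths together must exactly fill a binary tree of depth maxBits. A small self-contained check of that invariant (the function and its name are illustrative, not part of the removed file):

package main

import "fmt"

// kraftComplete reports whether bitCount describes a complete prefix code,
// where bitCount[i] is the number of symbols encoded with i bits.
func kraftComplete(bitCount []int, maxBits int) bool {
	total := 0
	for bits, n := range bitCount {
		if bits == 0 || n == 0 {
			continue
		}
		// Each code of length "bits" occupies 2^(maxBits-bits) leaves
		// of a full binary tree of depth maxBits.
		total += n << uint(maxBits-bits)
	}
	return total == 1<<uint(maxBits)
}

func main() {
	// Length histogram of the fixed literal/length code from RFC 1951 3.2.6:
	// 24 codes of 7 bits, 152 of 8 bits, 112 of 9 bits.
	bitCount := []int{0, 0, 0, 0, 0, 0, 0, 24, 152, 112}
	fmt.Println(kraftComplete(bitCount, 9)) // true
}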

// Look at the leaves and assign them a bit count and an encoding as specified
// in RFC 1951 3.2.2.
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
	code := uint16(0)
	for n, bits := range bitCount {
		code <<= 1
		if n == 0 || bits == 0 {
			continue
		}
		// The literals list[len(list)-bits] .. list[len(list)-1]
		// are encoded using "bits" bits, and get the values
		// code, code + 1, .... The code values are
		// assigned in literal order (not frequency order).
		chunk := list[len(list)-int(bits):]

		h.lns.sort(chunk)
		for _, node := range chunk {
			h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
			code++
		}
		list = list[0 : len(list)-int(bits)]
	}
}
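The assignment above is the canonical-code construction from RFC 1951 3.2.2: codes of the same length are consecutive integers, and each length starts where the previous length left off, shifted left by one. A standalone sketch of that rule, independent of the deleted package's types:

package main

import "fmt"

// canonicalCodes assigns canonical Huffman code values given the code length
// of each symbol, following the algorithm in RFC 1951 section 3.2.2.
func canonicalCodes(lengths []int) []int {
	maxLen := 0
	for _, l := range lengths {
		if l > maxLen {
			maxLen = l
		}
	}
	// Count how many symbols use each length.
	count := make([]int, maxLen+1)
	for _, l := range lengths {
		if l > 0 {
			count[l]++
		}
	}
	// Compute the first code value for each length.
	nextCode := make([]int, maxLen+1)
	code := 0
	for bits := 1; bits <= maxLen; bits++ {
		code = (code + count[bits-1]) << 1
		nextCode[bits] = code
	}
	// Hand out consecutive codes in symbol order.
	codes := make([]int, len(lengths))
	for sym, l := range lengths {
		if l > 0 {
			codes[sym] = nextCode[l]
			nextCode[l]++
		}
	}
	return codes
}

func main() {
	// The worked example from RFC 1951: lengths (3,3,3,3,3,2,4,4)
	// yield codes 010..110, 00, 1110, 1111.
	fmt.Println(canonicalCodes([]int{3, 3, 3, 3, 3, 2, 4, 4}))
	// Output: [2 3 4 5 6 0 14 15]
}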

// Update this Huffman Code object to be the minimum code for the specified frequency count.
//
// freq     An array of frequencies, in which frequency[i] gives the frequency of literal i.
// maxBits  The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
	if h.freqcache == nil {
		// Allocate a reusable buffer with the longest possible frequency table.
		// Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
		// The largest of these is maxNumLit, so we allocate for that case.
		h.freqcache = make([]literalNode, maxNumLit+1)
	}
	list := h.freqcache[:len(freq)+1]
	// Number of non-zero literals
	count := 0
	// Set list to be the set of all non-zero literals and their frequencies
	for i, f := range freq {
		if f != 0 {
			list[count] = literalNode{uint16(i), f}
			count++
		} else {
			list[count] = literalNode{}
			h.codes[i].len = 0
		}
	}
	list[len(freq)] = literalNode{}

	list = list[:count]
	if count <= 2 {
		// Handle the small cases here, because they are awkward for the general case code. With
		// two or fewer literals, everything has bit length 1.
		for i, node := range list {
			// "list" is in order of increasing literal value.
			h.codes[node.literal].set(uint16(i), 1)
		}
		return
	}
	h.lfs.sort(list)

	// Get the number of literals for each bit count
	bitCount := h.bitCounts(list, maxBits)
	// And do the assignment
	h.assignEncodingAndSize(bitCount, list)
}
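generate expects a dense frequency table indexed by symbol. A trivial, self-contained way to build such a table for byte literals (purely illustrative; the real compressor counts literal/length and distance tokens, not raw bytes):

package main

import "fmt"

// byteFrequencies returns a 256-entry table where entry i is the number of
// times byte value i occurs in data, which is the shape of input a Huffman
// code generator works from.
func byteFrequencies(data []byte) []int32 {
	freq := make([]int32, 256)
	for _, b := range data {
		freq[b]++
	}
	return freq
}

func main() {
	freq := byteFrequencies([]byte("abracadabra"))
	fmt.Println(freq['a'], freq['b'], freq['r'], freq['c'], freq['d']) // 5 2 2 1 1
}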

type byLiteral []literalNode

func (s *byLiteral) sort(a []literalNode) {
	*s = byLiteral(a)
	sort.Sort(s)
}

func (s byLiteral) Len() int { return len(s) }

func (s byLiteral) Less(i, j int) bool {
	return s[i].literal < s[j].literal
}

func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

type byFreq []literalNode

func (s *byFreq) sort(a []literalNode) {
	*s = byFreq(a)
	sort.Sort(s)
}

func (s byFreq) Len() int { return len(s) }

func (s byFreq) Less(i, j int) bool {
	if s[i].freq == s[j].freq {
		return s[i].literal < s[j].literal
	}
	return s[i].freq < s[j].freq
}

func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
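The two sorter types exist only to impose an ordering; on Go 1.8 and later the same orderings can be written with sort.Slice and a closure, which is the more common idiom today. A sketch of the equivalent byFreq ordering, not a suggested change to the vendored code:

package main

import (
	"fmt"
	"sort"
)

type literalNode struct {
	literal uint16
	freq    int32
}

func main() {
	nodes := []literalNode{{literal: 'b', freq: 3}, {literal: 'a', freq: 3}, {literal: 'c', freq: 1}}

	// Ascending frequency, ties broken by literal value.
	sort.Slice(nodes, func(i, j int) bool {
		if nodes[i].freq == nodes[j].freq {
			return nodes[i].literal < nodes[j].literal
		}
		return nodes[i].freq < nodes[j].freq
	})
	fmt.Println(nodes) // [{99 1} {97 3} {98 3}]
}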
868 vendor/github.com/klauspost/compress/flate/inflate.go (generated, vendored)
@@ -1,868 +0,0 @@
(868 lines removed: the vendored, generated DEFLATE decompressor, including the Huffman decoding tables, the decompressor state machine, and the NewReader, NewReaderDict and Reset entry points.)
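The removed file mirrors the standard library's compress/flate inflater, and its exported surface (NewReader, NewReaderDict, Resetter) matches it. A minimal round-trip through the standard library shows the API this vendored copy provided:

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
	"log"
)

func main() {
	// Compress.
	var buf bytes.Buffer
	w, err := flate.NewWriter(&buf, flate.BestCompression)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello, hello, hello")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress with the reader side, the role played by the deleted inflate.go.
	r := flate.NewReader(&buf)
	out, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	r.Close()
	fmt.Println(string(out)) // hello, hello, hello
}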
48 vendor/github.com/klauspost/compress/flate/reverse_bits.go (generated, vendored)
@@ -1,48 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

var reverseByte = [256]byte{
	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
	0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
	0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
	0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
	0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
	0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
	0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
	0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
	0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
	0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
	0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
	0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
	0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
	0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
	0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
	0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
	0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
	0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
	0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
	0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
	0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
	0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
}

func reverseUint16(v uint16) uint16 {
	return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8
}

func reverseBits(number uint16, bitLength byte) uint16 {
	return reverseUint16(number << uint8(16-bitLength))
}
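Since Go 1.9 the same operation is available in the standard library: bits.Reverse16 mirrors all 16 bits, and shifting first limits it to the code width. A sketch of the equivalence, not code from this repository:

package main

import (
	"fmt"
	"math/bits"
)

// reverseLowBits mirrors the low "width" bits of v, the same transformation
// the deleted reverseBits helper performs with its lookup table.
func reverseLowBits(v uint16, width uint) uint16 {
	return bits.Reverse16(v << (16 - width))
}

func main() {
	// 0b00110000 (the fixed Huffman code for literal 0) reversed over 8 bits
	// is 0b00001100.
	fmt.Printf("%08b\n", reverseLowBits(0x30, 8)) // 00001100
}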
900 vendor/github.com/klauspost/compress/flate/snappy.go (generated, vendored)
@@ -1,900 +0,0 @@
|
|||||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
|
||||||
// Modified for deflate by Klaus Post (c) 2015.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package flate
|
|
||||||
|
|
||||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
|
||||||
func emitLiteral(dst *tokens, lit []byte) {
|
|
||||||
ol := int(dst.n)
|
|
||||||
for i, v := range lit {
|
|
||||||
dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
|
|
||||||
}
|
|
||||||
dst.n += uint16(len(lit))
|
|
||||||
}
|
|
||||||
|
|
||||||
// emitCopy writes a copy chunk and returns the number of bytes written.
|
|
||||||
func emitCopy(dst *tokens, offset, length int) {
|
|
||||||
dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize))
|
|
||||||
dst.n++
|
|
||||||
}
|
|
||||||
|
|
||||||
type snappyEnc interface {
|
|
||||||
Encode(dst *tokens, src []byte)
|
|
||||||
Reset()
|
|
||||||
}
|
|
||||||
|
|
||||||
func newSnappy(level int) snappyEnc {
|
|
||||||
switch level {
|
|
||||||
case 1:
|
|
||||||
return &snappyL1{}
|
|
||||||
case 2:
|
|
||||||
return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
|
|
||||||
case 3:
|
|
||||||
return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
|
|
||||||
case 4:
|
|
||||||
return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}}
|
|
||||||
default:
|
|
||||||
panic("invalid level specified")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
tableBits = 14 // Bits used in the table
|
|
||||||
tableSize = 1 << tableBits // Size of the table
|
|
||||||
tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
|
|
||||||
tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
|
|
||||||
baseMatchOffset = 1 // The smallest match offset
|
|
||||||
baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
|
|
||||||
maxMatchOffset = 1 << 15 // The largest match offset
|
|
||||||
)
|
|
||||||
|
|
||||||
func load32(b []byte, i int) uint32 {
|
|
||||||
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
|
||||||
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
|
|
||||||
}
|
|
||||||
|
|
||||||
func load64(b []byte, i int) uint64 {
|
|
||||||
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
|
||||||
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
|
|
||||||
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
|
|
||||||
}
|
|
||||||
|
|
||||||
func hash(u uint32) uint32 {
|
|
||||||
return (u * 0x1e35a7bd) >> tableShift
|
|
||||||
}
|
|
||||||
|
|
||||||
// snappyL1 encapsulates level 1 compression
|
|
||||||
type snappyL1 struct{}
|
|
||||||
|
|
||||||
func (e *snappyL1) Reset() {}
|
|
||||||
|
|
||||||
func (e *snappyL1) Encode(dst *tokens, src []byte) {
|
|
||||||
const (
|
|
||||||
inputMargin = 16 - 1
|
|
||||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
|
||||||
)
|
|
||||||
|
|
||||||
// This check isn't in the Snappy implementation, but there, the caller
|
|
||||||
// instead of the callee handles this case.
|
|
||||||
if len(src) < minNonLiteralBlockSize {
|
|
||||||
// We do not fill the token table.
|
|
||||||
// This will be picked up by caller.
|
|
||||||
dst.n = uint16(len(src))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize the hash table.
|
|
||||||
//
|
|
||||||
// The table element type is uint16, as s < sLimit and sLimit < len(src)
|
|
||||||
// and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535.
|
|
||||||
var table [tableSize]uint16
|
|
||||||
|
|
||||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
|
||||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
|
||||||
// looking for copies.
|
|
||||||
sLimit := len(src) - inputMargin
|
|
||||||
|
|
||||||
// nextEmit is where in src the next emitLiteral should start from.
|
|
||||||
nextEmit := 0
|
|
||||||
|
|
||||||
// The encoded form must start with a literal, as there are no previous
|
|
||||||
// bytes to copy, so we start looking for hash matches at s == 1.
|
|
||||||
s := 1
|
|
||||||
nextHash := hash(load32(src, s))
|
|
||||||
|
|
||||||
for {
|
|
||||||
// Copied from the C++ snappy implementation:
|
|
||||||
//
|
|
||||||
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
|
||||||
// found, start looking only at every other byte. If 32 more bytes are
|
|
||||||
// scanned (or skipped), look at every third byte, etc.. When a match
|
|
||||||
// is found, immediately go back to looking at every byte. This is a
|
|
||||||
// small loss (~5% performance, ~0.1% density) for compressible data
|
|
||||||
// due to more bookkeeping, but for non-compressible data (such as
|
|
||||||
// JPEG) it's a huge win since the compressor quickly "realizes" the
|
|
||||||
// data is incompressible and doesn't bother looking for matches
|
|
||||||
// everywhere.
|
|
||||||
//
|
|
||||||
// The "skip" variable keeps track of how many bytes there are since
|
|
||||||
// the last match; dividing it by 32 (ie. right-shifting by five) gives
|
|
||||||
// the number of bytes to move ahead for each iteration.
|
|
||||||
skip := 32
|
|
||||||
|
|
||||||
nextS := s
|
|
||||||
candidate := 0
|
|
||||||
for {
|
|
||||||
s = nextS
|
|
||||||
bytesBetweenHashLookups := skip >> 5
|
|
||||||
nextS = s + bytesBetweenHashLookups
|
|
||||||
skip += bytesBetweenHashLookups
|
|
||||||
if nextS > sLimit {
|
|
||||||
goto emitRemainder
|
|
||||||
}
|
|
||||||
candidate = int(table[nextHash&tableMask])
|
|
||||||
table[nextHash&tableMask] = uint16(s)
|
|
||||||
nextHash = hash(load32(src, nextS))
|
|
||||||
if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
|
||||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
|
||||||
// them as literal bytes.
|
|
||||||
emitLiteral(dst, src[nextEmit:s])
|
|
||||||
|
|
||||||
// Call emitCopy, and then see if another emitCopy could be our next
|
|
||||||
// move. Repeat until we find no match for the input immediately after
|
|
||||||
// what was consumed by the last emitCopy call.
|
|
||||||
//
|
|
||||||
// If we exit this loop normally then we need to call emitLiteral next,
|
|
||||||
// though we don't yet know how big the literal will be. We handle that
|
|
||||||
// by proceeding to the next iteration of the main loop. We also can
|
|
||||||
// exit this loop via goto if we get close to exhausting the input.
|
|
||||||
for {
|
|
||||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
|
||||||
// literal bytes prior to s.
|
|
||||||
base := s
|
|
||||||
|
|
||||||
// Extend the 4-byte match as long as possible.
|
|
||||||
//
|
|
||||||
// This is an inlined version of Snappy's:
|
|
||||||
// s = extendMatch(src, candidate+4, s+4)
|
|
||||||
s += 4
|
|
||||||
s1 := base + maxMatchLength
|
|
||||||
if s1 > len(src) {
|
|
||||||
s1 = len(src)
|
|
||||||
}
|
|
||||||
a := src[s:s1]
|
|
||||||
b := src[candidate+4:]
|
|
||||||
b = b[:len(a)]
|
|
||||||
l := len(a)
|
|
||||||
for i := range a {
|
|
||||||
if a[i] != b[i] {
|
|
||||||
l = i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s += l
|
|
||||||
|
|
||||||
// matchToken is flate's equivalent of Snappy's emitCopy.
|
|
||||||
dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset))
|
|
||||||
dst.n++
|
|
||||||
nextEmit = s
|
|
||||||
if s >= sLimit {
|
|
||||||
goto emitRemainder
|
|
||||||
}
|
|
||||||
|
|
||||||
// We could immediately start working at s now, but to improve
|
|
||||||
// compression we first update the hash table at s-1 and at s. If
|
|
||||||
// another emitCopy is not our next move, also calculate nextHash
|
|
||||||
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
|
||||||
// are faster as one load64 call (with some shifts) instead of
|
|
||||||
// three load32 calls.
|
|
||||||
x := load64(src, s-1)
|
|
||||||
prevHash := hash(uint32(x >> 0))
|
|
||||||
table[prevHash&tableMask] = uint16(s - 1)
|
|
||||||
currHash := hash(uint32(x >> 8))
|
|
||||||
candidate = int(table[currHash&tableMask])
|
|
||||||
table[currHash&tableMask] = uint16(s)
|
|
||||||
if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
|
|
||||||
nextHash = hash(uint32(x >> 16))
|
|
||||||
s++
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
emitRemainder:
|
|
||||||
if nextEmit < len(src) {
|
|
||||||
emitLiteral(dst, src[nextEmit:])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type tableEntry struct {
|
|
||||||
val uint32
|
|
||||||
offset int32
|
|
||||||
}
|
|
||||||
|
|
||||||
func load3232(b []byte, i int32) uint32 {
|
|
||||||
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
|
||||||
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
|
|
||||||
}
|
|
||||||
|
|
||||||
func load6432(b []byte, i int32) uint64 {
|
|
||||||
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
|
||||||
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
|
|
||||||
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
|
|
||||||
}
|
|
||||||
|
|
||||||
// snappyGen maintains the table for matches,
|
|
||||||
// and the previous byte block for level 2.
|
|
||||||
// This is the generic implementation.
|
|
||||||
type snappyGen struct {
|
|
||||||
prev []byte
|
|
||||||
cur int32
|
|
||||||
}
|
|
||||||
|
|
||||||
// snappyGen maintains the table for matches,
|
|
||||||
// and the previous byte block for level 2.
|
|
||||||
// This is the generic implementation.
|
|
||||||
type snappyL2 struct {
|
|
||||||
snappyGen
|
|
||||||
table [tableSize]tableEntry
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeL2 uses a similar algorithm to level 1, but is capable
|
|
||||||
// of matching across blocks giving better compression at a small slowdown.
|
|
||||||
func (e *snappyL2) Encode(dst *tokens, src []byte) {
|
|
||||||
const (
|
|
||||||
inputMargin = 8 - 1
|
|
||||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
|
||||||
)
|
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
|
||||||
if e.cur > 1<<30 {
|
|
||||||
for i := range e.table[:] {
|
|
||||||
e.table[i] = tableEntry{}
|
|
||||||
}
|
|
||||||
e.cur = maxStoreBlockSize
|
|
||||||
}
|
|
||||||
|
|
||||||
// This check isn't in the Snappy implementation, but there, the caller
|
|
||||||
// instead of the callee handles this case.
|
|
||||||
if len(src) < minNonLiteralBlockSize {
|
|
||||||
// We do not fill the token table.
|
|
||||||
// This will be picked up by caller.
|
|
||||||
dst.n = uint16(len(src))
|
|
||||||
e.cur += maxStoreBlockSize
|
|
||||||
e.prev = e.prev[:0]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
|
||||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
|
||||||
// looking for copies.
|
|
||||||
sLimit := int32(len(src) - inputMargin)
|
|
||||||
|
|
||||||
// nextEmit is where in src the next emitLiteral should start from.
|
|
||||||
nextEmit := int32(0)
|
|
||||||
s := int32(0)
|
|
||||||
cv := load3232(src, s)
|
|
||||||
nextHash := hash(cv)
|
|
||||||
|
|
||||||
for {
|
|
||||||
// Copied from the C++ snappy implementation:
|
|
||||||
//
|
|
||||||
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
|
||||||
// found, start looking only at every other byte. If 32 more bytes are
|
|
||||||
// scanned (or skipped), look at every third byte, etc.. When a match
|
|
||||||
// is found, immediately go back to looking at every byte. This is a
|
|
||||||
// small loss (~5% performance, ~0.1% density) for compressible data
|
|
||||||
// due to more bookkeeping, but for non-compressible data (such as
|
|
||||||
// JPEG) it's a huge win since the compressor quickly "realizes" the
|
|
||||||
// data is incompressible and doesn't bother looking for matches
|
|
||||||
// everywhere.
|
|
||||||
//
|
|
||||||
// The "skip" variable keeps track of how many bytes there are since
|
|
||||||
// the last match; dividing it by 32 (ie. right-shifting by five) gives
|
|
||||||
// the number of bytes to move ahead for each iteration.
|
|
||||||
skip := int32(32)
|
|
||||||
|
|
||||||
nextS := s
|
|
||||||
var candidate tableEntry
|
|
||||||
for {
|
|
||||||
s = nextS
|
|
||||||
bytesBetweenHashLookups := skip >> 5
|
|
||||||
nextS = s + bytesBetweenHashLookups
|
|
||||||
skip += bytesBetweenHashLookups
|
|
||||||
if nextS > sLimit {
|
|
||||||
goto emitRemainder
|
|
||||||
}
|
|
||||||
candidate = e.table[nextHash&tableMask]
|
|
||||||
now := load3232(src, nextS)
|
|
||||||
e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
|
|
||||||
nextHash = hash(now)
|
|
||||||
|
|
||||||
offset := s - (candidate.offset - e.cur)
|
|
||||||
if offset > maxMatchOffset || cv != candidate.val {
|
|
||||||
// Out of range or not matched.
|
|
||||||
cv = now
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
|
||||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
|
||||||
// them as literal bytes.
|
|
||||||
emitLiteral(dst, src[nextEmit:s])
|
|
||||||
|
|
||||||
// Call emitCopy, and then see if another emitCopy could be our next
|
|
||||||
// move. Repeat until we find no match for the input immediately after
|
|
||||||
// what was consumed by the last emitCopy call.
|
|
||||||
//
|
|
||||||
// If we exit this loop normally then we need to call emitLiteral next,
|
|
||||||
// though we don't yet know how big the literal will be. We handle that
|
|
||||||
// by proceeding to the next iteration of the main loop. We also can
|
|
||||||
// exit this loop via goto if we get close to exhausting the input.
|
|
||||||
for {
|
|
||||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
|
||||||
// literal bytes prior to s.
|
|
||||||
|
|
||||||
// Extend the 4-byte match as long as possible.
|
|
||||||
//
|
|
||||||
s += 4
|
|
||||||
t := candidate.offset - e.cur + 4
|
|
||||||
l := e.matchlen(s, t, src)
|
|
||||||
|
|
||||||
// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
|
|
||||||
dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
|
|
||||||
dst.n++
|
|
||||||
s += l
|
|
||||||
nextEmit = s
|
|
||||||
if s >= sLimit {
|
|
||||||
t += l
|
|
||||||
// Index first pair after match end.
|
|
||||||
if int(t+4) < len(src) && t > 0 {
|
|
||||||
cv := load3232(src, t)
|
|
||||||
e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv}
|
|
||||||
}
|
|
||||||
goto emitRemainder
|
|
||||||
}
|
|
||||||
|
|
||||||
// We could immediately start working at s now, but to improve
|
|
||||||
// compression we first update the hash table at s-1 and at s. If
|
|
||||||
// another emitCopy is not our next move, also calculate nextHash
|
|
||||||
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
|
||||||
// are faster as one load64 call (with some shifts) instead of
|
|
||||||
// three load32 calls.
|
|
||||||
x := load6432(src, s-1)
|
|
||||||
prevHash := hash(uint32(x))
|
|
||||||
e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
|
|
||||||
x >>= 8
|
|
||||||
currHash := hash(uint32(x))
|
|
||||||
candidate = e.table[currHash&tableMask]
|
|
||||||
e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}
|
|
||||||
|
|
||||||
offset := s - (candidate.offset - e.cur)
|
|
||||||
if offset > maxMatchOffset || uint32(x) != candidate.val {
|
|
||||||
cv = uint32(x >> 8)
|
|
||||||
nextHash = hash(cv)
|
|
||||||
s++
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
emitRemainder:
|
|
||||||
if int(nextEmit) < len(src) {
|
|
||||||
emitLiteral(dst, src[nextEmit:])
|
|
||||||
}
|
|
||||||
e.cur += int32(len(src))
|
|
||||||
e.prev = e.prev[:len(src)]
|
|
||||||
copy(e.prev, src)
|
|
||||||
}
|
|
||||||
|
|
||||||
type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}

// snappyL3
type snappyL3 struct {
	snappyGen
	table [tableSize]tableEntryPrev
}

// Encode uses a similar algorithm to level 2, will check up to two candidates.
func (e *snappyL3) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 8 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	// Protect against e.cur wraparound.
	if e.cur > 1<<30 {
		for i := range e.table[:] {
			e.table[i] = tableEntryPrev{}
		}
		e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
	}

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		e.cur += maxStoreBlockSize
		e.prev = e.prev[:0]
		return
	}

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := int32(0)
	s := int32(0)
	cv := load3232(src, s)
	nextHash := hash(cv)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := int32(32)

		nextS := s
		var candidate tableEntry
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidates := e.table[nextHash&tableMask]
			now := load3232(src, nextS)
			e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
			nextHash = hash(now)

			// Check both candidates
			candidate = candidates.Cur
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset <= maxMatchOffset {
					break
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset <= maxMatchOffset {
						break
					}
				}
			}
			cv = now
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			//
			s += 4
			t := candidate.offset - e.cur + 4
			l := e.matchlen(s, t, src)

			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
			dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
			dst.n++
			s += l
			nextEmit = s
			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+4) < len(src) && t > 0 {
					cv := load3232(src, t)
					nextHash = hash(cv)
					e.table[nextHash&tableMask] = tableEntryPrev{
						Prev: e.table[nextHash&tableMask].Cur,
						Cur:  tableEntry{offset: e.cur + t, val: cv},
					}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-3 to s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-3)
			prevHash := hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 3, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))

			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 2, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))

			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 1, val: uint32(x)},
			}
			x >>= 8
			currHash := hash(uint32(x))
			candidates := e.table[currHash&tableMask]
			cv = uint32(x)
			e.table[currHash&tableMask] = tableEntryPrev{
				Prev: candidates.Cur,
				Cur:  tableEntry{offset: s + e.cur, val: cv},
			}

			// Check both candidates
			candidate = candidates.Cur
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset <= maxMatchOffset {
					continue
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset <= maxMatchOffset {
						continue
					}
				}
			}
			cv = uint32(x >> 8)
			nextHash = hash(cv)
			s++
			break
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
	e.cur += int32(len(src))
	e.prev = e.prev[:len(src)]
	copy(e.prev, src)
}

// snappyL4
type snappyL4 struct {
	snappyL3
}

// Encode uses a similar algorithm to level 3,
// but will check up to two candidates if first isn't long enough.
func (e *snappyL4) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 8 - 3
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		matchLenGood           = 12
	)

	// Protect against e.cur wraparound.
	if e.cur > 1<<30 {
		for i := range e.table[:] {
			e.table[i] = tableEntryPrev{}
		}
		e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
	}

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		e.cur += maxStoreBlockSize
		e.prev = e.prev[:0]
		return
	}

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := int32(0)
	s := int32(0)
	cv := load3232(src, s)
	nextHash := hash(cv)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := int32(32)

		nextS := s
		var candidate tableEntry
		var candidateAlt tableEntry
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidates := e.table[nextHash&tableMask]
			now := load3232(src, nextS)
			e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
			nextHash = hash(now)

			// Check both candidates
			candidate = candidates.Cur
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset < maxMatchOffset {
					offset = s - (candidates.Prev.offset - e.cur)
					if cv == candidates.Prev.val && offset < maxMatchOffset {
						candidateAlt = candidates.Prev
					}
					break
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset < maxMatchOffset {
						break
					}
				}
			}
			cv = now
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			//
			s += 4
			t := candidate.offset - e.cur + 4
			l := e.matchlen(s, t, src)
			// Try alternative candidate if match length < matchLenGood.
			if l < matchLenGood-4 && candidateAlt.offset != 0 {
				t2 := candidateAlt.offset - e.cur + 4
				l2 := e.matchlen(s, t2, src)
				if l2 > l {
					l = l2
					t = t2
				}
			}
			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
			dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
			dst.n++
			s += l
			nextEmit = s
			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+4) < len(src) && t > 0 {
					cv := load3232(src, t)
					nextHash = hash(cv)
					e.table[nextHash&tableMask] = tableEntryPrev{
						Prev: e.table[nextHash&tableMask].Cur,
						Cur:  tableEntry{offset: e.cur + t, val: cv},
					}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-3 to s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-3)
			prevHash := hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 3, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))

			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 2, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))

			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 1, val: uint32(x)},
			}
			x >>= 8
			currHash := hash(uint32(x))
			candidates := e.table[currHash&tableMask]
			cv = uint32(x)
			e.table[currHash&tableMask] = tableEntryPrev{
				Prev: candidates.Cur,
				Cur:  tableEntry{offset: s + e.cur, val: cv},
			}

			// Check both candidates
			candidate = candidates.Cur
			candidateAlt = tableEntry{}
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset <= maxMatchOffset {
					offset = s - (candidates.Prev.offset - e.cur)
					if cv == candidates.Prev.val && offset <= maxMatchOffset {
						candidateAlt = candidates.Prev
					}
					continue
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset <= maxMatchOffset {
						continue
					}
				}
			}
			cv = uint32(x >> 8)
			nextHash = hash(cv)
			s++
			break
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
	e.cur += int32(len(src))
	e.prev = e.prev[:len(src)]
	copy(e.prev, src)
}

func (e *snappyGen) matchlen(s, t int32, src []byte) int32 {
	s1 := int(s) + maxMatchLength - 4
	if s1 > len(src) {
		s1 = len(src)
	}

	// If we are inside the current block
	if t >= 0 {
		b := src[t:]
		a := src[s:s1]
		b = b[:len(a)]
		// Extend the match to be as long as possible.
		for i := range a {
			if a[i] != b[i] {
				return int32(i)
			}
		}
		return int32(len(a))
	}

	// We found a match in the previous block.
	tp := int32(len(e.prev)) + t
	if tp < 0 {
		return 0
	}

	// Extend the match to be as long as possible.
	a := src[s:s1]
	b := e.prev[tp:]
	if len(b) > len(a) {
		b = b[:len(a)]
	}
	a = a[:len(b)]
	for i := range b {
		if a[i] != b[i] {
			return int32(i)
		}
	}

	// If we reached our limit, we matched everything we are
	// allowed to in the previous block and we return.
	n := int32(len(b))
	if int(s+n) == s1 {
		return n
	}

	// Continue looking for more matches in the current block.
	a = src[s+n : s1]
	b = src[:len(a)]
	for i := range a {
		if a[i] != b[i] {
			return int32(i) + n
		}
	}
	return int32(len(a)) + n
}

// Reset the encoding table.
func (e *snappyGen) Reset() {
	e.prev = e.prev[:0]
	e.cur += maxMatchOffset
}
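The Encode loops above all rely on the skip-ahead heuristic described in their comment blocks. As a standalone, hedged illustration (not part of the removed file; the loop bound and print are mine), the probe step grows by one byte for every 32 bytes scanned without a match:

package main

import "fmt"

func main() {
	// Mirrors bytesBetweenHashLookups := skip >> 5 in the encoders above:
	// for every additional 32 bytes skipped without a match, the step grows by one.
	skip := int32(32)
	s := int32(0)
	for i := 0; i < 100; i++ {
		step := skip >> 5
		s += step
		skip += step
	}
	fmt.Println("position after 100 probes:", s) // well past 100: probes spread out on incompressible data
}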
115
vendor/github.com/klauspost/compress/flate/token.go
generated
vendored
115
vendor/github.com/klauspost/compress/flate/token.go
generated
vendored
@@ -1,115 +0,0 @@
[entire file removed in this commit: 115 lines defining the flate token type (2 type bits, 8 xlength bits, 22 xoffset bits), the literalToken/matchToken helpers, and the lengthCodes/offsetCodes lookup tables]
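The matchToken calls in the removed snappy.go pack each match into one 32-bit token using the constants from the token.go deleted above. A minimal sketch of that packing, assuming only those constants; pack is a made-up name:

package main

import "fmt"

const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	matchType   = 1 << 30
)

// pack mirrors matchToken: xlength = length - baseMatchLength,
// xoffset = offset - baseMatchOffset.
func pack(xlength, xoffset uint32) uint32 {
	return matchType + xlength<<lengthShift + xoffset
}

func main() {
	t := pack(10, 300)
	fmt.Println(t>>lengthShift&0xff, t&offsetMask) // prints: 10 300
}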
344
vendor/github.com/klauspost/compress/gzip/gunzip.go
generated
vendored
344
vendor/github.com/klauspost/compress/gzip/gunzip.go
generated
vendored
@@ -1,344 +0,0 @@
[entire file removed in this commit: 344 lines implementing the gzip Reader (RFC 1952 header parsing, Latin-1 header strings, CRC-32 and size verification, multistream support, Read/WriteTo/Close)]
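The Reader documentation in the removed gunzip.go describes a Reset plus Multistream(false) pattern for reading concatenated gzip members one at a time. A small sketch of that pattern using the standard library's compress/gzip, whose API matches the removed vendored copy:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func main() {
	// Build two concatenated gzip members in memory.
	var buf bytes.Buffer
	for _, s := range []string{"first stream\n", "second stream\n"} {
		w := gzip.NewWriter(&buf)
		w.Write([]byte(s))
		w.Close()
	}

	// Read them back one member at a time: Multistream(false) stops at the
	// end of the current member, Reset advances to the next (io.EOF when done).
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	for {
		zr.Multistream(false)
		io.Copy(io.Discard, zr)
		fmt.Println("read one member")
		if err := zr.Reset(&buf); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
	}
}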
251
vendor/github.com/klauspost/compress/gzip/gzip.go
generated
vendored
251
vendor/github.com/klauspost/compress/gzip/gzip.go
generated
vendored
@@ -1,251 +0,0 @@
[entire file removed in this commit: 251 lines implementing the gzip Writer (lazy header write on the first Write/Flush/Close, Extra/Name/Comment header fields, CRC-32 and size trailer)]
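The removed gzip.go writes the header lazily on the first Write, Flush, or Close, so Header fields must be set before then. A short usage sketch against the standard library's compress/gzip (same API); the Name and Comment values here are made up:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	// Header fields must be set before the first Write/Flush/Close,
	// since the header bytes are emitted lazily on that first call.
	zw.Name = "casecmp.txt"     // hypothetical file name
	zw.Comment = "example data" // stored as Latin-1 per RFC 1952
	zw.Write([]byte("hello"))
	zw.Close() // flushes deflate output, then writes the CRC-32 and size trailer
	fmt.Println(buf.Len(), "compressed bytes")
}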
178
vendor/github.com/klauspost/compress/zlib/reader.go
generated
vendored
178
vendor/github.com/klauspost/compress/zlib/reader.go
generated
vendored
@@ -1,178 +0,0 @@
[entire file removed in this commit: 178 lines implementing the zlib reader (RFC 1950 header and preset-dictionary checks, Adler-32 verification, Resetter support)]
201
vendor/github.com/klauspost/compress/zlib/writer.go
generated
vendored
201
vendor/github.com/klauspost/compress/zlib/writer.go
generated
vendored
@@ -1,201 +0,0 @@
|
|||||||
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package zlib
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"hash/adler32"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/klauspost/compress/flate"
|
|
||||||
)
|
|
||||||
|
|
||||||
// These constants are copied from the flate package, so that code that imports
|
|
||||||
// "compress/zlib" does not also have to import "compress/flate".
|
|
||||||
const (
|
|
||||||
NoCompression = flate.NoCompression
|
|
||||||
BestSpeed = flate.BestSpeed
|
|
||||||
BestCompression = flate.BestCompression
|
|
||||||
DefaultCompression = flate.DefaultCompression
|
|
||||||
ConstantCompression = flate.ConstantCompression
|
|
||||||
HuffmanOnly = flate.HuffmanOnly
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Writer takes data written to it and writes the compressed
|
|
||||||
// form of that data to an underlying writer (see NewWriter).
|
|
||||||
type Writer struct {
|
|
||||||
w io.Writer
|
|
||||||
level int
|
|
||||||
dict []byte
|
|
||||||
compressor *flate.Writer
|
|
||||||
digest hash.Hash32
|
|
||||||
err error
|
|
||||||
scratch [4]byte
|
|
||||||
wroteHeader bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriter creates a new Writer.
|
|
||||||
// Writes to the returned Writer are compressed and written to w.
|
|
||||||
//
|
|
||||||
// It is the caller's responsibility to call Close on the WriteCloser when done.
|
|
||||||
// Writes may be buffered and not flushed until Close.
|
|
||||||
func NewWriter(w io.Writer) *Writer {
|
|
||||||
z, _ := NewWriterLevelDict(w, DefaultCompression, nil)
|
|
||||||
return z
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriterLevel is like NewWriter but specifies the compression level instead
|
|
||||||
// of assuming DefaultCompression.
|
|
||||||
//
|
|
||||||
// The compression level can be DefaultCompression, NoCompression, HuffmanOnly
|
|
||||||
// or any integer value between BestSpeed and BestCompression inclusive.
|
|
||||||
// The error returned will be nil if the level is valid.
|
|
||||||
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
|
|
||||||
return NewWriterLevelDict(w, level, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to
|
|
||||||
// compress with.
|
|
||||||
//
|
|
||||||
// The dictionary may be nil. If not, its contents should not be modified until
|
|
||||||
// the Writer is closed.
|
|
||||||
func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) {
|
|
||||||
if level < HuffmanOnly || level > BestCompression {
|
|
||||||
return nil, fmt.Errorf("zlib: invalid compression level: %d", level)
|
|
||||||
}
|
|
||||||
return &Writer{
|
|
||||||
w: w,
|
|
||||||
level: level,
|
|
||||||
dict: dict,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset clears the state of the Writer z such that it is equivalent to its
|
|
||||||
// initial state from NewWriterLevel or NewWriterLevelDict, but instead writing
|
|
||||||
// to w.
|
|
||||||
func (z *Writer) Reset(w io.Writer) {
|
|
||||||
z.w = w
|
|
||||||
// z.level and z.dict left unchanged.
|
|
||||||
if z.compressor != nil {
|
|
||||||
z.compressor.Reset(w)
|
|
||||||
}
|
|
||||||
if z.digest != nil {
|
|
||||||
z.digest.Reset()
|
|
||||||
}
|
|
||||||
z.err = nil
|
|
||||||
z.scratch = [4]byte{}
|
|
||||||
z.wroteHeader = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeHeader writes the ZLIB header.
|
|
||||||
func (z *Writer) writeHeader() (err error) {
|
|
||||||
z.wroteHeader = true
|
|
||||||
// ZLIB has a two-byte header (as documented in RFC 1950).
|
|
||||||
// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.
|
|
||||||
// The next four bits is the CM (compression method), which is 8 for deflate.
|
|
||||||
z.scratch[0] = 0x78
|
|
||||||
// The next two bits is the FLEVEL (compression level). The four values are:
|
|
||||||
// 0=fastest, 1=fast, 2=default, 3=best.
|
|
||||||
// The next bit, FDICT, is set if a dictionary is given.
|
|
||||||
// The final five FCHECK bits form a mod-31 checksum.
|
|
||||||
switch z.level {
|
|
||||||
case -2, 0, 1:
|
|
||||||
z.scratch[1] = 0 << 6
|
|
||||||
case 2, 3, 4, 5:
|
|
||||||
z.scratch[1] = 1 << 6
|
|
||||||
case 6, -1:
|
|
||||||
z.scratch[1] = 2 << 6
|
|
||||||
case 7, 8, 9:
|
|
||||||
z.scratch[1] = 3 << 6
|
|
||||||
default:
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
if z.dict != nil {
|
|
||||||
z.scratch[1] |= 1 << 5
|
|
||||||
}
|
|
||||||
z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31)
|
|
||||||
if _, err = z.w.Write(z.scratch[0:2]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if z.dict != nil {
|
|
||||||
// The next four bytes are the Adler-32 checksum of the dictionary.
|
|
||||||
checksum := adler32.Checksum(z.dict)
|
|
||||||
z.scratch[0] = uint8(checksum >> 24)
|
|
||||||
z.scratch[1] = uint8(checksum >> 16)
|
|
||||||
z.scratch[2] = uint8(checksum >> 8)
|
|
||||||
z.scratch[3] = uint8(checksum >> 0)
|
|
||||||
if _, err = z.w.Write(z.scratch[0:4]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if z.compressor == nil {
|
|
||||||
// Initialize deflater unless the Writer is being reused
|
|
||||||
// after a Reset call.
|
|
||||||
z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
z.digest = adler32.New()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes a compressed form of p to the underlying io.Writer. The
|
|
||||||
// compressed bytes are not necessarily flushed until the Writer is closed or
|
|
||||||
// explicitly flushed.
|
|
||||||
func (z *Writer) Write(p []byte) (n int, err error) {
|
|
||||||
if !z.wroteHeader {
|
|
||||||
z.err = z.writeHeader()
|
|
||||||
}
|
|
||||||
if z.err != nil {
|
|
||||||
return 0, z.err
|
|
||||||
}
|
|
||||||
if len(p) == 0 {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
n, err = z.compressor.Write(p)
|
|
||||||
if err != nil {
|
|
||||||
z.err = err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
z.digest.Write(p)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush flushes the Writer to its underlying io.Writer.
|
|
||||||
func (z *Writer) Flush() error {
|
|
||||||
if !z.wroteHeader {
|
|
||||||
z.err = z.writeHeader()
|
|
||||||
}
|
|
||||||
if z.err != nil {
|
|
||||||
return z.err
|
|
||||||
}
|
|
||||||
z.err = z.compressor.Flush()
|
|
||||||
return z.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the Writer, flushing any unwritten data to the underlying
|
|
||||||
// io.Writer, but does not close the underlying io.Writer.
|
|
||||||
func (z *Writer) Close() error {
|
|
||||||
if !z.wroteHeader {
|
|
||||||
z.err = z.writeHeader()
|
|
||||||
}
|
|
||||||
if z.err != nil {
|
|
||||||
return z.err
|
|
||||||
}
|
|
||||||
z.err = z.compressor.Close()
|
|
||||||
if z.err != nil {
|
|
||||||
return z.err
|
|
||||||
}
|
|
||||||
checksum := z.digest.Sum32()
|
|
||||||
// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
|
|
||||||
z.scratch[0] = uint8(checksum >> 24)
|
|
||||||
z.scratch[1] = uint8(checksum >> 16)
|
|
||||||
z.scratch[2] = uint8(checksum >> 8)
|
|
||||||
z.scratch[3] = uint8(checksum >> 0)
|
|
||||||
_, z.err = z.w.Write(z.scratch[0:4])
|
|
||||||
return z.err
|
|
||||||
}
|
|
||||||
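For orientation, a minimal round-trip sketch of how this Writer API is typically driven. It uses the standard library's compress/zlib, which shares the same interface as this vendored copy; the payload string is made up:

```Go
package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// Compress a payload; the two-byte header is written lazily on the first Write.
	var buf bytes.Buffer
	zw := zlib.NewWriter(&buf)
	if _, err := zw.Write([]byte("hello, zlib")); err != nil {
		log.Fatal(err)
	}
	// Close flushes the compressor and appends the big-endian Adler-32 trailer.
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back to confirm the round trip.
	zr, err := zlib.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()
	out, _ := ioutil.ReadAll(zr)
	fmt.Println(string(out)) // hello, zlib
}
```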
22	vendor/github.com/klauspost/cpuid/LICENSE (generated, vendored)
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015 Klaus Post

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
145	vendor/github.com/klauspost/cpuid/README.md (generated, vendored)
@@ -1,145 +0,0 @@
# cpuid
Package cpuid provides information about the CPU running the current program.

CPU features are detected on startup, and kept for fast access through the life of the application.
Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use.

You can access the CPU information by accessing the shared CPU variable of the cpuid library.

Package home: https://github.com/klauspost/cpuid

[![GoDoc][1]][2] [![Build Status][3]][4]

[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
[2]: https://godoc.org/github.com/klauspost/cpuid
[3]: https://travis-ci.org/klauspost/cpuid.svg
[4]: https://travis-ci.org/klauspost/cpuid

# features
## CPU Instructions
* **CMOV** (i686 CMOV)
* **NX** (NX (No-Execute) bit)
* **AMD3DNOW** (AMD 3DNOW)
* **AMD3DNOWEXT** (AMD 3DNowExt)
* **MMX** (standard MMX)
* **MMXEXT** (SSE integer functions or AMD MMX ext)
* **SSE** (SSE functions)
* **SSE2** (P4 SSE functions)
* **SSE3** (Prescott SSE3 functions)
* **SSSE3** (Conroe SSSE3 functions)
* **SSE4** (Penryn SSE4.1 functions)
* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions)
* **SSE42** (Nehalem SSE4.2 functions)
* **AVX** (AVX functions)
* **AVX2** (AVX2 functions)
* **FMA3** (Intel FMA 3)
* **FMA4** (Bulldozer FMA4 functions)
* **XOP** (Bulldozer XOP functions)
* **F16C** (Half-precision floating-point conversion)
* **BMI1** (Bit Manipulation Instruction Set 1)
* **BMI2** (Bit Manipulation Instruction Set 2)
* **TBM** (AMD Trailing Bit Manipulation)
* **LZCNT** (LZCNT instruction)
* **POPCNT** (POPCNT instruction)
* **AESNI** (Advanced Encryption Standard New Instructions)
* **CLMUL** (Carry-less Multiplication)
* **HTT** (Hyperthreading (enabled))
* **HLE** (Hardware Lock Elision)
* **RTM** (Restricted Transactional Memory)
* **RDRAND** (RDRAND instruction is available)
* **RDSEED** (RDSEED instruction is available)
* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
* **SHA** (Intel SHA Extensions)
* **AVX512F** (AVX-512 Foundation)
* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions)
* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions)
* **AVX512PF** (AVX-512 Prefetch Instructions)
* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions)
* **AVX512CD** (AVX-512 Conflict Detection Instructions)
* **AVX512BW** (AVX-512 Byte and Word Instructions)
* **AVX512VL** (AVX-512 Vector Length Extensions)
* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions)
* **MPX** (Intel MPX (Memory Protection Extensions))
* **ERMS** (Enhanced REP MOVSB/STOSB)
* **RDTSCP** (RDTSCP Instruction)
* **CX16** (CMPXCHG16B Instruction)
* **SGX** (Software Guard Extensions, with activation details)

## Performance
* **RDTSCP()** Returns current cycle count. Can be used for benchmarking.
* **SSE2SLOW** (SSE2 is supported, but usually not faster)
* **SSE3SLOW** (SSE3 is supported, but usually not faster)
* **ATOM** (Atom processor, some SSSE3 instructions are slower)
* **Cache line** (Probable size of a cache line).
* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.

## Cpu Vendor/VM
* **Intel**
* **AMD**
* **VIA**
* **Transmeta**
* **NSC**
* **KVM** (Kernel-based Virtual Machine)
* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
* **VMware**
* **XenHVM**

# installing

```go get github.com/klauspost/cpuid```

# example

```Go
package main

import (
	"fmt"
	"github.com/klauspost/cpuid"
)

func main() {
	// Print basic CPU information:
	fmt.Println("Name:", cpuid.CPU.BrandName)
	fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
	fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
	fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
	fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
	fmt.Println("Features:", cpuid.CPU.Features)
	fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
	fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes")
	fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes")
	fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes")
	fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes")

	// Test if we have a specific feature:
	if cpuid.CPU.SSE() {
		fmt.Println("We have Streaming SIMD Extensions")
	}
}
```

Sample output:
```
>go run main.go
Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
PhysicalCores: 2
ThreadsPerCore: 2
LogicalCores: 4
Family 6 Model: 42
Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
Cacheline bytes: 64
We have Streaming SIMD Extensions
```

# private package

In the "private" folder you can find an autogenerated version of the library you can include in your own packages.

For this purpose all exports are removed, and functions and constants are lowercased.

This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages.

# license

This code is published under an MIT license. See LICENSE file for more information.
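As a small illustration of what the feature flags above are typically used for, the sketch below selects a code path once at startup. It relies only on the CPU.SSE() accessor shown in the README's own example; the dotProduct names are invented for this sketch and are not part of the library:

```Go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid"
)

// dotProduct is bound once, at startup, based on detected CPU features.
// Both implementations here are plain-Go stand-ins.
var dotProduct = func() func(a, b []float32) float32 {
	if cpuid.CPU.SSE() {
		return dotProductSSE // would be the vectorized path in real code
	}
	return dotProductGeneric
}()

func dotProductGeneric(a, b []float32) float32 {
	var s float32
	for i := range a {
		s += a[i] * b[i]
	}
	return s
}

// Stand-in for an SSE-accelerated version; same result, different dispatch target.
func dotProductSSE(a, b []float32) float32 { return dotProductGeneric(a, b) }

func main() {
	fmt.Println(dotProduct([]float32{1, 2, 3}, []float32{4, 5, 6})) // 32
}
```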
1030	vendor/github.com/klauspost/cpuid/cpuid.go (generated, vendored)
File diff suppressed because it is too large
42	vendor/github.com/klauspost/cpuid/cpuid_386.s (generated, vendored)
@@ -1,42 +0,0 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build 386,!gccgo

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORL CX, CX
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+4(FP)
	MOVL BX, ebx+8(FP)
	MOVL CX, ecx+12(FP)
	MOVL DX, edx+16(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func xgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+4(FP)
	MOVL DX, edx+8(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET
42	vendor/github.com/klauspost/cpuid/cpuid_amd64.s (generated, vendored)
@@ -1,42 +0,0 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

//+build amd64,!gccgo

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORQ CX, CX
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+8(FP)
	MOVL DX, edx+12(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET
17	vendor/github.com/klauspost/cpuid/detect_intel.go (generated, vendored)
@@ -1,17 +0,0 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build 386,!gccgo amd64,!gccgo

package cpuid

func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)

func initCPU() {
	cpuid = asmCpuid
	cpuidex = asmCpuidex
	xgetbv = asmXgetbv
	rdtscpAsm = asmRdtscpAsm
}
23	vendor/github.com/klauspost/cpuid/detect_ref.go (generated, vendored)
@@ -1,23 +0,0 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build !amd64,!386 gccgo

package cpuid

func initCPU() {
	cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}

	cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}

	xgetbv = func(index uint32) (eax, edx uint32) {
		return 0, 0
	}

	rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}
}
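Taken together, detect_intel.go and detect_ref.go show the package's dispatch pattern: package-level function variables are bound at init time either to the assembly-backed probes or to pure-Go stubs. A self-contained sketch of the same idea, with invented names and a constant standing in for the build-tag choice:

```Go
package main

import "fmt"

// checksum is bound at init time to whichever implementation is available.
var checksum func(p []byte) uint32

// In the real library this choice is made per build tag; here a constant
// fakes the "accelerated path available" probe.
const haveFastPath = false

func initChecksum() {
	if haveFastPath {
		checksum = fastChecksum
	} else {
		checksum = refChecksum
	}
}

// fastChecksum stands in for an assembly-backed implementation.
func fastChecksum(p []byte) uint32 { return refChecksum(p) }

// refChecksum is the portable fallback.
func refChecksum(p []byte) uint32 {
	var s uint32
	for _, b := range p {
		s = s*31 + uint32(b)
	}
	return s
}

func main() {
	initChecksum()
	fmt.Println(checksum([]byte("hello")))
}
```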
4	vendor/github.com/klauspost/cpuid/generate.go (generated, vendored)
@@ -1,4 +0,0 @@
package cpuid

//go:generate go run private-gen.go
//go:generate gofmt -w ./private
28	vendor/github.com/klauspost/crc32/LICENSE (generated, vendored)
@@ -1,28 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2015 Klaus Post

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
92	vendor/github.com/klauspost/crc32/README.md (generated, vendored)
@@ -1,92 +0,0 @@
# Not needed!

If you use Go 1.7 or later, there is no reason to use this package any more, since optimizations have been merged into the standard library.

The following repository and documentation is left for historical reasons (and to not break existing code).

# crc32

This package is a drop-in replacement for the standard library `hash/crc32` package, that features SSE 4.2 optimizations on x64 platforms, for a 10x speedup.

[](https://travis-ci.org/klauspost/crc32)

# usage

Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer.

Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go.

# changes
* Oct 20, 2016: Changes have been merged to upstream Go. Package updated to match.
* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable.

# performance

For *Go 1.7* performance is equivalent to the standard library. So if you use this package for Go 1.7 you can switch back.

For IEEE tables (the most common), there is approximately a factor 10 speedup with "CLMUL" (Carryless multiplication) instruction:
```
benchmark            old ns/op    new ns/op    delta
BenchmarkCrc32KB     99955        10258        -89.74%

benchmark            old MB/s     new MB/s     speedup
BenchmarkCrc32KB     327.83       3194.20      9.74x
```

For other tables and "CLMUL" capable machines the performance is the same as the standard library.

Here are some detailed benchmarks, comparing to go 1.5 standard library with and without assembler enabled.

```
Std:        Standard Go 1.5 library
Crc:        Indicates IEEE type CRC.
40B:        Size of each slice encoded.
NoAsm:      Assembler was disabled (ie. not an AMD64 or SSE 4.2+ capable machine).
Castagnoli: Castagnoli CRC type.

BenchmarkStdCrc40B-4        10000000    158 ns/op     252.88 MB/s
BenchmarkCrc40BNoAsm-4      20000000    105 ns/op     377.38 MB/s (slice8)
BenchmarkCrc40B-4           20000000    105 ns/op     378.77 MB/s (slice8)

BenchmarkStdCrc1KB-4          500000   3604 ns/op     284.10 MB/s
BenchmarkCrc1KBNoAsm-4       1000000   1463 ns/op     699.79 MB/s (slice8)
BenchmarkCrc1KB-4            3000000    396 ns/op    2583.69 MB/s (asm)

BenchmarkStdCrc8KB-4          200000  11417 ns/op     717.48 MB/s (slice8)
BenchmarkCrc8KBNoAsm-4        200000  11317 ns/op     723.85 MB/s (slice8)
BenchmarkCrc8KB-4             500000   2919 ns/op    2805.73 MB/s (asm)

BenchmarkStdCrc32KB-4          30000  45749 ns/op     716.24 MB/s (slice8)
BenchmarkCrc32KBNoAsm-4        30000  45109 ns/op     726.42 MB/s (slice8)
BenchmarkCrc32KB-4            100000  11497 ns/op    2850.09 MB/s (asm)

BenchmarkStdNoAsmCastagnol40B-4     10000000    161 ns/op     246.94 MB/s
BenchmarkStdCastagnoli40B-4         50000000   28.4 ns/op    1410.69 MB/s (asm)
BenchmarkCastagnoli40BNoAsm-4       20000000    100 ns/op     398.01 MB/s (slice8)
BenchmarkCastagnoli40B-4            50000000   28.2 ns/op    1419.54 MB/s (asm)

BenchmarkStdNoAsmCastagnoli1KB-4      500000   3622 ns/op     282.67 MB/s
BenchmarkStdCastagnoli1KB-4         10000000    144 ns/op    7099.78 MB/s (asm)
BenchmarkCastagnoli1KBNoAsm-4        1000000   1475 ns/op     694.14 MB/s (slice8)
BenchmarkCastagnoli1KB-4            10000000    146 ns/op    6993.35 MB/s (asm)

BenchmarkStdNoAsmCastagnoli8KB-4       50000  28781 ns/op     284.63 MB/s
BenchmarkStdCastagnoli8KB-4          1000000   1029 ns/op    7957.89 MB/s (asm)
BenchmarkCastagnoli8KBNoAsm-4         200000  11410 ns/op     717.94 MB/s (slice8)
BenchmarkCastagnoli8KB-4             1000000   1000 ns/op    8188.71 MB/s (asm)

BenchmarkStdNoAsmCastagnoli32KB-4      10000 115426 ns/op     283.89 MB/s
BenchmarkStdCastagnoli32KB-4          300000   4065 ns/op    8059.13 MB/s (asm)
BenchmarkCastagnoli32KBNoAsm-4         30000  45171 ns/op     725.41 MB/s (slice8)
BenchmarkCastagnoli32KB-4             500000   4077 ns/op    8035.89 MB/s (asm)
```

The IEEE assembler optimizations have been submitted and will be part of the Go 1.6 standard library.

However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7.

# license

Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions.
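A minimal sketch of the drop-in usage the README describes; the only assumption is the import swap from hash/crc32, and the input string is made up:

```Go
package main

import (
	"fmt"

	crc32 "github.com/klauspost/crc32" // drop-in replacement for "hash/crc32"
)

func main() {
	data := []byte("The quick brown fox jumps over the lazy dog")

	// IEEE is the polynomial used by gzip, zip, png, ...
	fmt.Printf("IEEE:       %08x\n", crc32.ChecksumIEEE(data))

	// Castagnoli (CRC-32C) uses the SSE 4.2 CRC32 instruction where available.
	castagnoli := crc32.MakeTable(crc32.Castagnoli)
	fmt.Printf("Castagnoli: %08x\n", crc32.Checksum(data, castagnoli))
}
```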
207	vendor/github.com/klauspost/crc32/crc32.go (generated, vendored)
@@ -1,207 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32,
// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for
// information.
//
// Polynomials are represented in LSB-first form also known as reversed representation.
//
// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials
// for information.
package crc32

import (
	"hash"
	"sync"
)

// The size of a CRC-32 checksum in bytes.
const Size = 4

// Predefined polynomials.
const (
	// IEEE is by far and away the most common CRC-32 polynomial.
	// Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ...
	IEEE = 0xedb88320

	// Castagnoli's polynomial, used in iSCSI.
	// Has better error detection characteristics than IEEE.
	// http://dx.doi.org/10.1109/26.231911
	Castagnoli = 0x82f63b78

	// Koopman's polynomial.
	// Also has better error detection characteristics than IEEE.
	// http://dx.doi.org/10.1109/DSN.2002.1028931
	Koopman = 0xeb31d82e
)

// Table is a 256-word table representing the polynomial for efficient processing.
type Table [256]uint32

// This file makes use of functions implemented in architecture-specific files.
// The interface that they implement is as follows:
//
//	// archAvailableIEEE reports whether an architecture-specific CRC32-IEEE
//	// algorithm is available.
//	archAvailableIEEE() bool
//
//	// archInitIEEE initializes the architecture-specific CRC3-IEEE algorithm.
//	// It can only be called if archAvailableIEEE() returns true.
//	archInitIEEE()
//
//	// archUpdateIEEE updates the given CRC32-IEEE. It can only be called if
//	// archInitIEEE() was previously called.
//	archUpdateIEEE(crc uint32, p []byte) uint32
//
//	// archAvailableCastagnoli reports whether an architecture-specific
//	// CRC32-C algorithm is available.
//	archAvailableCastagnoli() bool
//
//	// archInitCastagnoli initializes the architecture-specific CRC32-C
//	// algorithm. It can only be called if archAvailableCastagnoli() returns
//	// true.
//	archInitCastagnoli()
//
//	// archUpdateCastagnoli updates the given CRC32-C. It can only be called
//	// if archInitCastagnoli() was previously called.
//	archUpdateCastagnoli(crc uint32, p []byte) uint32

// castagnoliTable points to a lazily initialized Table for the Castagnoli
// polynomial. MakeTable will always return this value when asked to make a
// Castagnoli table so we can compare against it to find when the caller is
// using this polynomial.
var castagnoliTable *Table
var castagnoliTable8 *slicing8Table
var castagnoliArchImpl bool
var updateCastagnoli func(crc uint32, p []byte) uint32
var castagnoliOnce sync.Once

func castagnoliInit() {
	castagnoliTable = simpleMakeTable(Castagnoli)
	castagnoliArchImpl = archAvailableCastagnoli()

	if castagnoliArchImpl {
		archInitCastagnoli()
		updateCastagnoli = archUpdateCastagnoli
	} else {
		// Initialize the slicing-by-8 table.
		castagnoliTable8 = slicingMakeTable(Castagnoli)
		updateCastagnoli = func(crc uint32, p []byte) uint32 {
			return slicingUpdate(crc, castagnoliTable8, p)
		}
	}
}

// IEEETable is the table for the IEEE polynomial.
var IEEETable = simpleMakeTable(IEEE)

// ieeeTable8 is the slicing8Table for IEEE
var ieeeTable8 *slicing8Table
var ieeeArchImpl bool
var updateIEEE func(crc uint32, p []byte) uint32
var ieeeOnce sync.Once

func ieeeInit() {
	ieeeArchImpl = archAvailableIEEE()

	if ieeeArchImpl {
		archInitIEEE()
		updateIEEE = archUpdateIEEE
	} else {
		// Initialize the slicing-by-8 table.
		ieeeTable8 = slicingMakeTable(IEEE)
		updateIEEE = func(crc uint32, p []byte) uint32 {
			return slicingUpdate(crc, ieeeTable8, p)
		}
	}
}

// MakeTable returns a Table constructed from the specified polynomial.
// The contents of this Table must not be modified.
func MakeTable(poly uint32) *Table {
	switch poly {
	case IEEE:
		ieeeOnce.Do(ieeeInit)
		return IEEETable
	case Castagnoli:
		castagnoliOnce.Do(castagnoliInit)
		return castagnoliTable
	}
	return simpleMakeTable(poly)
}

// digest represents the partial evaluation of a checksum.
type digest struct {
	crc uint32
	tab *Table
}

// New creates a new hash.Hash32 computing the CRC-32 checksum
// using the polynomial represented by the Table.
// Its Sum method will lay the value out in big-endian byte order.
func New(tab *Table) hash.Hash32 {
	if tab == IEEETable {
		ieeeOnce.Do(ieeeInit)
	}
	return &digest{0, tab}
}

// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum
// using the IEEE polynomial.
// Its Sum method will lay the value out in big-endian byte order.
func NewIEEE() hash.Hash32 { return New(IEEETable) }

func (d *digest) Size() int { return Size }

func (d *digest) BlockSize() int { return 1 }

func (d *digest) Reset() { d.crc = 0 }

// Update returns the result of adding the bytes in p to the crc.
func Update(crc uint32, tab *Table, p []byte) uint32 {
	switch tab {
	case castagnoliTable:
		return updateCastagnoli(crc, p)
	case IEEETable:
		// Unfortunately, because IEEETable is exported, IEEE may be used without a
		// call to MakeTable. We have to make sure it gets initialized in that case.
		ieeeOnce.Do(ieeeInit)
		return updateIEEE(crc, p)
	default:
		return simpleUpdate(crc, tab, p)
	}
}

func (d *digest) Write(p []byte) (n int, err error) {
	switch d.tab {
	case castagnoliTable:
		d.crc = updateCastagnoli(d.crc, p)
	case IEEETable:
		// We only create digest objects through New() which takes care of
		// initialization in this case.
		d.crc = updateIEEE(d.crc, p)
	default:
		d.crc = simpleUpdate(d.crc, d.tab, p)
	}
	return len(p), nil
}

func (d *digest) Sum32() uint32 { return d.crc }

func (d *digest) Sum(in []byte) []byte {
	s := d.Sum32()
	return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}

// Checksum returns the CRC-32 checksum of data
// using the polynomial represented by the Table.
func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) }

// ChecksumIEEE returns the CRC-32 checksum of data
// using the IEEE polynomial.
func ChecksumIEEE(data []byte) uint32 {
	ieeeOnce.Do(ieeeInit)
	return updateIEEE(0, data)
}
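For orientation, the incremental API exposed by this file mirrors the standard hash/crc32, so the sketch below uses the standard library import and runs as-is; it shows that one-shot, Update-chained, and hash.Hash32 computations agree:

```Go
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	tab := crc32.MakeTable(crc32.Castagnoli)

	// One-shot checksum of the whole message.
	whole := crc32.Checksum([]byte("hello, world"), tab)

	// The same value computed incrementally with Update...
	crc := crc32.Update(0, tab, []byte("hello, "))
	crc = crc32.Update(crc, tab, []byte("world"))

	// ...or through the hash.Hash32 returned by New.
	h := crc32.New(tab)
	h.Write([]byte("hello, "))
	h.Write([]byte("world"))

	fmt.Printf("%08x %08x %08x\n", whole, crc, h.Sum32()) // all three match
}
```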
230	vendor/github.com/klauspost/crc32/crc32_amd64.go (generated, vendored)
@@ -1,230 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine,!gccgo

// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a
// description of the interface that each architecture-specific file
// implements.

package crc32

import "unsafe"

// This file contains the code to call the SSE 4.2 version of the Castagnoli
// and IEEE CRC.

// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and use
// CPUID to test for SSE 4.1, 4.2 and CLMUL support.
func haveSSE41() bool
func haveSSE42() bool
func haveCLMUL() bool

// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32

// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42Triple(
	crcA, crcB, crcC uint32,
	a, b, c []byte,
	rounds uint32,
) (retA uint32, retB uint32, retC uint32)

// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ
// instruction as well as SSE 4.1.
//go:noescape
func ieeeCLMUL(crc uint32, p []byte) uint32

var sse42 = haveSSE42()
var useFastIEEE = haveCLMUL() && haveSSE41()

const castagnoliK1 = 168
const castagnoliK2 = 1344

type sse42Table [4]Table

var castagnoliSSE42TableK1 *sse42Table
var castagnoliSSE42TableK2 *sse42Table

func archAvailableCastagnoli() bool {
	return sse42
}

func archInitCastagnoli() {
	if !sse42 {
		panic("arch-specific Castagnoli not available")
	}
	castagnoliSSE42TableK1 = new(sse42Table)
	castagnoliSSE42TableK2 = new(sse42Table)
	// See description in updateCastagnoli.
	//    t[0][i] = CRC(i000, O)
	//    t[1][i] = CRC(0i00, O)
	//    t[2][i] = CRC(00i0, O)
	//    t[3][i] = CRC(000i, O)
	// where O is a sequence of K zeros.
	var tmp [castagnoliK2]byte
	for b := 0; b < 4; b++ {
		for i := 0; i < 256; i++ {
			val := uint32(i) << uint32(b*8)
			castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1])
			castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:])
		}
	}
}

// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the
// table given) with the given initial crc value. This corresponds to
// CRC(crc, O) in the description in updateCastagnoli.
func castagnoliShift(table *sse42Table, crc uint32) uint32 {
	return table[3][crc>>24] ^
		table[2][(crc>>16)&0xFF] ^
		table[1][(crc>>8)&0xFF] ^
		table[0][crc&0xFF]
}

func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
	if !sse42 {
		panic("not available")
	}

	// This method is inspired from the algorithm in Intel's white paper:
	//    "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction"
	// The same strategy of splitting the buffer in three is used but the
	// combining calculation is different; the complete derivation is explained
	// below.
	//
	// -- The basic idea --
	//
	// The CRC32 instruction (available in SSE4.2) can process 8 bytes at a
	// time. In recent Intel architectures the instruction takes 3 cycles;
	// however the processor can pipeline up to three instructions if they
	// don't depend on each other.
	//
	// Roughly this means that we can process three buffers in about the same
	// time we can process one buffer.
	//
	// The idea is then to split the buffer in three, CRC the three pieces
	// separately and then combine the results.
	//
	// Combining the results requires precomputed tables, so we must choose a
	// fixed buffer length to optimize. The longer the length, the faster; but
	// only buffers longer than this length will use the optimization. We choose
	// two cutoffs and compute tables for both:
	// - one around 512: 168*3=504
	// - one around 4KB: 1344*3=4032
	//
	// -- The nitty gritty --
	//
	// Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with
	// initial non-inverted CRC I). This function has the following properties:
	//   (a) CRC(I, AB) = CRC(CRC(I, A), B)
	//   (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B)
	//
	// Say we want to compute CRC(I, ABC) where A, B, C are three sequences of
	// K bytes each, where K is a fixed constant. Let O be the sequence of K zero
	// bytes.
	//
	// CRC(I, ABC) = CRC(I, ABO xor C)
	//             = CRC(I, ABO) xor CRC(0, C)
	//             = CRC(CRC(I, AB), O) xor CRC(0, C)
	//             = CRC(CRC(I, AO xor B), O) xor CRC(0, C)
	//             = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C)
	//             = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C)
	//
	// The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B),
	// and CRC(0, C) efficiently. We just need to find a way to quickly compute
	// CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these
	// values; since we can't have a 32-bit table, we break it up into four
	// 8-bit tables:
	//
	//    CRC(uvwx, O) = CRC(u000, O) xor
	//                   CRC(0v00, O) xor
	//                   CRC(00w0, O) xor
	//                   CRC(000x, O)
	//
	// We can compute tables corresponding to the four terms for all 8-bit
	// values.

	crc = ^crc

	// If a buffer is long enough to use the optimization, process the first few
	// bytes to align the buffer to an 8 byte boundary (if necessary).
	if len(p) >= castagnoliK1*3 {
		delta := int(uintptr(unsafe.Pointer(&p[0])) & 7)
		if delta != 0 {
			delta = 8 - delta
			crc = castagnoliSSE42(crc, p[:delta])
			p = p[delta:]
		}
	}

	// Process 3*K2 at a time.
	for len(p) >= castagnoliK2*3 {
		// Compute CRC(I, A), CRC(0, B), and CRC(0, C).
		crcA, crcB, crcC := castagnoliSSE42Triple(
			crc, 0, 0,
			p, p[castagnoliK2:], p[castagnoliK2*2:],
			castagnoliK2/24)

		// CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
		crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB
		// CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
		crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC
		p = p[castagnoliK2*3:]
	}

	// Process 3*K1 at a time.
	for len(p) >= castagnoliK1*3 {
		// Compute CRC(I, A), CRC(0, B), and CRC(0, C).
		crcA, crcB, crcC := castagnoliSSE42Triple(
			crc, 0, 0,
			p, p[castagnoliK1:], p[castagnoliK1*2:],
			castagnoliK1/24)

		// CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
		crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB
		// CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
		crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC
		p = p[castagnoliK1*3:]
	}

	// Use the simple implementation for what's left.
	crc = castagnoliSSE42(crc, p)
	return ^crc
}

func archAvailableIEEE() bool {
	return useFastIEEE
}

var archIeeeTable8 *slicing8Table

func archInitIEEE() {
	if !useFastIEEE {
		panic("not available")
	}
	// We still use slicing-by-8 for small buffers.
	archIeeeTable8 = slicingMakeTable(IEEE)
}

func archUpdateIEEE(crc uint32, p []byte) uint32 {
	if !useFastIEEE {
		panic("not available")
	}

	if len(p) >= 64 {
		left := len(p) & 15
		do := len(p) - left
		crc = ^ieeeCLMUL(^crc, p[:do])
		p = p[do:]
	}
	if len(p) == 0 {
		return crc
	}
	return slicingUpdate(crc, archIeeeTable8, p)
}
319	vendor/github.com/klauspost/crc32/crc32_amd64.s (generated, vendored)
@@ -1,319 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build gc

#define NOSPLIT 4
#define RODATA  8

// castagnoliSSE42 updates the (non-inverted) crc with the given buffer.
//
// func castagnoliSSE42(crc uint32, p []byte) uint32
TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
	MOVL crc+0(FP), AX    // CRC value
	MOVQ p+8(FP), SI      // data pointer
	MOVQ p_len+16(FP), CX // len(p)

	// If there are fewer than 8 bytes to process, skip alignment.
	CMPQ CX, $8
	JL less_than_8

	MOVQ SI, BX
	ANDQ $7, BX
	JZ aligned

	// Process the first few bytes to 8-byte align the input.

	// BX = 8 - BX. We need to process this many bytes to align.
	SUBQ $1, BX
	XORQ $7, BX

	BTQ $0, BX
	JNC align_2

	CRC32B (SI), AX
	DECQ CX
	INCQ SI

align_2:
	BTQ $1, BX
	JNC align_4

	// CRC32W (SI), AX
	BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06

	SUBQ $2, CX
	ADDQ $2, SI

align_4:
	BTQ $2, BX
	JNC aligned

	// CRC32L (SI), AX
	BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06

	SUBQ $4, CX
	ADDQ $4, SI

aligned:
	// The input is now 8-byte aligned and we can process 8-byte chunks.
	CMPQ CX, $8
	JL less_than_8

	CRC32Q (SI), AX
	ADDQ $8, SI
	SUBQ $8, CX
	JMP aligned

less_than_8:
	// We may have some bytes left over; process 4 bytes, then 2, then 1.
	BTQ $2, CX
	JNC less_than_4

	// CRC32L (SI), AX
	BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
	ADDQ $4, SI

less_than_4:
	BTQ $1, CX
	JNC less_than_2

	// CRC32W (SI), AX
	BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
	ADDQ $2, SI

less_than_2:
	BTQ $0, CX
	JNC done

	CRC32B (SI), AX

done:
	MOVL AX, ret+32(FP)
	RET

// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds)
// bytes from each buffer.
//
// func castagnoliSSE42Triple(
//     crc1, crc2, crc3 uint32,
//     a, b, c []byte,
//     rounds uint32,
// ) (retA uint32, retB uint32, retC uint32)
TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0
	MOVL crcA+0(FP), AX
	MOVL crcB+4(FP), CX
	MOVL crcC+8(FP), DX

	MOVQ a+16(FP), R8  // data pointer
	MOVQ b+40(FP), R9  // data pointer
	MOVQ c+64(FP), R10 // data pointer

	MOVL rounds+88(FP), R11

loop:
	CRC32Q (R8), AX
	CRC32Q (R9), CX
	CRC32Q (R10), DX

	CRC32Q 8(R8), AX
	CRC32Q 8(R9), CX
	CRC32Q 8(R10), DX

	CRC32Q 16(R8), AX
	CRC32Q 16(R9), CX
	CRC32Q 16(R10), DX

	ADDQ $24, R8
	ADDQ $24, R9
	ADDQ $24, R10

	DECQ R11
	JNZ loop

	MOVL AX, retA+96(FP)
	MOVL CX, retB+100(FP)
	MOVL DX, retC+104(FP)
	RET

// func haveSSE42() bool
TEXT ·haveSSE42(SB), NOSPLIT, $0
	XORQ AX, AX
	INCL AX
	CPUID
	SHRQ $20, CX
	ANDQ $1, CX
	MOVB CX, ret+0(FP)
	RET

// func haveCLMUL() bool
TEXT ·haveCLMUL(SB), NOSPLIT, $0
	XORQ AX, AX
	INCL AX
	CPUID
	SHRQ $1, CX
	ANDQ $1, CX
	MOVB CX, ret+0(FP)
	RET

// func haveSSE41() bool
TEXT ·haveSSE41(SB), NOSPLIT, $0
	XORQ AX, AX
	INCL AX
	CPUID
	SHRQ $19, CX
	ANDQ $1, CX
	MOVB CX, ret+0(FP)
	RET

// CRC32 polynomial data
//
// These constants are lifted from the
// Linux kernel, since they avoid the costly
// PSHUFB 16 byte reversal proposed in the
// original Intel paper.
DATA r2r1kp<>+0(SB)/8, $0x154442bd4
DATA r2r1kp<>+8(SB)/8, $0x1c6e41596
DATA r4r3kp<>+0(SB)/8, $0x1751997d0
DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e
DATA rupolykp<>+0(SB)/8, $0x1db710641
DATA rupolykp<>+8(SB)/8, $0x1f7011641
DATA r5kp<>+0(SB)/8, $0x163cd6124

GLOBL r2r1kp<>(SB), RODATA, $16
GLOBL r4r3kp<>(SB), RODATA, $16
GLOBL rupolykp<>(SB), RODATA, $16
GLOBL r5kp<>(SB), RODATA, $8

// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
// len(p) must be at least 64, and must be a multiple of 16.

// func ieeeCLMUL(crc uint32, p []byte) uint32
TEXT ·ieeeCLMUL(SB), NOSPLIT, $0
	MOVL crc+0(FP), X0    // Initial CRC value
	MOVQ p+8(FP), SI      // data pointer
	MOVQ p_len+16(FP), CX // len(p)

	MOVOU (SI), X1
	MOVOU 16(SI), X2
	MOVOU 32(SI), X3
	MOVOU 48(SI), X4
	PXOR X0, X1
	ADDQ $64, SI // buf+=64
	SUBQ $64, CX // len-=64
	CMPQ CX, $64 // Less than 64 bytes left
	JB remain64

	MOVOA r2r1kp<>+0(SB), X0

loopback64:
	MOVOA X1, X5
	MOVOA X2, X6
	MOVOA X3, X7
	MOVOA X4, X8

	PCLMULQDQ $0, X0, X1
	PCLMULQDQ $0, X0, X2
	PCLMULQDQ $0, X0, X3
	PCLMULQDQ $0, X0, X4

	// Load next early
	MOVOU (SI), X11
	MOVOU 16(SI), X12
	MOVOU 32(SI), X13
	MOVOU 48(SI), X14

	PCLMULQDQ $0x11, X0, X5
	PCLMULQDQ $0x11, X0, X6
	PCLMULQDQ $0x11, X0, X7
	PCLMULQDQ $0x11, X0, X8

	PXOR X5, X1
	PXOR X6, X2
	PXOR X7, X3
	PXOR X8, X4

	PXOR X11, X1
	PXOR X12, X2
	PXOR X13, X3
	PXOR X14, X4

	ADDQ $0x40, DI
	ADDQ $64, SI // buf+=64
	SUBQ $64, CX // len-=64
	CMPQ CX, $64 // Less than 64 bytes left?
	JGE loopback64

	// Fold result into a single register (X1)
remain64:
	MOVOA r4r3kp<>+0(SB), X0

	MOVOA X1, X5
	PCLMULQDQ $0, X0, X1
	PCLMULQDQ $0x11, X0, X5
	PXOR X5, X1
	PXOR X2, X1

	MOVOA X1, X5
	PCLMULQDQ $0, X0, X1
	PCLMULQDQ $0x11, X0, X5
	PXOR X5, X1
	PXOR X3, X1

	MOVOA X1, X5
	PCLMULQDQ $0, X0, X1
	PCLMULQDQ $0x11, X0, X5
	PXOR X5, X1
	PXOR X4, X1

	// If there is less than 16 bytes left we are done
	CMPQ CX, $16
	JB finish

	// Encode 16 bytes
remain16:
	MOVOU (SI), X10
	MOVOA X1, X5
	PCLMULQDQ $0, X0, X1
	PCLMULQDQ $0x11, X0, X5
	PXOR X5, X1
	PXOR X10, X1
	SUBQ $16, CX
	ADDQ $16, SI
	CMPQ CX, $16
	JGE remain16

finish:
	// Fold final result into 32 bits and return it
	PCMPEQB X3, X3
	PCLMULQDQ $1, X1, X0
	PSRLDQ $8, X1
	PXOR X0, X1

	MOVOA X1, X2
	MOVQ r5kp<>+0(SB), X0

	// Creates 32 bit mask. Note that we don't care about upper half.
	PSRLQ $32, X3

	PSRLDQ $4, X2
	PAND X3, X1
	PCLMULQDQ $0, X0, X1
	PXOR X2, X1

	MOVOA rupolykp<>+0(SB), X0

	MOVOA X1, X2
	PAND X3, X1
	PCLMULQDQ $0x10, X0, X1
	PAND X3, X1
	PCLMULQDQ $0, X0, X1
	PXOR X2, X1

	// PEXTRD $1, X1, AX (SSE 4.1)
	BYTE $0x66; BYTE $0x0f; BYTE $0x3a
	BYTE $0x16; BYTE $0xc8; BYTE $0x01
	MOVL AX, ret+32(FP)

	RET
43	vendor/github.com/klauspost/crc32/crc32_amd64p32.go (generated, vendored)
@@ -1,43 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine,!gccgo

package crc32

// This file contains the code to call the SSE 4.2 version of the Castagnoli
// CRC.

// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for SSE 4.2
// support.
func haveSSE42() bool

// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32

var sse42 = haveSSE42()

func archAvailableCastagnoli() bool {
	return sse42
}

func archInitCastagnoli() {
	if !sse42 {
		panic("not available")
	}
	// No initialization necessary.
}

func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
	if !sse42 {
		panic("not available")
	}
	return castagnoliSSE42(crc, p)
}

func archAvailableIEEE() bool                    { return false }
func archInitIEEE()                              { panic("not available") }
func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") }
67	vendor/github.com/klauspost/crc32/crc32_amd64p32.s (generated, vendored)
@@ -1,67 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build gc

#define NOSPLIT 4
#define RODATA  8

// func castagnoliSSE42(crc uint32, p []byte) uint32
TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
	MOVL crc+0(FP), AX   // CRC value
	MOVL p+4(FP), SI     // data pointer
	MOVL p_len+8(FP), CX // len(p)

	NOTL AX

	// If there's less than 8 bytes to process, we do it byte-by-byte.
	CMPQ CX, $8
	JL cleanup

	// Process individual bytes until the input is 8-byte aligned.
startup:
	MOVQ SI, BX
	ANDQ $7, BX
	JZ aligned

	CRC32B (SI), AX
	DECQ CX
	INCQ SI
	JMP startup

aligned:
	// The input is now 8-byte aligned and we can process 8-byte chunks.
	CMPQ CX, $8
	JL cleanup

	CRC32Q (SI), AX
	ADDQ $8, SI
	SUBQ $8, CX
	JMP aligned

cleanup:
	// We may have some bytes left over that we process one at a time.
	CMPQ CX, $0
	JE done

	CRC32B (SI), AX
	INCQ SI
	DECQ CX
	JMP cleanup

done:
	NOTL AX
	MOVL AX, ret+16(FP)
	RET

// func haveSSE42() bool
TEXT ·haveSSE42(SB), NOSPLIT, $0
	XORQ AX, AX
	INCL AX
	CPUID
	SHRQ $20, CX
	ANDQ $1, CX
	MOVB CX, ret+0(FP)
	RET
89 vendor/github.com/klauspost/crc32/crc32_generic.go generated vendored
@@ -1,89 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file contains CRC32 algorithms that are not specific to any architecture
// and don't use hardware acceleration.
//
// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table.
//
// The slicing-by-8 algorithm is a faster implementation that uses a bigger
// table (8*256*4 bytes).

package crc32

// simpleMakeTable allocates and constructs a Table for the specified
// polynomial. The table is suitable for use with the simple algorithm
// (simpleUpdate).
func simpleMakeTable(poly uint32) *Table {
	t := new(Table)
	simplePopulateTable(poly, t)
	return t
}

// simplePopulateTable constructs a Table for the specified polynomial, suitable
// for use with simpleUpdate.
func simplePopulateTable(poly uint32, t *Table) {
	for i := 0; i < 256; i++ {
		crc := uint32(i)
		for j := 0; j < 8; j++ {
			if crc&1 == 1 {
				crc = (crc >> 1) ^ poly
			} else {
				crc >>= 1
			}
		}
		t[i] = crc
	}
}

// simpleUpdate uses the simple algorithm to update the CRC, given a table that
// was previously computed using simpleMakeTable.
func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 {
	crc = ^crc
	for _, v := range p {
		crc = tab[byte(crc)^v] ^ (crc >> 8)
	}
	return ^crc
}

// Use slicing-by-8 when payload >= this value.
const slicing8Cutoff = 16

// slicing8Table is array of 8 Tables, used by the slicing-by-8 algorithm.
type slicing8Table [8]Table

// slicingMakeTable constructs a slicing8Table for the specified polynomial. The
// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate).
func slicingMakeTable(poly uint32) *slicing8Table {
	t := new(slicing8Table)
	simplePopulateTable(poly, &t[0])
	for i := 0; i < 256; i++ {
		crc := t[0][i]
		for j := 1; j < 8; j++ {
			crc = t[0][crc&0xFF] ^ (crc >> 8)
			t[j][i] = crc
		}
	}
	return t
}

// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a
// table that was previously computed using slicingMakeTable.
func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 {
	if len(p) >= slicing8Cutoff {
		crc = ^crc
		for len(p) > 8 {
			crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
			crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^
				tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^
				tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF]
			p = p[8:]
		}
		crc = ^crc
	}
	if len(p) == 0 {
		return crc
	}
	return simpleUpdate(crc, &tab[0], p)
}
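
With fasthttp gone, this vendored crc32 package is no longer needed; the standard library's hash/crc32 provides the same IEEE and Castagnoli checksums (with its own hardware acceleration). A minimal sketch for reference, not part of this change:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	data := []byte("Foo Bar")

	// IEEE is the polynomial the simple and slicing-by-8 tables above target.
	fmt.Printf("IEEE:       %08x\n", crc32.ChecksumIEEE(data))

	// Castagnoli is the polynomial accelerated by the SSE4.2 and s390x paths.
	tab := crc32.MakeTable(crc32.Castagnoli)
	fmt.Printf("Castagnoli: %08x\n", crc32.Checksum(data, tab))
}
```
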
15 vendor/github.com/klauspost/crc32/crc32_otherarch.go generated vendored
@@ -1,15 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !amd64,!amd64p32,!s390x

package crc32

func archAvailableIEEE() bool                    { return false }
func archInitIEEE()                              { panic("not available") }
func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") }

func archAvailableCastagnoli() bool                    { return false }
func archInitCastagnoli()                              { panic("not available") }
func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") }
91 vendor/github.com/klauspost/crc32/crc32_s390x.go generated vendored
@@ -1,91 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build s390x

package crc32

const (
	vxMinLen    = 64
	vxAlignMask = 15 // align to 16 bytes
)

// hasVectorFacility reports whether the machine has the z/Architecture
// vector facility installed and enabled.
func hasVectorFacility() bool

var hasVX = hasVectorFacility()

// vectorizedCastagnoli implements CRC32 using vector instructions.
// It is defined in crc32_s390x.s.
//go:noescape
func vectorizedCastagnoli(crc uint32, p []byte) uint32

// vectorizedIEEE implements CRC32 using vector instructions.
// It is defined in crc32_s390x.s.
//go:noescape
func vectorizedIEEE(crc uint32, p []byte) uint32

func archAvailableCastagnoli() bool {
	return hasVX
}

var archCastagnoliTable8 *slicing8Table

func archInitCastagnoli() {
	if !hasVX {
		panic("not available")
	}
	// We still use slicing-by-8 for small buffers.
	archCastagnoliTable8 = slicingMakeTable(Castagnoli)
}

// archUpdateCastagnoli calculates the checksum of p using
// vectorizedCastagnoli.
func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
	if !hasVX {
		panic("not available")
	}
	// Use vectorized function if data length is above threshold.
	if len(p) >= vxMinLen {
		aligned := len(p) & ^vxAlignMask
		crc = vectorizedCastagnoli(crc, p[:aligned])
		p = p[aligned:]
	}
	if len(p) == 0 {
		return crc
	}
	return slicingUpdate(crc, archCastagnoliTable8, p)
}

func archAvailableIEEE() bool {
	return hasVX
}

var archIeeeTable8 *slicing8Table

func archInitIEEE() {
	if !hasVX {
		panic("not available")
	}
	// We still use slicing-by-8 for small buffers.
	archIeeeTable8 = slicingMakeTable(IEEE)
}

// archUpdateIEEE calculates the checksum of p using vectorizedIEEE.
func archUpdateIEEE(crc uint32, p []byte) uint32 {
	if !hasVX {
		panic("not available")
	}
	// Use vectorized function if data length is above threshold.
	if len(p) >= vxMinLen {
		aligned := len(p) & ^vxAlignMask
		crc = vectorizedIEEE(crc, p[:aligned])
		p = p[aligned:]
	}
	if len(p) == 0 {
		return crc
	}
	return slicingUpdate(crc, archIeeeTable8, p)
}
249 vendor/github.com/klauspost/crc32/crc32_s390x.s generated vendored
@@ -1,249 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build s390x

#include "textflag.h"

// Vector register range containing CRC-32 constants

#define CONST_PERM_LE2BE V9
#define CONST_R2R1 V10
#define CONST_R4R3 V11
#define CONST_R5 V12
#define CONST_RU_POLY V13
#define CONST_CRC_POLY V14

// The CRC-32 constant block contains reduction constants to fold and
// process particular chunks of the input data stream in parallel.
//
// Note that the constant definitions below are extended in order to compute
// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction.
// The rightmost doubleword can be 0 to prevent contribution to the result or
// can be multiplied by 1 to perform an XOR without the need for a separate
// VECTOR EXCLUSIVE OR instruction.
//
// The polynomials used are bit-reflected:
//
//   IEEE: P'(x) = 0x0edb88320
//   Castagnoli: P'(x) = 0x082f63b78

// IEEE polynomial constants
DATA ·crcleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask
DATA ·crcleconskp+8(SB)/8, $0x0706050403020100
DATA ·crcleconskp+16(SB)/8, $0x00000001c6e41596 // R2
DATA ·crcleconskp+24(SB)/8, $0x0000000154442bd4 // R1
DATA ·crcleconskp+32(SB)/8, $0x00000000ccaa009e // R4
DATA ·crcleconskp+40(SB)/8, $0x00000001751997d0 // R3
DATA ·crcleconskp+48(SB)/8, $0x0000000000000000
DATA ·crcleconskp+56(SB)/8, $0x0000000163cd6124 // R5
DATA ·crcleconskp+64(SB)/8, $0x0000000000000000
DATA ·crcleconskp+72(SB)/8, $0x00000001F7011641 // u'
DATA ·crcleconskp+80(SB)/8, $0x0000000000000000
DATA ·crcleconskp+88(SB)/8, $0x00000001DB710641 // P'(x) << 1

GLOBL ·crcleconskp(SB), RODATA, $144

// Castagonli Polynomial constants
DATA ·crccleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask
DATA ·crccleconskp+8(SB)/8, $0x0706050403020100
DATA ·crccleconskp+16(SB)/8, $0x000000009e4addf8 // R2
DATA ·crccleconskp+24(SB)/8, $0x00000000740eef02 // R1
DATA ·crccleconskp+32(SB)/8, $0x000000014cd00bd6 // R4
DATA ·crccleconskp+40(SB)/8, $0x00000000f20c0dfe // R3
DATA ·crccleconskp+48(SB)/8, $0x0000000000000000
DATA ·crccleconskp+56(SB)/8, $0x00000000dd45aab8 // R5
DATA ·crccleconskp+64(SB)/8, $0x0000000000000000
DATA ·crccleconskp+72(SB)/8, $0x00000000dea713f1 // u'
DATA ·crccleconskp+80(SB)/8, $0x0000000000000000
DATA ·crccleconskp+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1

GLOBL ·crccleconskp(SB), RODATA, $144

// func hasVectorFacility() bool
TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
	MOVD  $x-24(SP), R1
	XC    $24, 0(R1), 0(R1) // clear the storage
	MOVD  $2, R0            // R0 is the number of double words stored -1
	WORD  $0xB2B01000       // STFLE 0(R1)
	XOR   R0, R0            // reset the value of R0
	MOVBZ z-8(SP), R1
	AND   $0x40, R1
	BEQ   novector

vectorinstalled:
	// check if the vector instruction has been enabled
	VLEIB  $0, $0xF, V16
	VLGVB  $0, V16, R1
	CMPBNE R1, $0xF, novector
	MOVB   $1, ret+0(FP) // have vx
	RET

novector:
	MOVB $0, ret+0(FP) // no vx
	RET

// The CRC-32 function(s) use these calling conventions:
//
// Parameters:
//
//   R2: Initial CRC value, typically ~0; and final CRC (return) value.
//   R3: Input buffer pointer, performance might be improved if the
//       buffer is on a doubleword boundary.
//   R4: Length of the buffer, must be 64 bytes or greater.
//
// Register usage:
//
//   R5: CRC-32 constant pool base pointer.
//   V0: Initial CRC value and intermediate constants and results.
//   V1..V4: Data for CRC computation.
//   V5..V8: Next data chunks that are fetched from the input buffer.
//
//   V9..V14: CRC-32 constants.

// func vectorizedIEEE(crc uint32, p []byte) uint32
TEXT ·vectorizedIEEE(SB), NOSPLIT, $0
	MOVWZ crc+0(FP), R2   // R2 stores the CRC value
	MOVD  p+8(FP), R3     // data pointer
	MOVD  p_len+16(FP), R4 // len(p)

	MOVD $·crcleconskp(SB), R5
	BR   vectorizedBody<>(SB)

// func vectorizedCastagnoli(crc uint32, p []byte) uint32
TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0
	MOVWZ crc+0(FP), R2   // R2 stores the CRC value
	MOVD  p+8(FP), R3     // data pointer
	MOVD  p_len+16(FP), R4 // len(p)

	// R5: crc-32 constant pool base pointer, constant is used to reduce crc
	MOVD $·crccleconskp(SB), R5
	BR   vectorizedBody<>(SB)

TEXT vectorizedBody<>(SB), NOSPLIT, $0
	XOR $0xffffffff, R2 // NOTW R2
	VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY

	// Load the initial CRC value into the rightmost word of V0
	VZERO V0
	VLVGF $3, R2, V0

	// Crash if the input size is less than 64-bytes.
	CMP R4, $64
	BLT crash

	// Load a 64-byte data chunk and XOR with CRC
	VLM 0(R3), V1, V4 // 64-bytes into V1..V4

	// Reflect the data if the CRC operation is in the bit-reflected domain
	VPERM V1, V1, CONST_PERM_LE2BE, V1
	VPERM V2, V2, CONST_PERM_LE2BE, V2
	VPERM V3, V3, CONST_PERM_LE2BE, V3
	VPERM V4, V4, CONST_PERM_LE2BE, V4

	VX  V0, V1, V1   // V1 ^= CRC
	ADD $64, R3      // BUF = BUF + 64
	ADD $(-64), R4

	// Check remaining buffer size and jump to proper folding method
	CMP R4, $64
	BLT less_than_64bytes

fold_64bytes_loop:
	// Load the next 64-byte data chunk into V5 to V8
	VLM   0(R3), V5, V8
	VPERM V5, V5, CONST_PERM_LE2BE, V5
	VPERM V6, V6, CONST_PERM_LE2BE, V6
	VPERM V7, V7, CONST_PERM_LE2BE, V7
	VPERM V8, V8, CONST_PERM_LE2BE, V8

	// Perform a GF(2) multiplication of the doublewords in V1 with
	// the reduction constants in V0. The intermediate result is
	// then folded (accumulated) with the next data chunk in V5 and
	// stored in V1. Repeat this step for the register contents
	// in V2, V3, and V4 respectively.

	VGFMAG CONST_R2R1, V1, V5, V1
	VGFMAG CONST_R2R1, V2, V6, V2
	VGFMAG CONST_R2R1, V3, V7, V3
	VGFMAG CONST_R2R1, V4, V8, V4

	// Adjust buffer pointer and length for next loop
	ADD $64, R3    // BUF = BUF + 64
	ADD $(-64), R4 // LEN = LEN - 64

	CMP R4, $64
	BGE fold_64bytes_loop

less_than_64bytes:
	// Fold V1 to V4 into a single 128-bit value in V1
	VGFMAG CONST_R4R3, V1, V2, V1
	VGFMAG CONST_R4R3, V1, V3, V1
	VGFMAG CONST_R4R3, V1, V4, V1

	// Check whether to continue with 64-bit folding
	CMP R4, $16
	BLT final_fold

fold_16bytes_loop:
	VL    0(R3), V2 // Load next data chunk
	VPERM V2, V2, CONST_PERM_LE2BE, V2

	VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk

	// Adjust buffer pointer and size for folding next data chunk
	ADD $16, R3
	ADD $-16, R4

	// Process remaining data chunks
	CMP R4, $16
	BGE fold_16bytes_loop

final_fold:
	VLEIB $7, $0x40, V9
	VSRLB V9, CONST_R4R3, V0
	VLEIG $0, $1, V0

	VGFMG V0, V1, V1

	VLEIB  $7, $0x20, V9          // Shift by words
	VSRLB  V9, V1, V2             // Store remaining bits in V2
	VUPLLF V1, V1                 // Split rightmost doubleword
	VGFMAG CONST_R5, V1, V2, V1   // V1 = (V1 * R5) XOR V2

	// The input values to the Barret reduction are the degree-63 polynomial
	// in V1 (R(x)), degree-32 generator polynomial, and the reduction
	// constant u. The Barret reduction result is the CRC value of R(x) mod
	// P(x).
	//
	// The Barret reduction algorithm is defined as:
	//
	//   1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
	//   2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
	//   3. C(x) = R(x) XOR T2(x) mod x^32
	//
	// Note: To compensate the division by x^32, use the vector unpack
	// instruction to move the leftmost word into the leftmost doubleword
	// of the vector register. The rightmost doubleword is multiplied
	// with zero to not contribute to the intermediate results.

	// T1(x) = floor( R(x) / x^32 ) GF2MUL u
	VUPLLF V1, V2
	VGFMG  CONST_RU_POLY, V2, V2

	// Compute the GF(2) product of the CRC polynomial in VO with T1(x) in
	// V2 and XOR the intermediate result, T2(x), with the value in V1.
	// The final result is in the rightmost word of V2.

	VUPLLF V2, V2
	VGFMAG CONST_CRC_POLY, V2, V1, V2

done:
	VLGVF $2, V2, R2
	XOR   $0xffffffff, R2 // NOTW R2
	MOVWZ R2, ret + 32(FP)
	RET

crash:
	MOVD $0, (R0) // input size is less than 64-bytes
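
The bit-reflected polynomials quoted in the comments above match the constants exposed by the standard library's hash/crc32, which can be checked with a short sketch (for illustration only):

```go
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	// 0xedb88320 and 0x82f63b78 are the reversed IEEE and Castagnoli
	// polynomials referenced in the assembly comments.
	fmt.Println(crc32.IEEE == uint32(0xedb88320))       // true
	fmt.Println(crc32.Castagnoli == uint32(0x82f63b78)) // true
}
```
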
22 vendor/github.com/qiangxue/fasthttp-routing/LICENSE generated vendored
@@ -1,22 +0,0 @@
The BSD 3-Clause License

Copyright (c) 2016, Qiang Xue
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided
that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
244 vendor/github.com/qiangxue/fasthttp-routing/README.md generated vendored
@@ -1,244 +0,0 @@
# fasthttp-routing

[](http://godoc.org/github.com/qiangxue/fasthttp-routing)
[](http://goreportcard.com/report/qiangxue/fasthttp-routing)

## Description

fasthttp-routing is a Go package that is adapted from [ozzo-routing](https://github.com/go-ozzo/ozzo-routing) to provide
fast and powerful routing features for the high-performance [fasthttp](https://github.com/valyala/fasthttp) server.
The package has the following features:

* middleware pipeline architecture, similar to that of the [Express framework](http://expressjs.com).
* extremely fast request routing with zero dynamic memory allocation
* modular code organization through route grouping
* flexible URL path matching, supporting URL parameters and regular expressions
* URL creation according to the predefined routes

## Requirements

Go 1.5 or above.

## Installation

Run the following command to install the package:

```
go get github.com/qiangxue/fasthttp-routing
```

## Getting Started

Create a `server.go` file with the following content:

```go
package main

import (
	"fmt"

	"github.com/qiangxue/fasthttp-routing"
	"github.com/valyala/fasthttp"
)

func main() {
	router := routing.New()

	router.Get("/", func(c *routing.Context) error {
		fmt.Fprintf(c, "Hello, world!")
		return nil
	})

	panic(fasthttp.ListenAndServe(":8080", router.HandleRequest))
}
```

Now run the following command to start the Web server:

```
go run server.go
```

You should be able to access URLs such as `http://localhost:8080`.

### Routes

ozzo-routing works by building a routing table in a router and then dispatching HTTP requests to the matching handlers
found in the routing table. An intuitive illustration of a routing table is as follows:

Routes | Handlers
--------------------|-----------------
`GET /users` | m1, m2, h1, ...
`POST /users` | m1, m2, h2, ...
`PUT /users/<id>` | m1, m2, h3, ...
`DELETE /users/<id>`| m1, m2, h4, ...

For an incoming request `GET /users`, the first route would match and the handlers m1, m2, and h1 would be executed.
If the request is `PUT /users/123`, the third route would match and the corresponding handlers would be executed.
Note that the token `<id>` can match any number of non-slash characters and the matching part can be accessed as
a path parameter value in the handlers.

**If an incoming request matches multiple routes in the table, the route added first to the table will take precedence.
All other matching routes will be ignored.**

The actual implementation of the routing table uses a variant of the radix tree data structure, which makes the routing
process as fast as working with a hash table, thanks to the inspiration from [httprouter](https://github.com/julienschmidt/httprouter).

To add a new route and its handlers to the routing table, call the `To` method like the following:

```go
router := routing.New()
router.To("GET", "/users", m1, m2, h1)
router.To("POST", "/users", m1, m2, h2)
```

You can also use shortcut methods, such as `Get`, `Post`, `Put`, etc., which are named after the HTTP method names:

```go
router.Get("/users", m1, m2, h1)
router.Post("/users", m1, m2, h2)
```

If you have multiple routes with the same URL path but different HTTP methods, like the above example, you can
chain them together as follows,

```go
router.Get("/users", m1, m2, h1).Post(m1, m2, h2)
```

If you want to use the same set of handlers to handle the same URL path but different HTTP methods, you can take
the following shortcut:

```go
router.To("GET,POST", "/users", m1, m2, h)
```

A route may contain parameter tokens which are in the format of `<name:pattern>`, where `name` stands for the parameter
name, and `pattern` is a regular expression which the parameter value should match. A token `<name>` is equivalent
to `<name:[^/]*>`, i.e., it matches any number of non-slash characters. At the end of a route, an asterisk character
can be used to match any number of arbitrary characters. Below are some examples:

* `/users/<username>`: matches `/users/admin`
* `/users/accnt-<id:\d+>`: matches `/users/accnt-123`, but not `/users/accnt-admin`
* `/users/<username>/*`: matches `/users/admin/profile/address`

When a URL path matches a route, the matching parameters on the URL path can be accessed via `Context.Param()`:

```go
router := routing.New()

router.Get("/users/<username>", func (c *routing.Context) error {
	fmt.Fprintf(c, "Name: %v", c.Param("username"))
	return nil
})
```

### Route Groups

Route group is a way of grouping together the routes which have the same route prefix. The routes in a group also
share the same handlers that are registered with the group via its `Use` method. For example,

```go
router := routing.New()
api := router.Group("/api")
api.Use(m1, m2)
api.Get("/users", h1).Post(h2)
api.Put("/users/<id>", h3).Delete(h4)
```

The above `/api` route group establishes the following routing table:

Routes | Handlers
------------------------|-------------
`GET /api/users` | m1, m2, h1, ...
`POST /api/users` | m1, m2, h2, ...
`PUT /api/users/<id>` | m1, m2, h3, ...
`DELETE /api/users/<id>`| m1, m2, h4, ...

As you can see, all these routes have the same route prefix `/api` and the handlers `m1` and `m2`. In other similar
routing frameworks, the handlers registered with a route group are also called *middlewares*.

Route groups can be nested. That is, a route group can create a child group by calling the `Group()` method. The router
serves as the top level route group. A child group inherits the handlers registered with its parent group. For example,

```go
router := routing.New()
router.Use(m1)

api := router.Group("/api")
api.Use(m2)

users := group.Group("/users")
users.Use(m3)
users.Put("/<id>", h1)
```

Because the router serves as the parent of the `api` group which is the parent of the `users` group,
the `PUT /api/users/<id>` route is associated with the handlers `m1`, `m2`, `m3`, and `h1`.

### Router

Router manages the routing table and dispatches incoming requests to appropriate handlers. A router instance is created
by calling the `routing.New()` method.

To hook up router with fasthttp, use the following code:

```go
router := routing.New()
fasthttp.ListenAndServe(":8080", router.HandleRequest)
```

### Handlers

A handler is a function with the signature `func(*routing.Context) error`. A handler is executed by the router if
the incoming request URL path matches the route that the handler is associated with. Through the `routing.Context`
parameter, you can access the request information in handlers.

A route may be associated with multiple handlers. These handlers will be executed in the order that they are registered
to the route. The execution sequence can be terminated in the middle using one of the following two methods:

* A handler returns an error: the router will skip the rest of the handlers and handle the returned error.
* A handler calls `Context.Abort()`: the router will simply skip the rest of the handlers. There is no error to be handled.

A handler can call `Context.Next()` to explicitly execute the rest of the unexecuted handlers and take actions after
they finish execution. For example, a response compression handler may start the output buffer, call `Context.Next()`,
and then compress and send the output to response.
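
To make the chaining behaviour above concrete, here is a minimal middleware sketch (not part of the vendored README; the handler is illustrative) that calls `Context.Next()` and acts after the remaining handlers finish:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/qiangxue/fasthttp-routing"
	"github.com/valyala/fasthttp"
)

// timing runs the rest of the handler chain via c.Next(), logs how long it
// took, and passes any error back to the router unchanged.
func timing(c *routing.Context) error {
	start := time.Now()
	err := c.Next()
	log.Printf("%s %s took %s", c.Method(), c.Path(), time.Since(start))
	return err
}

func main() {
	router := routing.New()
	router.Use(timing)
	router.Get("/", func(c *routing.Context) error {
		fmt.Fprintf(c, "Hello, world!")
		return nil
	})
	fasthttp.ListenAndServe(":8080", router.HandleRequest)
}
```
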

### Context

For each incoming request, a `routing.Context` object is passed through the relevant handlers. Because `routing.Context`
embeds `fasthttp.RequestCtx`, you can access all properties and methods provided by the latter.

Additionally, the `Context.Param()` method allows handlers to access the URL path parameters that match the current route.
Using `Context.Get()` and `Context.Set()`, handlers can share data between each other. For example, an authentication
handler can store the authenticated user identity by calling `Context.Set()`, and other handlers can retrieve back
the identity information by calling `Context.Get()`.

Context also provides a handy `WriteData()` method that can be used to write data of arbitrary type to the response.
The `WriteData()` method can also be overridden (by replacement) to achieve more versatile response data writing.
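
A short sketch of the `Context.Set()`/`Context.Get()` data sharing described above (handler names and the header used are illustrative, not from the package):

```go
package main

import (
	"fmt"

	"github.com/qiangxue/fasthttp-routing"
	"github.com/valyala/fasthttp"
)

// auth stores the caller identity in the context for later handlers.
func auth(c *routing.Context) error {
	c.Set("user", string(c.Request.Header.Peek("X-User")))
	return nil
}

// profile retrieves the identity stored by auth.
func profile(c *routing.Context) error {
	user, _ := c.Get("user").(string)
	fmt.Fprintf(c, "Name: %v", user)
	return nil
}

func main() {
	router := routing.New()
	router.Get("/profile", auth, profile)
	fasthttp.ListenAndServe(":8080", router.HandleRequest)
}
```
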

### Error Handling

A handler may return an error indicating some erroneous condition. Sometimes, a handler or the code it calls may cause
a panic. Both should be handled properly to ensure best user experience. It is recommended that you use
the `fault.Recover` handler or a similar error handler to handle these errors.

If an error is not handled by any handler, the router will handle it by calling its `handleError()` method which
simply sets an appropriate HTTP status code and writes the error message to the response.

When an incoming request has no matching route, the router will call the handlers registered via the `Router.NotFound()`
method. All the handlers registered via `Router.Use()` will also be called in advance. By default, the following two
handlers are registered with `Router.NotFound()`:

* `routing.MethodNotAllowedHandler`: a handler that sends an `Allow` HTTP header indicating the allowed HTTP methods for a requested URL
* `routing.NotFoundHandler`: a handler triggering 404 HTTP error
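
For completeness, a minimal sketch of the error-handling hooks described above, combining `routing.NewHTTPError` with a custom `Router.NotFound()` registration (handler bodies are illustrative):

```go
package main

import (
	"net/http"

	"github.com/qiangxue/fasthttp-routing"
	"github.com/valyala/fasthttp"
)

func main() {
	router := routing.New()

	// Returning an HTTPError lets the router's handleError() translate it
	// into a status code and message on the response.
	router.Get("/admin", func(c *routing.Context) error {
		return routing.NewHTTPError(http.StatusForbidden, "admins only")
	})

	// Replace the default MethodNotAllowedHandler/NotFoundHandler pair.
	router.NotFound(func(c *routing.Context) error {
		return routing.NewHTTPError(http.StatusNotFound, "no such page")
	})

	fasthttp.ListenAndServe(":8080", router.HandleRequest)
}
```
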
126 vendor/github.com/qiangxue/fasthttp-routing/context.go generated vendored
@@ -1,126 +0,0 @@
// Copyright 2016 Qiang Xue. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package routing

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

// SerializeFunc serializes the given data of arbitrary type into a byte array.
type SerializeFunc func(data interface{}) ([]byte, error)

// Context represents the contextual data and environment while processing an incoming HTTP request.
type Context struct {
	*fasthttp.RequestCtx

	Serialize SerializeFunc // the function serializing the given data of arbitrary type into a byte array.

	router   *Router
	pnames   []string               // list of route parameter names
	pvalues  []string               // list of parameter values corresponding to pnames
	data     map[string]interface{} // data items managed by Get and Set
	index    int                    // the index of the currently executing handler in handlers
	handlers []Handler              // the handlers associated with the current route
}

// Router returns the Router that is handling the incoming HTTP request.
func (c *Context) Router() *Router {
	return c.router
}

// Param returns the named parameter value that is found in the URL path matching the current route.
// If the named parameter cannot be found, an empty string will be returned.
func (c *Context) Param(name string) string {
	for i, n := range c.pnames {
		if n == name {
			return c.pvalues[i]
		}
	}
	return ""
}

// Get returns the named data item previously registered with the context by calling Set.
// If the named data item cannot be found, nil will be returned.
func (c *Context) Get(name string) interface{} {
	return c.data[name]
}

// Set stores the named data item in the context so that it can be retrieved later.
func (c *Context) Set(name string, value interface{}) {
	if c.data == nil {
		c.data = make(map[string]interface{})
	}
	c.data[name] = value
}

// Next calls the rest of the handlers associated with the current route.
// If any of these handlers returns an error, Next will return the error and skip the following handlers.
// Next is normally used when a handler needs to do some postprocessing after the rest of the handlers
// are executed.
func (c *Context) Next() error {
	c.index++
	for n := len(c.handlers); c.index < n; c.index++ {
		if err := c.handlers[c.index](c); err != nil {
			return err
		}
	}
	return nil
}

// Abort skips the rest of the handlers associated with the current route.
// Abort is normally used when a handler handles the request normally and wants to skip the rest of the handlers.
// If a handler wants to indicate an error condition, it should simply return the error without calling Abort.
func (c *Context) Abort() {
	c.index = len(c.handlers)
}

// URL creates a URL using the named route and the parameter values.
// The parameters should be given in the sequence of name1, value1, name2, value2, and so on.
// If a parameter in the route is not provided a value, the parameter token will remain in the resulting URL.
// Parameter values will be properly URL encoded.
// The method returns an empty string if the URL creation fails.
func (c *Context) URL(route string, pairs ...interface{}) string {
	if r := c.router.routes[route]; r != nil {
		return r.URL(pairs...)
	}
	return ""
}

// WriteData writes the given data of arbitrary type to the response.
// The method calls the Serialize() method to convert the data into a byte array and then writes
// the byte array to the response.
func (c *Context) WriteData(data interface{}) (err error) {
	var bytes []byte
	if bytes, err = c.Serialize(data); err == nil {
		_, err = c.Write(bytes)
	}
	return
}

// init sets the request and response of the context and resets all other properties.
func (c *Context) init(ctx *fasthttp.RequestCtx) {
	c.RequestCtx = ctx
	c.data = nil
	c.index = -1
	c.Serialize = Serialize
}

// Serialize converts the given data into a byte array.
// If the data is neither a byte array nor a string, it will call fmt.Sprint to convert it into a string.
func Serialize(data interface{}) (bytes []byte, err error) {
	switch data.(type) {
	case []byte:
		return data.([]byte), nil
	case string:
		return []byte(data.(string)), nil
	default:
		if data != nil {
			return []byte(fmt.Sprint(data)), nil
		}
	}
	return nil, nil
}
40 vendor/github.com/qiangxue/fasthttp-routing/error.go generated vendored
@@ -1,40 +0,0 @@
// Copyright 2016 Qiang Xue. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package routing

import "net/http"

// HTTPError represents an HTTP error with HTTP status code and error message
type HTTPError interface {
	error
	// StatusCode returns the HTTP status code of the error
	StatusCode() int
}

// Error contains the error information reported by calling Context.Error().
type httpError struct {
	Status  int    `json:"status" xml:"status"`
	Message string `json:"message" xml:"message"`
}

// NewHTTPError creates a new HttpError instance.
// If the error message is not given, http.StatusText() will be called
// to generate the message based on the status code.
func NewHTTPError(status int, message ...string) HTTPError {
	if len(message) > 0 {
		return &httpError{status, message[0]}
	}
	return &httpError{status, http.StatusText(status)}
}

// Error returns the error message.
func (e *httpError) Error() string {
	return e.Message
}

// StatusCode returns the HTTP status code.
func (e *httpError) StatusCode() int {
	return e.Status
}
107 vendor/github.com/qiangxue/fasthttp-routing/group.go generated vendored
@@ -1,107 +0,0 @@
// Copyright 2016 Qiang Xue. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package routing

import (
	"strings"
)

// RouteGroup represents a group of routes that share the same path prefix.
type RouteGroup struct {
	prefix   string
	router   *Router
	handlers []Handler
}

// newRouteGroup creates a new RouteGroup with the given path prefix, router, and handlers.
func newRouteGroup(prefix string, router *Router, handlers []Handler) *RouteGroup {
	return &RouteGroup{
		prefix:   prefix,
		router:   router,
		handlers: handlers,
	}
}

// Get adds a GET route to the router with the given route path and handlers.
func (r *RouteGroup) Get(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Get(handlers...)
}

// Post adds a POST route to the router with the given route path and handlers.
func (r *RouteGroup) Post(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Post(handlers...)
}

// Put adds a PUT route to the router with the given route path and handlers.
func (r *RouteGroup) Put(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Put(handlers...)
}

// Patch adds a PATCH route to the router with the given route path and handlers.
func (r *RouteGroup) Patch(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Patch(handlers...)
}

// Delete adds a DELETE route to the router with the given route path and handlers.
func (r *RouteGroup) Delete(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Delete(handlers...)
}

// Connect adds a CONNECT route to the router with the given route path and handlers.
func (r *RouteGroup) Connect(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Connect(handlers...)
}

// Head adds a HEAD route to the router with the given route path and handlers.
func (r *RouteGroup) Head(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Head(handlers...)
}

// Options adds an OPTIONS route to the router with the given route path and handlers.
func (r *RouteGroup) Options(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Options(handlers...)
}

// Trace adds a TRACE route to the router with the given route path and handlers.
func (r *RouteGroup) Trace(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Trace(handlers...)
}

// Any adds a route with the given route, handlers, and the HTTP methods as listed in routing.Methods.
func (r *RouteGroup) Any(path string, handlers ...Handler) *Route {
	route := newRoute(path, r)
	for _, method := range Methods {
		route.add(method, handlers)
	}
	return route
}

// To adds a route to the router with the given HTTP methods, route path, and handlers.
// Multiple HTTP methods should be separated by commas (without any surrounding spaces).
func (r *RouteGroup) To(methods, path string, handlers ...Handler) *Route {
	route := newRoute(path, r)
	for _, method := range strings.Split(methods, ",") {
		route.add(method, handlers)
	}
	return route
}

// Group creates a RouteGroup with the given route path prefix and handlers.
// The new group will combine the existing path prefix with the new one.
// If no handler is provided, the new group will inherit the handlers registered
// with the current group.
func (r *RouteGroup) Group(prefix string, handlers ...Handler) *RouteGroup {
	if len(handlers) == 0 {
		handlers = make([]Handler, len(r.handlers))
		copy(handlers, r.handlers)
	}
	return newRouteGroup(r.prefix+prefix, r.router, handlers)
}

// Use registers one or multiple handlers to the current route group.
// These handlers will be shared by all routes belong to this group and its subgroups.
func (r *RouteGroup) Use(handlers ...Handler) {
	r.handlers = append(r.handlers, handlers...)
}
161 vendor/github.com/qiangxue/fasthttp-routing/route.go generated vendored
@@ -1,161 +0,0 @@
// Copyright 2016 Qiang Xue. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package routing

import (
	"fmt"
	"net/url"
	"strings"
)

// Route represents a URL path pattern that can be used to match requested URLs.
type Route struct {
	group      *RouteGroup
	name, path string
	template   string
}

// newRoute creates a new Route with the given route path and route group.
func newRoute(path string, group *RouteGroup) *Route {
	path = group.prefix + path
	name := path

	// an asterisk at the end matches any number of characters
	if strings.HasSuffix(path, "*") {
		path = path[:len(path)-1] + "<:.*>"
	}

	route := &Route{
		group:    group,
		name:     name,
		path:     path,
		template: buildURLTemplate(path),
	}
	group.router.routes[name] = route

	return route
}

// Name sets the name of the route.
// This method will update the registration of the route in the router as well.
func (r *Route) Name(name string) *Route {
	r.name = name
	r.group.router.routes[name] = r
	return r
}

// Get adds the route to the router using the GET HTTP method.
func (r *Route) Get(handlers ...Handler) *Route {
	return r.add("GET", handlers)
}

// Post adds the route to the router using the POST HTTP method.
func (r *Route) Post(handlers ...Handler) *Route {
	return r.add("POST", handlers)
}

// Put adds the route to the router using the PUT HTTP method.
func (r *Route) Put(handlers ...Handler) *Route {
	return r.add("PUT", handlers)
}

// Patch adds the route to the router using the PATCH HTTP method.
func (r *Route) Patch(handlers ...Handler) *Route {
	return r.add("PATCH", handlers)
}

// Delete adds the route to the router using the DELETE HTTP method.
func (r *Route) Delete(handlers ...Handler) *Route {
	return r.add("DELETE", handlers)
}

// Connect adds the route to the router using the CONNECT HTTP method.
func (r *Route) Connect(handlers ...Handler) *Route {
	return r.add("CONNECT", handlers)
}

// Head adds the route to the router using the HEAD HTTP method.
func (r *Route) Head(handlers ...Handler) *Route {
	return r.add("HEAD", handlers)
}

// Options adds the route to the router using the OPTIONS HTTP method.
func (r *Route) Options(handlers ...Handler) *Route {
	return r.add("OPTIONS", handlers)
}

// Trace adds the route to the router using the TRACE HTTP method.
func (r *Route) Trace(handlers ...Handler) *Route {
	return r.add("TRACE", handlers)
}

// To adds the route to the router with the given HTTP methods and handlers.
// Multiple HTTP methods should be separated by commas (without any surrounding spaces).
func (r *Route) To(methods string, handlers ...Handler) *Route {
	for _, method := range strings.Split(methods, ",") {
		r.add(method, handlers)
	}
	return r
}

// URL creates a URL using the current route and the given parameters.
// The parameters should be given in the sequence of name1, value1, name2, value2, and so on.
// If a parameter in the route is not provided a value, the parameter token will remain in the resulting URL.
// The method will perform URL encoding for all given parameter values.
func (r *Route) URL(pairs ...interface{}) (s string) {
	s = r.template
	for i := 0; i < len(pairs); i++ {
		name := fmt.Sprintf("<%v>", pairs[i])
		value := ""
		if i < len(pairs)-1 {
			value = url.QueryEscape(fmt.Sprint(pairs[i+1]))
		}
		s = strings.Replace(s, name, value, -1)
	}
	return
}

// add registers the route, the specified HTTP method and the handlers to the router.
// The handlers will be combined with the handlers of the route group.
func (r *Route) add(method string, handlers []Handler) *Route {
	hh := combineHandlers(r.group.handlers, handlers)
	r.group.router.add(method, r.path, hh)
	return r
}

// buildURLTemplate converts a route pattern into a URL template by removing regular expressions in parameter tokens.
func buildURLTemplate(path string) string {
	template, start, end := "", -1, -1
	for i := 0; i < len(path); i++ {
		if path[i] == '<' && start < 0 {
			start = i
		} else if path[i] == '>' && start >= 0 {
			name := path[start+1 : i]
			for j := start + 1; j < i; j++ {
				if path[j] == ':' {
					name = path[start+1 : j]
					break
				}
			}
			template += path[end+1:start] + "<" + name + ">"
			end = i
			start = -1
		}
	}
	if end < 0 {
		template = path
	} else if end < len(path)-1 {
		template += path[end+1:]
	}
	return template
}

// combineHandlers merges two lists of handlers into a new list.
func combineHandlers(h1 []Handler, h2 []Handler) []Handler {
	hh := make([]Handler, len(h1)+len(h2))
	copy(hh, h1)
	copy(hh[len(h1):], h2)
	return hh
}
169 vendor/github.com/qiangxue/fasthttp-routing/router.go generated vendored
@@ -1,169 +0,0 @@
// Copyright 2016 Qiang Xue. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package routing provides high performance and powerful HTTP routing capabilities.
package routing

import (
	"net/http"
	"sort"
	"strings"
	"sync"

	"github.com/valyala/fasthttp"
)

type (
	// Handler is the function for handling HTTP requests.
	Handler func(*Context) error

	// Router manages routes and dispatches HTTP requests to the handlers of the matching routes.
	Router struct {
		RouteGroup
		pool             sync.Pool
		routes           map[string]*Route
		stores           map[string]routeStore
		maxParams        int
		notFound         []Handler
		notFoundHandlers []Handler
	}

	// routeStore stores route paths and the corresponding handlers.
	routeStore interface {
		Add(key string, data interface{}) int
		Get(key string, pvalues []string) (data interface{}, pnames []string)
		String() string
	}
)

// Methods lists all supported HTTP methods by Router.
var Methods = []string{
	"CONNECT",
	"DELETE",
	"GET",
	"HEAD",
	"OPTIONS",
	"PATCH",
	"POST",
	"PUT",
	"TRACE",
}

// New creates a new Router object.
func New() *Router {
	r := &Router{
		routes: make(map[string]*Route),
		stores: make(map[string]routeStore),
	}
	r.RouteGroup = *newRouteGroup("", r, make([]Handler, 0))
	r.NotFound(MethodNotAllowedHandler, NotFoundHandler)
	r.pool.New = func() interface{} {
		return &Context{
			pvalues: make([]string, r.maxParams),
			router:  r,
		}
	}
	return r
}

// HandleRequest handles the HTTP request.
func (r *Router) HandleRequest(ctx *fasthttp.RequestCtx) {
	c := r.pool.Get().(*Context)
	c.init(ctx)
	c.handlers, c.pnames = r.find(string(ctx.Method()), string(ctx.Path()), c.pvalues)
	if err := c.Next(); err != nil {
		r.handleError(c, err)
	}
	r.pool.Put(c)
}

// Route returns the named route.
// Nil is returned if the named route cannot be found.
func (r *Router) Route(name string) *Route {
	return r.routes[name]
}

// Use appends the specified handlers to the router and shares them with all routes.
func (r *Router) Use(handlers ...Handler) {
	r.RouteGroup.Use(handlers...)
	r.notFoundHandlers = combineHandlers(r.handlers, r.notFound)
}

// NotFound specifies the handlers that should be invoked when the router cannot find any route matching a request.
// Note that the handlers registered via Use will be invoked first in this case.
func (r *Router) NotFound(handlers ...Handler) {
	r.notFound = handlers
	r.notFoundHandlers = combineHandlers(r.handlers, r.notFound)
}

// handleError is the error handler for handling any unhandled errors.
func (r *Router) handleError(c *Context, err error) {
	if httpError, ok := err.(HTTPError); ok {
		c.Error(httpError.Error(), httpError.StatusCode())
	} else {
		c.Error(err.Error(), http.StatusInternalServerError)
	}
}

func (r *Router) add(method, path string, handlers []Handler) {
	store := r.stores[method]
	if store == nil {
		store = newStore()
		r.stores[method] = store
	}
	if n := store.Add(path, handlers); n > r.maxParams {
		r.maxParams = n
	}
}

func (r *Router) find(method, path string, pvalues []string) (handlers []Handler, pnames []string) {
	var hh interface{}
	if store := r.stores[method]; store != nil {
		hh, pnames = store.Get(path, pvalues)
	}
	if hh != nil {
		return hh.([]Handler), pnames
	}
	return r.notFoundHandlers, pnames
}

func (r *Router) findAllowedMethods(path string) map[string]bool {
	methods := make(map[string]bool)
	pvalues := make([]string, r.maxParams)
	for m, store := range r.stores {
		if handlers, _ := store.Get(path, pvalues); handlers != nil {
			methods[m] = true
		}
	}
	return methods
}

// NotFoundHandler returns a 404 HTTP error indicating a request has no matching route.
func NotFoundHandler(*Context) error {
	return NewHTTPError(http.StatusNotFound)
}

// MethodNotAllowedHandler handles the situation when a request has matching route without matching HTTP method.
// In this case, the handler will respond with an Allow HTTP header listing the allowed HTTP methods.
// Otherwise, the handler will do nothing and let the next handler (usually a NotFoundHandler) to handle the problem.
func MethodNotAllowedHandler(c *Context) error {
	methods := c.Router().findAllowedMethods(string(c.Path()))
	if len(methods) == 0 {
		return nil
	}
	methods["OPTIONS"] = true
	ms := make([]string, len(methods))
	i := 0
	for method := range methods {
		ms[i] = method
		i++
	}
	sort.Strings(ms)
	c.Response.Header.Set("Allow", strings.Join(ms, ", "))
	if string(c.Method()) != "OPTIONS" {
		c.Response.SetStatusCode(http.StatusMethodNotAllowed)
	}
	c.Abort()
	return nil
}
317
vendor/github.com/qiangxue/fasthttp-routing/store.go
generated
vendored
317
vendor/github.com/qiangxue/fasthttp-routing/store.go
generated
vendored
@@ -1,317 +0,0 @@
|
|||||||
// Copyright 2016 Qiang Xue. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package routing

import (
    "fmt"
    "math"
    "regexp"
    "strings"
)

// store is a radix tree that supports storing data with parametric keys and retrieving them back with concrete keys.
// When retrieving a data item with a concrete key, the matching parameter names and values will be returned as well.
// A parametric key is a string containing tokens in the format of "<name>", "<name:pattern>", or "<:pattern>".
// Each token represents a single parameter.
type store struct {
    root  *node // the root node of the radix tree
    count int   // the number of data nodes in the tree
}

// newStore creates a new store.
func newStore() *store {
    return &store{
        root: &node{
            static:    true,
            children:  make([]*node, 256),
            pchildren: make([]*node, 0),
            pindex:    -1,
            pnames:    []string{},
        },
    }
}

// Add adds a new data item with the given parametric key.
// The number of parameters in the key is returned.
func (s *store) Add(key string, data interface{}) int {
    s.count++
    return s.root.add(key, data, s.count)
}

// Get returns the data item matching the given concrete key.
// If the data item was added to the store with a parametric key before, the matching
// parameter names and values will be returned as well.
func (s *store) Get(path string, pvalues []string) (data interface{}, pnames []string) {
    data, pnames, _ = s.root.get(path, pvalues)
    return
}

// String dumps the radix tree kept in the store as a string.
func (s *store) String() string {
    return s.root.print(0)
}

// node represents a radix trie node
type node struct {
    static bool // whether the node is a static node or param node

    key  string      // the key identifying this node
    data interface{} // the data associated with this node. nil if not a data node.

    order    int // the order at which the data was added. used to be pick the first one when matching multiple
    minOrder int // minimum order among all the child nodes and this node

    children  []*node // child static nodes, indexed by the first byte of each child key
    pchildren []*node // child param nodes

    regex  *regexp.Regexp // regular expression for a param node containing regular expression key
    pindex int            // the parameter index, meaningful only for param node
    pnames []string       // the parameter names collected from the root till this node
}

// add adds a new data item to the tree rooted at the current node.
// The number of parameters in the key is returned.
func (n *node) add(key string, data interface{}, order int) int {
    matched := 0

    // find the common prefix
    for ; matched < len(key) && matched < len(n.key); matched++ {
        if key[matched] != n.key[matched] {
            break
        }
    }

    if matched == len(n.key) {
        if matched == len(key) {
            // the node key is the same as the key: make the current node as data node
            // if the node is already a data node, ignore the new data since we only care the first matched node
            if n.data == nil {
                n.data = data
                n.order = order
            }
            return n.pindex + 1
        }

        // the node key is a prefix of the key: create a child node
        newKey := key[matched:]

        // try adding to a static child
        if child := n.children[newKey[0]]; child != nil {
            if pn := child.add(newKey, data, order); pn >= 0 {
                return pn
            }
        }
        // try adding to a param child
        for _, child := range n.pchildren {
            if pn := child.add(newKey, data, order); pn >= 0 {
                return pn
            }
        }

        return n.addChild(newKey, data, order)
    }

    if matched == 0 || !n.static {
        // no common prefix, or partial common prefix with a non-static node: should skip this node
        return -1
    }

    // the node key shares a partial prefix with the key: split the node key
    n1 := &node{
        static:    true,
        key:       n.key[matched:],
        data:      n.data,
        order:     n.order,
        minOrder:  n.minOrder,
        pchildren: n.pchildren,
        children:  n.children,
        pindex:    n.pindex,
        pnames:    n.pnames,
    }

    n.key = key[0:matched]
    n.data = nil
    n.pchildren = make([]*node, 0)
    n.children = make([]*node, 256)
    n.children[n1.key[0]] = n1

    return n.add(key, data, order)
}

// addChild creates static and param nodes to store the given data
func (n *node) addChild(key string, data interface{}, order int) int {
    // find the first occurrence of a param token
    p0, p1 := -1, -1
    for i := 0; i < len(key); i++ {
        if p0 < 0 && key[i] == '<' {
            p0 = i
        }
        if p0 >= 0 && key[i] == '>' {
            p1 = i
            break
        }
    }

    if p0 > 0 && p1 > 0 || p1 < 0 {
        // param token occurs after a static string, or no param token: create a static node
        child := &node{
            static:    true,
            key:       key,
            minOrder:  order,
            children:  make([]*node, 256),
            pchildren: make([]*node, 0),
            pindex:    n.pindex,
            pnames:    n.pnames,
        }
        n.children[key[0]] = child
        if p1 > 0 {
            // param token occurs after a static string
            child.key = key[:p0]
            n = child
        } else {
            // no param token: done adding the child
            child.data = data
            child.order = order
            return child.pindex + 1
        }
    }

    // add param node
    child := &node{
        static:    false,
        key:       key[p0 : p1+1],
        minOrder:  order,
        children:  make([]*node, 256),
        pchildren: make([]*node, 0),
        pindex:    n.pindex,
        pnames:    n.pnames,
    }
    pattern := ""
    pname := key[p0+1 : p1]
    for i := p0 + 1; i < p1; i++ {
        if key[i] == ':' {
            pname = key[p0+1 : i]
            pattern = key[i+1 : p1]
            break
        }
    }
    if pattern != "" {
        // the param token contains a regular expression
        child.regex = regexp.MustCompile("^" + pattern)
    }
    pnames := make([]string, len(n.pnames)+1)
    copy(pnames, n.pnames)
    pnames[len(n.pnames)] = pname
    child.pnames = pnames
    child.pindex = len(pnames) - 1
    n.pchildren = append(n.pchildren, child)

    if p1 == len(key)-1 {
        // the param token is at the end of the key
        child.data = data
        child.order = order
        return child.pindex + 1
    }

    // process the rest of the key
    return child.addChild(key[p1+1:], data, order)
}

// get returns the data item with the key matching the tree rooted at the current node
func (n *node) get(key string, pvalues []string) (data interface{}, pnames []string, order int) {
    order = math.MaxInt32

repeat:
    if n.static {
        // check if the node key is a prefix of the given key
        // a slightly optimized version of strings.HasPrefix
        nkl := len(n.key)
        if nkl > len(key) {
            return
        }
        for i := nkl - 1; i >= 0; i-- {
            if n.key[i] != key[i] {
                return
            }
        }
        key = key[nkl:]
    } else if n.regex != nil {
        // param node with regular expression
        if n.regex.String() == "^.*" {
            pvalues[n.pindex] = key
            key = ""
        } else if match := n.regex.FindStringIndex(key); match != nil {
            pvalues[n.pindex] = key[0:match[1]]
            key = key[match[1]:]
        } else {
            return
        }
    } else {
        // param node matching non-"/" characters
        i, kl := 0, len(key)
        for ; i < kl; i++ {
            if key[i] == '/' {
                pvalues[n.pindex] = key[0:i]
                key = key[i:]
                break
            }
        }
        if i == kl {
            pvalues[n.pindex] = key
            key = ""
        }
    }

    if len(key) > 0 {
        // find a static child that can match the rest of the key
        if child := n.children[key[0]]; child != nil {
            if len(n.pchildren) == 0 {
                // use goto to avoid recursion when no param children
                n = child
                goto repeat
            }
            data, pnames, order = child.get(key, pvalues)
        }
    } else if n.data != nil {
        // do not return yet: a param node may match an empty string with smaller order
        data, pnames, order = n.data, n.pnames, n.order
    }

    // try matching param children
    tvalues := pvalues
    allocated := false
    for _, child := range n.pchildren {
        if child.minOrder >= order {
            continue
        }
        if data != nil && !allocated {
            tvalues = make([]string, len(pvalues))
            allocated = true
        }
        if d, p, s := child.get(key, tvalues); d != nil && s < order {
            if allocated {
                for i := child.pindex; i < len(p); i++ {
                    pvalues[i] = tvalues[i]
                }
            }
            data, pnames, order = d, p, s
        }
    }

    return
}

func (n *node) print(level int) string {
    r := fmt.Sprintf("%v{key: %v, regex: %v, data: %v, order: %v, minOrder: %v, pindex: %v, pnames: %v}\n", strings.Repeat(" ", level<<2), n.key, n.regex, n.data, n.order, n.minOrder, n.pindex, n.pnames)
    for _, child := range n.children {
        if child != nil {
            r += child.print(level + 1)
        }
    }
    for _, child := range n.pchildren {
        r += child.print(level + 1)
    }
    return r
}
22 vendor/github.com/valyala/bytebufferpool/LICENSE generated vendored
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
21 vendor/github.com/valyala/bytebufferpool/README.md generated vendored
@@ -1,21 +0,0 @@
[](https://travis-ci.org/valyala/bytebufferpool)
[](http://godoc.org/github.com/valyala/bytebufferpool)
[](http://goreportcard.com/report/valyala/bytebufferpool)

# bytebufferpool

An implementation of a pool of byte buffers with anti-memory-waste protection.

The pool may waste limited amount of memory due to fragmentation.
This amount equals to the maximum total size of the byte buffers
in concurrent use.

# Benchmark results
Currently bytebufferpool is fastest and most effective buffer pool written in Go.

You can find results [here](https://omgnull.github.io/go-benchmark/buffer/).

# bytebufferpool users

* [fasthttp](https://github.com/valyala/fasthttp)
* [quicktemplate](https://github.com/valyala/quicktemplate)
111 vendor/github.com/valyala/bytebufferpool/bytebuffer.go generated vendored
@@ -1,111 +0,0 @@
package bytebufferpool

import "io"

// ByteBuffer provides byte buffer, which can be used for minimizing
// memory allocations.
//
// ByteBuffer may be used with functions appending data to the given []byte
// slice. See example code for details.
//
// Use Get for obtaining an empty byte buffer.
type ByteBuffer struct {

    // B is a byte buffer to use in append-like workloads.
    // See example code for details.
    B []byte
}

// Len returns the size of the byte buffer.
func (b *ByteBuffer) Len() int {
    return len(b.B)
}

// ReadFrom implements io.ReaderFrom.
//
// The function appends all the data read from r to b.
func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
    p := b.B
    nStart := int64(len(p))
    nMax := int64(cap(p))
    n := nStart
    if nMax == 0 {
        nMax = 64
        p = make([]byte, nMax)
    } else {
        p = p[:nMax]
    }
    for {
        if n == nMax {
            nMax *= 2
            bNew := make([]byte, nMax)
            copy(bNew, p)
            p = bNew
        }
        nn, err := r.Read(p[n:])
        n += int64(nn)
        if err != nil {
            b.B = p[:n]
            n -= nStart
            if err == io.EOF {
                return n, nil
            }
            return n, err
        }
    }
}

// WriteTo implements io.WriterTo.
func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) {
    n, err := w.Write(b.B)
    return int64(n), err
}

// Bytes returns b.B, i.e. all the bytes accumulated in the buffer.
//
// The purpose of this function is bytes.Buffer compatibility.
func (b *ByteBuffer) Bytes() []byte {
    return b.B
}

// Write implements io.Writer - it appends p to ByteBuffer.B
func (b *ByteBuffer) Write(p []byte) (int, error) {
    b.B = append(b.B, p...)
    return len(p), nil
}

// WriteByte appends the byte c to the buffer.
//
// The purpose of this function is bytes.Buffer compatibility.
//
// The function always returns nil.
func (b *ByteBuffer) WriteByte(c byte) error {
    b.B = append(b.B, c)
    return nil
}

// WriteString appends s to ByteBuffer.B.
func (b *ByteBuffer) WriteString(s string) (int, error) {
    b.B = append(b.B, s...)
    return len(s), nil
}

// Set sets ByteBuffer.B to p.
func (b *ByteBuffer) Set(p []byte) {
    b.B = append(b.B[:0], p...)
}

// SetString sets ByteBuffer.B to s.
func (b *ByteBuffer) SetString(s string) {
    b.B = append(b.B[:0], s...)
}

// String returns string representation of ByteBuffer.B.
func (b *ByteBuffer) String() string {
    return string(b.B)
}

// Reset makes ByteBuffer.B empty.
func (b *ByteBuffer) Reset() {
    b.B = b.B[:0]
}
7 vendor/github.com/valyala/bytebufferpool/doc.go generated vendored
@@ -1,7 +0,0 @@
// Package bytebufferpool implements a pool of byte buffers
// with anti-fragmentation protection.
//
// The pool may waste limited amount of memory due to fragmentation.
// This amount equals to the maximum total size of the byte buffers
// in concurrent use.
package bytebufferpool
151 vendor/github.com/valyala/bytebufferpool/pool.go generated vendored
@@ -1,151 +0,0 @@
package bytebufferpool

import (
    "sort"
    "sync"
    "sync/atomic"
)

const (
    minBitSize = 6 // 2**6=64 is a CPU cache line size
    steps      = 20

    minSize = 1 << minBitSize
    maxSize = 1 << (minBitSize + steps - 1)

    calibrateCallsThreshold = 42000
    maxPercentile           = 0.95
)

// Pool represents byte buffer pool.
//
// Distinct pools may be used for distinct types of byte buffers.
// Properly determined byte buffer types with their own pools may help reducing
// memory waste.
type Pool struct {
    calls       [steps]uint64
    calibrating uint64

    defaultSize uint64
    maxSize     uint64

    pool sync.Pool
}

var defaultPool Pool

// Get returns an empty byte buffer from the pool.
//
// Got byte buffer may be returned to the pool via Put call.
// This reduces the number of memory allocations required for byte buffer
// management.
func Get() *ByteBuffer { return defaultPool.Get() }

// Get returns new byte buffer with zero length.
//
// The byte buffer may be returned to the pool via Put after the use
// in order to minimize GC overhead.
func (p *Pool) Get() *ByteBuffer {
    v := p.pool.Get()
    if v != nil {
        return v.(*ByteBuffer)
    }
    return &ByteBuffer{
        B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)),
    }
}

// Put returns byte buffer to the pool.
//
// ByteBuffer.B mustn't be touched after returning it to the pool.
// Otherwise data races will occur.
func Put(b *ByteBuffer) { defaultPool.Put(b) }

// Put releases byte buffer obtained via Get to the pool.
//
// The buffer mustn't be accessed after returning to the pool.
func (p *Pool) Put(b *ByteBuffer) {
    idx := index(len(b.B))

    if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold {
        p.calibrate()
    }

    maxSize := int(atomic.LoadUint64(&p.maxSize))
    if maxSize == 0 || cap(b.B) <= maxSize {
        b.Reset()
        p.pool.Put(b)
    }
}

func (p *Pool) calibrate() {
    if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
        return
    }

    a := make(callSizes, 0, steps)
    var callsSum uint64
    for i := uint64(0); i < steps; i++ {
        calls := atomic.SwapUint64(&p.calls[i], 0)
        callsSum += calls
        a = append(a, callSize{
            calls: calls,
            size:  minSize << i,
        })
    }
    sort.Sort(a)

    defaultSize := a[0].size
    maxSize := defaultSize

    maxSum := uint64(float64(callsSum) * maxPercentile)
    callsSum = 0
    for i := 0; i < steps; i++ {
        if callsSum > maxSum {
            break
        }
        callsSum += a[i].calls
        size := a[i].size
        if size > maxSize {
            maxSize = size
        }
    }

    atomic.StoreUint64(&p.defaultSize, defaultSize)
    atomic.StoreUint64(&p.maxSize, maxSize)

    atomic.StoreUint64(&p.calibrating, 0)
}

type callSize struct {
    calls uint64
    size  uint64
}

type callSizes []callSize

func (ci callSizes) Len() int {
    return len(ci)
}

func (ci callSizes) Less(i, j int) bool {
    return ci[i].calls > ci[j].calls
}

func (ci callSizes) Swap(i, j int) {
    ci[i], ci[j] = ci[j], ci[i]
}

func index(n int) int {
    n--
    n >>= minBitSize
    idx := 0
    for n > 0 {
        n >>= 1
        idx++
    }
    if idx >= steps {
        idx = steps - 1
    }
    return idx
}
22 vendor/github.com/valyala/fasthttp/LICENSE generated vendored
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015-2016 Aliaksandr Valialkin, VertaMedia

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
580 vendor/github.com/valyala/fasthttp/README.md generated vendored
@@ -1,580 +0,0 @@
[](https://travis-ci.org/valyala/fasthttp)
|
|
||||||
[](http://godoc.org/github.com/valyala/fasthttp)
|
|
||||||
[](https://goreportcard.com/report/github.com/valyala/fasthttp)
|
|
||||||
|
|
||||||
# fasthttp
|
|
||||||
Fast HTTP implementation for Go.
|
|
||||||
|
|
||||||
Currently fasthttp is successfully used by [VertaMedia](https://vertamedia.com/)
|
|
||||||
in a production serving up to 200K rps from more than 1.5M concurrent keep-alive
|
|
||||||
connections per physical server.
|
|
||||||
|
|
||||||
[TechEmpower Benchmark round 12 results](https://www.techempower.com/benchmarks/#section=data-r12&hw=peak&test=plaintext)
|
|
||||||
|
|
||||||
[Server Benchmarks](#http-server-performance-comparison-with-nethttp)
|
|
||||||
|
|
||||||
[Client Benchmarks](#http-client-comparison-with-nethttp)
|
|
||||||
|
|
||||||
[Install](#install)
|
|
||||||
|
|
||||||
[Documentation](https://godoc.org/github.com/valyala/fasthttp)
|
|
||||||
|
|
||||||
[Examples from docs](https://godoc.org/github.com/valyala/fasthttp#pkg-examples)
|
|
||||||
|
|
||||||
[Code examples](examples)
|
|
||||||
|
|
||||||
[Switching from net/http to fasthttp](#switching-from-nethttp-to-fasthttp)
|
|
||||||
|
|
||||||
[Fasthttp best practices](#fasthttp-best-practices)
|
|
||||||
|
|
||||||
[Tricks with byte buffers](#tricks-with-byte-buffers)
|
|
||||||
|
|
||||||
[Related projects](#related-projects)
|
|
||||||
|
|
||||||
[FAQ](#faq)
|
|
||||||
|
|
||||||
# HTTP server performance comparison with [net/http](https://golang.org/pkg/net/http/)
|
|
||||||
|
|
||||||
In short, fasthttp server is up to 10 times faster than net/http.
|
|
||||||
Below are benchmark results.
|
|
||||||
|
|
||||||
*GOMAXPROCS=1*
|
|
||||||
|
|
||||||
net/http server:
|
|
||||||
```
|
|
||||||
$ GOMAXPROCS=1 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s
|
|
||||||
BenchmarkNetHTTPServerGet1ReqPerConn 1000000 12052 ns/op 2297 B/op 29 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet2ReqPerConn 1000000 12278 ns/op 2327 B/op 24 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet10ReqPerConn 2000000 8903 ns/op 2112 B/op 19 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet10KReqPerConn 2000000 8451 ns/op 2058 B/op 18 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet1ReqPerConn10KClients 500000 26733 ns/op 3229 B/op 29 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet2ReqPerConn10KClients 1000000 23351 ns/op 3211 B/op 24 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet10ReqPerConn10KClients 1000000 13390 ns/op 2483 B/op 19 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet100ReqPerConn10KClients 1000000 13484 ns/op 2171 B/op 18 allocs/op
|
|
||||||
```
|
|
||||||
|
|
||||||
fasthttp server:
|
|
||||||
```
|
|
||||||
$ GOMAXPROCS=1 go test -bench=kServerGet -benchmem -benchtime=10s
|
|
||||||
BenchmarkServerGet1ReqPerConn 10000000 1559 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet2ReqPerConn 10000000 1248 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet10ReqPerConn 20000000 797 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet10KReqPerConn 20000000 716 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet1ReqPerConn10KClients 10000000 1974 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet2ReqPerConn10KClients 10000000 1352 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet10ReqPerConn10KClients 20000000 789 ns/op 2 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet100ReqPerConn10KClients 20000000 604 ns/op 0 B/op 0 allocs/op
|
|
||||||
```
|
|
||||||
|
|
||||||
*GOMAXPROCS=4*
|
|
||||||
|
|
||||||
net/http server:
|
|
||||||
```
|
|
||||||
$ GOMAXPROCS=4 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s
|
|
||||||
BenchmarkNetHTTPServerGet1ReqPerConn-4 3000000 4529 ns/op 2389 B/op 29 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet2ReqPerConn-4 5000000 3896 ns/op 2418 B/op 24 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet10ReqPerConn-4 5000000 3145 ns/op 2160 B/op 19 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet10KReqPerConn-4 5000000 3054 ns/op 2065 B/op 18 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet1ReqPerConn10KClients-4 1000000 10321 ns/op 3710 B/op 30 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet2ReqPerConn10KClients-4 2000000 7556 ns/op 3296 B/op 24 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet10ReqPerConn10KClients-4 5000000 3905 ns/op 2349 B/op 19 allocs/op
|
|
||||||
BenchmarkNetHTTPServerGet100ReqPerConn10KClients-4 5000000 3435 ns/op 2130 B/op 18 allocs/op
|
|
||||||
```
|
|
||||||
|
|
||||||
fasthttp server:
|
|
||||||
```
|
|
||||||
$ GOMAXPROCS=4 go test -bench=kServerGet -benchmem -benchtime=10s
|
|
||||||
BenchmarkServerGet1ReqPerConn-4 10000000 1141 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet2ReqPerConn-4 20000000 707 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet10ReqPerConn-4 30000000 341 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet10KReqPerConn-4 50000000 310 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet1ReqPerConn10KClients-4 10000000 1119 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet2ReqPerConn10KClients-4 20000000 644 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet10ReqPerConn10KClients-4 30000000 346 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkServerGet100ReqPerConn10KClients-4 50000000 282 ns/op 0 B/op 0 allocs/op
|
|
||||||
```
|
|
||||||
|
|
||||||
# HTTP client comparison with net/http
|
|
||||||
|
|
||||||
In short, fasthttp client is up to 10 times faster than net/http.
|
|
||||||
Below are benchmark results.
|
|
||||||
|
|
||||||
*GOMAXPROCS=1*
|
|
||||||
|
|
||||||
net/http client:
|
|
||||||
```
|
|
||||||
$ GOMAXPROCS=1 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
|
|
||||||
BenchmarkNetHTTPClientDoFastServer 1000000 12567 ns/op 2616 B/op 35 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd1TCP 200000 67030 ns/op 5028 B/op 56 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd10TCP 300000 51098 ns/op 5031 B/op 56 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd100TCP 300000 45096 ns/op 5026 B/op 55 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd1Inmemory 500000 24779 ns/op 5035 B/op 57 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd10Inmemory 1000000 26425 ns/op 5035 B/op 57 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd100Inmemory 500000 28515 ns/op 5045 B/op 57 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd1000Inmemory 500000 39511 ns/op 5096 B/op 56 allocs/op
|
|
||||||
```
|
|
||||||
|
|
||||||
fasthttp client:
|
|
||||||
```
|
|
||||||
$ GOMAXPROCS=1 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
|
|
||||||
BenchmarkClientDoFastServer 20000000 865 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd1TCP 1000000 18711 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd10TCP 1000000 14664 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd100TCP 1000000 14043 ns/op 1 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd1Inmemory 5000000 3965 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd10Inmemory 3000000 4060 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd100Inmemory 5000000 3396 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd1000Inmemory 5000000 3306 ns/op 2 B/op 0 allocs/op
|
|
||||||
```
|
|
||||||
|
|
||||||
*GOMAXPROCS=4*
|
|
||||||
|
|
||||||
net/http client:
|
|
||||||
```
|
|
||||||
$ GOMAXPROCS=4 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
|
|
||||||
BenchmarkNetHTTPClientDoFastServer-4 2000000 8774 ns/op 2619 B/op 35 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd1TCP-4 500000 22951 ns/op 5047 B/op 56 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd10TCP-4 1000000 19182 ns/op 5037 B/op 55 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd100TCP-4 1000000 16535 ns/op 5031 B/op 55 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd1Inmemory-4 1000000 14495 ns/op 5038 B/op 56 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd10Inmemory-4 1000000 10237 ns/op 5034 B/op 56 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd100Inmemory-4 1000000 10125 ns/op 5045 B/op 56 allocs/op
|
|
||||||
BenchmarkNetHTTPClientGetEndToEnd1000Inmemory-4 1000000 11132 ns/op 5136 B/op 56 allocs/op
|
|
||||||
```
|
|
||||||
|
|
||||||
fasthttp client:
|
|
||||||
```
|
|
||||||
$ GOMAXPROCS=4 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
|
|
||||||
BenchmarkClientDoFastServer-4 50000000 397 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd1TCP-4 2000000 7388 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd10TCP-4 2000000 6689 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd100TCP-4 3000000 4927 ns/op 1 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd1Inmemory-4 10000000 1604 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd10Inmemory-4 10000000 1458 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd100Inmemory-4 10000000 1329 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkClientGetEndToEnd1000Inmemory-4 10000000 1316 ns/op 5 B/op 0 allocs/op
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
# Install
|
|
||||||
|
|
||||||
```
|
|
||||||
go get -u github.com/valyala/fasthttp
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
# Switching from net/http to fasthttp
|
|
||||||
|
|
||||||
Unfortunately, fasthttp doesn't provide API identical to net/http.
|
|
||||||
See the [FAQ](#faq) for details.
|
|
||||||
There is [net/http -> fasthttp handler converter](https://godoc.org/github.com/valyala/fasthttp/fasthttpadaptor),
|
|
||||||
but it is better to write fasthttp request handlers by hand in order to use
|
|
||||||
all of the fasthttp advantages (especially high performance :) ).
|
|
||||||
|
|
||||||
Important points:
|
|
||||||
|
|
||||||
* Fasthttp works with [RequestHandler functions](https://godoc.org/github.com/valyala/fasthttp#RequestHandler)
|
|
||||||
instead of objects implementing [Handler interface](https://golang.org/pkg/net/http/#Handler).
|
|
||||||
Fortunately, it is easy to pass bound struct methods to fasthttp:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type MyHandler struct {
|
|
||||||
foobar string
|
|
||||||
}
|
|
||||||
|
|
||||||
// request handler in net/http style, i.e. method bound to MyHandler struct.
|
|
||||||
func (h *MyHandler) HandleFastHTTP(ctx *fasthttp.RequestCtx) {
|
|
||||||
// notice that we may access MyHandler properties here - see h.foobar.
|
|
||||||
fmt.Fprintf(ctx, "Hello, world! Requested path is %q. Foobar is %q",
|
|
||||||
ctx.Path(), h.foobar)
|
|
||||||
}
|
|
||||||
|
|
||||||
// request handler in fasthttp style, i.e. just plain function.
|
|
||||||
func fastHTTPHandler(ctx *fasthttp.RequestCtx) {
|
|
||||||
fmt.Fprintf(ctx, "Hi there! RequestURI is %q", ctx.RequestURI())
|
|
||||||
}
|
|
||||||
|
|
||||||
// pass bound struct method to fasthttp
|
|
||||||
myHandler := &MyHandler{
|
|
||||||
foobar: "foobar",
|
|
||||||
}
|
|
||||||
fasthttp.ListenAndServe(":8080", myHandler.HandleFastHTTP)
|
|
||||||
|
|
||||||
// pass plain function to fasthttp
|
|
||||||
fasthttp.ListenAndServe(":8081", fastHTTPHandler)
|
|
||||||
```
|
|
||||||
|
|
||||||
* The [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler)
|
|
||||||
accepts only one argument - [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx).
|
|
||||||
It contains all the functionality required for http request processing
|
|
||||||
and response writing. Below is an example of a simple request handler conversion
|
|
||||||
from net/http to fasthttp.
|
|
||||||
|
|
||||||
```go
|
|
||||||
// net/http request handler
|
|
||||||
requestHandler := func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
switch r.URL.Path {
|
|
||||||
case "/foo":
|
|
||||||
fooHandler(w, r)
|
|
||||||
case "/bar":
|
|
||||||
barHandler(w, r)
|
|
||||||
default:
|
|
||||||
http.Error(w, "Unsupported path", http.StatusNotFound)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
// the corresponding fasthttp request handler
|
|
||||||
requestHandler := func(ctx *fasthttp.RequestCtx) {
|
|
||||||
switch string(ctx.Path()) {
|
|
||||||
case "/foo":
|
|
||||||
fooHandler(ctx)
|
|
||||||
case "/bar":
|
|
||||||
barHandler(ctx)
|
|
||||||
default:
|
|
||||||
ctx.Error("Unsupported path", fasthttp.StatusNotFound)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
* Fasthttp allows setting response headers and writing response body
|
|
||||||
in an arbitrary order. There is no 'headers first, then body' restriction
|
|
||||||
like in net/http. The following code is valid for fasthttp:
|
|
||||||
|
|
||||||
```go
|
|
||||||
requestHandler := func(ctx *fasthttp.RequestCtx) {
|
|
||||||
// set some headers and status code first
|
|
||||||
ctx.SetContentType("foo/bar")
|
|
||||||
ctx.SetStatusCode(fasthttp.StatusOK)
|
|
||||||
|
|
||||||
// then write the first part of body
|
|
||||||
fmt.Fprintf(ctx, "this is the first part of body\n")
|
|
||||||
|
|
||||||
// then set more headers
|
|
||||||
ctx.Response.Header.Set("Foo-Bar", "baz")
|
|
||||||
|
|
||||||
// then write more body
|
|
||||||
fmt.Fprintf(ctx, "this is the second part of body\n")
|
|
||||||
|
|
||||||
// then override already written body
|
|
||||||
ctx.SetBody([]byte("this is completely new body contents"))
|
|
||||||
|
|
||||||
// then update status code
|
|
||||||
ctx.SetStatusCode(fasthttp.StatusNotFound)
|
|
||||||
|
|
||||||
// basically, anything may be updated many times before
|
|
||||||
// returning from RequestHandler.
|
|
||||||
//
|
|
||||||
// Unlike net/http fasthttp doesn't put response to the wire until
|
|
||||||
// returning from RequestHandler.
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
* Fasthttp doesn't provide [ServeMux](https://golang.org/pkg/net/http/#ServeMux),
|
|
||||||
but there are more powerful third-party routers and web frameworks
|
|
||||||
with fasthttp support:
|
|
||||||
|
|
||||||
* [Iris](https://github.com/kataras/iris)
|
|
||||||
* [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing)
|
|
||||||
* [fasthttprouter](https://github.com/buaazp/fasthttprouter)
|
|
||||||
* [lu](https://github.com/vincentLiuxiang/lu)
|
|
||||||
|
|
||||||
Net/http code with simple ServeMux is trivially converted to fasthttp code:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// net/http code
|
|
||||||
|
|
||||||
m := &http.ServeMux{}
|
|
||||||
m.HandleFunc("/foo", fooHandlerFunc)
|
|
||||||
m.HandleFunc("/bar", barHandlerFunc)
|
|
||||||
m.Handle("/baz", bazHandler)
|
|
||||||
|
|
||||||
http.ListenAndServe(":80", m)
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
// the corresponding fasthttp code
|
|
||||||
m := func(ctx *fasthttp.RequestCtx) {
|
|
||||||
switch string(ctx.Path()) {
|
|
||||||
case "/foo":
|
|
||||||
fooHandlerFunc(ctx)
|
|
||||||
case "/bar":
|
|
||||||
barHandlerFunc(ctx)
|
|
||||||
case "/baz":
|
|
||||||
bazHandler.HandlerFunc(ctx)
|
|
||||||
default:
|
|
||||||
ctx.Error("not found", fasthttp.StatusNotFound)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fasthttp.ListenAndServe(":80", m)
|
|
||||||
```
|
|
||||||
|
|
||||||
* net/http -> fasthttp conversion table:
|
|
||||||
|
|
||||||
* All the pseudocode below assumes w, r and ctx have these types:
|
|
||||||
```go
|
|
||||||
var (
|
|
||||||
w http.ResponseWriter
|
|
||||||
r *http.Request
|
|
||||||
ctx *fasthttp.RequestCtx
|
|
||||||
)
|
|
||||||
```
|
|
||||||
* r.Body -> [ctx.PostBody()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostBody)
|
|
||||||
* r.URL.Path -> [ctx.Path()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Path)
|
|
||||||
* r.URL -> [ctx.URI()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.URI)
|
|
||||||
* r.Method -> [ctx.Method()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Method)
|
|
||||||
* r.Header -> [ctx.Request.Header](https://godoc.org/github.com/valyala/fasthttp#RequestHeader)
|
|
||||||
* r.Header.Get() -> [ctx.Request.Header.Peek()](https://godoc.org/github.com/valyala/fasthttp#RequestHeader.Peek)
|
|
||||||
* r.Host -> [ctx.Host()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Host)
|
|
||||||
* r.Form -> [ctx.QueryArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.QueryArgs) +
|
|
||||||
[ctx.PostArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostArgs)
|
|
||||||
* r.PostForm -> [ctx.PostArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostArgs)
|
|
||||||
* r.FormValue() -> [ctx.FormValue()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.FormValue)
|
|
||||||
* r.FormFile() -> [ctx.FormFile()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.FormFile)
|
|
||||||
* r.MultipartForm -> [ctx.MultipartForm()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.MultipartForm)
|
|
||||||
* r.RemoteAddr -> [ctx.RemoteAddr()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.RemoteAddr)
|
|
||||||
* r.RequestURI -> [ctx.RequestURI()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.RequestURI)
|
|
||||||
* r.TLS -> [ctx.IsTLS()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.IsTLS)
|
|
||||||
* r.Cookie() -> [ctx.Request.Header.Cookie()](https://godoc.org/github.com/valyala/fasthttp#RequestHeader.Cookie)
|
|
||||||
* r.Referer() -> [ctx.Referer()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Referer)
|
|
||||||
* r.UserAgent() -> [ctx.UserAgent()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.UserAgent)
|
|
||||||
* w.Header() -> [ctx.Response.Header](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader)
|
|
||||||
* w.Header().Set() -> [ctx.Response.Header.Set()](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader.Set)
|
|
||||||
* w.Header().Set("Content-Type") -> [ctx.SetContentType()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetContentType)
|
|
||||||
* w.Header().Set("Set-Cookie") -> [ctx.Response.Header.SetCookie()](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader.SetCookie)
|
|
||||||
* w.Write() -> [ctx.Write()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Write),
|
|
||||||
[ctx.SetBody()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBody),
|
|
||||||
[ctx.SetBodyStream()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBodyStream),
|
|
||||||
[ctx.SetBodyStreamWriter()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBodyStreamWriter)
|
|
||||||
* w.WriteHeader() -> [ctx.SetStatusCode()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetStatusCode)
|
|
||||||
* w.(http.Hijacker).Hijack() -> [ctx.Hijack()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack)
|
|
||||||
* http.Error() -> [ctx.Error()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Error)
|
|
||||||
* http.FileServer() -> [fasthttp.FSHandler()](https://godoc.org/github.com/valyala/fasthttp#FSHandler),
|
|
||||||
[fasthttp.FS](https://godoc.org/github.com/valyala/fasthttp#FS)
|
|
||||||
* http.ServeFile() -> [fasthttp.ServeFile()](https://godoc.org/github.com/valyala/fasthttp#ServeFile)
|
|
||||||
* http.Redirect() -> [ctx.Redirect()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Redirect)
|
|
||||||
* http.NotFound() -> [ctx.NotFound()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.NotFound)
|
|
||||||
* http.StripPrefix() -> [fasthttp.PathRewriteFunc](https://godoc.org/github.com/valyala/fasthttp#PathRewriteFunc)
|
|
||||||
|
|
||||||
* *VERY IMPORTANT!* Fasthttp disallows holding references
|
|
||||||
to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx) or to its'
|
|
||||||
members after returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
|
|
||||||
Otherwise [data races](http://blog.golang.org/race-detector) are inevitable.
|
|
||||||
Carefully inspect all the net/http request handlers converted to fasthttp whether
|
|
||||||
they retain references to RequestCtx or to its' members after returning.
|
|
||||||
RequestCtx provides the following _band aids_ for this case:
|
|
||||||
|
|
||||||
* Wrap RequestHandler into [TimeoutHandler](https://godoc.org/github.com/valyala/fasthttp#TimeoutHandler).
|
|
||||||
* Call [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError)
|
|
||||||
before returning from RequestHandler if there are references to RequestCtx or to its' members.
|
|
||||||
See [the example](https://godoc.org/github.com/valyala/fasthttp#example-RequestCtx-TimeoutError)
|
|
||||||
for more details.
|
|
||||||
|
|
||||||
Use this brilliant tool - [race detector](http://blog.golang.org/race-detector) -
|
|
||||||
for detecting and eliminating data races in your program. If you detected
|
|
||||||
data race related to fasthttp in your program, then there is high probability
|
|
||||||
you forgot calling [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError)
|
|
||||||
before returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
|
|
||||||
|
|
||||||
* Blind switching from net/http to fasthttp won't give you performance boost.
|
|
||||||
While fasthttp is optimized for speed, its' performance may be easily saturated
|
|
||||||
by slow [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
|
|
||||||
So [profile](http://blog.golang.org/profiling-go-programs) and optimize your
|
|
||||||
code after switching to fasthttp. For instance, use [quicktemplate](https://github.com/valyala/quicktemplate)
|
|
||||||
instead of [html/template](https://golang.org/pkg/html/template/).
|
|
||||||
|
|
||||||
* See also [fasthttputil](https://godoc.org/github.com/valyala/fasthttp/fasthttputil),
|
|
||||||
[fasthttpadaptor](https://godoc.org/github.com/valyala/fasthttp/fasthttpadaptor) and
|
|
||||||
[expvarhandler](https://godoc.org/github.com/valyala/fasthttp/expvarhandler).
|
|
||||||
|
|
||||||
|
|
||||||
# Performance optimization tips for multi-core systems
|
|
||||||
|
|
||||||
* Use [reuseport](https://godoc.org/github.com/valyala/fasthttp/reuseport) listener.
|
|
||||||
* Run a separate server instance per CPU core with GOMAXPROCS=1.
|
|
||||||
* Pin each server instance to a separate CPU core using [taskset](http://linux.die.net/man/1/taskset).
|
|
||||||
* Ensure the interrupts of multiqueue network card are evenly distributed between CPU cores.
|
|
||||||
See [this article](https://blog.cloudflare.com/how-to-achieve-low-latency/) for details.
|
|
||||||
* Use Go 1.6 as it provides some considerable performance improvements.
|
|
||||||
|
|
||||||
|
|
||||||
# Fasthttp best practices
|
|
||||||
|
|
||||||
* Do not allocate objects and `[]byte` buffers - just reuse them as much
|
|
||||||
as possible. Fasthttp API design encourages this.
|
|
||||||
* [sync.Pool](https://golang.org/pkg/sync/#Pool) is your best friend.
|
|
||||||
* [Profile your program](http://blog.golang.org/profiling-go-programs)
|
|
||||||
in production.
|
|
||||||
`go tool pprof --alloc_objects your-program mem.pprof` usually gives better
|
|
||||||
insights for optimization opportunities than `go tool pprof your-program cpu.pprof`.
|
|
||||||
* Write [tests and benchmarks](https://golang.org/pkg/testing/) for hot paths.
|
|
||||||
* Avoid conversion between `[]byte` and `string`, since this may result in memory
|
|
||||||
allocation+copy. Fasthttp API provides functions for both `[]byte` and `string` -
|
|
||||||
use these functions instead of converting manually between `[]byte` and `string`.
|
|
||||||
There are some exceptions - see [this wiki page](https://github.com/golang/go/wiki/CompilerOptimizations#string-and-byte)
|
|
||||||
for more details.
|
|
||||||
* Verify your tests and production code under
|
|
||||||
[race detector](https://golang.org/doc/articles/race_detector.html) on a regular basis.
|
|
||||||
* Prefer [quicktemplate](https://github.com/valyala/quicktemplate) instead of
|
|
||||||
[html/template](https://golang.org/pkg/html/template/) in your webserver.
|
|
||||||
|
|
||||||
|
|
||||||
# Tricks with `[]byte` buffers
|
|
||||||
|
|
||||||
The following tricks are used by fasthttp. Use them in your code too.
|
|
||||||
|
|
||||||
* Standard Go functions accept nil buffers
|
|
||||||
```go
|
|
||||||
var (
|
|
||||||
// both buffers are uninitialized
|
|
||||||
dst []byte
|
|
||||||
src []byte
|
|
||||||
)
|
|
||||||
dst = append(dst, src...) // is legal if dst is nil and/or src is nil
|
|
||||||
copy(dst, src) // is legal if dst is nil and/or src is nil
|
|
||||||
(string(src) == "") // is true if src is nil
|
|
||||||
(len(src) == 0) // is true if src is nil
|
|
||||||
src = src[:0] // works like a charm with nil src
|
|
||||||
|
|
||||||
// this for loop doesn't panic if src is nil
|
|
||||||
for i, ch := range src {
|
|
||||||
doSomething(i, ch)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
So throw away nil checks for `[]byte` buffers from you code. For example,
|
|
||||||
```go
|
|
||||||
srcLen := 0
|
|
||||||
if src != nil {
|
|
||||||
srcLen = len(src)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
becomes
|
|
||||||
|
|
||||||
```go
|
|
||||||
srcLen := len(src)
|
|
||||||
```
|
|
||||||
|
|
||||||
* String may be appended to `[]byte` buffer with `append`
|
|
||||||
```go
|
|
||||||
dst = append(dst, "foobar"...)
|
|
||||||
```
|
|
||||||
|
|
||||||
* `[]byte` buffer may be extended to its' capacity.
|
|
||||||
```go
|
|
||||||
buf := make([]byte, 100)
|
|
||||||
a := buf[:10] // len(a) == 10, cap(a) == 100.
|
|
||||||
b := a[:100] // is valid, since cap(a) == 100.
|
|
||||||
```
|
|
||||||
|
|
||||||
* All fasthttp functions accept nil `[]byte` buffer
|
|
||||||
```go
|
|
||||||
statusCode, body, err := fasthttp.Get(nil, "http://google.com/")
|
|
||||||
uintBuf := fasthttp.AppendUint(nil, 1234)
|
|
||||||
```
|
|
||||||
|
|
||||||
# Related projects
|
|
||||||
|
|
||||||
* [fasthttp-contrib](https://github.com/fasthttp-contrib) - various useful
|
|
||||||
helpers for projects based on fasthttp.
|
|
||||||
* [iris](https://github.com/kataras/iris) - web application framework built
|
|
||||||
on top of fasthttp. Features speed and functionality.
|
|
||||||
* [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) - fast and
|
|
||||||
powerful routing package for fasthttp servers.
|
|
||||||
* [fasthttprouter](https://github.com/buaazp/fasthttprouter) - a high
|
|
||||||
performance fasthttp request router that scales well.
|
|
||||||
* [lu](https://github.com/vincentLiuxiang/lu) - a high performance
|
|
||||||
go middleware web framework which is based on fasthttp.
|
|
||||||
* [websocket](https://github.com/leavengood/websocket) - Gorilla-based
|
|
||||||
websocket implementation for fasthttp.
|
|
||||||
|
|
||||||
|
|
||||||
# FAQ
|
|
||||||
|
|
||||||
* *Why creating yet another http package instead of optimizing net/http?*
|
|
||||||
|
|
||||||
Because net/http API limits many optimization opportunities.
|
|
||||||
For example:
|
|
||||||
* net/http Request object lifetime isn't limited by request handler execution
|
|
||||||
time. So the server must create a new request object per each request instead
|
|
||||||
of reusing existing objects like fasthttp does.
|
|
||||||
* net/http headers are stored in a `map[string][]string`. So the server
|
|
||||||
must parse all the headers, convert them from `[]byte` to `string` and put
|
|
||||||
them into the map before calling user-provided request handler.
|
|
||||||
This all requires unnecessary memory allocations avoided by fasthttp.
|
|
||||||
* net/http client API requires creating a new response object per each request.
|
|
||||||
|
|
||||||
* *Why fasthttp API is incompatible with net/http?*
|
|
||||||
|
|
||||||
Because net/http API limits many optimization opportunities. See the answer
|
|
||||||
above for more details. Also certain net/http API parts are suboptimal
|
|
||||||
for use:
|
|
||||||
* Compare [net/http connection hijacking](https://golang.org/pkg/net/http/#Hijacker)
|
|
||||||
to [fasthttp connection hijacking](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack).
|
|
||||||
* Compare [net/http Request.Body reading](https://golang.org/pkg/net/http/#Request)
|
|
||||||
to [fasthttp request body reading](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostBody).
|
|
||||||
|
|
||||||
* *Why fasthttp doesn't support HTTP/2.0 and WebSockets?*
|
|
||||||
|
|
||||||
There are [plans](TODO) for adding HTTP/2.0 and WebSockets support
|
|
||||||
in the future.
|
|
||||||
In the mean time, third parties may use [RequestCtx.Hijack](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack)
|
|
||||||
for implementing these goodies. See [the first third-party websocket implementation on the top of fasthttp](https://github.com/leavengood/websocket).
|
|
||||||
|
|
||||||
* *Are there known net/http advantages comparing to fasthttp?*
|
|
||||||
|
|
||||||
Yes:
|
|
||||||
* net/http supports [HTTP/2.0 starting from go1.6](https://http2.golang.org/).
|
|
||||||
* net/http API is stable, while fasthttp API constantly evolves.
|
|
||||||
* net/http handles more HTTP corner cases.
|
|
||||||
* net/http should contain less bugs, since it is used and tested by much
|
|
||||||
wider audience.
|
|
||||||
* net/http works on Go older than 1.5.
|
|
||||||
|
|
||||||
* *Why fasthttp API prefers returning `[]byte` instead of `string`?*
|
|
||||||
|
|
||||||
Because `[]byte` to `string` conversion isn't free - it requires memory
|
|
||||||
allocation and copy. Feel free wrapping returned `[]byte` result into
|
|
||||||
`string()` if you prefer working with strings instead of byte slices.
|
|
||||||
But be aware that this has non-zero overhead.
|
|
||||||
|
|
||||||
* *Which GO versions are supported by fasthttp?*
|
|
||||||
|
|
||||||
Go1.5+. Older versions won't be supported, since their standard package
|
|
||||||
[miss useful functions](https://github.com/valyala/fasthttp/issues/5).
|
|
||||||
|
|
||||||
* *Please provide real benchmark data and sever information*
|
|
||||||
|
|
||||||
See [this issue](https://github.com/valyala/fasthttp/issues/4).
|
|
||||||
|
|
||||||
* *Are there plans to add request routing to fasthttp?*
|
|
||||||
|
|
||||||
There are no plans to add request routing into fasthttp.
|
|
||||||
Use third-party routers and web frameworks with fasthttp support:
|
|
||||||
|
|
||||||
* [Iris](https://github.com/kataras/iris)
|
|
||||||
* [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing)
|
|
||||||
* [fasthttprouter](https://github.com/buaazp/fasthttprouter)
|
|
||||||
* [gramework](https://github.com/gramework/gramework)
|
|
||||||
* [lu](https://github.com/vincentLiuxiang/lu)
|
|
||||||
|
|
||||||
See also [this issue](https://github.com/valyala/fasthttp/issues/9) for more info.
|
|
||||||
|
|
||||||
* *I detected data race in fasthttp!*
|
|
||||||
|
|
||||||
Cool! [File a bug](https://github.com/valyala/fasthttp/issues/new). But before
|
|
||||||
doing this check the following in your code:
|
|
||||||
|
|
||||||
* Make sure there are no references to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx)
|
|
||||||
or to its' members after returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
|
|
||||||
* Make sure you call [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError)
|
|
||||||
before returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler)
|
|
||||||
if there are references to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx)
|
|
||||||
or to its' members, which may be accessed by other goroutines.
|
|
||||||
|
|
||||||
* *I didn't find an answer for my question here*
|
|
||||||
|
|
||||||
Try exploring [these questions](https://github.com/valyala/fasthttp/issues?q=label%3Aquestion).
|
|
||||||
4 vendor/github.com/valyala/fasthttp/TODO generated vendored
@@ -1,4 +0,0 @@
- SessionClient with referer and cookies support.
- ProxyHandler similar to FSHandler.
- WebSockets. See https://tools.ietf.org/html/rfc6455 .
- HTTP/2.0. See https://tools.ietf.org/html/rfc7540 .
517 vendor/github.com/valyala/fasthttp/args.go generated vendored
@@ -1,517 +0,0 @@
package fasthttp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AcquireArgs returns an empty Args object from the pool.
|
|
||||||
//
|
|
||||||
// The returned Args may be returned to the pool with ReleaseArgs
|
|
||||||
// when no longer needed. This allows reducing GC load.
|
|
||||||
func AcquireArgs() *Args {
|
|
||||||
return argsPool.Get().(*Args)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReleaseArgs returns the object acquired via AquireArgs to the pool.
|
|
||||||
//
|
|
||||||
// Do not access the released Args object, otherwise data races may occur.
|
|
||||||
func ReleaseArgs(a *Args) {
|
|
||||||
a.Reset()
|
|
||||||
argsPool.Put(a)
|
|
||||||
}
|
|
||||||
|
|
||||||
var argsPool = &sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
return &Args{}
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Args represents query arguments.
|
|
||||||
//
|
|
||||||
// It is forbidden copying Args instances. Create new instances instead
|
|
||||||
// and use CopyTo().
|
|
||||||
//
|
|
||||||
// Args instance MUST NOT be used from concurrently running goroutines.
|
|
||||||
type Args struct {
|
|
||||||
noCopy noCopy
|
|
||||||
|
|
||||||
args []argsKV
|
|
||||||
buf []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
type argsKV struct {
|
|
||||||
key []byte
|
|
||||||
value []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset clears query args.
|
|
||||||
func (a *Args) Reset() {
|
|
||||||
a.args = a.args[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyTo copies all args to dst.
|
|
||||||
func (a *Args) CopyTo(dst *Args) {
|
|
||||||
dst.Reset()
|
|
||||||
dst.args = copyArgs(dst.args, a.args)
|
|
||||||
}
|
|
||||||
|
|
||||||
// VisitAll calls f for each existing arg.
|
|
||||||
//
|
|
||||||
// f must not retain references to key and value after returning.
|
|
||||||
// Make key and/or value copies if you need storing them after returning.
|
|
||||||
func (a *Args) VisitAll(f func(key, value []byte)) {
|
|
||||||
visitArgs(a.args, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of query args.
|
|
||||||
func (a *Args) Len() int {
|
|
||||||
return len(a.args)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse parses the given string containing query args.
func (a *Args) Parse(s string) {
	a.buf = append(a.buf[:0], s...)
	a.ParseBytes(a.buf)
}

// ParseBytes parses the given b containing query args.
func (a *Args) ParseBytes(b []byte) {
	a.Reset()

	var s argsScanner
	s.b = b

	var kv *argsKV
	a.args, kv = allocArg(a.args)
	for s.next(kv) {
		if len(kv.key) > 0 || len(kv.value) > 0 {
			a.args, kv = allocArg(a.args)
		}
	}
	a.args = releaseArg(a.args)
}
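// exampleParseArgs is an editor's illustrative sketch, not part of the
// original args.go: Parse decodes a query string into key/value pairs that
// can then be read back with Peek (defined further down in this file).
func exampleParseArgs() []byte {
	var a Args
	a.Parse("a=Foo%20Bar&b=FOO%20BAR")
	return a.Peek("b") // []byte("FOO BAR")
}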
|
|
||||||
|
|
||||||
// String returns string representation of query args.
|
|
||||||
func (a *Args) String() string {
|
|
||||||
return string(a.QueryString())
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryString returns query string for the args.
|
|
||||||
//
|
|
||||||
// The returned value is valid until the next call to Args methods.
|
|
||||||
func (a *Args) QueryString() []byte {
|
|
||||||
a.buf = a.AppendBytes(a.buf[:0])
|
|
||||||
return a.buf
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendBytes appends query string to dst and returns the extended dst.
|
|
||||||
func (a *Args) AppendBytes(dst []byte) []byte {
|
|
||||||
for i, n := 0, len(a.args); i < n; i++ {
|
|
||||||
kv := &a.args[i]
|
|
||||||
dst = AppendQuotedArg(dst, kv.key)
|
|
||||||
if len(kv.value) > 0 {
|
|
||||||
dst = append(dst, '=')
|
|
||||||
dst = AppendQuotedArg(dst, kv.value)
|
|
||||||
}
|
|
||||||
if i+1 < n {
|
|
||||||
dst = append(dst, '&')
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteTo writes query string to w.
|
|
||||||
//
|
|
||||||
// WriteTo implements io.WriterTo interface.
|
|
||||||
func (a *Args) WriteTo(w io.Writer) (int64, error) {
|
|
||||||
n, err := w.Write(a.QueryString())
|
|
||||||
return int64(n), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Del deletes argument with the given key from query args.
|
|
||||||
func (a *Args) Del(key string) {
|
|
||||||
a.args = delAllArgs(a.args, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DelBytes deletes argument with the given key from query args.
|
|
||||||
func (a *Args) DelBytes(key []byte) {
|
|
||||||
a.args = delAllArgs(a.args, b2s(key))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds 'key=value' argument.
|
|
||||||
//
|
|
||||||
// Multiple values for the same key may be added.
|
|
||||||
func (a *Args) Add(key, value string) {
|
|
||||||
a.args = appendArg(a.args, key, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddBytesK adds 'key=value' argument.
|
|
||||||
//
|
|
||||||
// Multiple values for the same key may be added.
|
|
||||||
func (a *Args) AddBytesK(key []byte, value string) {
|
|
||||||
a.args = appendArg(a.args, b2s(key), value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddBytesV adds 'key=value' argument.
|
|
||||||
//
|
|
||||||
// Multiple values for the same key may be added.
|
|
||||||
func (a *Args) AddBytesV(key string, value []byte) {
|
|
||||||
a.args = appendArg(a.args, key, b2s(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddBytesKV adds 'key=value' argument.
|
|
||||||
//
|
|
||||||
// Multiple values for the same key may be added.
|
|
||||||
func (a *Args) AddBytesKV(key, value []byte) {
|
|
||||||
a.args = appendArg(a.args, b2s(key), b2s(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets 'key=value' argument.
|
|
||||||
func (a *Args) Set(key, value string) {
|
|
||||||
a.args = setArg(a.args, key, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBytesK sets 'key=value' argument.
|
|
||||||
func (a *Args) SetBytesK(key []byte, value string) {
|
|
||||||
a.args = setArg(a.args, b2s(key), value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBytesV sets 'key=value' argument.
|
|
||||||
func (a *Args) SetBytesV(key string, value []byte) {
|
|
||||||
a.args = setArg(a.args, key, b2s(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBytesKV sets 'key=value' argument.
|
|
||||||
func (a *Args) SetBytesKV(key, value []byte) {
|
|
||||||
a.args = setArgBytes(a.args, key, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Peek returns query arg value for the given key.
|
|
||||||
//
|
|
||||||
// Returned value is valid until the next Args call.
|
|
||||||
func (a *Args) Peek(key string) []byte {
|
|
||||||
return peekArgStr(a.args, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekBytes returns query arg value for the given key.
|
|
||||||
//
|
|
||||||
// Returned value is valid until the next Args call.
|
|
||||||
func (a *Args) PeekBytes(key []byte) []byte {
|
|
||||||
return peekArgBytes(a.args, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekMulti returns all the arg values for the given key.
|
|
||||||
func (a *Args) PeekMulti(key string) [][]byte {
|
|
||||||
var values [][]byte
|
|
||||||
a.VisitAll(func(k, v []byte) {
|
|
||||||
if string(k) == key {
|
|
||||||
values = append(values, v)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
return values
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekMultiBytes returns all the arg values for the given key.
|
|
||||||
func (a *Args) PeekMultiBytes(key []byte) [][]byte {
|
|
||||||
return a.PeekMulti(b2s(key))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Has returns true if the given key exists in Args.
|
|
||||||
func (a *Args) Has(key string) bool {
|
|
||||||
return hasArg(a.args, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasBytes returns true if the given key exists in Args.
|
|
||||||
func (a *Args) HasBytes(key []byte) bool {
|
|
||||||
return hasArg(a.args, b2s(key))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrNoArgValue is returned when Args value with the given key is missing.
|
|
||||||
var ErrNoArgValue = errors.New("no Args value for the given key")
|
|
||||||
|
|
||||||
// GetUint returns uint value for the given key.
|
|
||||||
func (a *Args) GetUint(key string) (int, error) {
|
|
||||||
value := a.Peek(key)
|
|
||||||
if len(value) == 0 {
|
|
||||||
return -1, ErrNoArgValue
|
|
||||||
}
|
|
||||||
return ParseUint(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetUint sets uint value for the given key.
|
|
||||||
func (a *Args) SetUint(key string, value int) {
|
|
||||||
bb := AcquireByteBuffer()
|
|
||||||
bb.B = AppendUint(bb.B[:0], value)
|
|
||||||
a.SetBytesV(key, bb.B)
|
|
||||||
ReleaseByteBuffer(bb)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetUintBytes sets uint value for the given key.
|
|
||||||
func (a *Args) SetUintBytes(key []byte, value int) {
|
|
||||||
a.SetUint(b2s(key), value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUintOrZero returns uint value for the given key.
|
|
||||||
//
|
|
||||||
// Zero (0) is returned on error.
|
|
||||||
func (a *Args) GetUintOrZero(key string) int {
|
|
||||||
n, err := a.GetUint(key)
|
|
||||||
if err != nil {
|
|
||||||
n = 0
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUfloat returns ufloat value for the given key.
|
|
||||||
func (a *Args) GetUfloat(key string) (float64, error) {
|
|
||||||
value := a.Peek(key)
|
|
||||||
if len(value) == 0 {
|
|
||||||
return -1, ErrNoArgValue
|
|
||||||
}
|
|
||||||
return ParseUfloat(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUfloatOrZero returns ufloat value for the given key.
|
|
||||||
//
|
|
||||||
// Zero (0) is returned on error.
|
|
||||||
func (a *Args) GetUfloatOrZero(key string) float64 {
|
|
||||||
f, err := a.GetUfloat(key)
|
|
||||||
if err != nil {
|
|
||||||
f = 0
|
|
||||||
}
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBool returns boolean value for the given key.
|
|
||||||
//
|
|
||||||
// true is returned for '1', 'y' and 'yes' values,
|
|
||||||
// otherwise false is returned.
|
|
||||||
func (a *Args) GetBool(key string) bool {
|
|
||||||
switch string(a.Peek(key)) {
|
|
||||||
case "1", "y", "yes":
|
|
||||||
return true
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func visitArgs(args []argsKV, f func(k, v []byte)) {
|
|
||||||
for i, n := 0, len(args); i < n; i++ {
|
|
||||||
kv := &args[i]
|
|
||||||
f(kv.key, kv.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func copyArgs(dst, src []argsKV) []argsKV {
|
|
||||||
if cap(dst) < len(src) {
|
|
||||||
tmp := make([]argsKV, len(src))
|
|
||||||
copy(tmp, dst)
|
|
||||||
dst = tmp
|
|
||||||
}
|
|
||||||
n := len(src)
|
|
||||||
dst = dst[:n]
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
dstKV := &dst[i]
|
|
||||||
srcKV := &src[i]
|
|
||||||
dstKV.key = append(dstKV.key[:0], srcKV.key...)
|
|
||||||
dstKV.value = append(dstKV.value[:0], srcKV.value...)
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
func delAllArgsBytes(args []argsKV, key []byte) []argsKV {
|
|
||||||
return delAllArgs(args, b2s(key))
|
|
||||||
}
|
|
||||||
|
|
||||||
func delAllArgs(args []argsKV, key string) []argsKV {
|
|
||||||
for i, n := 0, len(args); i < n; i++ {
|
|
||||||
kv := &args[i]
|
|
||||||
if key == string(kv.key) {
|
|
||||||
tmp := *kv
|
|
||||||
copy(args[i:], args[i+1:])
|
|
||||||
n--
|
|
||||||
args[n] = tmp
|
|
||||||
args = args[:n]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return args
|
|
||||||
}
|
|
||||||
|
|
||||||
func setArgBytes(h []argsKV, key, value []byte) []argsKV {
|
|
||||||
return setArg(h, b2s(key), b2s(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
func setArg(h []argsKV, key, value string) []argsKV {
|
|
||||||
n := len(h)
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
kv := &h[i]
|
|
||||||
if key == string(kv.key) {
|
|
||||||
kv.value = append(kv.value[:0], value...)
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return appendArg(h, key, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
func appendArgBytes(h []argsKV, key, value []byte) []argsKV {
|
|
||||||
return appendArg(h, b2s(key), b2s(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
func appendArg(args []argsKV, key, value string) []argsKV {
|
|
||||||
var kv *argsKV
|
|
||||||
args, kv = allocArg(args)
|
|
||||||
kv.key = append(kv.key[:0], key...)
|
|
||||||
kv.value = append(kv.value[:0], value...)
|
|
||||||
return args
|
|
||||||
}
|
|
||||||
|
|
||||||
func allocArg(h []argsKV) ([]argsKV, *argsKV) {
|
|
||||||
n := len(h)
|
|
||||||
if cap(h) > n {
|
|
||||||
h = h[:n+1]
|
|
||||||
} else {
|
|
||||||
h = append(h, argsKV{})
|
|
||||||
}
|
|
||||||
return h, &h[n]
|
|
||||||
}
|
|
||||||
|
|
||||||
func releaseArg(h []argsKV) []argsKV {
|
|
||||||
return h[:len(h)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasArg(h []argsKV, key string) bool {
|
|
||||||
for i, n := 0, len(h); i < n; i++ {
|
|
||||||
kv := &h[i]
|
|
||||||
if key == string(kv.key) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func peekArgBytes(h []argsKV, k []byte) []byte {
|
|
||||||
for i, n := 0, len(h); i < n; i++ {
|
|
||||||
kv := &h[i]
|
|
||||||
if bytes.Equal(kv.key, k) {
|
|
||||||
return kv.value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func peekArgStr(h []argsKV, k string) []byte {
|
|
||||||
for i, n := 0, len(h); i < n; i++ {
|
|
||||||
kv := &h[i]
|
|
||||||
if string(kv.key) == k {
|
|
||||||
return kv.value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type argsScanner struct {
|
|
||||||
b []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *argsScanner) next(kv *argsKV) bool {
|
|
||||||
if len(s.b) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
isKey := true
|
|
||||||
k := 0
|
|
||||||
for i, c := range s.b {
|
|
||||||
switch c {
|
|
||||||
case '=':
|
|
||||||
if isKey {
|
|
||||||
isKey = false
|
|
||||||
kv.key = decodeArgAppend(kv.key[:0], s.b[:i])
|
|
||||||
k = i + 1
|
|
||||||
}
|
|
||||||
case '&':
|
|
||||||
if isKey {
|
|
||||||
kv.key = decodeArgAppend(kv.key[:0], s.b[:i])
|
|
||||||
kv.value = kv.value[:0]
|
|
||||||
} else {
|
|
||||||
kv.value = decodeArgAppend(kv.value[:0], s.b[k:i])
|
|
||||||
}
|
|
||||||
s.b = s.b[i+1:]
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if isKey {
|
|
||||||
kv.key = decodeArgAppend(kv.key[:0], s.b)
|
|
||||||
kv.value = kv.value[:0]
|
|
||||||
} else {
|
|
||||||
kv.value = decodeArgAppend(kv.value[:0], s.b[k:])
|
|
||||||
}
|
|
||||||
s.b = s.b[len(s.b):]
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeArgAppend(dst, src []byte) []byte {
	if bytes.IndexByte(src, '%') < 0 && bytes.IndexByte(src, '+') < 0 {
		// fast path: src doesn't contain encoded chars
		return append(dst, src...)
	}

	// slow path
	for i := 0; i < len(src); i++ {
		c := src[i]
		if c == '%' {
			if i+2 >= len(src) {
				return append(dst, src[i:]...)
			}
			x2 := hex2intTable[src[i+2]]
			x1 := hex2intTable[src[i+1]]
			if x1 == 16 || x2 == 16 {
				dst = append(dst, '%')
			} else {
				dst = append(dst, x1<<4|x2)
				i += 2
			}
		} else if c == '+' {
			dst = append(dst, ' ')
		} else {
			dst = append(dst, c)
		}
	}
	return dst
}
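// exampleDecodeArg is an editor's illustrative sketch, not part of the
// original args.go: decodeArgAppend percent-decodes src and, unlike
// decodeArgAppendNoPlus below, also turns '+' into a space.
func exampleDecodeArg() string {
	dst := decodeArgAppend(nil, []byte("Foo%20Bar+Baz"))
	return string(dst) // "Foo Bar Baz"
}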
|
|
||||||
|
|
||||||
// decodeArgAppendNoPlus is almost identical to decodeArgAppend, but it doesn't
|
|
||||||
// substitute '+' with ' '.
|
|
||||||
//
|
|
||||||
// The function is copy-pasted from decodeArgAppend due to the performance
|
|
||||||
// reasons only.
|
|
||||||
func decodeArgAppendNoPlus(dst, src []byte) []byte {
|
|
||||||
if bytes.IndexByte(src, '%') < 0 {
|
|
||||||
// fast path: src doesn't contain encoded chars
|
|
||||||
return append(dst, src...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// slow path
|
|
||||||
for i := 0; i < len(src); i++ {
|
|
||||||
c := src[i]
|
|
||||||
if c == '%' {
|
|
||||||
if i+2 >= len(src) {
|
|
||||||
return append(dst, src[i:]...)
|
|
||||||
}
|
|
||||||
x2 := hex2intTable[src[i+2]]
|
|
||||||
x1 := hex2intTable[src[i+1]]
|
|
||||||
if x1 == 16 || x2 == 16 {
|
|
||||||
dst = append(dst, '%')
|
|
||||||
} else {
|
|
||||||
dst = append(dst, x1<<4|x2)
|
|
||||||
i += 2
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
dst = append(dst, c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
64 vendor/github.com/valyala/fasthttp/bytebuffer.go generated vendored
@@ -1,64 +0,0 @@
package fasthttp

import (
	"github.com/valyala/bytebufferpool"
)

// ByteBuffer provides byte buffer, which can be used with fasthttp API
// in order to minimize memory allocations.
//
// ByteBuffer may be used with functions appending data to the given []byte
// slice. See example code for details.
//
// Use AcquireByteBuffer for obtaining an empty byte buffer.
//
// ByteBuffer is deprecated. Use github.com/valyala/bytebufferpool instead.
type ByteBuffer bytebufferpool.ByteBuffer

// Write implements io.Writer - it appends p to ByteBuffer.B
func (b *ByteBuffer) Write(p []byte) (int, error) {
	return bb(b).Write(p)
}

// WriteString appends s to ByteBuffer.B
func (b *ByteBuffer) WriteString(s string) (int, error) {
	return bb(b).WriteString(s)
}

// Set sets ByteBuffer.B to p
func (b *ByteBuffer) Set(p []byte) {
	bb(b).Set(p)
}

// SetString sets ByteBuffer.B to s
func (b *ByteBuffer) SetString(s string) {
	bb(b).SetString(s)
}

// Reset makes ByteBuffer.B empty.
func (b *ByteBuffer) Reset() {
	bb(b).Reset()
}

// AcquireByteBuffer returns an empty byte buffer from the pool.
//
// Acquired byte buffer may be returned to the pool via ReleaseByteBuffer call.
// This reduces the number of memory allocations required for byte buffer
// management.
func AcquireByteBuffer() *ByteBuffer {
	return (*ByteBuffer)(defaultByteBufferPool.Get())
}

// ReleaseByteBuffer returns byte buffer to the pool.
//
// ByteBuffer.B mustn't be touched after returning it to the pool.
// Otherwise data races occur.
func ReleaseByteBuffer(b *ByteBuffer) {
	defaultByteBufferPool.Put(bb(b))
}

func bb(b *ByteBuffer) *bytebufferpool.ByteBuffer {
	return (*bytebufferpool.ByteBuffer)(b)
}

var defaultByteBufferPool bytebufferpool.Pool
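An editor's hedged sketch of the acquire/release pattern documented in the file above; it assumes the package is importable as github.com/valyala/fasthttp and is not part of the original sources.

package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	b := fasthttp.AcquireByteBuffer()
	b.B = append(b.B, "hello "...) // B is the underlying byte slice
	b.WriteString("world")
	fmt.Println(string(b.B)) // "hello world"
	fasthttp.ReleaseByteBuffer(b) // b.B must not be used after this point
}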
447 vendor/github.com/valyala/fasthttp/bytesconv.go generated vendored
@@ -1,447 +0,0 @@
package fasthttp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"net"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AppendHTMLEscape appends html-escaped s to dst and returns the extended dst.
|
|
||||||
func AppendHTMLEscape(dst []byte, s string) []byte {
|
|
||||||
if strings.IndexByte(s, '<') < 0 &&
|
|
||||||
strings.IndexByte(s, '>') < 0 &&
|
|
||||||
strings.IndexByte(s, '"') < 0 &&
|
|
||||||
strings.IndexByte(s, '\'') < 0 {
|
|
||||||
|
|
||||||
// fast path - nothing to escape
|
|
||||||
return append(dst, s...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// slow path
|
|
||||||
var prev int
|
|
||||||
var sub string
|
|
||||||
for i, n := 0, len(s); i < n; i++ {
|
|
||||||
sub = ""
|
|
||||||
switch s[i] {
case '<':
	sub = "&lt;"
case '>':
	sub = "&gt;"
case '"':
	sub = "&quot;"
case '\'':
	sub = "&#39;"
}
|
|
||||||
if len(sub) > 0 {
|
|
||||||
dst = append(dst, s[prev:i]...)
|
|
||||||
dst = append(dst, sub...)
|
|
||||||
prev = i + 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return append(dst, s[prev:]...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendHTMLEscapeBytes appends html-escaped s to dst and returns
|
|
||||||
// the extended dst.
|
|
||||||
func AppendHTMLEscapeBytes(dst, s []byte) []byte {
|
|
||||||
return AppendHTMLEscape(dst, b2s(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendIPv4 appends string representation of the given ip v4 to dst
|
|
||||||
// and returns the extended dst.
|
|
||||||
func AppendIPv4(dst []byte, ip net.IP) []byte {
|
|
||||||
ip = ip.To4()
|
|
||||||
if ip == nil {
|
|
||||||
return append(dst, "non-v4 ip passed to AppendIPv4"...)
|
|
||||||
}
|
|
||||||
|
|
||||||
dst = AppendUint(dst, int(ip[0]))
|
|
||||||
for i := 1; i < 4; i++ {
|
|
||||||
dst = append(dst, '.')
|
|
||||||
dst = AppendUint(dst, int(ip[i]))
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
var errEmptyIPStr = errors.New("empty ip address string")
|
|
||||||
|
|
||||||
// ParseIPv4 parses ip address from ipStr into dst and returns the extended dst.
|
|
||||||
func ParseIPv4(dst net.IP, ipStr []byte) (net.IP, error) {
|
|
||||||
if len(ipStr) == 0 {
|
|
||||||
return dst, errEmptyIPStr
|
|
||||||
}
|
|
||||||
if len(dst) < net.IPv4len {
|
|
||||||
dst = make([]byte, net.IPv4len)
|
|
||||||
}
|
|
||||||
copy(dst, net.IPv4zero)
|
|
||||||
dst = dst.To4()
|
|
||||||
if dst == nil {
|
|
||||||
panic("BUG: dst must not be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
b := ipStr
|
|
||||||
for i := 0; i < 3; i++ {
|
|
||||||
n := bytes.IndexByte(b, '.')
|
|
||||||
if n < 0 {
|
|
||||||
return dst, fmt.Errorf("cannot find dot in ipStr %q", ipStr)
|
|
||||||
}
|
|
||||||
v, err := ParseUint(b[:n])
|
|
||||||
if err != nil {
|
|
||||||
return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err)
|
|
||||||
}
|
|
||||||
if v > 255 {
|
|
||||||
return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v)
|
|
||||||
}
|
|
||||||
dst[i] = byte(v)
|
|
||||||
b = b[n+1:]
|
|
||||||
}
|
|
||||||
v, err := ParseUint(b)
|
|
||||||
if err != nil {
|
|
||||||
return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err)
|
|
||||||
}
|
|
||||||
if v > 255 {
|
|
||||||
return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v)
|
|
||||||
}
|
|
||||||
dst[3] = byte(v)
|
|
||||||
|
|
||||||
return dst, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendHTTPDate appends HTTP-compliant (RFC1123) representation of date
|
|
||||||
// to dst and returns the extended dst.
|
|
||||||
func AppendHTTPDate(dst []byte, date time.Time) []byte {
|
|
||||||
dst = date.In(time.UTC).AppendFormat(dst, time.RFC1123)
|
|
||||||
copy(dst[len(dst)-3:], strGMT)
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseHTTPDate parses HTTP-compliant (RFC1123) date.
|
|
||||||
func ParseHTTPDate(date []byte) (time.Time, error) {
|
|
||||||
return time.Parse(time.RFC1123, b2s(date))
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendUint appends n to dst and returns the extended dst.
|
|
||||||
func AppendUint(dst []byte, n int) []byte {
|
|
||||||
if n < 0 {
|
|
||||||
panic("BUG: int must be positive")
|
|
||||||
}
|
|
||||||
|
|
||||||
var b [20]byte
|
|
||||||
buf := b[:]
|
|
||||||
i := len(buf)
|
|
||||||
var q int
|
|
||||||
for n >= 10 {
|
|
||||||
i--
|
|
||||||
q = n / 10
|
|
||||||
buf[i] = '0' + byte(n-q*10)
|
|
||||||
n = q
|
|
||||||
}
|
|
||||||
i--
|
|
||||||
buf[i] = '0' + byte(n)
|
|
||||||
|
|
||||||
dst = append(dst, buf[i:]...)
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseUint parses uint from buf.
func ParseUint(buf []byte) (int, error) {
	v, n, err := parseUintBuf(buf)
	if n != len(buf) {
		return -1, errUnexpectedTrailingChar
	}
	return v, err
}
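// exampleUintRoundTrip is an editor's illustrative sketch, not part of the
// original bytesconv.go: AppendUint and ParseUint round-trip a non-negative
// integer through its decimal byte representation.
func exampleUintRoundTrip() (int, error) {
	buf := AppendUint(nil, 12345) // []byte("12345")
	return ParseUint(buf)         // 12345, nil
}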
|
|
||||||
|
|
||||||
var (
|
|
||||||
errEmptyInt = errors.New("empty integer")
|
|
||||||
errUnexpectedFirstChar = errors.New("unexpected first char found. Expecting 0-9")
|
|
||||||
errUnexpectedTrailingChar = errors.New("unexpected trailing char found. Expecting 0-9")
|
|
||||||
errTooLongInt = errors.New("too long int")
|
|
||||||
)
|
|
||||||
|
|
||||||
func parseUintBuf(b []byte) (int, int, error) {
|
|
||||||
n := len(b)
|
|
||||||
if n == 0 {
|
|
||||||
return -1, 0, errEmptyInt
|
|
||||||
}
|
|
||||||
v := 0
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
c := b[i]
|
|
||||||
k := c - '0'
|
|
||||||
if k > 9 {
|
|
||||||
if i == 0 {
|
|
||||||
return -1, i, errUnexpectedFirstChar
|
|
||||||
}
|
|
||||||
return v, i, nil
|
|
||||||
}
|
|
||||||
if i >= maxIntChars {
|
|
||||||
return -1, i, errTooLongInt
|
|
||||||
}
|
|
||||||
v = 10*v + int(k)
|
|
||||||
}
|
|
||||||
return v, n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
errEmptyFloat = errors.New("empty float number")
|
|
||||||
errDuplicateFloatPoint = errors.New("duplicate point found in float number")
|
|
||||||
errUnexpectedFloatEnd = errors.New("unexpected end of float number")
|
|
||||||
errInvalidFloatExponent = errors.New("invalid float number exponent")
|
|
||||||
errUnexpectedFloatChar = errors.New("unexpected char found in float number")
|
|
||||||
)
|
|
||||||
|
|
||||||
// ParseUfloat parses unsigned float from buf.
|
|
||||||
func ParseUfloat(buf []byte) (float64, error) {
|
|
||||||
if len(buf) == 0 {
|
|
||||||
return -1, errEmptyFloat
|
|
||||||
}
|
|
||||||
b := buf
|
|
||||||
var v uint64
|
|
||||||
var offset = 1.0
|
|
||||||
var pointFound bool
|
|
||||||
for i, c := range b {
|
|
||||||
if c < '0' || c > '9' {
|
|
||||||
if c == '.' {
|
|
||||||
if pointFound {
|
|
||||||
return -1, errDuplicateFloatPoint
|
|
||||||
}
|
|
||||||
pointFound = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if c == 'e' || c == 'E' {
|
|
||||||
if i+1 >= len(b) {
|
|
||||||
return -1, errUnexpectedFloatEnd
|
|
||||||
}
|
|
||||||
b = b[i+1:]
|
|
||||||
minus := -1
|
|
||||||
switch b[0] {
|
|
||||||
case '+':
|
|
||||||
b = b[1:]
|
|
||||||
minus = 1
|
|
||||||
case '-':
|
|
||||||
b = b[1:]
|
|
||||||
default:
|
|
||||||
minus = 1
|
|
||||||
}
|
|
||||||
vv, err := ParseUint(b)
|
|
||||||
if err != nil {
|
|
||||||
return -1, errInvalidFloatExponent
|
|
||||||
}
|
|
||||||
return float64(v) * offset * math.Pow10(minus*int(vv)), nil
|
|
||||||
}
|
|
||||||
return -1, errUnexpectedFloatChar
|
|
||||||
}
|
|
||||||
v = 10*v + uint64(c-'0')
|
|
||||||
if pointFound {
|
|
||||||
offset /= 10
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return float64(v) * offset, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
errEmptyHexNum = errors.New("empty hex number")
|
|
||||||
errTooLargeHexNum = errors.New("too large hex number")
|
|
||||||
)
|
|
||||||
|
|
||||||
func readHexInt(r *bufio.Reader) (int, error) {
|
|
||||||
n := 0
|
|
||||||
i := 0
|
|
||||||
var k int
|
|
||||||
for {
|
|
||||||
c, err := r.ReadByte()
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF && i > 0 {
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
k = int(hex2intTable[c])
|
|
||||||
if k == 16 {
|
|
||||||
if i == 0 {
|
|
||||||
return -1, errEmptyHexNum
|
|
||||||
}
|
|
||||||
r.UnreadByte()
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
if i >= maxHexIntChars {
|
|
||||||
return -1, errTooLargeHexNum
|
|
||||||
}
|
|
||||||
n = (n << 4) | k
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var hexIntBufPool sync.Pool
|
|
||||||
|
|
||||||
func writeHexInt(w *bufio.Writer, n int) error {
|
|
||||||
if n < 0 {
|
|
||||||
panic("BUG: int must be positive")
|
|
||||||
}
|
|
||||||
|
|
||||||
v := hexIntBufPool.Get()
|
|
||||||
if v == nil {
|
|
||||||
v = make([]byte, maxHexIntChars+1)
|
|
||||||
}
|
|
||||||
buf := v.([]byte)
|
|
||||||
i := len(buf) - 1
|
|
||||||
for {
|
|
||||||
buf[i] = int2hexbyte(n & 0xf)
|
|
||||||
n >>= 4
|
|
||||||
if n == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
i--
|
|
||||||
}
|
|
||||||
_, err := w.Write(buf[i:])
|
|
||||||
hexIntBufPool.Put(v)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func int2hexbyte(n int) byte {
|
|
||||||
if n < 10 {
|
|
||||||
return '0' + byte(n)
|
|
||||||
}
|
|
||||||
return 'a' + byte(n) - 10
|
|
||||||
}
|
|
||||||
|
|
||||||
func hexCharUpper(c byte) byte {
|
|
||||||
if c < 10 {
|
|
||||||
return '0' + c
|
|
||||||
}
|
|
||||||
return c - 10 + 'A'
|
|
||||||
}
|
|
||||||
|
|
||||||
var hex2intTable = func() []byte {
|
|
||||||
b := make([]byte, 255)
|
|
||||||
for i := byte(0); i < 255; i++ {
|
|
||||||
c := byte(16)
|
|
||||||
if i >= '0' && i <= '9' {
|
|
||||||
c = i - '0'
|
|
||||||
} else if i >= 'a' && i <= 'f' {
|
|
||||||
c = i - 'a' + 10
|
|
||||||
} else if i >= 'A' && i <= 'F' {
|
|
||||||
c = i - 'A' + 10
|
|
||||||
}
|
|
||||||
b[i] = c
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}()
|
|
||||||
|
|
||||||
const toLower = 'a' - 'A'
|
|
||||||
|
|
||||||
var toLowerTable = func() [256]byte {
|
|
||||||
var a [256]byte
|
|
||||||
for i := 0; i < 256; i++ {
|
|
||||||
c := byte(i)
|
|
||||||
if c >= 'A' && c <= 'Z' {
|
|
||||||
c += toLower
|
|
||||||
}
|
|
||||||
a[i] = c
|
|
||||||
}
|
|
||||||
return a
|
|
||||||
}()
|
|
||||||
|
|
||||||
var toUpperTable = func() [256]byte {
|
|
||||||
var a [256]byte
|
|
||||||
for i := 0; i < 256; i++ {
|
|
||||||
c := byte(i)
|
|
||||||
if c >= 'a' && c <= 'z' {
|
|
||||||
c -= toLower
|
|
||||||
}
|
|
||||||
a[i] = c
|
|
||||||
}
|
|
||||||
return a
|
|
||||||
}()
|
|
||||||
|
|
||||||
func lowercaseBytes(b []byte) {
|
|
||||||
for i := 0; i < len(b); i++ {
|
|
||||||
p := &b[i]
|
|
||||||
*p = toLowerTable[*p]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// b2s converts byte slice to a string without memory allocation.
// See https://groups.google.com/forum/#!msg/Golang-Nuts/ENgbUzYvCuU/90yGx7GUAgAJ .
//
// Note it may break if string and/or slice header will change
// in the future go versions.
func b2s(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// s2b converts string to a byte slice without memory allocation.
//
// Note it may break if string and/or slice header will change
// in the future go versions.
func s2b(s string) []byte {
	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	bh := reflect.SliceHeader{
		Data: sh.Data,
		Len:  sh.Len,
		Cap:  sh.Len,
	}
	return *(*[]byte)(unsafe.Pointer(&bh))
}
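// exampleZeroCopyConv is an editor's illustrative sketch, not part of the
// original bytesconv.go: b2s and s2b avoid allocations, but the results
// alias their inputs, so the []byte from s2b must never be modified and the
// []byte passed to b2s must not change while the string is in use.
func exampleZeroCopyConv() (string, []byte) {
	s := b2s([]byte("read-only view"))
	b := s2b("do not mutate me")
	return s, b
}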
|
|
||||||
|
|
||||||
// AppendUnquotedArg appends url-decoded src to dst and returns appended dst.
|
|
||||||
//
|
|
||||||
// dst may point to src. In this case src will be overwritten.
|
|
||||||
func AppendUnquotedArg(dst, src []byte) []byte {
|
|
||||||
return decodeArgAppend(dst, src)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendQuotedArg appends url-encoded src to dst and returns appended dst.
|
|
||||||
func AppendQuotedArg(dst, src []byte) []byte {
|
|
||||||
for _, c := range src {
|
|
||||||
// See http://www.w3.org/TR/html5/forms.html#form-submission-algorithm
|
|
||||||
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
|
|
||||||
c == '*' || c == '-' || c == '.' || c == '_' {
|
|
||||||
dst = append(dst, c)
|
|
||||||
} else {
|
|
||||||
dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
func appendQuotedPath(dst, src []byte) []byte {
|
|
||||||
for _, c := range src {
|
|
||||||
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
|
|
||||||
c == '/' || c == '.' || c == ',' || c == '=' || c == ':' || c == '&' || c == '~' || c == '-' || c == '_' {
|
|
||||||
dst = append(dst, c)
|
|
||||||
} else {
|
|
||||||
dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
// EqualBytesStr returns true if string(b) == s.
|
|
||||||
//
|
|
||||||
// This function has no performance benefits comparing to string(b) == s.
|
|
||||||
// It is left here for backwards compatibility only.
|
|
||||||
//
|
|
||||||
// This function is deprecated and may be deleted soon.
|
|
||||||
func EqualBytesStr(b []byte, s string) bool {
|
|
||||||
return string(b) == s
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendBytesStr appends src to dst and returns the extended dst.
|
|
||||||
//
|
|
||||||
// This function has no performance benefits comparing to append(dst, src...).
|
|
||||||
// It is left here for backwards compatibility only.
|
|
||||||
//
|
|
||||||
// This function is deprecated and may be deleted soon.
|
|
||||||
func AppendBytesStr(dst []byte, src string) []byte {
|
|
||||||
return append(dst, src...)
|
|
||||||
}
|
|
||||||
8 vendor/github.com/valyala/fasthttp/bytesconv_32.go generated vendored
@@ -1,8 +0,0 @@
// +build !amd64,!arm64,!ppc64

package fasthttp

const (
	maxIntChars    = 9
	maxHexIntChars = 7
)
8 vendor/github.com/valyala/fasthttp/bytesconv_64.go generated vendored
@@ -1,8 +0,0 @@
// +build amd64 arm64 ppc64

package fasthttp

const (
	maxIntChars    = 18
	maxHexIntChars = 15
)
2163 vendor/github.com/valyala/fasthttp/client.go generated vendored
File diff suppressed because it is too large.
28 vendor/github.com/valyala/fasthttp/coarseTime.go generated vendored
@@ -1,28 +0,0 @@
package fasthttp

import (
	"sync/atomic"
	"time"
)

// CoarseTimeNow returns the current time truncated to the nearest second.
//
// This is a faster alternative to time.Now().
func CoarseTimeNow() time.Time {
	tp := coarseTime.Load().(*time.Time)
	return *tp
}

func init() {
	t := time.Now().Truncate(time.Second)
	coarseTime.Store(&t)
	go func() {
		for {
			time.Sleep(time.Second)
			t := time.Now().Truncate(time.Second)
			coarseTime.Store(&t)
		}
	}()
}

var coarseTime atomic.Value
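A hedged usage sketch for the helper above (editor's addition, assuming the package import path github.com/valyala/fasthttp): CoarseTimeNow trades sub-second precision for speed, which is usually enough for coarse timestamps and deadlines.

package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	start := fasthttp.CoarseTimeNow() // second-granularity timestamp
	// ... do some work ...
	elapsed := fasthttp.CoarseTimeNow().Sub(start)
	fmt.Println("elapsed (coarse):", elapsed)
}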
440 vendor/github.com/valyala/fasthttp/compress.go generated vendored
@@ -1,440 +0,0 @@
package fasthttp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/klauspost/compress/flate"
|
|
||||||
"github.com/klauspost/compress/gzip"
|
|
||||||
"github.com/klauspost/compress/zlib"
|
|
||||||
"github.com/valyala/bytebufferpool"
|
|
||||||
"github.com/valyala/fasthttp/stackless"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Supported compression levels.
|
|
||||||
const (
|
|
||||||
CompressNoCompression = flate.NoCompression
|
|
||||||
CompressBestSpeed = flate.BestSpeed
|
|
||||||
CompressBestCompression = flate.BestCompression
|
|
||||||
CompressDefaultCompression = 6 // flate.DefaultCompression
|
|
||||||
CompressHuffmanOnly = -2 // flate.HuffmanOnly
|
|
||||||
)
|
|
||||||
|
|
||||||
func acquireGzipReader(r io.Reader) (*gzip.Reader, error) {
|
|
||||||
v := gzipReaderPool.Get()
|
|
||||||
if v == nil {
|
|
||||||
return gzip.NewReader(r)
|
|
||||||
}
|
|
||||||
zr := v.(*gzip.Reader)
|
|
||||||
if err := zr.Reset(r); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return zr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func releaseGzipReader(zr *gzip.Reader) {
|
|
||||||
zr.Close()
|
|
||||||
gzipReaderPool.Put(zr)
|
|
||||||
}
|
|
||||||
|
|
||||||
var gzipReaderPool sync.Pool
|
|
||||||
|
|
||||||
func acquireFlateReader(r io.Reader) (io.ReadCloser, error) {
|
|
||||||
v := flateReaderPool.Get()
|
|
||||||
if v == nil {
|
|
||||||
zr, err := zlib.NewReader(r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return zr, nil
|
|
||||||
}
|
|
||||||
zr := v.(io.ReadCloser)
|
|
||||||
if err := resetFlateReader(zr, r); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return zr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func releaseFlateReader(zr io.ReadCloser) {
|
|
||||||
zr.Close()
|
|
||||||
flateReaderPool.Put(zr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func resetFlateReader(zr io.ReadCloser, r io.Reader) error {
|
|
||||||
zrr, ok := zr.(zlib.Resetter)
|
|
||||||
if !ok {
|
|
||||||
panic("BUG: zlib.Reader doesn't implement zlib.Resetter???")
|
|
||||||
}
|
|
||||||
return zrr.Reset(r, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
var flateReaderPool sync.Pool
|
|
||||||
|
|
||||||
func acquireStacklessGzipWriter(w io.Writer, level int) stackless.Writer {
|
|
||||||
nLevel := normalizeCompressLevel(level)
|
|
||||||
p := stacklessGzipWriterPoolMap[nLevel]
|
|
||||||
v := p.Get()
|
|
||||||
if v == nil {
|
|
||||||
return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
|
|
||||||
return acquireRealGzipWriter(w, level)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
sw := v.(stackless.Writer)
|
|
||||||
sw.Reset(w)
|
|
||||||
return sw
|
|
||||||
}
|
|
||||||
|
|
||||||
func releaseStacklessGzipWriter(sw stackless.Writer, level int) {
|
|
||||||
sw.Close()
|
|
||||||
nLevel := normalizeCompressLevel(level)
|
|
||||||
p := stacklessGzipWriterPoolMap[nLevel]
|
|
||||||
p.Put(sw)
|
|
||||||
}
|
|
||||||
|
|
||||||
func acquireRealGzipWriter(w io.Writer, level int) *gzip.Writer {
|
|
||||||
nLevel := normalizeCompressLevel(level)
|
|
||||||
p := realGzipWriterPoolMap[nLevel]
|
|
||||||
v := p.Get()
|
|
||||||
if v == nil {
|
|
||||||
zw, err := gzip.NewWriterLevel(w, level)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err))
|
|
||||||
}
|
|
||||||
return zw
|
|
||||||
}
|
|
||||||
zw := v.(*gzip.Writer)
|
|
||||||
zw.Reset(w)
|
|
||||||
return zw
|
|
||||||
}
|
|
||||||
|
|
||||||
func releaseRealGzipWriter(zw *gzip.Writer, level int) {
|
|
||||||
zw.Close()
|
|
||||||
nLevel := normalizeCompressLevel(level)
|
|
||||||
p := realGzipWriterPoolMap[nLevel]
|
|
||||||
p.Put(zw)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
stacklessGzipWriterPoolMap = newCompressWriterPoolMap()
|
|
||||||
realGzipWriterPoolMap = newCompressWriterPoolMap()
|
|
||||||
)
|
|
||||||
|
|
||||||
// AppendGzipBytesLevel appends gzipped src to dst using the given
|
|
||||||
// compression level and returns the resulting dst.
|
|
||||||
//
|
|
||||||
// Supported compression levels are:
|
|
||||||
//
|
|
||||||
// * CompressNoCompression
|
|
||||||
// * CompressBestSpeed
|
|
||||||
// * CompressBestCompression
|
|
||||||
// * CompressDefaultCompression
|
|
||||||
// * CompressHuffmanOnly
|
|
||||||
func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
|
|
||||||
w := &byteSliceWriter{dst}
|
|
||||||
WriteGzipLevel(w, src, level)
|
|
||||||
return w.b
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteGzipLevel writes gzipped p to w using the given compression level
|
|
||||||
// and returns the number of compressed bytes written to w.
|
|
||||||
//
|
|
||||||
// Supported compression levels are:
|
|
||||||
//
|
|
||||||
// * CompressNoCompression
|
|
||||||
// * CompressBestSpeed
|
|
||||||
// * CompressBestCompression
|
|
||||||
// * CompressDefaultCompression
|
|
||||||
// * CompressHuffmanOnly
|
|
||||||
func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) {
|
|
||||||
switch w.(type) {
|
|
||||||
case *byteSliceWriter,
|
|
||||||
*bytes.Buffer,
|
|
||||||
*ByteBuffer,
|
|
||||||
*bytebufferpool.ByteBuffer:
|
|
||||||
// These writers don't block, so we can just use stacklessWriteGzip
|
|
||||||
ctx := &compressCtx{
|
|
||||||
w: w,
|
|
||||||
p: p,
|
|
||||||
level: level,
|
|
||||||
}
|
|
||||||
stacklessWriteGzip(ctx)
|
|
||||||
return len(p), nil
|
|
||||||
default:
|
|
||||||
zw := acquireStacklessGzipWriter(w, level)
|
|
||||||
n, err := zw.Write(p)
|
|
||||||
releaseStacklessGzipWriter(zw, level)
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var stacklessWriteGzip = stackless.NewFunc(nonblockingWriteGzip)
|
|
||||||
|
|
||||||
func nonblockingWriteGzip(ctxv interface{}) {
|
|
||||||
ctx := ctxv.(*compressCtx)
|
|
||||||
zw := acquireRealGzipWriter(ctx.w, ctx.level)
|
|
||||||
|
|
||||||
_, err := zw.Write(ctx.p)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("BUG: gzip.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
|
|
||||||
}
|
|
||||||
|
|
||||||
releaseRealGzipWriter(zw, ctx.level)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteGzip writes gzipped p to w and returns the number of compressed
|
|
||||||
// bytes written to w.
|
|
||||||
func WriteGzip(w io.Writer, p []byte) (int, error) {
|
|
||||||
return WriteGzipLevel(w, p, CompressDefaultCompression)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendGzipBytes appends gzipped src to dst and returns the resulting dst.
|
|
||||||
func AppendGzipBytes(dst, src []byte) []byte {
|
|
||||||
return AppendGzipBytesLevel(dst, src, CompressDefaultCompression)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteGunzip writes ungzipped p to w and returns the number of uncompressed
|
|
||||||
// bytes written to w.
|
|
||||||
func WriteGunzip(w io.Writer, p []byte) (int, error) {
|
|
||||||
r := &byteSliceReader{p}
|
|
||||||
zr, err := acquireGzipReader(r)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n, err := copyZeroAlloc(w, zr)
|
|
||||||
releaseGzipReader(zr)
|
|
||||||
nn := int(n)
|
|
||||||
if int64(nn) != n {
|
|
||||||
return 0, fmt.Errorf("too much data gunzipped: %d", n)
|
|
||||||
}
|
|
||||||
return nn, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst.
|
|
||||||
func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
|
|
||||||
w := &byteSliceWriter{dst}
|
|
||||||
_, err := WriteGunzip(w, src)
|
|
||||||
return w.b, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendDeflateBytesLevel appends deflated src to dst using the given
|
|
||||||
// compression level and returns the resulting dst.
|
|
||||||
//
|
|
||||||
// Supported compression levels are:
|
|
||||||
//
|
|
||||||
// * CompressNoCompression
|
|
||||||
// * CompressBestSpeed
|
|
||||||
// * CompressBestCompression
|
|
||||||
// * CompressDefaultCompression
|
|
||||||
// * CompressHuffmanOnly
|
|
||||||
func AppendDeflateBytesLevel(dst, src []byte, level int) []byte {
|
|
||||||
w := &byteSliceWriter{dst}
|
|
||||||
WriteDeflateLevel(w, src, level)
|
|
||||||
return w.b
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteDeflateLevel writes deflated p to w using the given compression level
|
|
||||||
// and returns the number of compressed bytes written to w.
|
|
||||||
//
|
|
||||||
// Supported compression levels are:
|
|
||||||
//
|
|
||||||
// * CompressNoCompression
|
|
||||||
// * CompressBestSpeed
|
|
||||||
// * CompressBestCompression
|
|
||||||
// * CompressDefaultCompression
|
|
||||||
// * CompressHuffmanOnly
|
|
||||||
func WriteDeflateLevel(w io.Writer, p []byte, level int) (int, error) {
|
|
||||||
switch w.(type) {
|
|
||||||
case *byteSliceWriter,
|
|
||||||
*bytes.Buffer,
|
|
||||||
*ByteBuffer,
|
|
||||||
*bytebufferpool.ByteBuffer:
|
|
||||||
// These writers don't block, so we can just use stacklessWriteDeflate
|
|
||||||
ctx := &compressCtx{
|
|
||||||
w: w,
|
|
||||||
p: p,
|
|
||||||
level: level,
|
|
||||||
}
|
|
||||||
stacklessWriteDeflate(ctx)
|
|
||||||
return len(p), nil
|
|
||||||
default:
|
|
||||||
zw := acquireStacklessDeflateWriter(w, level)
|
|
||||||
n, err := zw.Write(p)
|
|
||||||
releaseStacklessDeflateWriter(zw, level)
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var stacklessWriteDeflate = stackless.NewFunc(nonblockingWriteDeflate)
|
|
||||||
|
|
||||||
func nonblockingWriteDeflate(ctxv interface{}) {
|
|
||||||
ctx := ctxv.(*compressCtx)
|
|
||||||
zw := acquireRealDeflateWriter(ctx.w, ctx.level)
|
|
||||||
|
|
||||||
_, err := zw.Write(ctx.p)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("BUG: zlib.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
|
|
||||||
}
|
|
||||||
|
|
||||||
releaseRealDeflateWriter(zw, ctx.level)
|
|
||||||
}
|
|
||||||
|
|
||||||
type compressCtx struct {
|
|
||||||
w io.Writer
|
|
||||||
p []byte
|
|
||||||
level int
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteDeflate writes deflated p to w and returns the number of compressed
|
|
||||||
// bytes written to w.
|
|
||||||
func WriteDeflate(w io.Writer, p []byte) (int, error) {
|
|
||||||
return WriteDeflateLevel(w, p, CompressDefaultCompression)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendDeflateBytes appends deflated src to dst and returns the resulting dst.
|
|
||||||
func AppendDeflateBytes(dst, src []byte) []byte {
|
|
||||||
return AppendDeflateBytesLevel(dst, src, CompressDefaultCompression)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteInflate writes inflated p to w and returns the number of uncompressed
|
|
||||||
// bytes written to w.
|
|
||||||
func WriteInflate(w io.Writer, p []byte) (int, error) {
|
|
||||||
r := &byteSliceReader{p}
|
|
||||||
zr, err := acquireFlateReader(r)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n, err := copyZeroAlloc(w, zr)
|
|
||||||
releaseFlateReader(zr)
|
|
||||||
nn := int(n)
|
|
||||||
if int64(nn) != n {
|
|
||||||
return 0, fmt.Errorf("too much data inflated: %d", n)
|
|
||||||
}
|
|
||||||
return nn, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendInflateBytes appends inflated src to dst and returns the resulting dst.
|
|
||||||
func AppendInflateBytes(dst, src []byte) ([]byte, error) {
|
|
||||||
w := &byteSliceWriter{dst}
|
|
||||||
_, err := WriteInflate(w, src)
|
|
||||||
return w.b, err
|
|
||||||
}
|
|
||||||
|
|
||||||
type byteSliceWriter struct {
|
|
||||||
b []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *byteSliceWriter) Write(p []byte) (int, error) {
|
|
||||||
w.b = append(w.b, p...)
|
|
||||||
return len(p), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type byteSliceReader struct {
|
|
||||||
b []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *byteSliceReader) Read(p []byte) (int, error) {
|
|
||||||
if len(r.b) == 0 {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
n := copy(p, r.b)
|
|
||||||
r.b = r.b[n:]
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func acquireStacklessDeflateWriter(w io.Writer, level int) stackless.Writer {
|
|
||||||
nLevel := normalizeCompressLevel(level)
|
|
||||||
p := stacklessDeflateWriterPoolMap[nLevel]
|
|
||||||
v := p.Get()
|
|
||||||
if v == nil {
|
|
||||||
return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
|
|
||||||
return acquireRealDeflateWriter(w, level)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
sw := v.(stackless.Writer)
|
|
||||||
sw.Reset(w)
|
|
||||||
return sw
|
|
||||||
}
|
|
||||||
|
|
||||||
func releaseStacklessDeflateWriter(sw stackless.Writer, level int) {
|
|
||||||
sw.Close()
|
|
||||||
nLevel := normalizeCompressLevel(level)
|
|
||||||
p := stacklessDeflateWriterPoolMap[nLevel]
|
|
||||||
p.Put(sw)
|
|
||||||
}
|
|
||||||
|
|
||||||
func acquireRealDeflateWriter(w io.Writer, level int) *zlib.Writer {
|
|
||||||
nLevel := normalizeCompressLevel(level)
|
|
||||||
p := realDeflateWriterPoolMap[nLevel]
|
|
||||||
v := p.Get()
|
|
||||||
if v == nil {
|
|
||||||
zw, err := zlib.NewWriterLevel(w, level)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("BUG: unexpected error from zlib.NewWriterLevel(%d): %s", level, err))
|
|
||||||
}
|
|
||||||
return zw
|
|
||||||
}
|
|
||||||
zw := v.(*zlib.Writer)
|
|
||||||
zw.Reset(w)
|
|
||||||
return zw
|
|
||||||
}
|
|
||||||
|
|
||||||
func releaseRealDeflateWriter(zw *zlib.Writer, level int) {
|
|
||||||
zw.Close()
|
|
||||||
nLevel := normalizeCompressLevel(level)
|
|
||||||
p := realDeflateWriterPoolMap[nLevel]
|
|
||||||
p.Put(zw)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
stacklessDeflateWriterPoolMap = newCompressWriterPoolMap()
|
|
||||||
realDeflateWriterPoolMap = newCompressWriterPoolMap()
|
|
||||||
)
|
|
||||||
|
|
||||||
func newCompressWriterPoolMap() []*sync.Pool {
|
|
||||||
// Initialize pools for all the compression levels defined
|
|
||||||
// in https://golang.org/pkg/compress/flate/#pkg-constants .
|
|
||||||
// Compression levels are normalized with normalizeCompressLevel,
|
|
||||||
// so they fit [0..11].
|
|
||||||
var m []*sync.Pool
|
|
||||||
for i := 0; i < 12; i++ {
|
|
||||||
m = append(m, &sync.Pool{})
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
func isFileCompressible(f *os.File, minCompressRatio float64) bool {
|
|
||||||
// Try compressing the first 4kb of the file
|
|
||||||
// and see if it can be compressed by more than
|
|
||||||
// the given minCompressRatio.
|
|
||||||
b := AcquireByteBuffer()
|
|
||||||
zw := acquireStacklessGzipWriter(b, CompressDefaultCompression)
|
|
||||||
lr := &io.LimitedReader{
|
|
||||||
R: f,
|
|
||||||
N: 4096,
|
|
||||||
}
|
|
||||||
_, err := copyZeroAlloc(zw, lr)
|
|
||||||
releaseStacklessGzipWriter(zw, CompressDefaultCompression)
|
|
||||||
f.Seek(0, 0)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
n := 4096 - lr.N
|
|
||||||
zn := len(b.B)
|
|
||||||
ReleaseByteBuffer(b)
|
|
||||||
return float64(zn) < float64(n)*minCompressRatio
|
|
||||||
}
|
|
||||||
|
|
||||||
// normalizes compression level into [0..11], so it could be used as an index
|
|
||||||
// in *PoolMap.
|
|
||||||
func normalizeCompressLevel(level int) int {
|
|
||||||
// -2 is the lowest compression level - CompressHuffmanOnly
|
|
||||||
// 9 is the highest compression level - CompressBestCompression
|
|
||||||
if level < -2 || level > 9 {
|
|
||||||
level = CompressDefaultCompression
|
|
||||||
}
|
|
||||||
return level + 2
|
|
||||||
}
|
|
||||||
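An editor's hedged sketch of the compression helpers defined above (assuming the github.com/valyala/fasthttp import path, not part of the original sources); it gzips a payload at a chosen level and gunzips it back:

package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	plain := []byte("some compressible payload, repeated payload, payload")

	// Compress into a fresh slice at the "best speed" level.
	gz := fasthttp.AppendGzipBytesLevel(nil, plain, fasthttp.CompressBestSpeed)

	// Decompress and verify the round trip.
	back, err := fasthttp.AppendGunzipBytes(nil, gz)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(back) == string(plain)) // true
}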
396 vendor/github.com/valyala/fasthttp/cookie.go generated vendored
@@ -1,396 +0,0 @@
package fasthttp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var zeroTime time.Time
|
|
||||||
|
|
||||||
var (
|
|
||||||
// CookieExpireDelete may be set on Cookie.Expire for expiring the given cookie.
|
|
||||||
CookieExpireDelete = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
|
|
||||||
|
|
||||||
// CookieExpireUnlimited indicates that the cookie doesn't expire.
|
|
||||||
CookieExpireUnlimited = zeroTime
|
|
||||||
)
|
|
||||||
|
|
||||||
// AcquireCookie returns an empty Cookie object from the pool.
|
|
||||||
//
|
|
||||||
// The returned object may be returned back to the pool with ReleaseCookie.
|
|
||||||
// This allows reducing GC load.
|
|
||||||
func AcquireCookie() *Cookie {
|
|
||||||
return cookiePool.Get().(*Cookie)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReleaseCookie returns the Cookie object acquired with AcquireCookie back
|
|
||||||
// to the pool.
|
|
||||||
//
|
|
||||||
// Do not access released Cookie object, otherwise data races may occur.
|
|
||||||
func ReleaseCookie(c *Cookie) {
|
|
||||||
c.Reset()
|
|
||||||
cookiePool.Put(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
var cookiePool = &sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
return &Cookie{}
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cookie represents HTTP response cookie.
|
|
||||||
//
|
|
||||||
// Do not copy Cookie objects. Create new object and use CopyTo instead.
|
|
||||||
//
|
|
||||||
// Cookie instance MUST NOT be used from concurrently running goroutines.
|
|
||||||
type Cookie struct {
|
|
||||||
noCopy noCopy
|
|
||||||
|
|
||||||
key []byte
|
|
||||||
value []byte
|
|
||||||
expire time.Time
|
|
||||||
domain []byte
|
|
||||||
path []byte
|
|
||||||
|
|
||||||
httpOnly bool
|
|
||||||
secure bool
|
|
||||||
|
|
||||||
bufKV argsKV
|
|
||||||
buf []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyTo copies src cookie to c.
|
|
||||||
func (c *Cookie) CopyTo(src *Cookie) {
|
|
||||||
c.Reset()
|
|
||||||
c.key = append(c.key[:0], src.key...)
|
|
||||||
c.value = append(c.value[:0], src.value...)
|
|
||||||
c.expire = src.expire
|
|
||||||
c.domain = append(c.domain[:0], src.domain...)
|
|
||||||
c.path = append(c.path[:0], src.path...)
|
|
||||||
c.httpOnly = src.httpOnly
|
|
||||||
c.secure = src.secure
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPOnly returns true if the cookie is http only.
|
|
||||||
func (c *Cookie) HTTPOnly() bool {
|
|
||||||
return c.httpOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetHTTPOnly sets cookie's httpOnly flag to the given value.
|
|
||||||
func (c *Cookie) SetHTTPOnly(httpOnly bool) {
|
|
||||||
c.httpOnly = httpOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Secure returns true if the cookie is secure.
|
|
||||||
func (c *Cookie) Secure() bool {
|
|
||||||
return c.secure
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSecure sets cookie's secure flag to the given value.
|
|
||||||
func (c *Cookie) SetSecure(secure bool) {
|
|
||||||
c.secure = secure
|
|
||||||
}
|
|
||||||
|
|
||||||
// Path returns cookie path.
|
|
||||||
func (c *Cookie) Path() []byte {
|
|
||||||
return c.path
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetPath sets cookie path.
|
|
||||||
func (c *Cookie) SetPath(path string) {
|
|
||||||
c.buf = append(c.buf[:0], path...)
|
|
||||||
c.path = normalizePath(c.path, c.buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetPathBytes sets cookie path.
func (c *Cookie) SetPathBytes(path []byte) {
    c.buf = append(c.buf[:0], path...)
    c.path = normalizePath(c.path, c.buf)
}

// Domain returns cookie domain.
//
// The returned domain is valid until the next Cookie modification method call.
func (c *Cookie) Domain() []byte {
    return c.domain
}

// SetDomain sets cookie domain.
func (c *Cookie) SetDomain(domain string) {
    c.domain = append(c.domain[:0], domain...)
}

// SetDomainBytes sets cookie domain.
func (c *Cookie) SetDomainBytes(domain []byte) {
    c.domain = append(c.domain[:0], domain...)
}

// Expire returns cookie expiration time.
//
// CookieExpireUnlimited is returned if cookie doesn't expire
func (c *Cookie) Expire() time.Time {
    expire := c.expire
    if expire.IsZero() {
        expire = CookieExpireUnlimited
    }
    return expire
}

// SetExpire sets cookie expiration time.
//
// Set expiration time to CookieExpireDelete for expiring (deleting)
// the cookie on the client.
//
// By default cookie lifetime is limited by browser session.
func (c *Cookie) SetExpire(expire time.Time) {
    c.expire = expire
}

// Value returns cookie value.
//
// The returned value is valid until the next Cookie modification method call.
func (c *Cookie) Value() []byte {
    return c.value
}

// SetValue sets cookie value.
func (c *Cookie) SetValue(value string) {
    c.value = append(c.value[:0], value...)
}

// SetValueBytes sets cookie value.
func (c *Cookie) SetValueBytes(value []byte) {
    c.value = append(c.value[:0], value...)
}

// Key returns cookie name.
//
// The returned value is valid until the next Cookie modification method call.
func (c *Cookie) Key() []byte {
    return c.key
}

// SetKey sets cookie name.
func (c *Cookie) SetKey(key string) {
    c.key = append(c.key[:0], key...)
}

// SetKeyBytes sets cookie name.
func (c *Cookie) SetKeyBytes(key []byte) {
    c.key = append(c.key[:0], key...)
}

// Reset clears the cookie.
func (c *Cookie) Reset() {
    c.key = c.key[:0]
    c.value = c.value[:0]
    c.expire = zeroTime
    c.domain = c.domain[:0]
    c.path = c.path[:0]
    c.httpOnly = false
    c.secure = false
}

// AppendBytes appends cookie representation to dst and returns
// the extended dst.
func (c *Cookie) AppendBytes(dst []byte) []byte {
    if len(c.key) > 0 {
        dst = append(dst, c.key...)
        dst = append(dst, '=')
    }
    dst = append(dst, c.value...)

    if !c.expire.IsZero() {
        c.bufKV.value = AppendHTTPDate(c.bufKV.value[:0], c.expire)
        dst = append(dst, ';', ' ')
        dst = append(dst, strCookieExpires...)
        dst = append(dst, '=')
        dst = append(dst, c.bufKV.value...)
    }
    if len(c.domain) > 0 {
        dst = appendCookiePart(dst, strCookieDomain, c.domain)
    }
    if len(c.path) > 0 {
        dst = appendCookiePart(dst, strCookiePath, c.path)
    }
    if c.httpOnly {
        dst = append(dst, ';', ' ')
        dst = append(dst, strCookieHTTPOnly...)
    }
    if c.secure {
        dst = append(dst, ';', ' ')
        dst = append(dst, strCookieSecure...)
    }
    return dst
}

// Cookie returns cookie representation.
//
// The returned value is valid until the next call to Cookie methods.
func (c *Cookie) Cookie() []byte {
    c.buf = c.AppendBytes(c.buf[:0])
    return c.buf
}

// String returns cookie representation.
func (c *Cookie) String() string {
    return string(c.Cookie())
}

// WriteTo writes cookie representation to w.
//
// WriteTo implements io.WriterTo interface.
func (c *Cookie) WriteTo(w io.Writer) (int64, error) {
    n, err := w.Write(c.Cookie())
    return int64(n), err
}

var errNoCookies = errors.New("no cookies found")

// Parse parses Set-Cookie header.
func (c *Cookie) Parse(src string) error {
    c.buf = append(c.buf[:0], src...)
    return c.ParseBytes(c.buf)
}

// ParseBytes parses Set-Cookie header.
func (c *Cookie) ParseBytes(src []byte) error {
    c.Reset()

    var s cookieScanner
    s.b = src

    kv := &c.bufKV
    if !s.next(kv) {
        return errNoCookies
    }

    c.key = append(c.key[:0], kv.key...)
    c.value = append(c.value[:0], kv.value...)

    for s.next(kv) {
        if len(kv.key) == 0 && len(kv.value) == 0 {
            continue
        }
        switch string(kv.key) {
        case "expires":
            v := b2s(kv.value)
            exptime, err := time.ParseInLocation(time.RFC1123, v, time.UTC)
            if err != nil {
                return err
            }
            c.expire = exptime
        case "domain":
            c.domain = append(c.domain[:0], kv.value...)
        case "path":
            c.path = append(c.path[:0], kv.value...)
        case "":
            switch string(kv.value) {
            case "HttpOnly":
                c.httpOnly = true
            case "secure":
                c.secure = true
            }
        }
    }
    return nil
}

func appendCookiePart(dst, key, value []byte) []byte {
    dst = append(dst, ';', ' ')
    dst = append(dst, key...)
    dst = append(dst, '=')
    return append(dst, value...)
}

func getCookieKey(dst, src []byte) []byte {
    n := bytes.IndexByte(src, '=')
    if n >= 0 {
        src = src[:n]
    }
    return decodeCookieArg(dst, src, false)
}

func appendRequestCookieBytes(dst []byte, cookies []argsKV) []byte {
    for i, n := 0, len(cookies); i < n; i++ {
        kv := &cookies[i]
        if len(kv.key) > 0 {
            dst = append(dst, kv.key...)
            dst = append(dst, '=')
        }
        dst = append(dst, kv.value...)
        if i+1 < n {
            dst = append(dst, ';', ' ')
        }
    }
    return dst
}

func parseRequestCookies(cookies []argsKV, src []byte) []argsKV {
    var s cookieScanner
    s.b = src
    var kv *argsKV
    cookies, kv = allocArg(cookies)
    for s.next(kv) {
        if len(kv.key) > 0 || len(kv.value) > 0 {
            cookies, kv = allocArg(cookies)
        }
    }
    return releaseArg(cookies)
}

type cookieScanner struct {
    b []byte
}

func (s *cookieScanner) next(kv *argsKV) bool {
    b := s.b
    if len(b) == 0 {
        return false
    }

    isKey := true
    k := 0
    for i, c := range b {
        switch c {
        case '=':
            if isKey {
                isKey = false
                kv.key = decodeCookieArg(kv.key, b[:i], false)
                k = i + 1
            }
        case ';':
            if isKey {
                kv.key = kv.key[:0]
            }
            kv.value = decodeCookieArg(kv.value, b[k:i], true)
            s.b = b[i+1:]
            return true
        }
    }

    if isKey {
        kv.key = kv.key[:0]
    }
    kv.value = decodeCookieArg(kv.value, b[k:], true)
    s.b = b[len(b):]
    return true
}

func decodeCookieArg(dst, src []byte, skipQuotes bool) []byte {
    for len(src) > 0 && src[0] == ' ' {
        src = src[1:]
    }
    for len(src) > 0 && src[len(src)-1] == ' ' {
        src = src[:len(src)-1]
    }
    if skipQuotes {
        if len(src) > 1 && src[0] == '"' && src[len(src)-1] == '"' {
            src = src[1 : len(src)-1]
        }
    }
    return append(dst[:0], src...)
}
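The deleted Cookie helpers above form a small public API (Parse, Key, Value, Domain, String and friends). Purely as an illustration of that API, and not part of this commit's changes, here is a minimal sketch of parsing and building a Set-Cookie value with the upstream package, assuming github.com/valyala/fasthttp is still importable:

package main

import (
    "fmt"
    "log"

    "github.com/valyala/fasthttp"
)

func main() {
    // Parse a Set-Cookie value using the Cookie.Parse shown in the diff above.
    // Attribute names are lowercase to match the switch in ParseBytes.
    var c fasthttp.Cookie
    if err := c.Parse("sid=abc123; domain=example.com; path=/; HttpOnly"); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("key=%s value=%s domain=%s\n", c.Key(), c.Value(), c.Domain())

    // Build a cookie and serialize it back with Cookie.String.
    var out fasthttp.Cookie
    out.SetKey("token")
    out.SetValue("42")
    out.SetDomain("example.com")
    fmt.Println(out.String()) // token=42; domain=example.com
}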
40  vendor/github.com/valyala/fasthttp/doc.go  generated  vendored
@@ -1,40 +0,0 @@
/*
Package fasthttp provides fast HTTP server and client API.

Fasthttp provides the following features:

    * Optimized for speed. Easily handles more than 100K qps and more than 1M
      concurrent keep-alive connections on modern hardware.
    * Optimized for low memory usage.
    * Easy 'Connection: Upgrade' support via RequestCtx.Hijack.
    * Server supports requests' pipelining. Multiple requests may be read from
      a single network packet and multiple responses may be sent in a single
      network packet. This may be useful for highly loaded REST services.
    * Server provides the following anti-DoS limits:

        * The number of concurrent connections.
        * The number of concurrent connections per client IP.
        * The number of requests per connection.
        * Request read timeout.
        * Response write timeout.
        * Maximum request header size.
        * Maximum request body size.
        * Maximum request execution time.
        * Maximum keep-alive connection lifetime.
        * Early filtering out non-GET requests.

    * A lot of additional useful info is exposed to request handler:

        * Server and client address.
        * Per-request logger.
        * Unique request id.
        * Request start time.
        * Connection start time.
        * Request sequence number for the current connection.

    * Client supports automatic retry on idempotent requests' failure.
    * Fasthttp API is designed with the ability to extend existing client
      and server implementations or to write custom client and server
      implementations from scratch.
*/
package fasthttp
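For contrast with the net/http handlers this commit moves to, a hedged, minimal sketch of the handler style the package doc above describes, assuming the upstream fasthttp.ListenAndServe and RequestCtx API:

package main

import (
    "log"

    "github.com/valyala/fasthttp"
)

// In fasthttp everything hangs off *RequestCtx instead of the
// ResponseWriter/Request pair used by net/http.
func hello(ctx *fasthttp.RequestCtx) {
    ctx.SetContentType("text/plain; charset=utf-8")
    ctx.WriteString("hello from fasthttp\n")
}

func main() {
    if err := fasthttp.ListenAndServe(":8080", hello); err != nil {
        log.Fatalf("ListenAndServe: %v", err)
    }
}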
2  vendor/github.com/valyala/fasthttp/fasthttputil/doc.go  generated  vendored
@@ -1,2 +0,0 @@
// Package fasthttputil provides utility functions for fasthttp.
package fasthttputil
5  vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.key  generated  vendored
@@ -1,5 +0,0 @@
10  vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.pem  generated  vendored
@@ -1,10 +0,0 @@
84  vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go  generated  vendored
@@ -1,84 +0,0 @@
283  vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go  generated  vendored
@@ -1,283 +0,0 @@
28  vendor/github.com/valyala/fasthttp/fasthttputil/rsa.key  generated  vendored
@@ -1,28 +0,0 @@
17  vendor/github.com/valyala/fasthttp/fasthttputil/rsa.pem  generated  vendored
@@ -1,17 +0,0 @@
1252  vendor/github.com/valyala/fasthttp/fs.go  generated  vendored
File diff suppressed because it is too large
2091  vendor/github.com/valyala/fasthttp/header.go  generated  vendored
File diff suppressed because it is too large
1710  vendor/github.com/valyala/fasthttp/http.go  generated  vendored
File diff suppressed because it is too large
183  vendor/github.com/valyala/fasthttp/lbclient.go  generated  vendored
@@ -1,183 +0,0 @@
9  vendor/github.com/valyala/fasthttp/nocopy.go  generated  vendored
@@ -1,9 +0,0 @@
package fasthttp

// Embed this type into a struct, which mustn't be copied,
// so `go vet` gives a warning if this struct is copied.
//
// See https://github.com/golang/go/issues/8005#issuecomment-190753527 for details.
type noCopy struct{}

func (*noCopy) Lock() {}
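The deleted noCopy type exists only to trip go vet's copylocks check. A small, self-contained illustration of the same mechanism using sync.Mutex, which go vet also flags when a value containing it is copied; the struct and variable names here are made up for the example:

package main

import "sync"

// counter embeds a sync.Mutex, so copying a counter value copies the lock.
type counter struct {
    mu sync.Mutex
    n  int
}

func main() {
    var a counter
    a.n++
    b := a // `go vet` reports: assignment copies lock value to b
    _ = b
}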
100  vendor/github.com/valyala/fasthttp/peripconn.go  generated  vendored
@@ -1,100 +0,0 @@
1993  vendor/github.com/valyala/fasthttp/server.go  generated  vendored
File diff suppressed because it is too large
28  vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.key  generated  vendored
@@ -1,28 +0,0 @@
17  vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.pem  generated  vendored
@@ -1,17 +0,0 @@
3  vendor/github.com/valyala/fasthttp/stackless/doc.go  generated  vendored
@@ -1,3 +0,0 @@
// Package stackless provides functionality that may save stack space
// for high number of concurrently running goroutines.
package stackless
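The stackless package wraps stack-hungry functions so they run on a small pool of worker goroutines. An illustrative sketch of calling the removed NewFunc wrapper, based only on the NewFunc(f func(ctx interface{})) func(ctx interface{}) bool signature from the deleted sources; treat it as an assumption about the upstream API rather than part of this commit:

package main

import (
    "fmt"

    "github.com/valyala/fasthttp/stackless"
)

func main() {
    // NewFunc returns a wrapper that reports false when it is overloaded.
    double := stackless.NewFunc(func(ctx interface{}) {
        p := ctx.(*int)
        *p *= 2
    })

    n := 21
    if !double(&n) {
        fmt.Println("too busy, try again later")
        return
    }
    fmt.Println(n) // 42
}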
79  vendor/github.com/valyala/fasthttp/stackless/func.go  generated  vendored
@@ -1,79 +0,0 @@
138  vendor/github.com/valyala/fasthttp/stackless/writer.go  generated  vendored
@@ -1,138 +0,0 @@
176  vendor/github.com/valyala/fasthttp/status.go  generated  vendored
@@ -1,176 +0,0 @@
54  vendor/github.com/valyala/fasthttp/stream.go  generated  vendored
@@ -1,54 +0,0 @@
package fasthttp

import (
    "bufio"
    "io"
    "sync"

    "github.com/valyala/fasthttp/fasthttputil"
)

// StreamWriter must write data to w.
//
// Usually StreamWriter writes data to w in a loop (aka 'data streaming').
//
// StreamWriter must return immediately if w returns error.
//
// Since the written data is buffered, do not forget calling w.Flush
// when the data must be propagated to reader.
type StreamWriter func(w *bufio.Writer)

// NewStreamReader returns a reader, which replays all the data generated by sw.
//
// The returned reader may be passed to Response.SetBodyStream.
//
// Close must be called on the returned reader after all the required data
// has been read. Otherwise goroutine leak may occur.
//
// See also Response.SetBodyStreamWriter.
func NewStreamReader(sw StreamWriter) io.ReadCloser {
    pc := fasthttputil.NewPipeConns()
    pw := pc.Conn1()
    pr := pc.Conn2()

    var bw *bufio.Writer
    v := streamWriterBufPool.Get()
    if v == nil {
        bw = bufio.NewWriter(pw)
    } else {
        bw = v.(*bufio.Writer)
        bw.Reset(pw)
    }

    go func() {
        sw(bw)
        bw.Flush()
        pw.Close()

        streamWriterBufPool.Put(bw)
    }()

    return pr
}

var streamWriterBufPool sync.Pool
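A short usage sketch for the NewStreamReader helper deleted above, assuming the upstream fasthttp module; the writer goroutine is driven by the function passed in, and Close must be called so it is not leaked:

package main

import (
    "bufio"
    "fmt"
    "io/ioutil"
    "log"

    "github.com/valyala/fasthttp"
)

func main() {
    r := fasthttp.NewStreamReader(func(w *bufio.Writer) {
        for i := 1; i <= 3; i++ {
            fmt.Fprintf(w, "chunk %d\n", i)
            w.Flush() // propagate buffered data to the reader side
        }
    })
    defer r.Close()

    body, err := ioutil.ReadAll(r)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Print(string(body))
}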
73  vendor/github.com/valyala/fasthttp/strings.go  generated  vendored
@@ -1,73 +0,0 @@
vendor/github.com/valyala/fasthttp/tcpdialer.go (generated, vendored)
@@ -1,369 +0,0 @@
Entire 369-line file deleted; diff body omitted. tcpdialer.go provided Dial, DialTimeout, DialDualStack and DialDualStackTimeout, which cache resolved TCP addresses for DefaultDNSCacheDuration, dial the resolved addresses in round-robin order until a connection is established, bound dial concurrency with a concurrency channel and pooled timers, and return ErrDialTimeout when the deadline is exceeded.
vendor/github.com/valyala/fasthttp/timer.go (generated, vendored)
@@ -1,44 +0,0 @@
Entire 44-line file deleted; diff body omitted. timer.go held acquireTimer/releaseTimer helpers that recycle time.Timer values through a sync.Pool, stopping and draining each timer before returning it to the pool. A minimal sketch of that pattern follows below.
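For reference only: the most reusable idea in the removed timer.go is timer recycling via sync.Pool. The sketch below paraphrases that pattern; the names acquireTimer, releaseTimer and timerPool come from the deleted vendored code and are not used anywhere in this repository.

package main

import (
	"fmt"
	"sync"
	"time"
)

// timerPool recycles time.Timer values so hot paths avoid allocating a timer
// per operation, as the removed vendored timer.go did.
var timerPool sync.Pool

// acquireTimer returns a timer that fires after timeout, reusing a pooled one
// when available.
func acquireTimer(timeout time.Duration) *time.Timer {
	v := timerPool.Get()
	if v == nil {
		return time.NewTimer(timeout)
	}
	t := v.(*time.Timer)
	t.Reset(timeout)
	return t
}

// releaseTimer stops the timer, drains its channel if it already fired, and
// returns it to the pool so it can be reused safely.
func releaseTimer(t *time.Timer) {
	if !t.Stop() {
		select {
		case <-t.C:
		default:
		}
	}
	timerPool.Put(t)
}

func main() {
	t := acquireTimer(10 * time.Millisecond)
	<-t.C // wait for the timeout to fire
	releaseTimer(t)
	fmt.Println("timer fired and was returned to the pool")
}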
vendor/github.com/valyala/fasthttp/uri.go (generated, vendored)
@@ -1,520 +0,0 @@
Entire 520-line file deleted; diff body omitted. uri.go implemented the URI type and its pool (AcquireURI/ReleaseURI), parsing of scheme, host, path, query string and hash, path normalization (duplicate slashes, /./ and /foo/../ segments), and helpers such as RequestURI, FullURI, LastPathSegment, Update and QueryArgs.
vendor/github.com/valyala/fasthttp/uri_unix.go (generated, vendored)
@@ -1,12 +0,0 @@
-// +build !windows
-
-package fasthttp
-
-func addLeadingSlash(dst, src []byte) []byte {
-	// add leading slash for unix paths
-	if len(src) == 0 || src[0] != '/' {
-		dst = append(dst, '/')
-	}
-
-	return dst
-}
vendor/github.com/valyala/fasthttp/uri_windows.go (generated, vendored)
@@ -1,12 +0,0 @@
-// +build windows
-
-package fasthttp
-
-func addLeadingSlash(dst, src []byte) []byte {
-	// zero length and "C:/" case
-	if len(src) == 0 || (len(src) > 2 && src[1] != ':') {
-		dst = append(dst, '/')
-	}
-
-	return dst
-}
vendor/github.com/valyala/fasthttp/userdata.go (generated, vendored)
@@ -1,71 +0,0 @@
Entire 71-line file deleted; diff body omitted. userdata.go implemented the userData key/value store ([]userDataKV) with Set/SetBytes/Get/GetBytes and a Reset that closes any stored io.Closer values.
vendor/github.com/valyala/fasthttp/workerpool.go (generated, vendored)
@@ -1,231 +0,0 @@
Entire 231-line file deleted; diff body omitted. workerpool.go implemented the workerPool type, which serves accepted connections via a FILO stack of worker goroutines (keeping CPU caches warm, in theory), with a worker-count cap, idle-worker cleanup, graceful stop, and error logging that skips common network errors such as broken pipes and connection resets. A stripped-down sketch of the FILO idea follows below.
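For reference only: the core idea of the removed workerpool.go is that idle workers are kept on a stack, so the most recently parked goroutine (with the warmest caches) serves the next connection. The sketch below keeps only that idea and omits the real file's worker cap, idle cleanup and shutdown handling; the workerPool and workerChan names come from the deleted vendored code and are not used in this repository.

package main

import (
	"fmt"
	"io"
	"net"
	"sync"
)

type workerChan struct {
	ch chan net.Conn
}

// workerPool hands each connection to a goroutine taken from a FILO stack of
// idle workers, so the most recently used worker serves the next connection.
type workerPool struct {
	serve func(net.Conn) error

	lock  sync.Mutex
	ready []*workerChan // stack of idle workers
}

func (wp *workerPool) Serve(c net.Conn) {
	wp.getCh().ch <- c
}

func (wp *workerPool) getCh() *workerChan {
	var ch *workerChan
	wp.lock.Lock()
	if n := len(wp.ready) - 1; n >= 0 {
		ch = wp.ready[n] // pop the most recently parked worker (FILO)
		wp.ready = wp.ready[:n]
	}
	wp.lock.Unlock()
	if ch == nil {
		ch = &workerChan{ch: make(chan net.Conn, 1)}
		go wp.worker(ch) // lazily spawn a new worker
	}
	return ch
}

func (wp *workerPool) worker(ch *workerChan) {
	for c := range ch.ch {
		wp.serve(c)
		c.Close()
		wp.lock.Lock()
		wp.ready = append(wp.ready, ch) // park this worker back on the stack
		wp.lock.Unlock()
	}
}

func main() {
	var wg sync.WaitGroup
	wp := &workerPool{serve: func(c net.Conn) error {
		b, _ := io.ReadAll(c)
		fmt.Printf("worker received: %s\n", b)
		wg.Done()
		return nil
	}}

	client, server := net.Pipe()
	wg.Add(1)
	wp.Serve(server)
	io.WriteString(client, "ping")
	client.Close()
	wg.Wait()
}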
vendor/vendor.json (vendored)
@@ -20,66 +20,6 @@
 			"revision": "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a",
 			"revisionTime": "2015-10-22T06:55:26Z"
 		},
-		{
-			"checksumSHA1": "+IQN6csaE5uxhZJnulb6uwhVCr4=",
-			"path": "github.com/klauspost/compress/flate",
-			"revision": "f3dce52e0576655d55fd69e74b63da96ad1108f3",
-			"revisionTime": "2017-05-28T13:23:59Z"
-		},
-		{
-			"checksumSHA1": "kWBC7CTgppTdJFXizt4XkURbyCE=",
-			"path": "github.com/klauspost/compress/gzip",
-			"revision": "f3dce52e0576655d55fd69e74b63da96ad1108f3",
-			"revisionTime": "2017-05-28T13:23:59Z"
-		},
-		{
-			"checksumSHA1": "+azPXaZpPF14YHRghNAer13ThQU=",
-			"path": "github.com/klauspost/compress/zlib",
-			"revision": "f3dce52e0576655d55fd69e74b63da96ad1108f3",
-			"revisionTime": "2017-05-28T13:23:59Z"
-		},
-		{
-			"checksumSHA1": "oZnJ7hI35QaJqMyzayLPq1w0dcU=",
-			"path": "github.com/klauspost/cpuid",
-			"revision": "ae7887de9fa5d2db4eaa8174a7eff2c1ac00f2da",
-			"revisionTime": "2017-07-28T05:55:34Z"
-		},
-		{
-			"checksumSHA1": "6/zXof97s7P9tlNp3mUioXgeEVI=",
-			"path": "github.com/klauspost/crc32",
-			"revision": "bab58d77464aa9cf4e84200c3276da0831fe0c03",
-			"revisionTime": "2017-06-28T07:24:49Z"
-		},
-		{
-			"checksumSHA1": "+mB8aEvEg2wl3PoWZjAVfhGxtJA=",
-			"path": "github.com/qiangxue/fasthttp-routing",
-			"revision": "6ccdc2a18d8712f842de50ddcb8c9a3d025ddc71",
-			"revisionTime": "2016-02-25T05:06:29Z"
-		},
-		{
-			"checksumSHA1": "LTOa3BADhwvT0wFCknPueQALm8I=",
-			"path": "github.com/valyala/bytebufferpool",
-			"revision": "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7",
-			"revisionTime": "2016-08-17T18:16:52Z"
-		},
-		{
-			"checksumSHA1": "WbIdBYaWTfPb73xgCOoW/aeoSFU=",
-			"path": "github.com/valyala/fasthttp",
-			"revision": "ae643c872d2c060154a4fb2162dc1c0ab1693ccd",
-			"revisionTime": "2017-07-21T13:45:47Z"
-		},
-		{
-			"checksumSHA1": "nFdyJk6jdHzVNgEMdjDuWMk4z5o=",
-			"path": "github.com/valyala/fasthttp/fasthttputil",
-			"revision": "ae643c872d2c060154a4fb2162dc1c0ab1693ccd",
-			"revisionTime": "2017-07-21T13:45:47Z"
-		},
-		{
-			"checksumSHA1": "YXXy4b1yOQx/iL3Icv6svTmcGss=",
-			"path": "github.com/valyala/fasthttp/stackless",
-			"revision": "ae643c872d2c060154a4fb2162dc1c0ab1693ccd",
-			"revisionTime": "2017-07-21T13:45:47Z"
-		},
 		{
 			"checksumSHA1": "3SZTatHIy9OTKc95YlVfXKnoySg=",
 			"path": "gopkg.in/alecthomas/kingpin.v2",