mirror of https://github.com/jimeh/casecmp.git (synced 2026-02-19 02:16:40 +00:00)
Update dependencies
265 vendor/github.com/klauspost/compress/flate/gen.go (generated, vendored)
@@ -1,265 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// This program generates fixedhuff.go
-// Invoke as
-//
-//	go run gen.go -output fixedhuff.go
-
-package main
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"go/format"
-	"io/ioutil"
-	"log"
-)
-
-var filename = flag.String("output", "fixedhuff.go", "output file name")
-
-const maxCodeLen = 16
-
-// Note: the definition of the huffmanDecoder struct is copied from
-// inflate.go, as it is private to the implementation.
-
-// chunk & 15 is number of bits
-// chunk >> 4 is value, including table link
-
-const (
-	huffmanChunkBits  = 9
-	huffmanNumChunks  = 1 << huffmanChunkBits
-	huffmanCountMask  = 15
-	huffmanValueShift = 4
-)
-
-type huffmanDecoder struct {
-	min      int                      // the minimum code length
-	chunks   [huffmanNumChunks]uint32 // chunks as described above
-	links    [][]uint32               // overflow links
-	linkMask uint32                   // mask the width of the link table
-}
-
-// Initialize Huffman decoding tables from array of code lengths.
-// Following this function, h is guaranteed to be initialized into a complete
-// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
-// degenerate case where the tree has only a single symbol with length 1. Empty
-// trees are permitted.
-func (h *huffmanDecoder) init(bits []int) bool {
-	// Sanity enables additional runtime tests during Huffman
-	// table construction. It's intended to be used during
-	// development to supplement the currently ad-hoc unit tests.
-	const sanity = false
-
-	if h.min != 0 {
-		*h = huffmanDecoder{}
-	}
-
-	// Count number of codes of each length,
-	// compute min and max length.
-	var count [maxCodeLen]int
-	var min, max int
-	for _, n := range bits {
-		if n == 0 {
-			continue
-		}
-		if min == 0 || n < min {
-			min = n
-		}
-		if n > max {
-			max = n
-		}
-		count[n]++
-	}
-
-	// Empty tree. The decompressor.huffSym function will fail later if the tree
-	// is used. Technically, an empty tree is only valid for the HDIST tree and
-	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
-	// is guaranteed to fail since it will attempt to use the tree to decode the
-	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
-	// guaranteed to fail later since the compressed data section must be
-	// composed of at least one symbol (the end-of-block marker).
-	if max == 0 {
-		return true
-	}
-
-	code := 0
-	var nextcode [maxCodeLen]int
-	for i := min; i <= max; i++ {
-		code <<= 1
-		nextcode[i] = code
-		code += count[i]
-	}
-
-	// Check that the coding is complete (i.e., that we've
-	// assigned all 2-to-the-max possible bit sequences).
-	// Exception: To be compatible with zlib, we also need to
-	// accept degenerate single-code codings. See also
-	// TestDegenerateHuffmanCoding.
-	if code != 1<<uint(max) && !(code == 1 && max == 1) {
-		return false
-	}
-
-	h.min = min
-	if max > huffmanChunkBits {
-		numLinks := 1 << (uint(max) - huffmanChunkBits)
-		h.linkMask = uint32(numLinks - 1)
-
-		// create link tables
-		link := nextcode[huffmanChunkBits+1] >> 1
-		h.links = make([][]uint32, huffmanNumChunks-link)
-		for j := uint(link); j < huffmanNumChunks; j++ {
-			reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
-			reverse >>= uint(16 - huffmanChunkBits)
-			off := j - uint(link)
-			if sanity && h.chunks[reverse] != 0 {
-				panic("impossible: overwriting existing chunk")
-			}
-			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
-			h.links[off] = make([]uint32, numLinks)
-		}
-	}
-
-	for i, n := range bits {
-		if n == 0 {
-			continue
-		}
-		code := nextcode[n]
-		nextcode[n]++
-		chunk := uint32(i<<huffmanValueShift | n)
-		reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
-		reverse >>= uint(16 - n)
-		if n <= huffmanChunkBits {
-			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
-				// We should never need to overwrite
-				// an existing chunk. Also, 0 is
-				// never a valid chunk, because the
-				// lower 4 "count" bits should be
-				// between 1 and 15.
-				if sanity && h.chunks[off] != 0 {
-					panic("impossible: overwriting existing chunk")
-				}
-				h.chunks[off] = chunk
-			}
-		} else {
-			j := reverse & (huffmanNumChunks - 1)
-			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
-				// Longer codes should have been
-				// associated with a link table above.
-				panic("impossible: not an indirect chunk")
-			}
-			value := h.chunks[j] >> huffmanValueShift
-			linktab := h.links[value]
-			reverse >>= huffmanChunkBits
-			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
-				if sanity && linktab[off] != 0 {
-					panic("impossible: overwriting existing chunk")
-				}
-				linktab[off] = chunk
-			}
-		}
-	}
-
-	if sanity {
-		// Above we've sanity checked that we never overwrote
-		// an existing entry. Here we additionally check that
-		// we filled the tables completely.
-		for i, chunk := range h.chunks {
-			if chunk == 0 {
-				// As an exception, in the degenerate
-				// single-code case, we allow odd
-				// chunks to be missing.
-				if code == 1 && i%2 == 1 {
-					continue
-				}
-				panic("impossible: missing chunk")
-			}
-		}
-		for _, linktab := range h.links {
-			for _, chunk := range linktab {
-				if chunk == 0 {
-					panic("impossible: missing chunk")
-				}
-			}
-		}
-	}
-
-	return true
-}
-
-func main() {
-	flag.Parse()
-
-	var h huffmanDecoder
-	var bits [288]int
-	initReverseByte()
-	for i := 0; i < 144; i++ {
-		bits[i] = 8
-	}
-	for i := 144; i < 256; i++ {
-		bits[i] = 9
-	}
-	for i := 256; i < 280; i++ {
-		bits[i] = 7
-	}
-	for i := 280; i < 288; i++ {
-		bits[i] = 8
-	}
-	h.init(bits[:])
-	if h.links != nil {
-		log.Fatal("Unexpected links table in fixed Huffman decoder")
-	}
-
-	var buf bytes.Buffer
-
-	fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.`+"\n\n")
-
-	fmt.Fprintln(&buf, "package flate")
-	fmt.Fprintln(&buf)
-	fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT")
-	fmt.Fprintln(&buf)
-	fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{")
-	fmt.Fprintf(&buf, "\t%d,\n", h.min)
-	fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{")
-	for i := 0; i < huffmanNumChunks; i++ {
-		if i&7 == 0 {
-			fmt.Fprintf(&buf, "\t\t")
-		} else {
-			fmt.Fprintf(&buf, " ")
-		}
-		fmt.Fprintf(&buf, "0x%04x,", h.chunks[i])
-		if i&7 == 7 {
-			fmt.Fprintln(&buf)
-		}
-	}
-	fmt.Fprintln(&buf, "\t},")
-	fmt.Fprintln(&buf, "\tnil, 0,")
-	fmt.Fprintln(&buf, "}")
-
-	data, err := format.Source(buf.Bytes())
-	if err != nil {
-		log.Fatal(err)
-	}
-	err = ioutil.WriteFile(*filename, data, 0644)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-var reverseByte [256]byte
-
-func initReverseByte() {
-	for x := 0; x < 256; x++ {
-		var result byte
-		for i := uint(0); i < 8; i++ {
-			result |= byte(((x >> i) & 1) << (7 - i))
-		}
-		reverseByte[x] = result
-	}
-}
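Note: the deleted generator packs each decoder table entry as described by the "chunk & 15 / chunk >> 4" comment above. A minimal standalone sketch (illustrative, not part of the commit) of packing and unpacking one such entry:

package main

import "fmt"

func main() {
	const huffmanValueShift = 4
	const huffmanCountMask = 15

	// A hypothetical chunk for symbol 256 encoded with a 7-bit code.
	chunk := uint32(256<<huffmanValueShift | 7)

	bits := chunk & huffmanCountMask    // number of bits in the code -> 7
	value := chunk >> huffmanValueShift // decoded symbol or link index -> 256

	fmt.Println(bits, value) // 7 256
}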
44 vendor/github.com/klauspost/compress/flate/inflate.go (generated, vendored)
@@ -15,7 +15,8 @@ import (
 )
 
 const (
-	maxCodeLen = 16 // max length of Huffman code
+	maxCodeLen     = 16 // max length of Huffman code
+	maxCodeLenMask = 15 // mask for max length of Huffman code
 	// The next three numbers come from the RFC section 3.2.7, with the
 	// additional proviso in section 3.2.5 which implies that distance codes
 	// 30 and 31 should never occur in compressed data.
@@ -101,10 +102,10 @@ const (
 )
 
 type huffmanDecoder struct {
-	min      int                      // the minimum code length
-	chunks   [huffmanNumChunks]uint32 // chunks as described above
-	links    [][]uint32               // overflow links
-	linkMask uint32                   // mask the width of the link table
+	min      int                       // the minimum code length
+	chunks   *[huffmanNumChunks]uint32 // chunks as described above
+	links    [][]uint32                // overflow links
+	linkMask uint32                    // mask the width of the link table
 }
 
 // Initialize Huffman decoding tables from array of code lengths.
@@ -118,8 +119,11 @@ func (h *huffmanDecoder) init(bits []int) bool {
 	// development to supplement the currently ad-hoc unit tests.
 	const sanity = false
 
+	if h.chunks == nil {
+		h.chunks = &[huffmanNumChunks]uint32{}
+	}
 	if h.min != 0 {
-		*h = huffmanDecoder{}
+		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
 	}
 
 	// Count number of codes of each length,
@@ -136,7 +140,7 @@ func (h *huffmanDecoder) init(bits []int) bool {
 		if n > max {
 			max = n
 		}
-		count[n]++
+		count[n&maxCodeLenMask]++
 	}
 
 	// Empty tree. The decompressor.huffSym function will fail later if the tree
@@ -154,8 +158,8 @@ func (h *huffmanDecoder) init(bits []int) bool {
 	var nextcode [maxCodeLen]int
 	for i := min; i <= max; i++ {
 		code <<= 1
-		nextcode[i] = code
-		code += count[i]
+		nextcode[i&maxCodeLenMask] = code
+		code += count[i&maxCodeLenMask]
 	}
 
 	// Check that the coding is complete (i.e., that we've
@@ -168,13 +172,22 @@ func (h *huffmanDecoder) init(bits []int) bool {
 	}
 
 	h.min = min
+	chunks := h.chunks[:]
+	for i := range chunks {
+		chunks[i] = 0
+	}
+
 	if max > huffmanChunkBits {
 		numLinks := 1 << (uint(max) - huffmanChunkBits)
 		h.linkMask = uint32(numLinks - 1)
 
 		// create link tables
 		link := nextcode[huffmanChunkBits+1] >> 1
-		h.links = make([][]uint32, huffmanNumChunks-link)
+		if cap(h.links) < huffmanNumChunks-link {
+			h.links = make([][]uint32, huffmanNumChunks-link)
+		} else {
+			h.links = h.links[:huffmanNumChunks-link]
+		}
 		for j := uint(link); j < huffmanNumChunks; j++ {
 			reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
 			reverse >>= uint(16 - huffmanChunkBits)
@@ -183,8 +196,15 @@ func (h *huffmanDecoder) init(bits []int) bool {
 				panic("impossible: overwriting existing chunk")
 			}
 			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
-			h.links[off] = make([]uint32, numLinks)
+			if cap(h.links[off]) < numLinks {
+				h.links[off] = make([]uint32, numLinks)
+			} else {
+				links := h.links[off][:0]
+				h.links[off] = links[:numLinks]
+			}
 		}
+	} else {
+		h.links = h.links[:0]
 	}
 
 	for i, n := range bits {
@@ -799,6 +819,8 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error {
 		r:        makeReader(r),
 		bits:     f.bits,
 		codebits: f.codebits,
+		h1:       f.h1,
+		h2:       f.h2,
 		dict:     f.dict,
 		step:     (*decompressor).nextBlock,
 	}
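Note: the inflate.go changes above repeatedly apply one idiom — keep the old backing array when its capacity suffices and only reslice it, allocating just when it is too small. A self-contained sketch of that idiom (illustrative; the real code also zeroes the reused entries):

package main

import "fmt"

// reuse grows buf only when its capacity is insufficient; otherwise it
// reslices the existing backing array, as the patched init does for
// h.chunks and h.links.
func reuse(buf []uint32, n int) []uint32 {
	if cap(buf) < n {
		return make([]uint32, n)
	}
	return buf[:n]
}

func main() {
	b := make([]uint32, 4, 16)
	b = reuse(b, 10)            // reslices, no allocation
	fmt.Println(len(b), cap(b)) // 10 16
	b = reuse(b, 32)            // capacity too small, allocates
	fmt.Println(len(b), cap(b)) // 32 32
}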
2 vendor/github.com/klauspost/compress/gzip/gunzip.go (generated, vendored)
@@ -10,11 +10,11 @@ import (
 	"bufio"
 	"encoding/binary"
 	"errors"
+	"hash/crc32"
 	"io"
 	"time"
 
 	"github.com/klauspost/compress/flate"
-	"github.com/klauspost/crc32"
 )
 
 const (
2 vendor/github.com/klauspost/compress/gzip/gzip.go (generated, vendored)
@@ -7,10 +7,10 @@ package gzip
 import (
 	"errors"
 	"fmt"
+	"hash/crc32"
 	"io"
 
 	"github.com/klauspost/compress/flate"
-	"github.com/klauspost/crc32"
 )
 
 // These constants are copied from the flate package, so that code that imports
12 vendor/github.com/klauspost/cpuid/cpuid.go (generated, vendored)
@@ -192,7 +192,7 @@ func Detect() {
 	CPU.CacheLine = cacheLine()
 	CPU.Family, CPU.Model = familyModel()
 	CPU.Features = support()
-	CPU.SGX = sgx(CPU.Features&SGX != 0)
+	CPU.SGX = hasSGX(CPU.Features&SGX != 0)
 	CPU.ThreadsPerCore = threadsPerCore()
 	CPU.LogicalCores = logicalCores()
 	CPU.PhysicalCores = physicalCores()
@@ -437,14 +437,22 @@ func (c CPUInfo) ERMS() bool {
 	return c.Features&ERMS != 0
 }
 
+// RDTSCP Instruction is available.
+func (c CPUInfo) RDTSCP() bool {
+	return c.Features&RDTSCP != 0
+}
+
+// CX16 indicates if CMPXCHG16B instruction is available.
+func (c CPUInfo) CX16() bool {
+	return c.Features&CX16 != 0
+}
+
 // TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection.
 // So TSX simply checks that.
 func (c CPUInfo) TSX() bool {
 	return c.Features&(MPX|RTM) == MPX|RTM
 }
 
 // Atom indicates an Atom processor
 func (c CPUInfo) Atom() bool {
 	return c.Features&ATOM != 0
@@ -757,7 +765,7 @@ type SGXSupport struct {
 	MaxEnclaveSize64 int64
 }
 
-func sgx(available bool) (rval SGXSupport) {
+func hasSGX(available bool) (rval SGXSupport) {
 	rval.Available = available
 
 	if !available {
1 vendor/github.com/klauspost/cpuid/generate.go (generated, vendored)
@@ -1,3 +1,4 @@
 package cpuid
 
 //go:generate go run private-gen.go
+//go:generate gofmt -w ./private
476 vendor/github.com/klauspost/cpuid/private-gen.go (generated, vendored)
@@ -1,476 +0,0 @@
-// +build ignore
-
-package main
-
-import (
-	"bytes"
-	"fmt"
-	"go/ast"
-	"go/parser"
-	"go/printer"
-	"go/token"
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-	"reflect"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-var inFiles = []string{"cpuid.go", "cpuid_test.go"}
-var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
-var fileSet = token.NewFileSet()
-var reWrites = []rewrite{
-	initRewrite("CPUInfo -> cpuInfo"),
-	initRewrite("Vendor -> vendor"),
-	initRewrite("Flags -> flags"),
-	initRewrite("Detect -> detect"),
-	initRewrite("CPU -> cpu"),
-}
-var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
-	// cpuid_test.go
-	"t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
-}
-
-var excludePrefixes = []string{"test", "benchmark"}
-
-func main() {
-	Package := "private"
-	parserMode := parser.ParseComments
-	exported := make(map[string]rewrite)
-	for _, file := range inFiles {
-		in, err := os.Open(file)
-		if err != nil {
-			log.Fatalf("opening input", err)
-		}
-
-		src, err := ioutil.ReadAll(in)
-		if err != nil {
-			log.Fatalf("reading input", err)
-		}
-
-		astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
-		if err != nil {
-			log.Fatalf("parsing input", err)
-		}
-
-		for _, rw := range reWrites {
-			astfile = rw(astfile)
-		}
-
-		// Inspect the AST and print all identifiers and literals.
-		var startDecl token.Pos
-		var endDecl token.Pos
-		ast.Inspect(astfile, func(n ast.Node) bool {
-			var s string
-			switch x := n.(type) {
-			case *ast.Ident:
-				if x.IsExported() {
-					t := strings.ToLower(x.Name)
-					for _, pre := range excludePrefixes {
-						if strings.HasPrefix(t, pre) {
-							return true
-						}
-					}
-					if excludeNames[t] != true {
-						//if x.Pos() > startDecl && x.Pos() < endDecl {
-						exported[x.Name] = initRewrite(x.Name + " -> " + t)
-					}
-				}
-
-			case *ast.GenDecl:
-				if x.Tok == token.CONST && x.Lparen > 0 {
-					startDecl = x.Lparen
-					endDecl = x.Rparen
-					// fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
-				}
-			}
-			if s != "" {
-				fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
-			}
-			return true
-		})
-
-		for _, rw := range exported {
-			astfile = rw(astfile)
-		}
-
-		var buf bytes.Buffer
-
-		printer.Fprint(&buf, fileSet, astfile)
-
-		// Remove package documentation and insert information
-		s := buf.String()
-		ind := strings.Index(buf.String(), "\npackage cpuid")
-		s = s[ind:]
-		s = "// Generated, DO NOT EDIT,\n" +
-			"// but copy it to your own project and rename the package.\n" +
-			"// See more at http://github.com/klauspost/cpuid\n" +
-			s
-
-		outputName := Package + string(os.PathSeparator) + file
-
-		err = ioutil.WriteFile(outputName, []byte(s), 0644)
-		if err != nil {
-			log.Fatalf("writing output: %s", err)
-		}
-		log.Println("Generated", outputName)
-	}
-
-	for _, file := range copyFiles {
-		dst := ""
-		if strings.HasPrefix(file, "cpuid") {
-			dst = Package + string(os.PathSeparator) + file
-		} else {
-			dst = Package + string(os.PathSeparator) + "cpuid_" + file
-		}
-		err := copyFile(file, dst)
-		if err != nil {
-			log.Fatalf("copying file: %s", err)
-		}
-		log.Println("Copied", dst)
-	}
-}
-
-// CopyFile copies a file from src to dst. If src and dst files exist, and are
-// the same, then return success. Copy the file contents from src to dst.
-func copyFile(src, dst string) (err error) {
-	sfi, err := os.Stat(src)
-	if err != nil {
-		return
-	}
-	if !sfi.Mode().IsRegular() {
-		// cannot copy non-regular files (e.g., directories,
-		// symlinks, devices, etc.)
-		return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
-	}
-	dfi, err := os.Stat(dst)
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return
-		}
-	} else {
-		if !(dfi.Mode().IsRegular()) {
-			return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
-		}
-		if os.SameFile(sfi, dfi) {
-			return
-		}
-	}
-	err = copyFileContents(src, dst)
-	return
-}
-
-// copyFileContents copies the contents of the file named src to the file named
-// by dst. The file will be created if it does not already exist. If the
-// destination file exists, all it's contents will be replaced by the contents
-// of the source file.
-func copyFileContents(src, dst string) (err error) {
-	in, err := os.Open(src)
-	if err != nil {
-		return
-	}
-	defer in.Close()
-	out, err := os.Create(dst)
-	if err != nil {
-		return
-	}
-	defer func() {
-		cerr := out.Close()
-		if err == nil {
-			err = cerr
-		}
-	}()
-	if _, err = io.Copy(out, in); err != nil {
-		return
-	}
-	err = out.Sync()
-	return
-}
-
-type rewrite func(*ast.File) *ast.File
-
-// Mostly copied from gofmt
-func initRewrite(rewriteRule string) rewrite {
-	f := strings.Split(rewriteRule, "->")
-	if len(f) != 2 {
-		fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
-		os.Exit(2)
-	}
-	pattern := parseExpr(f[0], "pattern")
-	replace := parseExpr(f[1], "replacement")
-	return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
-}
-
-// parseExpr parses s as an expression.
-// It might make sense to expand this to allow statement patterns,
-// but there are problems with preserving formatting and also
-// with what a wildcard for a statement looks like.
-func parseExpr(s, what string) ast.Expr {
-	x, err := parser.ParseExpr(s)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
-		os.Exit(2)
-	}
-	return x
-}
-
-// Keep this function for debugging.
-/*
-func dump(msg string, val reflect.Value) {
-	fmt.Printf("%s:\n", msg)
-	ast.Print(fileSet, val.Interface())
-	fmt.Println()
-}
-*/
-
-// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
-func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
-	cmap := ast.NewCommentMap(fileSet, p, p.Comments)
-	m := make(map[string]reflect.Value)
-	pat := reflect.ValueOf(pattern)
-	repl := reflect.ValueOf(replace)
-
-	var rewriteVal func(val reflect.Value) reflect.Value
-	rewriteVal = func(val reflect.Value) reflect.Value {
-		// don't bother if val is invalid to start with
-		if !val.IsValid() {
-			return reflect.Value{}
-		}
-		for k := range m {
-			delete(m, k)
-		}
-		val = apply(rewriteVal, val)
-		if match(m, pat, val) {
-			val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
-		}
-		return val
-	}
-
-	r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
-	r.Comments = cmap.Filter(r).Comments() // recreate comments list
-	return r
-}
-
-// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
-func set(x, y reflect.Value) {
-	// don't bother if x cannot be set or y is invalid
-	if !x.CanSet() || !y.IsValid() {
-		return
-	}
-	defer func() {
-		if x := recover(); x != nil {
-			if s, ok := x.(string); ok &&
-				(strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
-				// x cannot be set to y - ignore this rewrite
-				return
-			}
-			panic(x)
-		}
-	}()
-	x.Set(y)
-}
-
-// Values/types for special cases.
-var (
-	objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
-	scopePtrNil  = reflect.ValueOf((*ast.Scope)(nil))
-
-	identType     = reflect.TypeOf((*ast.Ident)(nil))
-	objectPtrType = reflect.TypeOf((*ast.Object)(nil))
-	positionType  = reflect.TypeOf(token.NoPos)
-	callExprType  = reflect.TypeOf((*ast.CallExpr)(nil))
-	scopePtrType  = reflect.TypeOf((*ast.Scope)(nil))
-)
-
-// apply replaces each AST field x in val with f(x), returning val.
-// To avoid extra conversions, f operates on the reflect.Value form.
-func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
-	if !val.IsValid() {
-		return reflect.Value{}
-	}
-
-	// *ast.Objects introduce cycles and are likely incorrect after
-	// rewrite; don't follow them but replace with nil instead
-	if val.Type() == objectPtrType {
-		return objectPtrNil
-	}
-
-	// similarly for scopes: they are likely incorrect after a rewrite;
-	// replace them with nil
-	if val.Type() == scopePtrType {
-		return scopePtrNil
-	}
-
-	switch v := reflect.Indirect(val); v.Kind() {
-	case reflect.Slice:
-		for i := 0; i < v.Len(); i++ {
-			e := v.Index(i)
-			set(e, f(e))
-		}
-	case reflect.Struct:
-		for i := 0; i < v.NumField(); i++ {
-			e := v.Field(i)
-			set(e, f(e))
-		}
-	case reflect.Interface:
-		e := v.Elem()
-		set(v, f(e))
-	}
-	return val
-}
-
-func isWildcard(s string) bool {
-	rune, size := utf8.DecodeRuneInString(s)
-	return size == len(s) && unicode.IsLower(rune)
-}
-
-// match returns true if pattern matches val,
-// recording wildcard submatches in m.
-// If m == nil, match checks whether pattern == val.
-func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
-	// Wildcard matches any expression. If it appears multiple
-	// times in the pattern, it must match the same expression
-	// each time.
-	if m != nil && pattern.IsValid() && pattern.Type() == identType {
-		name := pattern.Interface().(*ast.Ident).Name
-		if isWildcard(name) && val.IsValid() {
-			// wildcards only match valid (non-nil) expressions.
-			if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
-				if old, ok := m[name]; ok {
-					return match(nil, old, val)
-				}
-				m[name] = val
-				return true
-			}
-		}
-	}
-
-	// Otherwise, pattern and val must match recursively.
-	if !pattern.IsValid() || !val.IsValid() {
-		return !pattern.IsValid() && !val.IsValid()
-	}
-	if pattern.Type() != val.Type() {
-		return false
-	}
-
-	// Special cases.
-	switch pattern.Type() {
-	case identType:
-		// For identifiers, only the names need to match
-		// (and none of the other *ast.Object information).
-		// This is a common case, handle it all here instead
-		// of recursing down any further via reflection.
-		p := pattern.Interface().(*ast.Ident)
-		v := val.Interface().(*ast.Ident)
-		return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
-	case objectPtrType, positionType:
-		// object pointers and token positions always match
-		return true
-	case callExprType:
-		// For calls, the Ellipsis fields (token.Position) must
-		// match since that is how f(x) and f(x...) are different.
-		// Check them here but fall through for the remaining fields.
-		p := pattern.Interface().(*ast.CallExpr)
-		v := val.Interface().(*ast.CallExpr)
-		if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
-			return false
-		}
-	}
-
-	p := reflect.Indirect(pattern)
-	v := reflect.Indirect(val)
-	if !p.IsValid() || !v.IsValid() {
-		return !p.IsValid() && !v.IsValid()
-	}
-
-	switch p.Kind() {
-	case reflect.Slice:
-		if p.Len() != v.Len() {
-			return false
-		}
-		for i := 0; i < p.Len(); i++ {
-			if !match(m, p.Index(i), v.Index(i)) {
-				return false
-			}
-		}
-		return true
-
-	case reflect.Struct:
-		for i := 0; i < p.NumField(); i++ {
-			if !match(m, p.Field(i), v.Field(i)) {
-				return false
-			}
-		}
-		return true
-
-	case reflect.Interface:
-		return match(m, p.Elem(), v.Elem())
-	}
-
-	// Handle token integers, etc.
-	return p.Interface() == v.Interface()
-}
-
-// subst returns a copy of pattern with values from m substituted in place
-// of wildcards and pos used as the position of tokens from the pattern.
-// if m == nil, subst returns a copy of pattern and doesn't change the line
-// number information.
-func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
-	if !pattern.IsValid() {
-		return reflect.Value{}
-	}
-
-	// Wildcard gets replaced with map value.
-	if m != nil && pattern.Type() == identType {
-		name := pattern.Interface().(*ast.Ident).Name
-		if isWildcard(name) {
-			if old, ok := m[name]; ok {
-				return subst(nil, old, reflect.Value{})
-			}
-		}
-	}
-
-	if pos.IsValid() && pattern.Type() == positionType {
-		// use new position only if old position was valid in the first place
-		if old := pattern.Interface().(token.Pos); !old.IsValid() {
-			return pattern
-		}
-		return pos
-	}
-
-	// Otherwise copy.
-	switch p := pattern; p.Kind() {
-	case reflect.Slice:
-		v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
-		for i := 0; i < p.Len(); i++ {
-			v.Index(i).Set(subst(m, p.Index(i), pos))
-		}
-		return v
-
-	case reflect.Struct:
-		v := reflect.New(p.Type()).Elem()
-		for i := 0; i < p.NumField(); i++ {
-			v.Field(i).Set(subst(m, p.Field(i), pos))
-		}
-		return v
-
-	case reflect.Ptr:
-		v := reflect.New(p.Type()).Elem()
-		if elem := p.Elem(); elem.IsValid() {
-			v.Set(subst(m, elem, pos).Addr())
-		}
-		return v
-
-	case reflect.Interface:
-		v := reflect.New(p.Type()).Elem()
-		if elem := p.Elem(); elem.IsValid() {
-			v.Set(subst(m, elem, pos))
-		}
-		return v
-	}
-
-	return pattern
-}
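Note: the deleted private-gen.go is built around gofmt-style "pattern -> replacement" rules whose two sides are parsed as Go expressions. A tiny standalone demonstration of that idea (illustrative, not from the commit):

package main

import (
	"fmt"
	"go/parser"
)

func main() {
	// Both sides of a rewrite rule must parse as valid Go expressions.
	pat, err1 := parser.ParseExpr("CPUInfo")
	rep, err2 := parser.ParseExpr("cpuInfo")
	if err1 != nil || err2 != nil {
		panic("rewrite rule sides must be valid Go expressions")
	}
	fmt.Printf("%T -> %T\n", pat, rep) // *ast.Ident -> *ast.Ident
}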
7 vendor/github.com/klauspost/crc32/README.md (generated, vendored)
@@ -1,5 +1,10 @@
+# Not needed!
+
+If you use Go 1.7 or later, there is no reason to use this package any more, since optimizations have been merged into the standard library.
+
+The following reposiitory and documentation is left for historical reasons (and to not break exisiting code).
 # crc32
 CRC32 hash with x64 optimizations
 
 This package is a drop-in replacement for the standard library `hash/crc32` package, that features SSE 4.2 optimizations on x64 platforms, for a 10x speedup.
2 vendor/github.com/klauspost/crc32/crc32_s390x.s (generated, vendored)
@@ -226,7 +226,7 @@ final_fold:
 	// Note: To compensate the division by x^32, use the vector unpack
 	// instruction to move the leftmost word into the leftmost doubleword
 	// of the vector register. The rightmost doubleword is multiplied
-	// with zero to not contribute to the intermedate results.
+	// with zero to not contribute to the intermediate results.
 
 	// T1(x) = floor( R(x) / x^32 )	GF2MUL u
 	VUPLLF	V1, V2
1 vendor/github.com/valyala/fasthttp/README.md (generated, vendored)
@@ -558,6 +558,7 @@ uintBuf := fasthttp.AppendUint(nil, 1234)
 * [Iris](https://github.com/kataras/iris)
 * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing)
 * [fasthttprouter](https://github.com/buaazp/fasthttprouter)
+* [gramework](https://github.com/gramework/gramework)
 * [lu](https://github.com/vincentLiuxiang/lu)
 
 See also [this issue](https://github.com/valyala/fasthttp/issues/9) for more info.
69 vendor/github.com/valyala/fasthttp/args.go (generated, vendored)
@@ -428,15 +428,15 @@ func (s *argsScanner) next(kv *argsKV) bool {
 		case '=':
 			if isKey {
 				isKey = false
-				kv.key = decodeArg(kv.key, s.b[:i], true)
+				kv.key = decodeArgAppend(kv.key[:0], s.b[:i])
 				k = i + 1
 			}
 		case '&':
 			if isKey {
-				kv.key = decodeArg(kv.key, s.b[:i], true)
+				kv.key = decodeArgAppend(kv.key[:0], s.b[:i])
 				kv.value = kv.value[:0]
 			} else {
-				kv.value = decodeArg(kv.value, s.b[k:i], true)
+				kv.value = decodeArgAppend(kv.value[:0], s.b[k:i])
 			}
 			s.b = s.b[i+1:]
 			return true
@@ -444,35 +444,37 @@ func (s *argsScanner) next(kv *argsKV) bool {
 	}
 
 	if isKey {
-		kv.key = decodeArg(kv.key, s.b, true)
+		kv.key = decodeArgAppend(kv.key[:0], s.b)
 		kv.value = kv.value[:0]
 	} else {
-		kv.value = decodeArg(kv.value, s.b[k:], true)
+		kv.value = decodeArgAppend(kv.value[:0], s.b[k:])
 	}
 	s.b = s.b[len(s.b):]
 	return true
 }
 
-func decodeArg(dst, src []byte, decodePlus bool) []byte {
-	return decodeArgAppend(dst[:0], src, decodePlus)
-}
+func decodeArgAppend(dst, src []byte) []byte {
+	if bytes.IndexByte(src, '%') < 0 && bytes.IndexByte(src, '+') < 0 {
+		// fast path: src doesn't contain encoded chars
+		return append(dst, src...)
+	}
 
-func decodeArgAppend(dst, src []byte, decodePlus bool) []byte {
-	for i, n := 0, len(src); i < n; i++ {
+	// slow path
+	for i := 0; i < len(src); i++ {
 		c := src[i]
 		if c == '%' {
-			if i+2 >= n {
+			if i+2 >= len(src) {
 				return append(dst, src[i:]...)
 			}
-			x1 := hexbyte2int(src[i+1])
-			x2 := hexbyte2int(src[i+2])
-			if x1 < 0 || x2 < 0 {
-				dst = append(dst, c)
+			x2 := hex2intTable[src[i+2]]
+			x1 := hex2intTable[src[i+1]]
+			if x1 == 16 || x2 == 16 {
+				dst = append(dst, '%')
 			} else {
-				dst = append(dst, byte(x1<<4|x2))
+				dst = append(dst, x1<<4|x2)
				i += 2
 			}
-		} else if decodePlus && c == '+' {
+		} else if c == '+' {
 			dst = append(dst, ' ')
 		} else {
 			dst = append(dst, c)
@@ -480,3 +482,36 @@ func decodeArgAppend(dst, src []byte, decodePlus bool) []byte {
 	}
 	return dst
 }
+
+// decodeArgAppendNoPlus is almost identical to decodeArgAppend, but it doesn't
+// substitute '+' with ' '.
+//
+// The function is copy-pasted from decodeArgAppend due to the preformance
+// reasons only.
+func decodeArgAppendNoPlus(dst, src []byte) []byte {
+	if bytes.IndexByte(src, '%') < 0 {
+		// fast path: src doesn't contain encoded chars
+		return append(dst, src...)
+	}
+
+	// slow path
+	for i := 0; i < len(src); i++ {
+		c := src[i]
+		if c == '%' {
+			if i+2 >= len(src) {
+				return append(dst, src[i:]...)
+			}
+			x2 := hex2intTable[src[i+2]]
+			x1 := hex2intTable[src[i+1]]
+			if x1 == 16 || x2 == 16 {
+				dst = append(dst, '%')
+			} else {
+				dst = append(dst, x1<<4|x2)
+				i += 2
+			}
+		} else {
+			dst = append(dst, c)
+		}
+	}
+	return dst
+}
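Note: the rewritten decodeArgAppend replaces the old "x < 0" checks with a lookup table in which every non-hex byte maps to the sentinel value 16, so one comparison covers validity. A self-contained sketch of that trick (illustrative, not part of the diff):

package main

import "fmt"

var hex2int = func() [256]byte {
	var t [256]byte
	for i := range t {
		t[i] = 16 // sentinel: not a hex digit
	}
	for c := '0'; c <= '9'; c++ {
		t[c] = byte(c - '0')
	}
	for c := 'a'; c <= 'f'; c++ {
		t[c] = byte(c-'a') + 10
	}
	for c := 'A'; c <= 'F'; c++ {
		t[c] = byte(c-'A') + 10
	}
	return t
}()

func main() {
	x1, x2 := hex2int['2'], hex2int['F']
	if x1 != 16 && x2 != 16 {
		fmt.Printf("%%2F decodes to %q\n", x1<<4|x2) // '/'
	}
}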
69 vendor/github.com/valyala/fasthttp/bytesconv.go (generated, vendored)
@@ -9,6 +9,7 @@ import (
 	"math"
 	"net"
 	"reflect"
+	"strings"
 	"sync"
 	"time"
 	"unsafe"
@@ -16,6 +17,16 @@ import (
 
 // AppendHTMLEscape appends html-escaped s to dst and returns the extended dst.
 func AppendHTMLEscape(dst []byte, s string) []byte {
+	if strings.IndexByte(s, '<') < 0 &&
+		strings.IndexByte(s, '>') < 0 &&
+		strings.IndexByte(s, '"') < 0 &&
+		strings.IndexByte(s, '\'') < 0 {
+
+		// fast path - nothing to escape
+		return append(dst, s...)
+	}
+
+	// slow path
 	var prev int
 	var sub string
 	for i, n := 0, len(s); i < n; i++ {
@@ -254,8 +265,8 @@ func readHexInt(r *bufio.Reader) (int, error) {
 		}
 		return -1, err
 	}
-	k = hexbyte2int(c)
-	if k < 0 {
+	k = int(hex2intTable[c])
+	if k == 16 {
 		if i == 0 {
 			return -1, errEmptyHexNum
 		}
@@ -313,42 +324,49 @@ func hexCharUpper(c byte) byte {
 var hex2intTable = func() []byte {
 	b := make([]byte, 255)
 	for i := byte(0); i < 255; i++ {
-		c := byte(0)
+		c := byte(16)
 		if i >= '0' && i <= '9' {
-			c = 1 + i - '0'
+			c = i - '0'
 		} else if i >= 'a' && i <= 'f' {
-			c = 1 + i - 'a' + 10
+			c = i - 'a' + 10
 		} else if i >= 'A' && i <= 'F' {
-			c = 1 + i - 'A' + 10
+			c = i - 'A' + 10
 		}
 		b[i] = c
 	}
 	return b
 }()
 
-func hexbyte2int(c byte) int {
-	return int(hex2intTable[c]) - 1
-}
-
 const toLower = 'a' - 'A'
 
-func uppercaseByte(p *byte) {
-	c := *p
-	if c >= 'a' && c <= 'z' {
-		*p = c - toLower
+var toLowerTable = func() [256]byte {
+	var a [256]byte
+	for i := 0; i < 256; i++ {
+		c := byte(i)
+		if c >= 'A' && c <= 'Z' {
+			c += toLower
+		}
+		a[i] = c
 	}
-}
+	return a
+}()
 
-func lowercaseByte(p *byte) {
-	c := *p
-	if c >= 'A' && c <= 'Z' {
-		*p = c + toLower
+var toUpperTable = func() [256]byte {
+	var a [256]byte
+	for i := 0; i < 256; i++ {
+		c := byte(i)
+		if c >= 'a' && c <= 'z' {
+			c -= toLower
+		}
+		a[i] = c
 	}
-}
+	return a
+}()
 
 func lowercaseBytes(b []byte) {
-	for i, n := 0, len(b); i < n; i++ {
-		lowercaseByte(&b[i])
+	for i := 0; i < len(b); i++ {
+		p := &b[i]
+		*p = toLowerTable[*p]
 	}
 }
@@ -375,6 +393,13 @@ func s2b(s string) []byte {
 	return *(*[]byte)(unsafe.Pointer(&bh))
 }
 
+// AppendUnquotedArg appends url-decoded src to dst and returns appended dst.
+//
+// dst may point to src. In this case src will be overwritten.
+func AppendUnquotedArg(dst, src []byte) []byte {
+	return decodeArgAppend(dst, src)
+}
+
 // AppendQuotedArg appends url-encoded src to dst and returns appended dst.
 func AppendQuotedArg(dst, src []byte) []byte {
 	for _, c := range src {
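Note: the per-byte uppercaseByte/lowercaseByte functions are replaced above with precomputed 256-entry tables, turning a compare-and-branch per byte into a single table load. A standalone sketch of the same approach (illustrative, not part of the diff):

package main

import "fmt"

var lower = func() [256]byte {
	var t [256]byte
	for i := 0; i < 256; i++ {
		c := byte(i)
		if c >= 'A' && c <= 'Z' {
			c += 'a' - 'A'
		}
		t[i] = c
	}
	return t
}()

func main() {
	b := []byte("Content-TYPE")
	for i := range b {
		b[i] = lower[b[i]] // one load per byte, no branches
	}
	fmt.Println(string(b)) // content-type
}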
3 vendor/github.com/valyala/fasthttp/client.go (generated, vendored)
@@ -1380,6 +1380,9 @@ func newClientTLSConfig(c *tls.Config, addr string) *tls.Config {
 }
 
 func tlsServerName(addr string) string {
+	if !strings.Contains(addr, ":") {
+		return addr
+	}
 	host, _, err := net.SplitHostPort(addr)
 	if err != nil {
 		return "*"
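Note: the patch above adds a fast path so addresses without a port are returned unchanged instead of falling into net.SplitHostPort (which errors on them). A toy re-implementation (the trailing "return host" is assumed, since the hunk is cut off there):

package main

import (
	"fmt"
	"net"
	"strings"
)

func tlsServerName(addr string) string {
	if !strings.Contains(addr, ":") {
		return addr // new fast path: no port present
	}
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return "*"
	}
	return host // assumed continuation beyond the hunk
}

func main() {
	fmt.Println(tlsServerName("example.com"))     // example.com
	fmt.Println(tlsServerName("example.com:443")) // example.com
	fmt.Println(tlsServerName("a:b:c:d"))         // * (too many colons)
}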
295 vendor/github.com/valyala/fasthttp/compress.go (generated, vendored)
@@ -1,6 +1,7 @@
 package fasthttp
 
 import (
+	"bytes"
 	"fmt"
 	"io"
 	"os"
@@ -9,6 +10,7 @@ import (
 	"github.com/klauspost/compress/flate"
 	"github.com/klauspost/compress/gzip"
 	"github.com/klauspost/compress/zlib"
+	"github.com/valyala/bytebufferpool"
 	"github.com/valyala/fasthttp/stackless"
 )
 
@@ -17,7 +19,8 @@ const (
 	CompressNoCompression      = flate.NoCompression
 	CompressBestSpeed          = flate.BestSpeed
 	CompressBestCompression    = flate.BestCompression
-	CompressDefaultCompression = flate.DefaultCompression
+	CompressDefaultCompression = 6  // flate.DefaultCompression
+	CompressHuffmanOnly        = -2 // flate.HuffmanOnly
 )
 
 func acquireGzipReader(r io.Reader) (*gzip.Reader, error) {
@@ -70,51 +73,54 @@ func resetFlateReader(zr io.ReadCloser, r io.Reader) error {
 
 var flateReaderPool sync.Pool
 
-func acquireGzipWriter(w io.Writer, level int) *gzipWriter {
-	p := gzipWriterPoolMap[level]
-	if p == nil {
-		panic(fmt.Sprintf("BUG: unexpected compression level passed: %d. See compress/gzip for supported levels", level))
-	}
+func acquireStacklessGzipWriter(w io.Writer, level int) stackless.Writer {
+	nLevel := normalizeCompressLevel(level)
+	p := stacklessGzipWriterPoolMap[nLevel]
 	v := p.Get()
 	if v == nil {
-		sw := stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
-			zw, err := gzip.NewWriterLevel(w, level)
-			if err != nil {
-				panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err))
-			}
-			return zw
+		return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
+			return acquireRealGzipWriter(w, level)
 		})
-		return &gzipWriter{
-			Writer: sw,
-			p:      p,
-		}
 	}
-	zw := v.(*gzipWriter)
+	sw := v.(stackless.Writer)
+	sw.Reset(w)
+	return sw
+}
+
+func releaseStacklessGzipWriter(sw stackless.Writer, level int) {
+	sw.Close()
+	nLevel := normalizeCompressLevel(level)
+	p := stacklessGzipWriterPoolMap[nLevel]
+	p.Put(sw)
+}
+
+func acquireRealGzipWriter(w io.Writer, level int) *gzip.Writer {
+	nLevel := normalizeCompressLevel(level)
+	p := realGzipWriterPoolMap[nLevel]
+	v := p.Get()
+	if v == nil {
+		zw, err := gzip.NewWriterLevel(w, level)
+		if err != nil {
+			panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err))
+		}
+		return zw
+	}
+	zw := v.(*gzip.Writer)
 	zw.Reset(w)
 	return zw
 }
 
-func releaseGzipWriter(zw *gzipWriter) {
+func releaseRealGzipWriter(zw *gzip.Writer, level int) {
 	zw.Close()
-	zw.p.Put(zw)
+	nLevel := normalizeCompressLevel(level)
+	p := realGzipWriterPoolMap[nLevel]
+	p.Put(zw)
 }
 
-type gzipWriter struct {
-	stackless.Writer
-	p *sync.Pool
-}
-
-var gzipWriterPoolMap = func() map[int]*sync.Pool {
-	// Initialize pools for all the compression levels defined
-	// in https://golang.org/pkg/compress/gzip/#pkg-constants .
-	m := make(map[int]*sync.Pool, 11)
-	m[-1] = &sync.Pool{}
-	for i := 0; i < 10; i++ {
-		m[i] = &sync.Pool{}
-	}
-	return m
-}()
+var (
+	stacklessGzipWriterPoolMap = newCompressWriterPoolMap()
+	realGzipWriterPoolMap      = newCompressWriterPoolMap()
+)
 
 // AppendGzipBytesLevel appends gzipped src to dst using the given
 // compression level and returns the resulting dst.
@@ -125,6 +131,7 @@ var gzipWriterPoolMap = func() map[int]*sync.Pool {
 // * CompressBestSpeed
 // * CompressBestCompression
 // * CompressDefaultCompression
+// * CompressHuffmanOnly
 func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
 	w := &byteSliceWriter{dst}
 	WriteGzipLevel(w, src, level)
@@ -140,11 +147,41 @@ func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
 // * CompressBestSpeed
 // * CompressBestCompression
 // * CompressDefaultCompression
+// * CompressHuffmanOnly
 func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) {
-	zw := acquireGzipWriter(w, level)
-	n, err := zw.Write(p)
-	releaseGzipWriter(zw)
-	return n, err
+	switch w.(type) {
+	case *byteSliceWriter,
+		*bytes.Buffer,
+		*ByteBuffer,
+		*bytebufferpool.ByteBuffer:
+		// These writers don't block, so we can just use stacklessWriteGzip
+		ctx := &compressCtx{
+			w:     w,
+			p:     p,
+			level: level,
+		}
+		stacklessWriteGzip(ctx)
+		return len(p), nil
+	default:
+		zw := acquireStacklessGzipWriter(w, level)
+		n, err := zw.Write(p)
+		releaseStacklessGzipWriter(zw, level)
+		return n, err
+	}
+}
+
+var stacklessWriteGzip = stackless.NewFunc(nonblockingWriteGzip)
+
+func nonblockingWriteGzip(ctxv interface{}) {
+	ctx := ctxv.(*compressCtx)
+	zw := acquireRealGzipWriter(ctx.w, ctx.level)
+
+	_, err := zw.Write(ctx.p)
+	if err != nil {
+		panic(fmt.Sprintf("BUG: gzip.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
+	}
+
+	releaseRealGzipWriter(zw, ctx.level)
 }
 
 // WriteGzip writes gzipped p to w and returns the number of compressed
@@ -175,6 +212,92 @@ func WriteGunzip(w io.Writer, p []byte) (int, error) {
 	return nn, err
 }
 
+// AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst.
+func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
+	w := &byteSliceWriter{dst}
+	_, err := WriteGunzip(w, src)
+	return w.b, err
+}
+
+// AppendDeflateBytesLevel appends deflated src to dst using the given
+// compression level and returns the resulting dst.
+//
+// Supported compression levels are:
+//
+// * CompressNoCompression
+// * CompressBestSpeed
+// * CompressBestCompression
+// * CompressDefaultCompression
+// * CompressHuffmanOnly
+func AppendDeflateBytesLevel(dst, src []byte, level int) []byte {
+	w := &byteSliceWriter{dst}
+	WriteDeflateLevel(w, src, level)
+	return w.b
+}
+
+// WriteDeflateLevel writes deflated p to w using the given compression level
+// and returns the number of compressed bytes written to w.
+//
+// Supported compression levels are:
+//
+// * CompressNoCompression
+// * CompressBestSpeed
+// * CompressBestCompression
+// * CompressDefaultCompression
+// * CompressHuffmanOnly
+func WriteDeflateLevel(w io.Writer, p []byte, level int) (int, error) {
+	switch w.(type) {
+	case *byteSliceWriter,
+		*bytes.Buffer,
+		*ByteBuffer,
+		*bytebufferpool.ByteBuffer:
+		// These writers don't block, so we can just use stacklessWriteDeflate
+		ctx := &compressCtx{
+			w:     w,
+			p:     p,
+			level: level,
+		}
+		stacklessWriteDeflate(ctx)
+		return len(p), nil
+	default:
+		zw := acquireStacklessDeflateWriter(w, level)
+		n, err := zw.Write(p)
+		releaseStacklessDeflateWriter(zw, level)
+		return n, err
+	}
+}
+
+var stacklessWriteDeflate = stackless.NewFunc(nonblockingWriteDeflate)
+
+func nonblockingWriteDeflate(ctxv interface{}) {
+	ctx := ctxv.(*compressCtx)
+	zw := acquireRealDeflateWriter(ctx.w, ctx.level)
+
+	_, err := zw.Write(ctx.p)
+	if err != nil {
+		panic(fmt.Sprintf("BUG: zlib.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
+	}
+
+	releaseRealDeflateWriter(zw, ctx.level)
+}
+
+type compressCtx struct {
+	w     io.Writer
+	p     []byte
+	level int
+}
+
+// WriteDeflate writes deflated p to w and returns the number of compressed
+// bytes written to w.
+func WriteDeflate(w io.Writer, p []byte) (int, error) {
+	return WriteDeflateLevel(w, p, CompressDefaultCompression)
+}
+
+// AppendDeflateBytes appends deflated src to dst and returns the resulting dst.
+func AppendDeflateBytes(dst, src []byte) []byte {
+	return AppendDeflateBytesLevel(dst, src, CompressDefaultCompression)
+}
+
 // WriteInflate writes inflated p to w and returns the number of uncompressed
 // bytes written to w.
 func WriteInflate(w io.Writer, p []byte) (int, error) {
@@ -192,10 +315,10 @@ func WriteInflate(w io.Writer, p []byte) (int, error) {
 	return nn, err
 }
 
-// AppendGunzipBytes append gunzipped src to dst and returns the resulting dst.
-func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
+// AppendInflateBytes appends inflated src to dst and returns the resulting dst.
+func AppendInflateBytes(dst, src []byte) ([]byte, error) {
 	w := &byteSliceWriter{dst}
-	_, err := WriteGunzip(w, src)
+	_, err := WriteInflate(w, src)
 	return w.b, err
 }
 
@@ -221,64 +344,79 @@ func (r *byteSliceReader) Read(p []byte) (int, error) {
 	return n, nil
 }
 
-func acquireFlateWriter(w io.Writer, level int) *flateWriter {
-	p := flateWriterPoolMap[level]
-	if p == nil {
-		panic(fmt.Sprintf("BUG: unexpected compression level passed: %d. See compress/flate for supported levels", level))
-	}
+func acquireStacklessDeflateWriter(w io.Writer, level int) stackless.Writer {
+	nLevel := normalizeCompressLevel(level)
+	p := stacklessDeflateWriterPoolMap[nLevel]
 	v := p.Get()
 	if v == nil {
-		sw := stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
-			zw, err := zlib.NewWriterLevel(w, level)
-			if err != nil {
-				panic(fmt.Sprintf("BUG: unexpected error in zlib.NewWriterLevel(%d): %s", level, err))
-			}
-			return zw
+		return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
+			return acquireRealDeflateWriter(w, level)
 		})
-		return &flateWriter{
-			Writer: sw,
-			p:      p,
-		}
 	}
-	zw := v.(*flateWriter)
+	sw := v.(stackless.Writer)
+	sw.Reset(w)
+	return sw
+}
+
+func releaseStacklessDeflateWriter(sw stackless.Writer, level int) {
+	sw.Close()
+	nLevel := normalizeCompressLevel(level)
+	p := stacklessDeflateWriterPoolMap[nLevel]
+	p.Put(sw)
+}
+
+func acquireRealDeflateWriter(w io.Writer, level int) *zlib.Writer {
+	nLevel := normalizeCompressLevel(level)
+	p := realDeflateWriterPoolMap[nLevel]
+	v := p.Get()
+	if v == nil {
+		zw, err := zlib.NewWriterLevel(w, level)
+		if err != nil {
+			panic(fmt.Sprintf("BUG: unexpected error from zlib.NewWriterLevel(%d): %s", level, err))
+		}
+		return zw
+	}
+	zw := v.(*zlib.Writer)
 	zw.Reset(w)
 	return zw
 }
 
-func releaseFlateWriter(zw *flateWriter) {
+func releaseRealDeflateWriter(zw *zlib.Writer, level int) {
 	zw.Close()
-	zw.p.Put(zw)
+	nLevel := normalizeCompressLevel(level)
+	p := realDeflateWriterPoolMap[nLevel]
+	p.Put(zw)
 }
 
-type flateWriter struct {
-	stackless.Writer
-	p *sync.Pool
-}
+var (
+	stacklessDeflateWriterPoolMap = newCompressWriterPoolMap()
+	realDeflateWriterPoolMap      = newCompressWriterPoolMap()
+)
 
-var flateWriterPoolMap = func() map[int]*sync.Pool {
+func newCompressWriterPoolMap() []*sync.Pool {
 	// Initialize pools for all the compression levels defined
 	// in https://golang.org/pkg/compress/flate/#pkg-constants .
-	m := make(map[int]*sync.Pool, 11)
-	m[-1] = &sync.Pool{}
-	for i := 0; i < 10; i++ {
-		m[i] = &sync.Pool{}
+	// Compression levels are normalized with normalizeCompressLevel,
+	// so the fit [0..11].
+	var m []*sync.Pool
+	for i := 0; i < 12; i++ {
+		m = append(m, &sync.Pool{})
 	}
 	return m
-}()
+}
 
 func isFileCompressible(f *os.File, minCompressRatio float64) bool {
 	// Try compressing the first 4kb of of the file
 	// and see if it can be compressed by more than
 	// the given minCompressRatio.
 	b := AcquireByteBuffer()
-	zw := acquireGzipWriter(b, CompressDefaultCompression)
+	zw := acquireStacklessGzipWriter(b, CompressDefaultCompression)
 	lr := &io.LimitedReader{
 		R: f,
 		N: 4096,
 	}
 	_, err := copyZeroAlloc(zw, lr)
-	releaseGzipWriter(zw)
+	releaseStacklessGzipWriter(zw, CompressDefaultCompression)
 	f.Seek(0, 0)
 	if err != nil {
 		return false
@@ -289,3 +427,14 @@ func isFileCompressible(f *os.File, minCompressRatio float64) bool {
 	ReleaseByteBuffer(b)
 	return float64(zn) < float64(n)*minCompressRatio
 }
+
+// normalizes compression level into [0..11], so it could be used as an index
+// in *PoolMap.
+func normalizeCompressLevel(level int) int {
+	// -2 is the lowest compression level - CompressHuffmanOnly
+	// 9 is the highest compression level - CompressBestCompression
+	if level < -2 || level > 9 {
+		level = CompressDefaultCompression
+	}
+	return level + 2
+}
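Note: the new normalizeCompressLevel maps the flate level range -2..9 onto slice indices 0..11, which is why newCompressWriterPoolMap allocates twelve pools. A quick standalone check (illustrative, not from the diff):

package main

import "fmt"

func normalize(level int) int {
	// Out-of-range levels fall back to the default level (6).
	if level < -2 || level > 9 {
		level = 6 // CompressDefaultCompression
	}
	return level + 2
}

func main() {
	for _, lvl := range []int{-2, 0, 6, 9, 42} {
		fmt.Printf("level %d -> pool index %d\n", lvl, normalize(lvl))
	}
	// -2 -> 0, 0 -> 2, 6 -> 8, 9 -> 11, 42 -> 8 (default)
}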
5 vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.key (generated, vendored, new file)
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIBpQbZ6a5jL1Yh4wdP6yZk4MKjYWArD/QOLENFw8vbELoAoGCCqGSM49
+AwEHoUQDQgAEKQCZWgE2IBhb47ot8MIs1D4KSisHYlZ41IWyeutpjb0fjwwIhimh
+pl1Qld1/d2j3Z3vVyfa5yD+ncV7qCFZuSg==
+-----END EC PRIVATE KEY-----
10 vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.pem (generated, vendored, new file)
@@ -0,0 +1,10 @@
+-----BEGIN CERTIFICATE-----
+MIIBbTCCAROgAwIBAgIQPo718S+K+G7hc1SgTEU4QDAKBggqhkjOPQQDAjASMRAw
+DgYDVQQKEwdBY21lIENvMB4XDTE3MDQyMDIxMDExNFoXDTE4MDQyMDIxMDExNFow
+EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCkA
+mVoBNiAYW+O6LfDCLNQ+CkorB2JWeNSFsnrraY29H48MCIYpoaZdUJXdf3do92d7
+1cn2ucg/p3Fe6ghWbkqjSzBJMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggr
+BgEFBQcDATAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDAKBggq
+hkjOPQQDAgNIADBFAiEAoLAIQkvSuIcHUqyWroA6yWYw2fznlRH/uO9/hMCxUCEC
+IClRYb/5O9eD/Eq/ozPnwNpsQHOeYefEhadJ/P82y0lG
+-----END CERTIFICATE-----
11 vendor/github.com/valyala/fasthttp/fs.go (generated, vendored)
@@ -958,12 +958,7 @@ func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool)
 
 	if mustCompress {
 		var zbuf ByteBuffer
-		zw := acquireGzipWriter(&zbuf, CompressDefaultCompression)
-		_, err = zw.Write(w.B)
-		releaseGzipWriter(zw)
-		if err != nil {
-			return nil, fmt.Errorf("error when compressing automatically generated index for directory %q: %s", dirPath, err)
-		}
+		zbuf.B = AppendGzipBytesLevel(zbuf.B, w.B, CompressDefaultCompression)
 		w = &zbuf
 	}
 
@@ -1048,12 +1043,12 @@ func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePat
 		return nil, errNoCreatePermission
 	}
 
-	zw := acquireGzipWriter(zf, CompressDefaultCompression)
+	zw := acquireStacklessGzipWriter(zf, CompressDefaultCompression)
 	_, err = copyZeroAlloc(zw, f)
 	if err1 := zw.Flush(); err == nil {
 		err = err1
 	}
-	releaseGzipWriter(zw)
+	releaseStacklessGzipWriter(zw, CompressDefaultCompression)
 	zf.Close()
 	f.Close()
 	if err != nil {
30
vendor/github.com/valyala/fasthttp/header.go
generated
vendored
30
vendor/github.com/valyala/fasthttp/header.go
generated
vendored
@@ -263,6 +263,12 @@ func (h *RequestHeader) SetContentLength(contentLength int) {
	}
}

func (h *ResponseHeader) isCompressibleContentType() bool {
	contentType := h.ContentType()
	return bytes.HasPrefix(contentType, strTextSlash) ||
		bytes.HasPrefix(contentType, strApplicationSlash)
}

// ContentType returns Content-Type header value.
func (h *ResponseHeader) ContentType() []byte {
	contentType := h.contentType
@@ -2008,19 +2014,21 @@ func normalizeHeaderKey(b []byte, disableNormalizing bool) {
	}

	n := len(b)
	up := true
	for i := 0; i < n; i++ {
		switch b[i] {
		case '-':
			up = true
		default:
			if up {
				up = false
				uppercaseByte(&b[i])
			} else {
				lowercaseByte(&b[i])
	if n == 0 {
		return
	}

	b[0] = toUpperTable[b[0]]
	for i := 1; i < n; i++ {
		p := &b[i]
		if *p == '-' {
			i++
			if i < n {
				b[i] = toUpperTable[b[i]]
			}
			continue
		}
		*p = toLowerTable[*p]
	}
}
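The rewritten normalizeHeaderKey drops the per-byte switch in favor of a single pass over the key with precomputed case tables: the first byte and every byte following a '-' are upper-cased, everything else lower-cased. A self-contained sketch of the same algorithm (the tables are built locally here; fasthttp keeps package-level toUpperTable/toLowerTable):

```go
package main

import "fmt"

var toLower, toUpper [256]byte

func init() {
	// Precompute ASCII case mappings; all other bytes map to themselves.
	for i := 0; i < 256; i++ {
		c := byte(i)
		toLower[i], toUpper[i] = c, c
		if c >= 'A' && c <= 'Z' {
			toLower[i] = c + ('a' - 'A')
		}
		if c >= 'a' && c <= 'z' {
			toUpper[i] = c - ('a' - 'A')
		}
	}
}

// normalizeHeaderKey canonicalizes b in place: "content-TYPE" -> "Content-Type".
func normalizeHeaderKey(b []byte) {
	n := len(b)
	if n == 0 {
		return
	}
	b[0] = toUpper[b[0]]
	for i := 1; i < n; i++ {
		p := &b[i]
		if *p == '-' {
			i++
			if i < n {
				b[i] = toUpper[b[i]]
			}
			continue
		}
		*p = toLower[*p]
	}
}

func main() {
	k := []byte("content-TYPE")
	normalizeHeaderKey(k)
	fmt.Println(string(k)) // Content-Type
}
```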
69
vendor/github.com/valyala/fasthttp/http.go
generated
vendored
@@ -1138,6 +1138,7 @@ func (resp *Response) WriteGzip(w *bufio.Writer) error {
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
//
// The method gzips response body and sets 'Content-Encoding: gzip'
// header before writing response to w.
@@ -1168,6 +1169,7 @@ func (resp *Response) WriteDeflate(w *bufio.Writer) error {
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
//
// The method deflates response body and sets 'Content-Encoding: deflate'
// header before writing response to w.
@@ -1187,30 +1189,42 @@ func (resp *Response) gzipBody(level int) error {
		return nil
	}

	// Do not care about memory allocations here, since gzip is slow
	// and allocates a lot of memory by itself.
	if !resp.Header.isCompressibleContentType() {
		// The content-type cannot be compressed.
		return nil
	}

	if resp.bodyStream != nil {
		// Reset Content-Length to -1, since it is impossible
		// to determine body size beforehand of streamed compression.
		// For https://github.com/valyala/fasthttp/issues/176 .
		resp.Header.SetContentLength(-1)

		// Do not care about memory allocations here, since gzip is slow
		// and allocates a lot of memory by itself.
		bs := resp.bodyStream
		resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) {
			zw := acquireGzipWriter(sw, level)
			zw := acquireStacklessGzipWriter(sw, level)
			fw := &flushWriter{
				wf: zw,
				bw: sw,
			}
			copyZeroAlloc(fw, bs)
			releaseGzipWriter(zw)
			releaseStacklessGzipWriter(zw, level)
			if bsc, ok := bs.(io.Closer); ok {
				bsc.Close()
			}
		})
	} else {
		w := responseBodyPool.Get()
		zw := acquireGzipWriter(w, level)
		_, err := zw.Write(resp.bodyBytes())
		releaseGzipWriter(zw)
		if err != nil {
			return err
		bodyBytes := resp.bodyBytes()
		if len(bodyBytes) < minCompressLen {
			// There is no sense in spending CPU time on small body compression,
			// since there is a very high probability that the compressed
			// body size will be bigger than the original body size.
			return nil
		}
		w := responseBodyPool.Get()
		w.B = AppendGzipBytesLevel(w.B, bodyBytes, level)

		// Hack: swap resp.body with w.
		if resp.body != nil {
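The non-streaming branch now bails out for bodies shorter than minCompressLen, because gzip framing overhead (header plus trailer) routinely makes tiny payloads larger than the original. A quick standard-library demonstration of that effect:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

// gzipLen returns the gzipped size of b at the default level.
func gzipLen(b []byte) int {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write(b) // writes into a bytes.Buffer cannot fail
	zw.Close()
	return buf.Len()
}

func main() {
	small := []byte("ok")
	fmt.Println(len(small), "->", gzipLen(small)) // e.g. 2 -> 26: larger than the input

	big := bytes.Repeat([]byte("abcdef"), 100)
	fmt.Println(len(big), "->", gzipLen(big)) // 600 -> far fewer bytes
}
```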
@@ -1229,30 +1243,42 @@ func (resp *Response) deflateBody(level int) error {
		return nil
	}

	// Do not care about memory allocations here, since flate is slow
	// and allocates a lot of memory by itself.
	if !resp.Header.isCompressibleContentType() {
		// The content-type cannot be compressed.
		return nil
	}

	if resp.bodyStream != nil {
		// Reset Content-Length to -1, since it is impossible
		// to determine body size beforehand of streamed compression.
		// For https://github.com/valyala/fasthttp/issues/176 .
		resp.Header.SetContentLength(-1)

		// Do not care about memory allocations here, since flate is slow
		// and allocates a lot of memory by itself.
		bs := resp.bodyStream
		resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) {
			zw := acquireFlateWriter(sw, level)
			zw := acquireStacklessDeflateWriter(sw, level)
			fw := &flushWriter{
				wf: zw,
				bw: sw,
			}
			copyZeroAlloc(fw, bs)
			releaseFlateWriter(zw)
			releaseStacklessDeflateWriter(zw, level)
			if bsc, ok := bs.(io.Closer); ok {
				bsc.Close()
			}
		})
	} else {
		w := responseBodyPool.Get()
		zw := acquireFlateWriter(w, level)
		_, err := zw.Write(resp.bodyBytes())
		releaseFlateWriter(zw)
		if err != nil {
			return err
		bodyBytes := resp.bodyBytes()
		if len(bodyBytes) < minCompressLen {
			// There is no sense in spending CPU time on small body compression,
			// since there is a very high probability that the compressed
			// body size will be bigger than the original body size.
			return nil
		}
		w := responseBodyPool.Get()
		w.B = AppendDeflateBytesLevel(w.B, bodyBytes, level)

		// Hack: swap resp.body with w.
		if resp.body != nil {
@@ -1264,6 +1290,9 @@ func (resp *Response) deflateBody(level int) error {
	return nil
}

// Bodies with sizes smaller than minCompressLen aren't compressed at all
const minCompressLen = 200

type writeFlusher interface {
	io.Writer
	Flush() error
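writeFlusher (above) is the small interface that lets the gzip and flate writers share the streaming path, and the flushWriter seen in the earlier hunks pushes each chunk through the compressor and on toward the client. A plausible sketch of that wrapper, under the assumption that it flushes on every write so streamed responses are not held back in buffers (field names mirror the diff):

```go
package main

import (
	"bufio"
	"compress/gzip"
	"io"
	"os"
)

type writeFlusher interface {
	io.Writer
	Flush() error
}

// flushWriter writes each chunk through the compressor, then flushes
// both the compressor and the underlying bufio.Writer.
type flushWriter struct {
	wf writeFlusher
	bw *bufio.Writer
}

func (w *flushWriter) Write(p []byte) (int, error) {
	n, err := w.wf.Write(p)
	if err != nil {
		return n, err
	}
	if err := w.wf.Flush(); err != nil {
		return n, err
	}
	return n, w.bw.Flush()
}

func main() {
	bw := bufio.NewWriter(os.Stdout)
	zw := gzip.NewWriter(bw)
	fw := &flushWriter{wf: zw, bw: bw}
	io.WriteString(fw, "streamed chunk") // reaches bw immediately
	zw.Close()
	bw.Flush()
}
```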
1
vendor/github.com/valyala/fasthttp/server.go
generated
vendored
@@ -340,6 +340,7 @@ func CompressHandler(h RequestHandler) RequestHandler {
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
func CompressHandlerLevel(h RequestHandler, level int) RequestHandler {
	return func(ctx *RequestCtx) {
		h(ctx)
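Usage of CompressHandlerLevel is unchanged apart from the wider set of level constants; something like the following should work against this fasthttp revision:

```go
package main

import "github.com/valyala/fasthttp"

func hello(ctx *fasthttp.RequestCtx) {
	ctx.SetBodyString("hello, world")
}

func main() {
	// Wrap the handler so responses are gzipped or deflated when the
	// client advertises support via Accept-Encoding.
	h := fasthttp.CompressHandlerLevel(hello, fasthttp.CompressBestSpeed)
	fasthttp.ListenAndServe(":8080", h)
}
```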
2
vendor/github.com/valyala/fasthttp/strings.go
generated
vendored
@@ -68,4 +68,6 @@ var (
	strMultipartFormData = []byte("multipart/form-data")
	strBoundary          = []byte("boundary")
	strBytes             = []byte("bytes")
	strTextSlash         = []byte("text/")
	strApplicationSlash  = []byte("application/")
)
2
vendor/github.com/valyala/fasthttp/uri.go
generated
vendored
@@ -277,7 +277,7 @@ func (u *URI) parse(host, uri []byte, h *RequestHeader) {
func normalizePath(dst, src []byte) []byte {
	dst = dst[:0]
	dst = addLeadingSlash(dst, src)
	dst = decodeArgAppend(dst, src, false)
	dst = decodeArgAppendNoPlus(dst, src)

	// remove duplicate slashes
	b := dst
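normalizePath switches to decodeArgAppendNoPlus because '+' only decodes to a space under query-string rules, never in the path component. The standard library draws the same distinction:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q, _ := url.QueryUnescape("a+b%20c") // query rules: '+' becomes a space
	p, _ := url.PathUnescape("a+b%20c")  // path rules: '+' stays literal
	fmt.Println(q) // "a b c"
	fmt.Println(p) // "a+b c"
}
```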
13
vendor/gopkg.in/alecthomas/kingpin.v2/README.md
generated
vendored
@@ -1,4 +1,7 @@
# Kingpin - A Go (golang) command line and flag parser [](http://godoc.org/github.com/alecthomas/kingpin) [](https://travis-ci.org/alecthomas/kingpin)
# Kingpin - A Go (golang) command line and flag parser
[](http://godoc.org/github.com/alecthomas/kingpin) [](https://travis-ci.org/alecthomas/kingpin) [](https://gitter.im/alecthomas/Lobby)


<!-- MarkdownTOC -->

@@ -243,7 +246,7 @@ var (
func main() {
	kingpin.Version("0.0.1")
	kingpin.Parse()
	fmt.Printf("Would ping: %s with timeout %s and count %d", *ip, *timeout, *count)
	fmt.Printf("Would ping: %s with timeout %s and count %d\n", *ip, *timeout, *count)
}
```

@@ -384,7 +387,7 @@ func main() {

Kingpin supports both flag and positional argument parsers for converting to
Go types. For example, some included parsers are `Int()`, `Float()`,
`Duration()` and `ExistingFile()`.
`Duration()` and `ExistingFile()` (see [parsers.go](./parsers.go) for a complete list of included parsers).

Parsers conform to Go's [`flag.Value`](http://godoc.org/flag#Value)
interface, so any existing implementations will work.
@@ -412,7 +415,7 @@ As a convenience, I would recommend something like this:

```go
func HTTPHeader(s Settings) (target *http.Header) {
	target = new(http.Header)
	target = &http.Header{}
	s.SetValue((*HTTPHeaderValue)(target))
	return
}
@@ -578,7 +581,7 @@ Consider the case that you needed to read a local database or a file to
provide suggestions. You can dynamically generate the options

```
func listHosts(args []string) []string {
func listHosts() []string {
	// Provide a dynamic list of hosts from a hosts file or otherwise
	// for bash completion. In this example we simply return static slice.
2
vendor/gopkg.in/alecthomas/kingpin.v2/app.go
generated
vendored
@@ -136,7 +136,7 @@ func (a *Application) Writer(w io.Writer) *Application {

// ErrorWriter sets the io.Writer to use for errors.
func (a *Application) ErrorWriter(w io.Writer) *Application {
	a.usageWriter = w
	a.errorWriter = w
	return a
}
13
vendor/gopkg.in/alecthomas/kingpin.v2/args.go
generated
vendored
@@ -87,14 +87,13 @@ func (a *ArgClause) setDefault() error {
	if v, ok := a.value.(remainderArg); !ok || !v.IsCumulative() {
		// Use the value as-is
		return a.value.Set(a.GetEnvarValue())
	} else {
		for _, value := range a.GetSplitEnvarValue() {
			if err := a.value.Set(value); err != nil {
				return err
			}
		}
		return nil
	}
	for _, value := range a.GetSplitEnvarValue() {
		if err := a.value.Set(value); err != nil {
			return err
		}
	}
	return nil
}

	if len(a.defaultValues) > 0 {
2
vendor/gopkg.in/alecthomas/kingpin.v2/doc.go
generated
vendored
@@ -35,7 +35,7 @@
//
//     package main
//
//     import "gopkg.in/alecthomas/kingpin.v1"
//     import "gopkg.in/alecthomas/kingpin.v2"
//
//     var (
//       debug = kingpin.Flag("debug", "enable debug mode").Default("false").Bool()
6
vendor/gopkg.in/alecthomas/kingpin.v2/flags.go
generated
vendored
@@ -53,7 +53,7 @@ func (f *flagGroup) init(defaultEnvarPrefix string) error {
}

func (f *flagGroup) checkDuplicates() error {
	seenShort := map[byte]bool{}
	seenShort := map[rune]bool{}
	seenLong := map[string]bool{}
	for _, flag := range f.flagOrder {
		if flag.shorthand != 0 {
@@ -147,7 +147,7 @@ type FlagClause struct {
	completionsMixin
	envarMixin
	name      string
	shorthand byte
	shorthand rune
	help      string
	defaultValues []string
	placeholder   string
@@ -295,7 +295,7 @@ func (f *FlagClause) Required() *FlagClause {
}

// Short sets the short flag name.
func (f *FlagClause) Short(name byte) *FlagClause {
func (f *FlagClause) Short(name rune) *FlagClause {
	f.shorthand = name
	return f
}
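With the shorthand widened from byte to rune, any single Unicode code point can serve as a short flag. Typical usage against the updated kingpin API:

```go
package main

import (
	"fmt"

	"gopkg.in/alecthomas/kingpin.v2"
)

var (
	verbose = kingpin.Flag("verbose", "Verbose mode.").Short('v').Bool()
	// A multi-byte rune is now a valid shorthand too.
	lucky = kingpin.Flag("lucky", "Feeling lucky.").Short('λ').Bool()
)

func main() {
	kingpin.Parse()
	fmt.Println(*verbose, *lucky)
}
```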
18
vendor/gopkg.in/alecthomas/kingpin.v2/parser.go
generated
vendored
@@ -5,6 +5,7 @@ import (
	"fmt"
	"os"
	"strings"
	"unicode/utf8"
)

type TokenType int

@@ -189,7 +190,8 @@ func (p *ParseContext) Next() *Token {
		if len(arg) == 1 {
			return &Token{Index: p.argi, Type: TokenShort}
		}
		short := arg[1:2]
		shortRune, size := utf8.DecodeRuneInString(arg[1:])
		short := string(shortRune)
		flag, ok := p.flags.short[short]
		// Not a known short flag, we'll just return it anyway.
		if !ok {
@@ -198,14 +200,14 @@ func (p *ParseContext) Next() *Token {
		} else {
			// Short flag with combined argument: -fARG
			token := &Token{p.argi, TokenShort, short}
			if len(arg) > 2 {
				p.Push(&Token{p.argi, TokenArg, arg[2:]})
			if len(arg) > size+1 {
				p.Push(&Token{p.argi, TokenArg, arg[size+1:]})
			}
			return token
		}

		if len(arg) > 2 {
			p.args = append([]string{"-" + arg[2:]}, p.args...)
		if len(arg) > size+1 {
			p.args = append([]string{"-" + arg[size+1:]}, p.args...)
		}
		return &Token{p.argi, TokenShort, short}
	} else if strings.HasPrefix(arg, "@") {
@@ -213,10 +215,10 @@ func (p *ParseContext) Next() *Token {
		if err != nil {
			return &Token{p.argi, TokenError, err.Error()}
		}
		if p.argi >= len(p.args) {
			p.args = append(p.args[:p.argi-1], expanded...)
		if len(p.args) == 0 {
			p.args = expanded
		} else {
			p.args = append(p.args[:p.argi-1], append(expanded, p.args[p.argi+1:]...)...)
			p.args = append(expanded, p.args...)
		}
		return p.Next()
	}
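The old parser sliced arg[1:2], which cuts a multi-byte short flag in half; utf8.DecodeRuneInString returns both the rune and its encoded size, so the combined-argument remainder can be split at the correct byte offset. A small demonstration:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	arg := "-λ10" // short flag 'λ' (2 bytes in UTF-8) with combined argument "10"

	// Old approach: arg[1:2] grabs only the first byte of 'λ'.
	fmt.Printf("%q\n", arg[1:2]) // "\xce", a broken half-rune

	// New approach: decode one rune and use its byte size to split.
	r, size := utf8.DecodeRuneInString(arg[1:])
	short := string(r)
	rest := arg[size+1:]
	fmt.Println(short, rest) // λ 10
}
```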
4
vendor/gopkg.in/alecthomas/kingpin.v2/values.go
generated
vendored
@@ -332,6 +332,10 @@ func (u *urlListValue) String() string {
	return strings.Join(out, ",")
}

func (u *urlListValue) IsCumulative() bool {
	return true
}

// A flag whose value must be in a set of options.
type enumValue struct {
	value *string
32
vendor/gopkg.in/alecthomas/kingpin.v2/values_generated.go
generated
vendored
@@ -28,7 +28,7 @@ func (f *boolValue) Set(s string) error {

func (f *boolValue) Get() interface{} { return (bool)(*f.v) }

func (f *boolValue) String() string { return fmt.Sprintf("%v", *f) }
func (f *boolValue) String() string { return fmt.Sprintf("%v", *f.v) }

// Bool parses the next command-line value as bool.
func (p *parserMixin) Bool() (target *bool) {
@@ -114,7 +114,7 @@ func (f *uintValue) Set(s string) error {

func (f *uintValue) Get() interface{} { return (uint)(*f.v) }

func (f *uintValue) String() string { return fmt.Sprintf("%v", *f) }
func (f *uintValue) String() string { return fmt.Sprintf("%v", *f.v) }

// Uint parses the next command-line value as uint.
func (p *parserMixin) Uint() (target *uint) {
@@ -157,7 +157,7 @@ func (f *uint8Value) Set(s string) error {

func (f *uint8Value) Get() interface{} { return (uint8)(*f.v) }

func (f *uint8Value) String() string { return fmt.Sprintf("%v", *f) }
func (f *uint8Value) String() string { return fmt.Sprintf("%v", *f.v) }

// Uint8 parses the next command-line value as uint8.
func (p *parserMixin) Uint8() (target *uint8) {
@@ -200,7 +200,7 @@ func (f *uint16Value) Set(s string) error {

func (f *uint16Value) Get() interface{} { return (uint16)(*f.v) }

func (f *uint16Value) String() string { return fmt.Sprintf("%v", *f) }
func (f *uint16Value) String() string { return fmt.Sprintf("%v", *f.v) }

// Uint16 parses the next command-line value as uint16.
func (p *parserMixin) Uint16() (target *uint16) {
@@ -243,7 +243,7 @@ func (f *uint32Value) Set(s string) error {

func (f *uint32Value) Get() interface{} { return (uint32)(*f.v) }

func (f *uint32Value) String() string { return fmt.Sprintf("%v", *f) }
func (f *uint32Value) String() string { return fmt.Sprintf("%v", *f.v) }

// Uint32 parses the next command-line value as uint32.
func (p *parserMixin) Uint32() (target *uint32) {
@@ -286,7 +286,7 @@ func (f *uint64Value) Set(s string) error {

func (f *uint64Value) Get() interface{} { return (uint64)(*f.v) }

func (f *uint64Value) String() string { return fmt.Sprintf("%v", *f) }
func (f *uint64Value) String() string { return fmt.Sprintf("%v", *f.v) }

// Uint64 parses the next command-line value as uint64.
func (p *parserMixin) Uint64() (target *uint64) {
@@ -329,7 +329,7 @@ func (f *intValue) Set(s string) error {

func (f *intValue) Get() interface{} { return (int)(*f.v) }

func (f *intValue) String() string { return fmt.Sprintf("%v", *f) }
func (f *intValue) String() string { return fmt.Sprintf("%v", *f.v) }

// Int parses the next command-line value as int.
func (p *parserMixin) Int() (target *int) {
@@ -372,7 +372,7 @@ func (f *int8Value) Set(s string) error {

func (f *int8Value) Get() interface{} { return (int8)(*f.v) }

func (f *int8Value) String() string { return fmt.Sprintf("%v", *f) }
func (f *int8Value) String() string { return fmt.Sprintf("%v", *f.v) }

// Int8 parses the next command-line value as int8.
func (p *parserMixin) Int8() (target *int8) {
@@ -415,7 +415,7 @@ func (f *int16Value) Set(s string) error {

func (f *int16Value) Get() interface{} { return (int16)(*f.v) }

func (f *int16Value) String() string { return fmt.Sprintf("%v", *f) }
func (f *int16Value) String() string { return fmt.Sprintf("%v", *f.v) }

// Int16 parses the next command-line value as int16.
func (p *parserMixin) Int16() (target *int16) {
@@ -458,7 +458,7 @@ func (f *int32Value) Set(s string) error {

func (f *int32Value) Get() interface{} { return (int32)(*f.v) }

func (f *int32Value) String() string { return fmt.Sprintf("%v", *f) }
func (f *int32Value) String() string { return fmt.Sprintf("%v", *f.v) }

// Int32 parses the next command-line value as int32.
func (p *parserMixin) Int32() (target *int32) {
@@ -501,7 +501,7 @@ func (f *int64Value) Set(s string) error {

func (f *int64Value) Get() interface{} { return (int64)(*f.v) }

func (f *int64Value) String() string { return fmt.Sprintf("%v", *f) }
func (f *int64Value) String() string { return fmt.Sprintf("%v", *f.v) }

// Int64 parses the next command-line value as int64.
func (p *parserMixin) Int64() (target *int64) {
@@ -544,7 +544,7 @@ func (f *float64Value) Set(s string) error {

func (f *float64Value) Get() interface{} { return (float64)(*f.v) }

func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }
func (f *float64Value) String() string { return fmt.Sprintf("%v", *f.v) }

// Float64 parses the next command-line value as float64.
func (p *parserMixin) Float64() (target *float64) {
@@ -587,7 +587,7 @@ func (f *float32Value) Set(s string) error {

func (f *float32Value) Get() interface{} { return (float32)(*f.v) }

func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) }
func (f *float32Value) String() string { return fmt.Sprintf("%v", *f.v) }

// Float32 parses the next command-line value as float32.
func (p *parserMixin) Float32() (target *float32) {
@@ -708,7 +708,7 @@ func (f *regexpValue) Set(s string) error {

func (f *regexpValue) Get() interface{} { return (*regexp.Regexp)(*f.v) }

func (f *regexpValue) String() string { return fmt.Sprintf("%v", *f) }
func (f *regexpValue) String() string { return fmt.Sprintf("%v", *f.v) }

// Regexp parses the next command-line value as *regexp.Regexp.
func (p *parserMixin) Regexp() (target **regexp.Regexp) {
@@ -751,7 +751,7 @@ func (f *resolvedIPValue) Set(s string) error {

func (f *resolvedIPValue) Get() interface{} { return (net.IP)(*f.v) }

func (f *resolvedIPValue) String() string { return fmt.Sprintf("%v", *f) }
func (f *resolvedIPValue) String() string { return fmt.Sprintf("%v", *f.v) }

// Resolve a hostname or IP to an IP.
func (p *parserMixin) ResolvedIP() (target *net.IP) {
@@ -794,7 +794,7 @@ func (f *hexBytesValue) Set(s string) error {

func (f *hexBytesValue) Get() interface{} { return ([]byte)(*f.v) }

func (f *hexBytesValue) String() string { return fmt.Sprintf("%v", *f) }
func (f *hexBytesValue) String() string { return fmt.Sprintf("%v", *f.v) }

// Bytes as a hex string.
func (p *parserMixin) HexBytes() (target *[]byte) {
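Every regenerated String method had the same bug: it formatted the wrapper struct (*f) rather than the wrapped value (*f.v), so a flag printed as a struct containing a pointer instead of its value. A minimal reproduction:

```go
package main

import "fmt"

// intValue mirrors the shape of kingpin's generated value wrappers.
type intValue struct {
	v *int
}

func main() {
	n := 42
	f := intValue{v: &n}
	fmt.Printf("%v\n", f)    // e.g. {0xc0000140a8}: the struct, not the number
	fmt.Printf("%v\n", *f.v) // 42: what the fixed String method prints
}
```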
50
vendor/vendor.json
vendored
@@ -21,34 +21,34 @@
		"revisionTime": "2015-10-22T06:55:26Z"
	},
	{
		"checksumSHA1": "+CqJGh7NIDMnHgScq9sl9tPrnVM=",
		"checksumSHA1": "+IQN6csaE5uxhZJnulb6uwhVCr4=",
		"path": "github.com/klauspost/compress/flate",
		"revision": "14c9a76e3c95e47f8ccce949bba2c1101a8b85e6",
		"revisionTime": "2017-02-18T08:16:04Z"
		"revision": "f3dce52e0576655d55fd69e74b63da96ad1108f3",
		"revisionTime": "2017-05-28T13:23:59Z"
	},
	{
		"checksumSHA1": "V1lQwkoDR1fPmZBSgkmZjgZofeU=",
		"checksumSHA1": "kWBC7CTgppTdJFXizt4XkURbyCE=",
		"path": "github.com/klauspost/compress/gzip",
		"revision": "14c9a76e3c95e47f8ccce949bba2c1101a8b85e6",
		"revisionTime": "2017-02-18T08:16:04Z"
		"revision": "f3dce52e0576655d55fd69e74b63da96ad1108f3",
		"revisionTime": "2017-05-28T13:23:59Z"
	},
	{
		"checksumSHA1": "+azPXaZpPF14YHRghNAer13ThQU=",
		"path": "github.com/klauspost/compress/zlib",
		"revision": "14c9a76e3c95e47f8ccce949bba2c1101a8b85e6",
		"revisionTime": "2017-02-18T08:16:04Z"
		"revision": "f3dce52e0576655d55fd69e74b63da96ad1108f3",
		"revisionTime": "2017-05-28T13:23:59Z"
	},
	{
		"checksumSHA1": "iKPMvbAueGfdyHcWCgzwKzm8WVo=",
		"checksumSHA1": "oZnJ7hI35QaJqMyzayLPq1w0dcU=",
		"path": "github.com/klauspost/cpuid",
		"revision": "09cded8978dc9e80714c4d85b0322337b0a1e5e0",
		"revisionTime": "2016-03-02T07:53:16Z"
		"revision": "ae7887de9fa5d2db4eaa8174a7eff2c1ac00f2da",
		"revisionTime": "2017-07-28T05:55:34Z"
	},
	{
		"checksumSHA1": "BM6ZlNJmtKy3GBoWwg2X55gnZ4A=",
		"checksumSHA1": "6/zXof97s7P9tlNp3mUioXgeEVI=",
		"path": "github.com/klauspost/crc32",
		"revision": "1bab8b35b6bb565f92cbc97939610af9369f942a",
		"revisionTime": "2017-02-10T14:05:23Z"
		"revision": "bab58d77464aa9cf4e84200c3276da0831fe0c03",
		"revisionTime": "2017-06-28T07:24:49Z"
	},
	{
		"checksumSHA1": "+mB8aEvEg2wl3PoWZjAVfhGxtJA=",
@@ -63,28 +63,28 @@
		"revisionTime": "2016-08-17T18:16:52Z"
	},
	{
		"checksumSHA1": "bLa60Y/jg1DAFoiUWxfJJ4fh4F4=",
		"checksumSHA1": "WbIdBYaWTfPb73xgCOoW/aeoSFU=",
		"path": "github.com/valyala/fasthttp",
		"revision": "fc109d6887b5edb43510d924d14d735f3975fb51",
		"revisionTime": "2017-02-22T16:45:09Z"
		"revision": "ae643c872d2c060154a4fb2162dc1c0ab1693ccd",
		"revisionTime": "2017-07-21T13:45:47Z"
	},
	{
		"checksumSHA1": "nMWLZCTKLciURGG8o/KeEPUExkY=",
		"checksumSHA1": "nFdyJk6jdHzVNgEMdjDuWMk4z5o=",
		"path": "github.com/valyala/fasthttp/fasthttputil",
		"revision": "fc109d6887b5edb43510d924d14d735f3975fb51",
		"revisionTime": "2017-02-22T16:45:09Z"
		"revision": "ae643c872d2c060154a4fb2162dc1c0ab1693ccd",
		"revisionTime": "2017-07-21T13:45:47Z"
	},
	{
		"checksumSHA1": "YXXy4b1yOQx/iL3Icv6svTmcGss=",
		"path": "github.com/valyala/fasthttp/stackless",
		"revision": "fc109d6887b5edb43510d924d14d735f3975fb51",
		"revisionTime": "2017-02-22T16:45:09Z"
		"revision": "ae643c872d2c060154a4fb2162dc1c0ab1693ccd",
		"revisionTime": "2017-07-21T13:45:47Z"
	},
	{
		"checksumSHA1": "SeYI7DRWrd0Ku+CLavuwIz3EEmQ=",
		"checksumSHA1": "3SZTatHIy9OTKc95YlVfXKnoySg=",
		"path": "gopkg.in/alecthomas/kingpin.v2",
		"revision": "e9044be3ab2a8e11d4e1f418d12f0790d57e8d70",
		"revisionTime": "2016-08-29T10:30:05Z"
		"revision": "1087e65c9441605df944fb12c33f0fe7072d18ca",
		"revisionTime": "2017-07-27T04:22:29Z"
	}
],
"rootPath": "github.com/jimeh/casecmp"