commit ce38f0ec969951979106f4efcd7b7b1fda32836d
Author: Jim Myhrberg
Date:   Thu Mar 30 01:09:55 2017 +0100

    initial commit

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..36f971e
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+bin/*
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..7aa5e14
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,17 @@
+FROM golang:alpine
+
+ADD . /go/src/github.com/jimeh/casecmp
+
+RUN go install github.com/jimeh/casecmp
+
+EXPOSE 8080
+CMD ["/go/bin/casecmp", "--port", "8080"]
+
+
+
+# FROM scratch
+# ADD bin/casecmp_linux_amd64 /casecmp
+# EXPOSE 8080
+# VOLUME /data
+# WORKDIR /
+# CMD ["/casecmp", "--port", "8080"]
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..e3a4b21
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,70 @@
+DEV_DEPS = github.com/kardianos/govendor \
+github.com/mitchellh/gox
+
+BINNAME = casecmp
+BINARY = bin/${BINNAME}
+DOCKERREPO = jimeh/casecmp
+BINDIR = $(shell dirname ${BINARY})
+SOURCES = $(shell find . -name '*.go' -o -name 'VERSION')
+VERSION = $(shell cat VERSION)
+OSARCH = "darwin/386 darwin/amd64 linux/386 linux/amd64 linux/arm"
+RELEASEDIR = releases
+
+$(BINARY): $(SOURCES)
+	go build -o ${BINARY} -ldflags "-X main.Version=${VERSION}"
+
+.PHONY: build
+build: $(BINARY)
+
+.PHONY: clean
+clean:
+	if [ -f ${BINARY} ]; then rm ${BINARY}; fi; \
+	if [ -d ${BINDIR} ]; then rmdir ${BINDIR}; fi
+
+.PHONY: run
+run: $(BINARY)
+	$(BINARY)
+
+.PHONY: install
+install: dev-deps
+	@govendor install +local +program
+
+.PHONY: vendor-sync
+vendor-sync: dev-deps
+	@govendor sync
+
+.PHONY: vendor-fetch
+vendor-fetch: dev-deps
+	@govendor fetch +external +missing
+
+.PHONY: vendor-install
+vendor-install: dev-deps
+	@govendor install +vendor
+
+.PHONY: dev-deps
+dev-deps:
+	@$(foreach DEP,$(DEV_DEPS),go get $(DEP);)
+
+.PHONY: update-dev-deps
+update-dev-deps:
+	@$(foreach DEP,$(DEV_DEPS),go get -u $(DEP);)
+
+.PHONY: release-build
+release-build:
+	gox -output "${RELEASEDIR}/${BINNAME}_${VERSION}_{{.OS}}_{{.Arch}}" \
+		-osarch=${OSARCH} \
+		-ldflags "-X main.Version=${VERSION}"
+
+.SILENT: release
+.PHONY: release
+release: release-build
+	$(eval BINS := $(shell cd ${RELEASEDIR} && find . \
+		-name "${BINNAME}_${VERSION}_*" -not -name "*.tar.gz"))
+	cd $(RELEASEDIR); \
+	$(foreach BIN,$(BINS),tar -cvzf $(BIN).tar.gz $(BIN) && rm $(BIN);)
+
+.PHONY: build-docker
+build-docker: clean
+	govendor sync \
+	&& docker build -t "${DOCKERREPO}:latest" . \
+	&& docker tag "${DOCKERREPO}:latest" "${DOCKERREPO}:${VERSION}"
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..6e8bf73
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+0.1.0
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..dd53394
--- /dev/null
+++ b/main.go
@@ -0,0 +1,82 @@
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"strings"
+
+	"github.com/qiangxue/fasthttp-routing"
+	"github.com/valyala/fasthttp"
+	"gopkg.in/alecthomas/kingpin.v2"
+)
+
+// Version is populated with the version string at build time.
+var Version string
+var defaultPort = "8080"
+
+var (
+	port = kingpin.Flag("port", "Port to listen on.").Short('p').
+		Default(defaultPort).String()
+	bind = kingpin.Flag("bind", "Bind address.").Short('b').
+		Default("0.0.0.0").String()
+	version = kingpin.Flag("version", "Print version info.").
+		Short('v').Bool()
+)
+
+func indexHandler(c *routing.Context) error {
+	c.Write([]byte(
+		"Case-insensitive string comparison, as an API. Because ¯\\_(ツ)_/¯\n" +
+			"\n" +
+			"Example:\n" +
+			"curl -X POST -F \"a=Foo Bar\" -F \"b=FOO BAR\" " +
+			"http://casecmp.bah.io/",
+	))
+	return nil
+}
+
+func casecmpHandler(c *routing.Context) error {
+	a := c.FormValue("a")
+	b := c.FormValue("b")
+
+	resp := "0"
+	if strings.EqualFold(string(a), string(b)) {
+		resp = "1"
+	}
+
+	c.Write([]byte(resp))
+	return nil
+}
+
+func printVersion() {
+	fmt.Println("casecmp " + Version)
+}
+
+func startServer() {
+	r := routing.New()
+	r.Get("/", indexHandler)
+	r.Post("/", casecmpHandler)
+
+	server := fasthttp.Server{Handler: r.HandleRequest}
+
+	if *port == defaultPort {
+		envPort := os.Getenv("PORT")
+		if envPort != "" {
+			*port = envPort
+		}
+	}
+
+	address := *bind + ":" + *port
+	fmt.Println("Listening on " + address)
+	log.Fatal(server.ListenAndServe(address))
+}
+
+func main() {
+	kingpin.Parse()
+
+	if *version {
+		printVersion()
+	} else {
+		startServer()
+	}
+}
diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE
new file mode 100644
index 0000000..7448756
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/alecthomas/template/README.md b/vendor/github.com/alecthomas/template/README.md
new file mode 100644
index 0000000..ef6a8ee
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/README.md
@@ -0,0 +1,25 @@
+# Go's `text/template` package with newline elision
+
+This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.
+
+e.g.
+
+```
+{{if true}}\
+hello
+{{end}}\
+```
+
+Will result in:
+
+```
+hello\n
+```
+
+Rather than:
+
+```
+\n
+hello\n
+\n
+```
diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go
new file mode 100644
index 0000000..223c595
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/doc.go
@@ -0,0 +1,406 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package template implements data-driven templates for generating textual output.
+
+To generate HTML output, see package html/template, which has the same interface
+as this package but automatically secures HTML output against certain attacks.
+
+Templates are executed by applying them to a data structure. Annotations in the
+template refer to elements of the data structure (typically a field of a struct
+or a key in a map) to control execution and derive values to be displayed.
+Execution of the template walks the structure and sets the cursor, represented
+by a period '.' and called "dot", to the value at the current location in the
+structure as execution proceeds.
+
+The input text for a template is UTF-8-encoded text in any format.
+"Actions"--data evaluations or control structures--are delimited by
+"{{" and "}}"; all text outside actions is copied to the output unchanged.
+Actions may not span newlines, although comments can.
+
+Once parsed, a template may be executed safely in parallel.
+
+Here is a trivial example that prints "17 items are made of wool".
+
+	type Inventory struct {
+		Material string
+		Count    uint
+	}
+	sweaters := Inventory{"wool", 17}
+	tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
+	if err != nil { panic(err) }
+	err = tmpl.Execute(os.Stdout, sweaters)
+	if err != nil { panic(err) }
+
+More intricate examples appear below.
+
+Actions
+
+Here is the list of actions. "Arguments" and "pipelines" are evaluations of
+data, defined in detail below.
+
+*/
+//	{{/* a comment */}}
+//		A comment; discarded. May contain newlines.
+//		Comments do not nest and must start and end at the
+//		delimiters, as shown here.
+/*
+
+	{{pipeline}}
+		The default textual representation of the value of the pipeline
+		is copied to the output.
+
+	{{if pipeline}} T1 {{end}}
+		If the value of the pipeline is empty, no output is generated;
+		otherwise, T1 is executed. The empty values are false, 0, any
+		nil pointer or interface value, and any array, slice, map, or
+		string of length zero.
+		Dot is unaffected.
+
+	{{if pipeline}} T1 {{else}} T0 {{end}}
+		If the value of the pipeline is empty, T0 is executed;
+		otherwise, T1 is executed. Dot is unaffected.
+
+	{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
+		To simplify the appearance of if-else chains, the else action
+		of an if may include another if directly; the effect is exactly
+		the same as writing
+			{{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
+
+	{{range pipeline}} T1 {{end}}
+		The value of the pipeline must be an array, slice, map, or channel.
+		If the value of the pipeline has length zero, nothing is output;
+		otherwise, dot is set to the successive elements of the array,
+		slice, or map and T1 is executed. If the value is a map and the
+		keys are of basic type with a defined order ("comparable"), the
+		elements will be visited in sorted key order.
+
+	{{range pipeline}} T1 {{else}} T0 {{end}}
+		The value of the pipeline must be an array, slice, map, or channel.
+		If the value of the pipeline has length zero, dot is unaffected and
+		T0 is executed; otherwise, dot is set to the successive elements
+		of the array, slice, or map and T1 is executed.
+
+	{{template "name"}}
+		The template with the specified name is executed with nil data.
+
+	{{template "name" pipeline}}
+		The template with the specified name is executed with dot set
+		to the value of the pipeline.
+
+	{{with pipeline}} T1 {{end}}
+		If the value of the pipeline is empty, no output is generated;
+		otherwise, dot is set to the value of the pipeline and T1 is
+		executed.
+
+	{{with pipeline}} T1 {{else}} T0 {{end}}
+		If the value of the pipeline is empty, dot is unaffected and T0
+		is executed; otherwise, dot is set to the value of the pipeline
+		and T1 is executed.
+
+Arguments
+
+An argument is a simple value, denoted by one of the following.
+
+	- A boolean, string, character, integer, floating-point, imaginary
+	  or complex constant in Go syntax. These behave like Go's untyped
+	  constants, although raw strings may not span newlines.
+	- The keyword nil, representing an untyped Go nil.
+	- The character '.' (period):
+		.
+	  The result is the value of dot.
+	- A variable name, which is a (possibly empty) alphanumeric string
+	  preceded by a dollar sign, such as
+		$piOver2
+	  or
+		$
+	  The result is the value of the variable.
+	  Variables are described below.
+	- The name of a field of the data, which must be a struct, preceded
+	  by a period, such as
+		.Field
+	  The result is the value of the field. Field invocations may be
+	  chained:
+		.Field1.Field2
+	  Fields can also be evaluated on variables, including chaining:
+		$x.Field1.Field2
+	- The name of a key of the data, which must be a map, preceded
+	  by a period, such as
+		.Key
+	  The result is the map element value indexed by the key.
+	  Key invocations may be chained and combined with fields to any
+	  depth:
+		.Field1.Key1.Field2.Key2
+	  Although the key must be an alphanumeric identifier, unlike with
+	  field names they do not need to start with an upper case letter.
+	  Keys can also be evaluated on variables, including chaining:
+		$x.key1.key2
+	- The name of a niladic method of the data, preceded by a period,
+	  such as
+		.Method
+	  The result is the value of invoking the method with dot as the
+	  receiver, dot.Method(). Such a method must have one return value (of
+	  any type) or two return values, the second of which is an error.
+	  If it has two and the returned error is non-nil, execution terminates
+	  and an error is returned to the caller as the value of Execute.
+	  Method invocations may be chained and combined with fields and keys
+	  to any depth:
+		.Field1.Key1.Method1.Field2.Key2.Method2
+	  Methods can also be evaluated on variables, including chaining:
+		$x.Method1.Field
+	- The name of a niladic function, such as
+		fun
+	  The result is the value of invoking the function, fun(). The return
+	  types and values behave as in methods. Functions and function
+	  names are described below.
+	- A parenthesized instance of one of the above, for grouping. The result
+	  may be accessed by a field or map key invocation.
+		print (.F1 arg1) (.F2 arg2)
+		(.StructValuedMethod "arg").Field
+
+Arguments may evaluate to any type; if they are pointers the implementation
+automatically indirects to the base type when required.
+If an evaluation yields a function value, such as a function-valued
+field of a struct, the function is not invoked automatically, but it
+can be used as a truth value for an if action and the like. To invoke
+it, use the call function, defined below.
+
+A pipeline is a possibly chained sequence of "commands". A command is a simple
+value (argument) or a function or method call, possibly with multiple arguments:
+
+	Argument
+		The result is the value of evaluating the argument.
+	.Method [Argument...]
+		The method can be alone or the last element of a chain but,
+		unlike methods in the middle of a chain, it can take arguments.
+		The result is the value of calling the method with the
+		arguments:
+			dot.Method(Argument1, etc.)
+	functionName [Argument...]
+		The result is the value of calling the function associated
+		with the name:
+			function(Argument1, etc.)
+		Functions and function names are described below.
+
+Pipelines
+
+A pipeline may be "chained" by separating a sequence of commands with pipeline
+characters '|'. In a chained pipeline, the result of each command is
+passed as the last argument of the following command. The output of the final
+command in the pipeline is the value of the pipeline.
+
+The output of a command will be either one value or two values, the second of
+which has type error. If that second value is present and evaluates to
+non-nil, execution terminates and the error is returned to the caller of
+Execute.
+
+Variables
+
+A pipeline inside an action may initialize a variable to capture the result.
+The initialization has syntax
+
+	$variable := pipeline
+
+where $variable is the name of the variable. An action that declares a
+variable produces no output.
+
+If a "range" action initializes a variable, the variable is set to the
+successive elements of the iteration. Also, a "range" may declare two
+variables, separated by a comma:
+
+	range $index, $element := pipeline
+
+in which case $index and $element are set to the successive values of the
+array/slice index or map key and element, respectively. Note that if there is
+only one variable, it is assigned the element; this is opposite to the
+convention in Go range clauses.
+
+A variable's scope extends to the "end" action of the control structure ("if",
+"with", or "range") in which it is declared, or to the end of the template if
+there is no such control structure. A template invocation does not inherit
+variables from the point of its invocation.
+
+When execution begins, $ is set to the data argument passed to Execute, that is,
+to the starting value of dot.
+
+Examples
+
+Here are some example one-line templates demonstrating pipelines and variables.
+All produce the quoted word "output":
+
+	{{"\"output\""}}
+		A string constant.
+	{{`"output"`}}
+		A raw string constant.
+	{{printf "%q" "output"}}
+		A function call.
+	{{"output" | printf "%q"}}
+		A function call whose final argument comes from the previous
+		command.
+	{{printf "%q" (print "out" "put")}}
+		A parenthesized argument.
+	{{"put" | printf "%s%s" "out" | printf "%q"}}
+		A more elaborate call.
+	{{"output" | printf "%s" | printf "%q"}}
+		A longer chain.
+	{{with "output"}}{{printf "%q" .}}{{end}}
+		A with action using dot.
+	{{with $x := "output" | printf "%q"}}{{$x}}{{end}}
+		A with action that creates and uses a variable.
+	{{with $x := "output"}}{{printf "%q" $x}}{{end}}
+		A with action that uses the variable in another action.
+	{{with $x := "output"}}{{$x | printf "%q"}}{{end}}
+		The same, but pipelined.
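The one-line templates above can be exercised directly with the standard `text/template` package, whose API this fork tracks; a minimal runnable sketch of the final, pipelined variant:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// The with action binds $x to "output"; the pipeline passes $x as
	// the final argument to printf, quoting it. Prints: "output"
	tmpl := template.Must(template.New("example").Parse(
		`{{with $x := "output"}}{{$x | printf "%q"}}{{end}}` + "\n"))
	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```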
+
+Functions
+
+During execution functions are found in two function maps: first in the
+template, then in the global function map. By default, no functions are defined
+in the template but the Funcs method can be used to add them.
+
+Predefined global functions are named as follows.
+
+	and
+		Returns the boolean AND of its arguments by returning the
+		first empty argument or the last argument, that is,
+		"and x y" behaves as "if x then y else x". All the
+		arguments are evaluated.
+	call
+		Returns the result of calling the first argument, which
+		must be a function, with the remaining arguments as parameters.
+		Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
+		Y is a func-valued field, map entry, or the like.
+		The first argument must be the result of an evaluation
+		that yields a value of function type (as distinct from
+		a predefined function such as print). The function must
+		return either one or two result values, the second of which
+		is of type error. If the arguments don't match the function
+		or the returned error value is non-nil, execution stops.
+	html
+		Returns the escaped HTML equivalent of the textual
+		representation of its arguments.
+	index
+		Returns the result of indexing its first argument by the
+		following arguments. Thus "index x 1 2 3" is, in Go syntax,
+		x[1][2][3]. Each indexed item must be a map, slice, or array.
+	js
+		Returns the escaped JavaScript equivalent of the textual
+		representation of its arguments.
+	len
+		Returns the integer length of its argument.
+	not
+		Returns the boolean negation of its single argument.
+	or
+		Returns the boolean OR of its arguments by returning the
+		first non-empty argument or the last argument, that is,
+		"or x y" behaves as "if x then x else y". All the
+		arguments are evaluated.
+	print
+		An alias for fmt.Sprint
+	printf
+		An alias for fmt.Sprintf
+	println
+		An alias for fmt.Sprintln
+	urlquery
+		Returns the escaped value of the textual representation of
+		its arguments in a form suitable for embedding in a URL query.
+
+The boolean functions take any zero value to be false and a non-zero
+value to be true.
+
+There is also a set of binary comparison operators defined as
+functions:
+
+	eq
+		Returns the boolean truth of arg1 == arg2
+	ne
+		Returns the boolean truth of arg1 != arg2
+	lt
+		Returns the boolean truth of arg1 < arg2
+	le
+		Returns the boolean truth of arg1 <= arg2
+	gt
+		Returns the boolean truth of arg1 > arg2
+	ge
+		Returns the boolean truth of arg1 >= arg2
+
+For simpler multi-way equality tests, eq (only) accepts two or more
+arguments and compares the second and subsequent to the first,
+returning in effect
+
+	arg1==arg2 || arg1==arg3 || arg1==arg4 ...
+
+(Unlike with || in Go, however, eq is a function call and all the
+arguments will be evaluated.)
+
+The comparison functions work on basic types only (or named basic
+types, such as "type Celsius float32"). They implement the Go rules
+for comparison of values, except that size and exact type are
+ignored, so any integer value, signed or unsigned, may be compared
+with any other integer value. (The arithmetic value is compared,
+not the bit pattern, so all negative integers are less than all
+unsigned integers.) However, as usual, one may not compare an int
+with a float32 and so on.
+
+Associated templates
+
+Each template is named by a string specified when it is created. Also, each
+template is associated with zero or more other templates that it may invoke by
+name; such associations are transitive and form a name space of templates.
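The two function maps described above are populated through the Funcs method; names not found in the template-local map fall back to the global one (printf, eq, and friends). A minimal sketch, again using the standard `text/template` package that this fork mirrors:

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// upper comes from the template-local FuncMap; eq comes from the
	// global map of predefined functions. Prints: GO is go
	funcs := template.FuncMap{"upper": strings.ToUpper}
	tmpl := template.Must(template.New("example").Funcs(funcs).
		Parse(`{{upper .}} is {{if eq . "go"}}go{{else}}not go{{end}}` + "\n"))
	if err := tmpl.Execute(os.Stdout, "go"); err != nil {
		panic(err)
	}
}
```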
+ +A template may use a template invocation to instantiate another associated +template; see the explanation of the "template" action above. The name must be +that of a template associated with the template that contains the invocation. + +Nested template definitions + +When parsing a template, another template may be defined and associated with the +template being parsed. Template definitions must appear at the top level of the +template, much like global variables in a Go program. + +The syntax of such definitions is to surround each template declaration with a +"define" and "end" action. + +The define action names the template being created by providing a string +constant. Here is a simple example: + + `{{define "T1"}}ONE{{end}} + {{define "T2"}}TWO{{end}} + {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}} + {{template "T3"}}` + +This defines two templates, T1 and T2, and a third T3 that invokes the other two +when it is executed. Finally it invokes T3. If executed this template will +produce the text + + ONE TWO + +By construction, a template may reside in only one association. If it's +necessary to have a template addressable from multiple associations, the +template definition must be parsed multiple times to create distinct *Template +values, or must be copied with the Clone or AddParseTree method. + +Parse may be called multiple times to assemble the various associated templates; +see the ParseFiles and ParseGlob functions and methods for simple ways to parse +related templates stored in files. + +A template may be executed directly or through ExecuteTemplate, which executes +an associated template identified by name. To invoke our example above, we +might write, + + err := tmpl.Execute(os.Stdout, "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +or to invoke a particular template explicitly by name, + + err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +*/ +package template diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go new file mode 100644 index 0000000..c3078e5 --- /dev/null +++ b/vendor/github.com/alecthomas/template/exec.go @@ -0,0 +1,845 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "fmt" + "io" + "reflect" + "runtime" + "sort" + "strings" + + "github.com/alecthomas/template/parse" +) + +// state represents the state of an execution. It's not part of the +// template so that multiple executions of the same template +// can execute in parallel. +type state struct { + tmpl *Template + wr io.Writer + node parse.Node // current node, for errors + vars []variable // push-down stack of variable values. +} + +// variable holds the dynamic value of a variable such as $, $x etc. +type variable struct { + name string + value reflect.Value +} + +// push pushes a new variable on the stack. +func (s *state) push(name string, value reflect.Value) { + s.vars = append(s.vars, variable{name, value}) +} + +// mark returns the length of the variable stack. +func (s *state) mark() int { + return len(s.vars) +} + +// pop pops the variable stack up to the mark. +func (s *state) pop(mark int) { + s.vars = s.vars[0:mark] +} + +// setVar overwrites the top-nth variable on the stack. Used by range iterations. 
+func (s *state) setVar(n int, value reflect.Value) { + s.vars[len(s.vars)-n].value = value +} + +// varValue returns the value of the named variable. +func (s *state) varValue(name string) reflect.Value { + for i := s.mark() - 1; i >= 0; i-- { + if s.vars[i].name == name { + return s.vars[i].value + } + } + s.errorf("undefined variable: %s", name) + return zero +} + +var zero reflect.Value + +// at marks the state to be on node n, for error reporting. +func (s *state) at(node parse.Node) { + s.node = node +} + +// doublePercent returns the string with %'s replaced by %%, if necessary, +// so it can be used safely inside a Printf format string. +func doublePercent(str string) string { + if strings.Contains(str, "%") { + str = strings.Replace(str, "%", "%%", -1) + } + return str +} + +// errorf formats the error and terminates processing. +func (s *state) errorf(format string, args ...interface{}) { + name := doublePercent(s.tmpl.Name()) + if s.node == nil { + format = fmt.Sprintf("template: %s: %s", name, format) + } else { + location, context := s.tmpl.ErrorContext(s.node) + format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format) + } + panic(fmt.Errorf(format, args...)) +} + +// errRecover is the handler that turns panics into returns from the top +// level of Parse. +func errRecover(errp *error) { + e := recover() + if e != nil { + switch err := e.(type) { + case runtime.Error: + panic(e) + case error: + *errp = err + default: + panic(e) + } + } +} + +// ExecuteTemplate applies the template associated with t that has the given name +// to the specified data object and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. +func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { + tmpl := t.tmpl[name] + if tmpl == nil { + return fmt.Errorf("template: no template %q associated with template %q", name, t.name) + } + return tmpl.Execute(wr, data) +} + +// Execute applies a parsed template to the specified data object, +// and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. +func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { + defer errRecover(&err) + value := reflect.ValueOf(data) + state := &state{ + tmpl: t, + wr: wr, + vars: []variable{{"$", value}}, + } + t.init() + if t.Tree == nil || t.Root == nil { + var b bytes.Buffer + for name, tmpl := range t.tmpl { + if tmpl.Tree == nil || tmpl.Root == nil { + continue + } + if b.Len() > 0 { + b.WriteString(", ") + } + fmt.Fprintf(&b, "%q", name) + } + var s string + if b.Len() > 0 { + s = "; defined templates are: " + b.String() + } + state.errorf("%q is an incomplete or empty template%s", t.Name(), s) + } + state.walk(value, t.Root) + return +} + +// Walk functions step through the major pieces of the template structure, +// generating output as they go. +func (s *state) walk(dot reflect.Value, node parse.Node) { + s.at(node) + switch node := node.(type) { + case *parse.ActionNode: + // Do not pop variables so they persist until next end. + // Also, if the action declares variables, don't print the result. 
+ val := s.evalPipeline(dot, node.Pipe) + if len(node.Pipe.Decl) == 0 { + s.printValue(node, val) + } + case *parse.IfNode: + s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) + case *parse.ListNode: + for _, node := range node.Nodes { + s.walk(dot, node) + } + case *parse.RangeNode: + s.walkRange(dot, node) + case *parse.TemplateNode: + s.walkTemplate(dot, node) + case *parse.TextNode: + if _, err := s.wr.Write(node.Text); err != nil { + s.errorf("%s", err) + } + case *parse.WithNode: + s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList) + default: + s.errorf("unknown node: %s", node) + } +} + +// walkIfOrWith walks an 'if' or 'with' node. The two control structures +// are identical in behavior except that 'with' sets dot. +func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) { + defer s.pop(s.mark()) + val := s.evalPipeline(dot, pipe) + truth, ok := isTrue(val) + if !ok { + s.errorf("if/with can't use %v", val) + } + if truth { + if typ == parse.NodeWith { + s.walk(val, list) + } else { + s.walk(dot, list) + } + } else if elseList != nil { + s.walk(dot, elseList) + } +} + +// isTrue reports whether the value is 'true', in the sense of not the zero of its type, +// and whether the value has a meaningful truth value. +func isTrue(val reflect.Value) (truth, ok bool) { + if !val.IsValid() { + // Something like var x interface{}, never set. It's a form of nil. + return false, true + } + switch val.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + truth = val.Len() > 0 + case reflect.Bool: + truth = val.Bool() + case reflect.Complex64, reflect.Complex128: + truth = val.Complex() != 0 + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: + truth = !val.IsNil() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + truth = val.Int() != 0 + case reflect.Float32, reflect.Float64: + truth = val.Float() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + truth = val.Uint() != 0 + case reflect.Struct: + truth = true // Struct values are always true. + default: + return + } + return truth, true +} + +func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { + s.at(r) + defer s.pop(s.mark()) + val, _ := indirect(s.evalPipeline(dot, r.Pipe)) + // mark top of stack before any variables in the body are pushed. + mark := s.mark() + oneIteration := func(index, elem reflect.Value) { + // Set top var (lexically the second if there are two) to the element. + if len(r.Pipe.Decl) > 0 { + s.setVar(1, elem) + } + // Set next var (lexically the first if there are two) to the index. + if len(r.Pipe.Decl) > 1 { + s.setVar(2, index) + } + s.walk(elem, r.List) + s.pop(mark) + } + switch val.Kind() { + case reflect.Array, reflect.Slice: + if val.Len() == 0 { + break + } + for i := 0; i < val.Len(); i++ { + oneIteration(reflect.ValueOf(i), val.Index(i)) + } + return + case reflect.Map: + if val.Len() == 0 { + break + } + for _, key := range sortKeys(val.MapKeys()) { + oneIteration(key, val.MapIndex(key)) + } + return + case reflect.Chan: + if val.IsNil() { + break + } + i := 0 + for ; ; i++ { + elem, ok := val.Recv() + if !ok { + break + } + oneIteration(reflect.ValueOf(i), elem) + } + if i == 0 { + break + } + return + case reflect.Invalid: + break // An invalid value is likely a nil map, etc. and acts like an empty map. 
+ default: + s.errorf("range can't iterate over %v", val) + } + if r.ElseList != nil { + s.walk(dot, r.ElseList) + } +} + +func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) { + s.at(t) + tmpl := s.tmpl.tmpl[t.Name] + if tmpl == nil { + s.errorf("template %q not defined", t.Name) + } + // Variables declared by the pipeline persist. + dot = s.evalPipeline(dot, t.Pipe) + newState := *s + newState.tmpl = tmpl + // No dynamic scoping: template invocations inherit no variables. + newState.vars = []variable{{"$", dot}} + newState.walk(dot, tmpl.Root) +} + +// Eval functions evaluate pipelines, commands, and their elements and extract +// values from the data structure by examining fields, calling methods, and so on. +// The printing of those values happens only through walk functions. + +// evalPipeline returns the value acquired by evaluating a pipeline. If the +// pipeline has a variable declaration, the variable will be pushed on the +// stack. Callers should therefore pop the stack after they are finished +// executing commands depending on the pipeline value. +func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) { + if pipe == nil { + return + } + s.at(pipe) + for _, cmd := range pipe.Cmds { + value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg. + // If the object has type interface{}, dig down one level to the thing inside. + if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 { + value = reflect.ValueOf(value.Interface()) // lovely! + } + } + for _, variable := range pipe.Decl { + s.push(variable.Ident[0], value) + } + return value +} + +func (s *state) notAFunction(args []parse.Node, final reflect.Value) { + if len(args) > 1 || final.IsValid() { + s.errorf("can't give argument to non-function %s", args[0]) + } +} + +func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value { + firstWord := cmd.Args[0] + switch n := firstWord.(type) { + case *parse.FieldNode: + return s.evalFieldNode(dot, n, cmd.Args, final) + case *parse.ChainNode: + return s.evalChainNode(dot, n, cmd.Args, final) + case *parse.IdentifierNode: + // Must be a function. + return s.evalFunction(dot, n, cmd, cmd.Args, final) + case *parse.PipeNode: + // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored. + return s.evalPipeline(dot, n) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, cmd.Args, final) + } + s.at(firstWord) + s.notAFunction(cmd.Args, final) + switch word := firstWord.(type) { + case *parse.BoolNode: + return reflect.ValueOf(word.True) + case *parse.DotNode: + return dot + case *parse.NilNode: + s.errorf("nil is not a command") + case *parse.NumberNode: + return s.idealConstant(word) + case *parse.StringNode: + return reflect.ValueOf(word.Text) + } + s.errorf("can't evaluate command %q", firstWord) + panic("not reached") +} + +// idealConstant is called to return the value of a number in a context where +// we don't know the type. In that case, the syntax of the number tells us +// its type, and we use Go rules to resolve. Note there is no such thing as +// a uint ideal constant in this situation - the value must be of int type. +func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { + // These are ideal constants but we don't know the type + // and we have no context. (If it was a method argument, + // we'd know what we need.) The syntax guides us to some extent. 
+ s.at(constant) + switch { + case constant.IsComplex: + return reflect.ValueOf(constant.Complex128) // incontrovertible. + case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: + return reflect.ValueOf(constant.Float64) + case constant.IsInt: + n := int(constant.Int64) + if int64(n) != constant.Int64 { + s.errorf("%s overflows int", constant.Text) + } + return reflect.ValueOf(n) + case constant.IsUint: + s.errorf("%s overflows int", constant.Text) + } + return zero +} + +func isHexConstant(s string) bool { + return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') +} + +func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(field) + return s.evalFieldChain(dot, dot, field, field.Ident, args, final) +} + +func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(chain) + // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields. + pipe := s.evalArg(dot, nil, chain.Node) + if len(chain.Field) == 0 { + s.errorf("internal error: no fields in evalChainNode") + } + return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final) +} + +func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value { + // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields. + s.at(variable) + value := s.varValue(variable.Ident[0]) + if len(variable.Ident) == 1 { + s.notAFunction(args, final) + return value + } + return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final) +} + +// evalFieldChain evaluates .X.Y.Z possibly followed by arguments. +// dot is the environment in which to evaluate arguments, while +// receiver is the value being walked along the chain. +func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value { + n := len(ident) + for i := 0; i < n-1; i++ { + receiver = s.evalField(dot, ident[i], node, nil, zero, receiver) + } + // Now if it's a method, it gets the arguments. + return s.evalField(dot, ident[n-1], node, args, final, receiver) +} + +func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value { + s.at(node) + name := node.Ident + function, ok := findFunction(name, s.tmpl) + if !ok { + s.errorf("%q is not a defined function", name) + } + return s.evalCall(dot, function, cmd, name, args, final) +} + +// evalField evaluates an expression like (.Field) or (.Field arg1 arg2). +// The 'final' argument represents the return value from the preceding +// value of the pipeline, if any. +func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value { + if !receiver.IsValid() { + return zero + } + typ := receiver.Type() + receiver, _ = indirect(receiver) + // Unless it's an interface, need to get to a value of type *T to guarantee + // we see all methods of T and *T. 
+ ptr := receiver + if ptr.Kind() != reflect.Interface && ptr.CanAddr() { + ptr = ptr.Addr() + } + if method := ptr.MethodByName(fieldName); method.IsValid() { + return s.evalCall(dot, method, node, fieldName, args, final) + } + hasArgs := len(args) > 1 || final.IsValid() + // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil. + receiver, isNil := indirect(receiver) + if isNil { + s.errorf("nil pointer evaluating %s.%s", typ, fieldName) + } + switch receiver.Kind() { + case reflect.Struct: + tField, ok := receiver.Type().FieldByName(fieldName) + if ok { + field := receiver.FieldByIndex(tField.Index) + if tField.PkgPath != "" { // field is unexported + s.errorf("%s is an unexported field of struct type %s", fieldName, typ) + } + // If it's a function, we must call it. + if hasArgs { + s.errorf("%s has arguments but cannot be invoked as function", fieldName) + } + return field + } + s.errorf("%s is not a field of struct type %s", fieldName, typ) + case reflect.Map: + // If it's a map, attempt to use the field name as a key. + nameVal := reflect.ValueOf(fieldName) + if nameVal.Type().AssignableTo(receiver.Type().Key()) { + if hasArgs { + s.errorf("%s is not a method but has arguments", fieldName) + } + return receiver.MapIndex(nameVal) + } + } + s.errorf("can't evaluate field %s in type %s", fieldName, typ) + panic("not reached") +} + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() +) + +// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so +// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0] +// as the function itself. +func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value { + if args != nil { + args = args[1:] // Zeroth arg is function name/node; not passed to function. + } + typ := fun.Type() + numIn := len(args) + if final.IsValid() { + numIn++ + } + numFixed := len(args) + if typ.IsVariadic() { + numFixed = typ.NumIn() - 1 // last arg is the variadic one. + if numIn < numFixed { + s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args)) + } + } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() { + s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args)) + } + if !goodFunc(typ) { + // TODO: This could still be a confusing error; maybe goodFunc should provide info. + s.errorf("can't call method/function %q with %d results", name, typ.NumOut()) + } + // Build the arg list. + argv := make([]reflect.Value, numIn) + // Args must be evaluated. Fixed args first. + i := 0 + for ; i < numFixed && i < len(args); i++ { + argv[i] = s.evalArg(dot, typ.In(i), args[i]) + } + // Now the ... args. + if typ.IsVariadic() { + argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice. + for ; i < len(args); i++ { + argv[i] = s.evalArg(dot, argType, args[i]) + } + } + // Add final value if necessary. + if final.IsValid() { + t := typ.In(typ.NumIn() - 1) + if typ.IsVariadic() { + t = t.Elem() + } + argv[i] = s.validateType(final, t) + } + result := fun.Call(argv) + // If we have an error that is not nil, stop execution and return that error to the caller. 
+ if len(result) == 2 && !result[1].IsNil() { + s.at(node) + s.errorf("error calling %s: %s", name, result[1].Interface().(error)) + } + return result[0] +} + +// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. +func canBeNil(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +// validateType guarantees that the value is valid and assignable to the type. +func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value { + if !value.IsValid() { + if typ == nil || canBeNil(typ) { + // An untyped nil interface{}. Accept as a proper nil value. + return reflect.Zero(typ) + } + s.errorf("invalid value; expected %s", typ) + } + if typ != nil && !value.Type().AssignableTo(typ) { + if value.Kind() == reflect.Interface && !value.IsNil() { + value = value.Elem() + if value.Type().AssignableTo(typ) { + return value + } + // fallthrough + } + // Does one dereference or indirection work? We could do more, as we + // do with method receivers, but that gets messy and method receivers + // are much more constrained, so it makes more sense there than here. + // Besides, one is almost always all you need. + switch { + case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ): + value = value.Elem() + if !value.IsValid() { + s.errorf("dereference of nil pointer of type %s", typ) + } + case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr(): + value = value.Addr() + default: + s.errorf("wrong type for value; expected %s; got %s", typ, value.Type()) + } + } + return value +} + +func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + switch arg := n.(type) { + case *parse.DotNode: + return s.validateType(dot, typ) + case *parse.NilNode: + if canBeNil(typ) { + return reflect.Zero(typ) + } + s.errorf("cannot assign nil to %s", typ) + case *parse.FieldNode: + return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ) + case *parse.VariableNode: + return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ) + case *parse.PipeNode: + return s.validateType(s.evalPipeline(dot, arg), typ) + case *parse.IdentifierNode: + return s.evalFunction(dot, arg, arg, nil, zero) + case *parse.ChainNode: + return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) + } + switch typ.Kind() { + case reflect.Bool: + return s.evalBool(typ, n) + case reflect.Complex64, reflect.Complex128: + return s.evalComplex(typ, n) + case reflect.Float32, reflect.Float64: + return s.evalFloat(typ, n) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return s.evalInteger(typ, n) + case reflect.Interface: + if typ.NumMethod() == 0 { + return s.evalEmptyInterface(dot, n) + } + case reflect.String: + return s.evalString(typ, n) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return s.evalUnsignedInteger(typ, n) + } + s.errorf("can't handle %s for arg of type %s", n, typ) + panic("not reached") +} + +func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.BoolNode); ok { + value := reflect.New(typ).Elem() + value.SetBool(n.True) + return value + } + s.errorf("expected bool; found %s", n) + panic("not reached") +} + +func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := 
n.(*parse.StringNode); ok { + value := reflect.New(typ).Elem() + value.SetString(n.Text) + return value + } + s.errorf("expected string; found %s", n) + panic("not reached") +} + +func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsInt { + value := reflect.New(typ).Elem() + value.SetInt(n.Int64) + return value + } + s.errorf("expected integer; found %s", n) + panic("not reached") +} + +func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsUint { + value := reflect.New(typ).Elem() + value.SetUint(n.Uint64) + return value + } + s.errorf("expected unsigned integer; found %s", n) + panic("not reached") +} + +func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsFloat { + value := reflect.New(typ).Elem() + value.SetFloat(n.Float64) + return value + } + s.errorf("expected float; found %s", n) + panic("not reached") +} + +func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value { + if n, ok := n.(*parse.NumberNode); ok && n.IsComplex { + value := reflect.New(typ).Elem() + value.SetComplex(n.Complex128) + return value + } + s.errorf("expected complex; found %s", n) + panic("not reached") +} + +func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value { + s.at(n) + switch n := n.(type) { + case *parse.BoolNode: + return reflect.ValueOf(n.True) + case *parse.DotNode: + return dot + case *parse.FieldNode: + return s.evalFieldNode(dot, n, nil, zero) + case *parse.IdentifierNode: + return s.evalFunction(dot, n, n, nil, zero) + case *parse.NilNode: + // NilNode is handled in evalArg, the only place that calls here. + s.errorf("evalEmptyInterface: nil (can't happen)") + case *parse.NumberNode: + return s.idealConstant(n) + case *parse.StringNode: + return reflect.ValueOf(n.Text) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, nil, zero) + case *parse.PipeNode: + return s.evalPipeline(dot, n) + } + s.errorf("can't handle assignment of %s to empty interface argument", n) + panic("not reached") +} + +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// printValue writes the textual representation of the value to the output of +// the template. +func (s *state) printValue(n parse.Node, v reflect.Value) { + s.at(n) + iface, ok := printableValue(v) + if !ok { + s.errorf("can't print %s of type %s", n, v.Type()) + } + fmt.Fprint(s.wr, iface) +} + +// printableValue returns the, possibly indirected, interface value inside v that +// is best for a call to formatted printer. +func printableValue(v reflect.Value) (interface{}, bool) { + if v.Kind() == reflect.Ptr { + v, _ = indirect(v) // fmt.Fprint handles nil. 
+ } + if !v.IsValid() { + return "", true + } + + if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { + if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { + v = v.Addr() + } else { + switch v.Kind() { + case reflect.Chan, reflect.Func: + return nil, false + } + } + } + return v.Interface(), true +} + +// Types to help sort the keys in a map for reproducible output. + +type rvs []reflect.Value + +func (x rvs) Len() int { return len(x) } +func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +type rvInts struct{ rvs } + +func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() } + +type rvUints struct{ rvs } + +func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() } + +type rvFloats struct{ rvs } + +func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() } + +type rvStrings struct{ rvs } + +func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() } + +// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. +func sortKeys(v []reflect.Value) []reflect.Value { + if len(v) <= 1 { + return v + } + switch v[0].Kind() { + case reflect.Float32, reflect.Float64: + sort.Sort(rvFloats{v}) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + sort.Sort(rvInts{v}) + case reflect.String: + sort.Sort(rvStrings{v}) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + sort.Sort(rvUints{v}) + } + return v +} diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go new file mode 100644 index 0000000..39ee5ed --- /dev/null +++ b/vendor/github.com/alecthomas/template/funcs.go @@ -0,0 +1,598 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/url" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// FuncMap is the type of the map defining the mapping from names to functions. +// Each function must have either a single return value, or two return values of +// which the second has type error. In that case, if the second (error) +// return value evaluates to non-nil during execution, execution terminates and +// Execute returns that error. +type FuncMap map[string]interface{} + +var builtins = FuncMap{ + "and": and, + "call": call, + "html": HTMLEscaper, + "index": index, + "js": JSEscaper, + "len": length, + "not": not, + "or": or, + "print": fmt.Sprint, + "printf": fmt.Sprintf, + "println": fmt.Sprintln, + "urlquery": URLQueryEscaper, + + // Comparisons + "eq": eq, // == + "ge": ge, // >= + "gt": gt, // > + "le": le, // <= + "lt": lt, // < + "ne": ne, // != +} + +var builtinFuncs = createValueFuncs(builtins) + +// createValueFuncs turns a FuncMap into a map[string]reflect.Value +func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { + m := make(map[string]reflect.Value) + addValueFuncs(m, funcMap) + return m +} + +// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. 
+func addValueFuncs(out map[string]reflect.Value, in FuncMap) { + for name, fn := range in { + v := reflect.ValueOf(fn) + if v.Kind() != reflect.Func { + panic("value for " + name + " not a function") + } + if !goodFunc(v.Type()) { + panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) + } + out[name] = v + } +} + +// addFuncs adds to values the functions in funcs. It does no checking of the input - +// call addValueFuncs first. +func addFuncs(out, in FuncMap) { + for name, fn := range in { + out[name] = fn + } +} + +// goodFunc checks that the function or method has the right result signature. +func goodFunc(typ reflect.Type) bool { + // We allow functions with 1 result or 2 results where the second is an error. + switch { + case typ.NumOut() == 1: + return true + case typ.NumOut() == 2 && typ.Out(1) == errorType: + return true + } + return false +} + +// findFunction looks for a function in the template, and global map. +func findFunction(name string, tmpl *Template) (reflect.Value, bool) { + if tmpl != nil && tmpl.common != nil { + if fn := tmpl.execFuncs[name]; fn.IsValid() { + return fn, true + } + } + if fn := builtinFuncs[name]; fn.IsValid() { + return fn, true + } + return reflect.Value{}, false +} + +// Indexing. + +// index returns the result of indexing its first argument by the following +// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each +// indexed item must be a map, slice, or array. +func index(item interface{}, indices ...interface{}) (interface{}, error) { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return nil, fmt.Errorf("index of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) + } + if x < 0 || x >= int64(v.Len()) { + return nil, fmt.Errorf("index out of range: %d", x) + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + return nil, fmt.Errorf("can't index item of type %s", v.Type()) + } + } + return v.Interface(), nil +} + +// Length + +// length returns the length of the item, with an error if it has no defined length. +func length(item interface{}) (int, error) { + v, isNil := indirect(reflect.ValueOf(item)) + if isNil { + return 0, fmt.Errorf("len of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return v.Len(), nil + } + return 0, fmt.Errorf("len of type %s", v.Type()) +} + +// Function invocation + +// call returns the result of evaluating the first argument as a function. +// The function must return 1 result, or 2 results, the second of which is an error. 
+func call(fn interface{}, args ...interface{}) (interface{}, error) { + v := reflect.ValueOf(fn) + typ := v.Type() + if typ.Kind() != reflect.Func { + return nil, fmt.Errorf("non-function of type %s", typ) + } + if !goodFunc(typ) { + return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) + } + numIn := typ.NumIn() + var dddType reflect.Type + if typ.IsVariadic() { + if len(args) < numIn-1 { + return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) + } + dddType = typ.In(numIn - 1).Elem() + } else { + if len(args) != numIn { + return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) + } + } + argv := make([]reflect.Value, len(args)) + for i, arg := range args { + value := reflect.ValueOf(arg) + // Compute the expected type. Clumsy because of variadics. + var argType reflect.Type + if !typ.IsVariadic() || i < numIn-1 { + argType = typ.In(i) + } else { + argType = dddType + } + if !value.IsValid() && canBeNil(argType) { + value = reflect.Zero(argType) + } + if !value.Type().AssignableTo(argType) { + return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) + } + argv[i] = value + } + result := v.Call(argv) + if len(result) == 2 && !result[1].IsNil() { + return result[0].Interface(), result[1].Interface().(error) + } + return result[0].Interface(), nil +} + +// Boolean logic. + +func truth(a interface{}) bool { + t, _ := isTrue(reflect.ValueOf(a)) + return t +} + +// and computes the Boolean AND of its arguments, returning +// the first false argument it encounters, or the last argument. +func and(arg0 interface{}, args ...interface{}) interface{} { + if !truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if !truth(arg0) { + break + } + } + return arg0 +} + +// or computes the Boolean OR of its arguments, returning +// the first true argument it encounters, or the last argument. +func or(arg0 interface{}, args ...interface{}) interface{} { + if truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if truth(arg0) { + break + } + } + return arg0 +} + +// not returns the Boolean negation of its argument. +func not(arg interface{}) (truth bool) { + truth, _ = isTrue(reflect.ValueOf(arg)) + return !truth +} + +// Comparison. + +// TODO: Perhaps allow comparison between signed and unsigned integers. + +var ( + errBadComparisonType = errors.New("invalid type for comparison") + errBadComparison = errors.New("incompatible types for comparison") + errNoComparison = errors.New("missing argument for comparison") +) + +type kind int + +const ( + invalidKind kind = iota + boolKind + complexKind + intKind + floatKind + integerKind + stringKind + uintKind +) + +func basicKind(v reflect.Value) (kind, error) { + switch v.Kind() { + case reflect.Bool: + return boolKind, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intKind, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintKind, nil + case reflect.Float32, reflect.Float64: + return floatKind, nil + case reflect.Complex64, reflect.Complex128: + return complexKind, nil + case reflect.String: + return stringKind, nil + } + return invalidKind, errBadComparisonType +} + +// eq evaluates the comparison a == b || a == c || ... 
+func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
+	v1 := reflect.ValueOf(arg1)
+	k1, err := basicKind(v1)
+	if err != nil {
+		return false, err
+	}
+	if len(arg2) == 0 {
+		return false, errNoComparison
+	}
+	for _, arg := range arg2 {
+		v2 := reflect.ValueOf(arg)
+		k2, err := basicKind(v2)
+		if err != nil {
+			return false, err
+		}
+		truth := false
+		if k1 != k2 {
+			// Special case: Can compare integer values regardless of type's sign.
+			switch {
+			case k1 == intKind && k2 == uintKind:
+				truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
+			case k1 == uintKind && k2 == intKind:
+				truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
+			default:
+				return false, errBadComparison
+			}
+		} else {
+			switch k1 {
+			case boolKind:
+				truth = v1.Bool() == v2.Bool()
+			case complexKind:
+				truth = v1.Complex() == v2.Complex()
+			case floatKind:
+				truth = v1.Float() == v2.Float()
+			case intKind:
+				truth = v1.Int() == v2.Int()
+			case stringKind:
+				truth = v1.String() == v2.String()
+			case uintKind:
+				truth = v1.Uint() == v2.Uint()
+			default:
+				panic("invalid kind")
+			}
+		}
+		if truth {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// ne evaluates the comparison a != b.
+func ne(arg1, arg2 interface{}) (bool, error) {
+	// != is the inverse of ==.
+	equal, err := eq(arg1, arg2)
+	return !equal, err
+}
+
+// lt evaluates the comparison a < b.
+func lt(arg1, arg2 interface{}) (bool, error) {
+	v1 := reflect.ValueOf(arg1)
+	k1, err := basicKind(v1)
+	if err != nil {
+		return false, err
+	}
+	v2 := reflect.ValueOf(arg2)
+	k2, err := basicKind(v2)
+	if err != nil {
+		return false, err
+	}
+	truth := false
+	if k1 != k2 {
+		// Special case: Can compare integer values regardless of type's sign.
+		switch {
+		case k1 == intKind && k2 == uintKind:
+			truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
+		case k1 == uintKind && k2 == intKind:
+			truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
+		default:
+			return false, errBadComparison
+		}
+	} else {
+		switch k1 {
+		case boolKind, complexKind:
+			return false, errBadComparisonType
+		case floatKind:
+			truth = v1.Float() < v2.Float()
+		case intKind:
+			truth = v1.Int() < v2.Int()
+		case stringKind:
+			truth = v1.String() < v2.String()
+		case uintKind:
+			truth = v1.Uint() < v2.Uint()
+		default:
+			panic("invalid kind")
+		}
+	}
+	return truth, nil
+}
+
+// le evaluates the comparison a <= b.
+func le(arg1, arg2 interface{}) (bool, error) {
+	// <= is < or ==.
+	lessThan, err := lt(arg1, arg2)
+	if lessThan || err != nil {
+		return lessThan, err
+	}
+	return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 interface{}) (bool, error) {
+	// > is the inverse of <=.
+	lessOrEqual, err := le(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 interface{}) (bool, error) {
+	// >= is the inverse of <.
+	lessThan, err := lt(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessThan, nil
+}
+
+// HTML escaping.
+
+var (
+	htmlQuot = []byte("&#34;") // shorter than "&quot;"
+	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+	htmlAmp  = []byte("&amp;")
+	htmlLt   = []byte("&lt;")
+	htmlGt   = []byte("&gt;")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
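+// A minimal usage sketch (editorial addition, not part of the original
+// source):
+//
+//	var buf bytes.Buffer
+//	HTMLEscape(&buf, []byte(`<a href="x">`))
+//	// buf.String() == "&lt;a href=&#34;x&#34;&gt;"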
+func HTMLEscape(w io.Writer, b []byte) {
+	last := 0
+	for i, c := range b {
+		var html []byte
+		switch c {
+		case '"':
+			html = htmlQuot
+		case '\'':
+			html = htmlApos
+		case '&':
+			html = htmlAmp
+		case '<':
+			html = htmlLt
+		case '>':
+			html = htmlGt
+		default:
+			continue
+		}
+		w.Write(b[last:i])
+		w.Write(html)
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexAny(s, `'"&<>`) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	HTMLEscape(&b, []byte(s))
+	return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+	return HTMLEscapeString(evalArgs(args))
+}
+
+// JavaScript escaping.
+
+var (
+	jsLowUni = []byte(`\u00`)
+	hex      = []byte("0123456789ABCDEF")
+
+	jsBackslash = []byte(`\\`)
+	jsApos      = []byte(`\'`)
+	jsQuot      = []byte(`\"`)
+	jsLt        = []byte(`\x3C`)
+	jsGt        = []byte(`\x3E`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+	last := 0
+	for i := 0; i < len(b); i++ {
+		c := b[i]
+
+		if !jsIsSpecial(rune(c)) {
+			// fast path: nothing to do
+			continue
+		}
+		w.Write(b[last:i])
+
+		if c < utf8.RuneSelf {
+			// Quotes, slashes and angle brackets get quoted.
+			// Control characters get written as \u00XX.
+			switch c {
+			case '\\':
+				w.Write(jsBackslash)
+			case '\'':
+				w.Write(jsApos)
+			case '"':
+				w.Write(jsQuot)
+			case '<':
+				w.Write(jsLt)
+			case '>':
+				w.Write(jsGt)
+			default:
+				w.Write(jsLowUni)
+				t, b := c>>4, c&0x0f
+				w.Write(hex[t : t+1])
+				w.Write(hex[b : b+1])
+			}
+		} else {
+			// Unicode rune.
+			r, size := utf8.DecodeRune(b[i:])
+			if unicode.IsPrint(r) {
+				w.Write(b[i : i+size])
+			} else {
+				fmt.Fprintf(w, "\\u%04X", r)
+			}
+			i += size - 1
+		}
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexFunc(s, jsIsSpecial) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	JSEscape(&b, []byte(s))
+	return b.String()
+}
+
+func jsIsSpecial(r rune) bool {
+	switch r {
+	case '\\', '\'', '"', '<', '>':
+		return true
+	}
+	return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+	return JSEscapeString(evalArgs(args))
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+	return url.QueryEscape(evalArgs(args))
+}
+
+// evalArgs formats the list of arguments into a string. It is therefore equivalent to
+//	fmt.Sprint(args...)
+// except that each argument is indirected (if a pointer), as required,
+// using the same rules as the default string evaluation during template
+// execution.
+func evalArgs(args []interface{}) string {
+	ok := false
+	var s string
+	// Fast path for simple common case.
+	if len(args) == 1 {
+		s, ok = args[0].(string)
+	}
+	if !ok {
+		for i, arg := range args {
+			a, ok := printableValue(reflect.ValueOf(arg))
+			if ok {
+				args[i] = a
+			} // else let fmt do its thing
+		}
+		s = fmt.Sprint(args...)
+ } + return s +} diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go new file mode 100644 index 0000000..3636fb5 --- /dev/null +++ b/vendor/github.com/alecthomas/template/helper.go @@ -0,0 +1,108 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Helper functions to make constructing templates easier. + +package template + +import ( + "fmt" + "io/ioutil" + "path/filepath" +) + +// Functions and methods to parse templates. + +// Must is a helper that wraps a call to a function returning (*Template, error) +// and panics if the error is non-nil. It is intended for use in variable +// initializations such as +// var t = template.Must(template.New("name").Parse("text")) +func Must(t *Template, err error) *Template { + if err != nil { + panic(err) + } + return t +} + +// ParseFiles creates a new Template and parses the template definitions from +// the named files. The returned template's name will have the (base) name and +// (parsed) contents of the first file. There must be at least one file. +// If an error occurs, parsing stops and the returned *Template is nil. +func ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(nil, filenames...) +} + +// ParseFiles parses the named files and associates the resulting templates with +// t. If an error occurs, parsing stops and the returned template is nil; +// otherwise it is t. There must be at least one file. +func (t *Template) ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(t, filenames...) +} + +// parseFiles is the helper for the method and function. If the argument +// template is nil, it is created from the first file. +func parseFiles(t *Template, filenames ...string) (*Template, error) { + if len(filenames) == 0 { + // Not really a problem, but be consistent. + return nil, fmt.Errorf("template: no files named in call to ParseFiles") + } + for _, filename := range filenames { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + s := string(b) + name := filepath.Base(filename) + // First template becomes return value if not already defined, + // and we use that one for subsequent New calls to associate + // all the templates together. Also, if this file has the same name + // as t, this file becomes the contents of t, so + // t, err := New(name).Funcs(xxx).ParseFiles(name) + // works. Otherwise we create a new template associated with t. + var tmpl *Template + if t == nil { + t = New(name) + } + if name == t.Name() { + tmpl = t + } else { + tmpl = t.New(name) + } + _, err = tmpl.Parse(s) + if err != nil { + return nil, err + } + } + return t, nil +} + +// ParseGlob creates a new Template and parses the template definitions from the +// files identified by the pattern, which must match at least one file. The +// returned template will have the (base) name and (parsed) contents of the +// first file matched by the pattern. ParseGlob is equivalent to calling +// ParseFiles with the list of files matched by the pattern. +func ParseGlob(pattern string) (*Template, error) { + return parseGlob(nil, pattern) +} + +// ParseGlob parses the template definitions in the files identified by the +// pattern and associates the resulting templates with t. The pattern is +// processed by filepath.Glob and must match at least one file. 
+// ParseGlob is equivalent to calling t.ParseFiles with the list of files
+// matched by the pattern.
+func (t *Template) ParseGlob(pattern string) (*Template, error) {
+	return parseGlob(t, pattern)
+}
+
+// parseGlob is the implementation of the function and method ParseGlob.
+func parseGlob(t *Template, pattern string) (*Template, error) {
+	filenames, err := filepath.Glob(pattern)
+	if err != nil {
+		return nil, err
+	}
+	if len(filenames) == 0 {
+		return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
+	}
+	return parseFiles(t, filenames...)
+}
diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go
new file mode 100644
index 0000000..55f1c05
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/parse/lex.go
@@ -0,0 +1,556 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+	typ itemType // The type of this item.
+	pos Pos      // The starting position, in bytes, of this item in the input string.
+	val string   // The value of this item.
+}
+
+func (i item) String() string {
+	switch {
+	case i.typ == itemEOF:
+		return "EOF"
+	case i.typ == itemError:
+		return i.val
+	case i.typ > itemKeyword:
+		return fmt.Sprintf("<%s>", i.val)
+	case len(i.val) > 10:
+		return fmt.Sprintf("%.10q...", i.val)
+	}
+	return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+	itemError        itemType = iota // error occurred; value is text of error
+	itemBool                         // boolean constant
+	itemChar                         // printable ASCII character; grab bag for comma etc.
+	itemCharConstant                 // character constant
+	itemComplex                      // complex constant (1+2i); imaginary is just a number
+	itemColonEquals                  // colon-equals (':=') introducing a declaration
+	itemEOF
+	itemField        // alphanumeric identifier starting with '.'
+	itemIdentifier   // alphanumeric identifier not starting with '.'
+	itemLeftDelim    // left action delimiter
+	itemLeftParen    // '(' inside action
+	itemNumber       // simple number, including imaginary
+	itemPipe         // pipe symbol
+	itemRawString    // raw quoted string (includes quotes)
+	itemRightDelim   // right action delimiter
+	itemElideNewline // elide newline after right delim
+	itemRightParen   // ')' inside action
+	itemSpace        // run of spaces separating arguments
+	itemString       // quoted string (includes quotes)
+	itemText         // plain text
+	itemVariable     // variable starting with '$', such as '$' or '$1' or '$hello'
+	// Keywords appear after all the rest.
+	itemKeyword  // used only to delimit the keywords
+	itemDot      // the cursor, spelled '.'
+	itemDefine   // define keyword
+	itemElse     // else keyword
+	itemEnd      // end keyword
+	itemIf       // if keyword
+	itemNil      // the untyped nil constant, easiest to treat as a keyword
+	itemRange    // range keyword
+	itemTemplate // template keyword
+	itemWith     // with keyword
+)
+
+var key = map[string]itemType{
+	".":        itemDot,
+	"define":   itemDefine,
+	"else":     itemElse,
+	"end":      itemEnd,
+	"if":       itemIf,
+	"range":    itemRange,
+	"nil":      itemNil,
+	"template": itemTemplate,
+	"with":     itemWith,
+}
+
+const eof = -1
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
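+// (Editorial sketch, not part of the original source: a lexer is driven via
+// lex and nextItem, e.g.
+//
+//	l := lex("example", "hi {{.Name}}", "", "")
+//	for it := l.nextItem(); it.typ != itemEOF; it = l.nextItem() {
+//		fmt.Println(it)
+//	}
+//
+// where items arrive on the items channel from the goroutine started in lex.)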
+type lexer struct { + name string // the name of the input; used only for error reports + input string // the string being scanned + leftDelim string // start of action + rightDelim string // end of action + state stateFn // the next lexing function to enter + pos Pos // current position in the input + start Pos // start position of this item + width Pos // width of last rune read from input + lastPos Pos // position of most recent item returned by nextItem + items chan item // channel of scanned items + parenDepth int // nesting depth of ( ) exprs +} + +// next returns the next rune in the input. +func (l *lexer) next() rune { + if int(l.pos) >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = Pos(w) + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t itemType) { + l.items <- item{t, l.start, l.input[l.start:l.pos]} + l.start = l.pos +} + +// ignore skips over the pending input before this point. +func (l *lexer) ignore() { + l.start = l.pos +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.IndexRune(valid, l.next()) >= 0 { + } + l.backup() +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. Doing it this way +// means we don't have to worry about peek double counting. +func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + item := <-l.items + l.lastPos = item.pos + return item +} + +// lex creates a new scanner for the input string. +func lex(name, input, left, right string) *lexer { + if left == "" { + left = leftDelim + } + if right == "" { + right = rightDelim + } + l := &lexer{ + name: name, + input: input, + leftDelim: left, + rightDelim: right, + items: make(chan item), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexText; l.state != nil; { + l.state = l.state(l) + } +} + +// state functions + +const ( + leftDelim = "{{" + rightDelim = "}}" + leftComment = "/*" + rightComment = "*/" +) + +// lexText scans until an opening action delimiter, "{{". +func lexText(l *lexer) stateFn { + for { + if strings.HasPrefix(l.input[l.pos:], l.leftDelim) { + if l.pos > l.start { + l.emit(itemText) + } + return lexLeftDelim + } + if l.next() == eof { + break + } + } + // Correctly reached EOF. + if l.pos > l.start { + l.emit(itemText) + } + l.emit(itemEOF) + return nil +} + +// lexLeftDelim scans the left delimiter, which is known to be present. 
+func lexLeftDelim(l *lexer) stateFn { + l.pos += Pos(len(l.leftDelim)) + if strings.HasPrefix(l.input[l.pos:], leftComment) { + return lexComment + } + l.emit(itemLeftDelim) + l.parenDepth = 0 + return lexInsideAction +} + +// lexComment scans a comment. The left comment marker is known to be present. +func lexComment(l *lexer) stateFn { + l.pos += Pos(len(leftComment)) + i := strings.Index(l.input[l.pos:], rightComment) + if i < 0 { + return l.errorf("unclosed comment") + } + l.pos += Pos(i + len(rightComment)) + if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) { + return l.errorf("comment ends before closing delimiter") + + } + l.pos += Pos(len(l.rightDelim)) + l.ignore() + return lexText +} + +// lexRightDelim scans the right delimiter, which is known to be present. +func lexRightDelim(l *lexer) stateFn { + l.pos += Pos(len(l.rightDelim)) + l.emit(itemRightDelim) + if l.peek() == '\\' { + l.pos++ + l.emit(itemElideNewline) + } + return lexText +} + +// lexInsideAction scans the elements inside action delimiters. +func lexInsideAction(l *lexer) stateFn { + // Either number, quoted string, or identifier. + // Spaces separate arguments; runs of spaces turn into itemSpace. + // Pipe symbols separate and are emitted. + if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) { + if l.parenDepth == 0 { + return lexRightDelim + } + return l.errorf("unclosed left paren") + } + switch r := l.next(); { + case r == eof || isEndOfLine(r): + return l.errorf("unclosed action") + case isSpace(r): + return lexSpace + case r == ':': + if l.next() != '=' { + return l.errorf("expected :=") + } + l.emit(itemColonEquals) + case r == '|': + l.emit(itemPipe) + case r == '"': + return lexQuote + case r == '`': + return lexRawQuote + case r == '$': + return lexVariable + case r == '\'': + return lexChar + case r == '.': + // special look-ahead for ".field" so we don't break l.backup(). + if l.pos < Pos(len(l.input)) { + r := l.input[l.pos] + if r < '0' || '9' < r { + return lexField + } + } + fallthrough // '.' can start a number. + case r == '+' || r == '-' || ('0' <= r && r <= '9'): + l.backup() + return lexNumber + case isAlphaNumeric(r): + l.backup() + return lexIdentifier + case r == '(': + l.emit(itemLeftParen) + l.parenDepth++ + return lexInsideAction + case r == ')': + l.emit(itemRightParen) + l.parenDepth-- + if l.parenDepth < 0 { + return l.errorf("unexpected right paren %#U", r) + } + return lexInsideAction + case r <= unicode.MaxASCII && unicode.IsPrint(r): + l.emit(itemChar) + return lexInsideAction + default: + return l.errorf("unrecognized character in action: %#U", r) + } + return lexInsideAction +} + +// lexSpace scans a run of space characters. +// One space has already been seen. +func lexSpace(l *lexer) stateFn { + for isSpace(l.peek()) { + l.next() + } + l.emit(itemSpace) + return lexInsideAction +} + +// lexIdentifier scans an alphanumeric. +func lexIdentifier(l *lexer) stateFn { +Loop: + for { + switch r := l.next(); { + case isAlphaNumeric(r): + // absorb. + default: + l.backup() + word := l.input[l.start:l.pos] + if !l.atTerminator() { + return l.errorf("bad character %#U", r) + } + switch { + case key[word] > itemKeyword: + l.emit(key[word]) + case word[0] == '.': + l.emit(itemField) + case word == "true", word == "false": + l.emit(itemBool) + default: + l.emit(itemIdentifier) + } + break Loop + } + } + return lexInsideAction +} + +// lexField scans a field: .Alphanumeric. +// The . has been scanned. 
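+// (Editorial example: lexing "{{.Name}}" emits an itemField whose value is
+// ".Name", leading dot included.)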
+func lexField(l *lexer) stateFn {
+	return lexFieldOrVariable(l, itemField)
+}
+
+// lexVariable scans a Variable: $Alphanumeric.
+// The $ has been scanned.
+func lexVariable(l *lexer) stateFn {
+	if l.atTerminator() { // Nothing interesting follows -> "$".
+		l.emit(itemVariable)
+		return lexInsideAction
+	}
+	return lexFieldOrVariable(l, itemVariable)
+}
+
+// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
+// The . or $ has been scanned.
+func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
+	if l.atTerminator() { // Nothing interesting follows -> "." or "$".
+		if typ == itemVariable {
+			l.emit(itemVariable)
+		} else {
+			l.emit(itemDot)
+		}
+		return lexInsideAction
+	}
+	var r rune
+	for {
+		r = l.next()
+		if !isAlphaNumeric(r) {
+			l.backup()
+			break
+		}
+	}
+	if !l.atTerminator() {
+		return l.errorf("bad character %#U", r)
+	}
+	l.emit(typ)
+	return lexInsideAction
+}
+
+// atTerminator reports whether the input is at a valid termination character
+// to appear after an identifier. Breaks .X.Y into two pieces. Also catches
+// cases like "$x+2" not being acceptable without a space, in case we decide
+// one day to implement arithmetic.
+func (l *lexer) atTerminator() bool {
+	r := l.peek()
+	if isSpace(r) || isEndOfLine(r) {
+		return true
+	}
+	switch r {
+	case eof, '.', ',', '|', ':', ')', '(':
+		return true
+	}
+	// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
+	// succeed but should fail) but only in extremely rare cases caused by willfully
+	// bad choice of delimiter.
+	if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
+		return true
+	}
+	return false
+}
+
+// lexChar scans a character constant. The initial quote is already
+// scanned. Syntax checking is done by the parser.
+func lexChar(l *lexer) stateFn {
+Loop:
+	for {
+		switch l.next() {
+		case '\\':
+			if r := l.next(); r != eof && r != '\n' {
+				break
+			}
+			fallthrough
+		case eof, '\n':
+			return l.errorf("unterminated character constant")
+		case '\'':
+			break Loop
+		}
+	}
+	l.emit(itemCharConstant)
+	return lexInsideAction
+}
+
+// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
+// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
+// and "089" - but when it's wrong the input is invalid and the parser (via
+// strconv) will notice.
+func lexNumber(l *lexer) stateFn {
+	if !l.scanNumber() {
+		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+	}
+	if sign := l.peek(); sign == '+' || sign == '-' {
+		// Complex: 1+2i. No spaces, must end in 'i'.
+		if !l.scanNumber() || l.input[l.pos-1] != 'i' {
+			return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+		}
+		l.emit(itemComplex)
+	} else {
+		l.emit(itemNumber)
+	}
+	return lexInsideAction
+}
+
+func (l *lexer) scanNumber() bool {
+	// Optional leading sign.
+	l.accept("+-")
+	// Is it hex?
+	digits := "0123456789"
+	if l.accept("0") && l.accept("xX") {
+		digits = "0123456789abcdefABCDEF"
+	}
+	l.acceptRun(digits)
+	if l.accept(".") {
+		l.acceptRun(digits)
+	}
+	if l.accept("eE") {
+		l.accept("+-")
+		l.acceptRun("0123456789")
+	}
+	// Is it imaginary?
+	l.accept("i")
+	// Next thing mustn't be alphanumeric.
+	if isAlphaNumeric(l.peek()) {
+		l.next()
+		return false
+	}
+	return true
+}
+
+// lexQuote scans a quoted string.
+func lexQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != eof && r != '\n' { + break + } + fallthrough + case eof, '\n': + return l.errorf("unterminated quoted string") + case '"': + break Loop + } + } + l.emit(itemString) + return lexInsideAction +} + +// lexRawQuote scans a raw quoted string. +func lexRawQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case eof, '\n': + return l.errorf("unterminated raw quoted string") + case '`': + break Loop + } + } + l.emit(itemRawString) + return lexInsideAction +} + +// isSpace reports whether r is a space character. +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +// isEndOfLine reports whether r is an end-of-line character. +func isEndOfLine(r rune) bool { + return r == '\r' || r == '\n' +} + +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. +func isAlphaNumeric(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go new file mode 100644 index 0000000..55c37f6 --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/node.go @@ -0,0 +1,834 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Parse nodes. + +package parse + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +var textFormat = "%s" // Changed to "%q" in tests for better error messages. + +// A Node is an element in the parse tree. The interface is trivial. +// The interface contains an unexported method so that only +// types local to this package can satisfy it. +type Node interface { + Type() NodeType + String() string + // Copy does a deep copy of the Node and all its components. + // To avoid type assertions, some XxxNodes also have specialized + // CopyXxx methods that return *XxxNode. + Copy() Node + Position() Pos // byte position of start of node in full original input string + // tree returns the containing *Tree. + // It is unexported so all implementations of Node are in this package. + tree() *Tree +} + +// NodeType identifies the type of a parse tree node. +type NodeType int + +// Pos represents a byte position in the original input text from which +// this template was parsed. +type Pos int + +func (p Pos) Position() Pos { + return p +} + +// Type returns itself and provides an easy default implementation +// for embedding in a Node. Embedded in all non-trivial Nodes. +func (t NodeType) Type() NodeType { + return t +} + +const ( + NodeText NodeType = iota // Plain text. + NodeAction // A non-control action such as a field evaluation. + NodeBool // A boolean constant. + NodeChain // A sequence of field accesses. + NodeCommand // An element of a pipeline. + NodeDot // The cursor, dot. + nodeElse // An else action. Not added to tree. + nodeEnd // An end action. Not added to tree. + NodeField // A field or method name. + NodeIdentifier // An identifier; always a function name. + NodeIf // An if action. + NodeList // A list of Nodes. + NodeNil // An untyped nil constant. + NodeNumber // A numerical constant. + NodePipe // A pipeline of commands. + NodeRange // A range action. + NodeString // A string constant. + NodeTemplate // A template invocation action. + NodeVariable // A $ variable. + NodeWith // A with action. +) + +// Nodes. + +// ListNode holds a sequence of nodes. 
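+// (Editorial sketch: the Root of a parsed Tree is a *ListNode, so the top
+// level of a template can be walked as
+//
+//	for _, n := range tree.Root.Nodes {
+//		fmt.Println(n.Type(), n)
+//	}
+//
+// assuming tree is a parsed *Tree.)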
+type ListNode struct { + NodeType + Pos + tr *Tree + Nodes []Node // The element nodes in lexical order. +} + +func (t *Tree) newList(pos Pos) *ListNode { + return &ListNode{tr: t, NodeType: NodeList, Pos: pos} +} + +func (l *ListNode) append(n Node) { + l.Nodes = append(l.Nodes, n) +} + +func (l *ListNode) tree() *Tree { + return l.tr +} + +func (l *ListNode) String() string { + b := new(bytes.Buffer) + for _, n := range l.Nodes { + fmt.Fprint(b, n) + } + return b.String() +} + +func (l *ListNode) CopyList() *ListNode { + if l == nil { + return l + } + n := l.tr.newList(l.Pos) + for _, elem := range l.Nodes { + n.append(elem.Copy()) + } + return n +} + +func (l *ListNode) Copy() Node { + return l.CopyList() +} + +// TextNode holds plain text. +type TextNode struct { + NodeType + Pos + tr *Tree + Text []byte // The text; may span newlines. +} + +func (t *Tree) newText(pos Pos, text string) *TextNode { + return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} +} + +func (t *TextNode) String() string { + return fmt.Sprintf(textFormat, t.Text) +} + +func (t *TextNode) tree() *Tree { + return t.tr +} + +func (t *TextNode) Copy() Node { + return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} +} + +// PipeNode holds a pipeline with optional declaration +type PipeNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Decl []*VariableNode // Variable declarations in lexical order. + Cmds []*CommandNode // The commands in lexical order. +} + +func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { + return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} +} + +func (p *PipeNode) append(command *CommandNode) { + p.Cmds = append(p.Cmds, command) +} + +func (p *PipeNode) String() string { + s := "" + if len(p.Decl) > 0 { + for i, v := range p.Decl { + if i > 0 { + s += ", " + } + s += v.String() + } + s += " := " + } + for i, c := range p.Cmds { + if i > 0 { + s += " | " + } + s += c.String() + } + return s +} + +func (p *PipeNode) tree() *Tree { + return p.tr +} + +func (p *PipeNode) CopyPipe() *PipeNode { + if p == nil { + return p + } + var decl []*VariableNode + for _, d := range p.Decl { + decl = append(decl, d.Copy().(*VariableNode)) + } + n := p.tr.newPipeline(p.Pos, p.Line, decl) + for _, c := range p.Cmds { + n.append(c.Copy().(*CommandNode)) + } + return n +} + +func (p *PipeNode) Copy() Node { + return p.CopyPipe() +} + +// ActionNode holds an action (something bounded by delimiters). +// Control actions have their own nodes; ActionNode represents simple +// ones such as field evaluations and parenthesized pipelines. +type ActionNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline in the action. +} + +func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { + return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} +} + +func (a *ActionNode) String() string { + return fmt.Sprintf("{{%s}}", a.Pipe) + +} + +func (a *ActionNode) tree() *Tree { + return a.tr +} + +func (a *ActionNode) Copy() Node { + return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) + +} + +// CommandNode holds a command (a pipeline inside an evaluating action). +type CommandNode struct { + NodeType + Pos + tr *Tree + Args []Node // Arguments in lexical order: Identifier, field, or constant. 
+} + +func (t *Tree) newCommand(pos Pos) *CommandNode { + return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} +} + +func (c *CommandNode) append(arg Node) { + c.Args = append(c.Args, arg) +} + +func (c *CommandNode) String() string { + s := "" + for i, arg := range c.Args { + if i > 0 { + s += " " + } + if arg, ok := arg.(*PipeNode); ok { + s += "(" + arg.String() + ")" + continue + } + s += arg.String() + } + return s +} + +func (c *CommandNode) tree() *Tree { + return c.tr +} + +func (c *CommandNode) Copy() Node { + if c == nil { + return c + } + n := c.tr.newCommand(c.Pos) + for _, c := range c.Args { + n.append(c.Copy()) + } + return n +} + +// IdentifierNode holds an identifier. +type IdentifierNode struct { + NodeType + Pos + tr *Tree + Ident string // The identifier's name. +} + +// NewIdentifier returns a new IdentifierNode with the given identifier name. +func NewIdentifier(ident string) *IdentifierNode { + return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} +} + +// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { + i.Pos = pos + return i +} + +// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { + i.tr = t + return i +} + +func (i *IdentifierNode) String() string { + return i.Ident +} + +func (i *IdentifierNode) tree() *Tree { + return i.tr +} + +func (i *IdentifierNode) Copy() Node { + return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) +} + +// VariableNode holds a list of variable names, possibly with chained field +// accesses. The dollar sign is part of the (first) name. +type VariableNode struct { + NodeType + Pos + tr *Tree + Ident []string // Variable name and fields in lexical order. +} + +func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { + return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} +} + +func (v *VariableNode) String() string { + s := "" + for i, id := range v.Ident { + if i > 0 { + s += "." + } + s += id + } + return s +} + +func (v *VariableNode) tree() *Tree { + return v.tr +} + +func (v *VariableNode) Copy() Node { + return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} +} + +// DotNode holds the special identifier '.'. +type DotNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newDot(pos Pos) *DotNode { + return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} +} + +func (d *DotNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. + return NodeDot +} + +func (d *DotNode) String() string { + return "." +} + +func (d *DotNode) tree() *Tree { + return d.tr +} + +func (d *DotNode) Copy() Node { + return d.tr.newDot(d.Pos) +} + +// NilNode holds the special identifier 'nil' representing an untyped nil constant. +type NilNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newNil(pos Pos) *NilNode { + return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} +} + +func (n *NilNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. 
+ return NodeNil +} + +func (n *NilNode) String() string { + return "nil" +} + +func (n *NilNode) tree() *Tree { + return n.tr +} + +func (n *NilNode) Copy() Node { + return n.tr.newNil(n.Pos) +} + +// FieldNode holds a field (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The period is dropped from each ident. +type FieldNode struct { + NodeType + Pos + tr *Tree + Ident []string // The identifiers in lexical order. +} + +func (t *Tree) newField(pos Pos, ident string) *FieldNode { + return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period +} + +func (f *FieldNode) String() string { + s := "" + for _, id := range f.Ident { + s += "." + id + } + return s +} + +func (f *FieldNode) tree() *Tree { + return f.tr +} + +func (f *FieldNode) Copy() Node { + return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} +} + +// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The periods are dropped from each ident. +type ChainNode struct { + NodeType + Pos + tr *Tree + Node Node + Field []string // The identifiers in lexical order. +} + +func (t *Tree) newChain(pos Pos, node Node) *ChainNode { + return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} +} + +// Add adds the named field (which should start with a period) to the end of the chain. +func (c *ChainNode) Add(field string) { + if len(field) == 0 || field[0] != '.' { + panic("no dot in field") + } + field = field[1:] // Remove leading dot. + if field == "" { + panic("empty field") + } + c.Field = append(c.Field, field) +} + +func (c *ChainNode) String() string { + s := c.Node.String() + if _, ok := c.Node.(*PipeNode); ok { + s = "(" + s + ")" + } + for _, field := range c.Field { + s += "." + field + } + return s +} + +func (c *ChainNode) tree() *Tree { + return c.tr +} + +func (c *ChainNode) Copy() Node { + return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} +} + +// BoolNode holds a boolean constant. +type BoolNode struct { + NodeType + Pos + tr *Tree + True bool // The value of the boolean constant. +} + +func (t *Tree) newBool(pos Pos, true bool) *BoolNode { + return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} +} + +func (b *BoolNode) String() string { + if b.True { + return "true" + } + return "false" +} + +func (b *BoolNode) tree() *Tree { + return b.tr +} + +func (b *BoolNode) Copy() Node { + return b.tr.newBool(b.Pos, b.True) +} + +// NumberNode holds a number: signed or unsigned integer, float, or complex. +// The value is parsed and stored under all the types that can represent the value. +// This simulates in a small amount of code the behavior of Go's ideal constants. +type NumberNode struct { + NodeType + Pos + tr *Tree + IsInt bool // Number has an integral value. + IsUint bool // Number has an unsigned integral value. + IsFloat bool // Number has a floating-point value. + IsComplex bool // Number is complex. + Int64 int64 // The signed integer value. + Uint64 uint64 // The unsigned integer value. + Float64 float64 // The floating-point value. + Complex128 complex128 // The complex value. + Text string // The original textual representation from the input. 
+} + +func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { + n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} + switch typ { + case itemCharConstant: + rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) + if err != nil { + return nil, err + } + if tail != "'" { + return nil, fmt.Errorf("malformed character constant: %s", text) + } + n.Int64 = int64(rune) + n.IsInt = true + n.Uint64 = uint64(rune) + n.IsUint = true + n.Float64 = float64(rune) // odd but those are the rules. + n.IsFloat = true + return n, nil + case itemComplex: + // fmt.Sscan can parse the pair, so let it do the work. + if _, err := fmt.Sscan(text, &n.Complex128); err != nil { + return nil, err + } + n.IsComplex = true + n.simplifyComplex() + return n, nil + } + // Imaginary constants can only be complex unless they are zero. + if len(text) > 0 && text[len(text)-1] == 'i' { + f, err := strconv.ParseFloat(text[:len(text)-1], 64) + if err == nil { + n.IsComplex = true + n.Complex128 = complex(0, f) + n.simplifyComplex() + return n, nil + } + } + // Do integer test first so we get 0x123 etc. + u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below. + if err == nil { + n.IsUint = true + n.Uint64 = u + } + i, err := strconv.ParseInt(text, 0, 64) + if err == nil { + n.IsInt = true + n.Int64 = i + if i == 0 { + n.IsUint = true // in case of -0. + n.Uint64 = u + } + } + // If an integer extraction succeeded, promote the float. + if n.IsInt { + n.IsFloat = true + n.Float64 = float64(n.Int64) + } else if n.IsUint { + n.IsFloat = true + n.Float64 = float64(n.Uint64) + } else { + f, err := strconv.ParseFloat(text, 64) + if err == nil { + n.IsFloat = true + n.Float64 = f + // If a floating-point extraction succeeded, extract the int if needed. + if !n.IsInt && float64(int64(f)) == f { + n.IsInt = true + n.Int64 = int64(f) + } + if !n.IsUint && float64(uint64(f)) == f { + n.IsUint = true + n.Uint64 = uint64(f) + } + } + } + if !n.IsInt && !n.IsUint && !n.IsFloat { + return nil, fmt.Errorf("illegal number syntax: %q", text) + } + return n, nil +} + +// simplifyComplex pulls out any other types that are represented by the complex number. +// These all require that the imaginary part be zero. +func (n *NumberNode) simplifyComplex() { + n.IsFloat = imag(n.Complex128) == 0 + if n.IsFloat { + n.Float64 = real(n.Complex128) + n.IsInt = float64(int64(n.Float64)) == n.Float64 + if n.IsInt { + n.Int64 = int64(n.Float64) + } + n.IsUint = float64(uint64(n.Float64)) == n.Float64 + if n.IsUint { + n.Uint64 = uint64(n.Float64) + } + } +} + +func (n *NumberNode) String() string { + return n.Text +} + +func (n *NumberNode) tree() *Tree { + return n.tr +} + +func (n *NumberNode) Copy() Node { + nn := new(NumberNode) + *nn = *n // Easy, fast, correct. + return nn +} + +// StringNode holds a string constant. The value has been "unquoted". +type StringNode struct { + NodeType + Pos + tr *Tree + Quoted string // The original text of the string, with quotes. + Text string // The string, after quote processing. +} + +func (t *Tree) newString(pos Pos, orig, text string) *StringNode { + return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} +} + +func (s *StringNode) String() string { + return s.Quoted +} + +func (s *StringNode) tree() *Tree { + return s.tr +} + +func (s *StringNode) Copy() Node { + return s.tr.newString(s.Pos, s.Quoted, s.Text) +} + +// endNode represents an {{end}} action. +// It does not appear in the final parse tree. 
+type endNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newEnd(pos Pos) *endNode { + return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} +} + +func (e *endNode) String() string { + return "{{end}}" +} + +func (e *endNode) tree() *Tree { + return e.tr +} + +func (e *endNode) Copy() Node { + return e.tr.newEnd(e.Pos) +} + +// elseNode represents an {{else}} action. Does not appear in the final tree. +type elseNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) +} + +func (t *Tree) newElse(pos Pos, line int) *elseNode { + return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} +} + +func (e *elseNode) Type() NodeType { + return nodeElse +} + +func (e *elseNode) String() string { + return "{{else}}" +} + +func (e *elseNode) tree() *Tree { + return e.tr +} + +func (e *elseNode) Copy() Node { + return e.tr.newElse(e.Pos, e.Line) +} + +// BranchNode is the common representation of if, range, and with. +type BranchNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline to be evaluated. + List *ListNode // What to execute if the value is non-empty. + ElseList *ListNode // What to execute if the value is empty (nil if absent). +} + +func (b *BranchNode) String() string { + name := "" + switch b.NodeType { + case NodeIf: + name = "if" + case NodeRange: + name = "range" + case NodeWith: + name = "with" + default: + panic("unknown branch type") + } + if b.ElseList != nil { + return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList) + } + return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) +} + +func (b *BranchNode) tree() *Tree { + return b.tr +} + +func (b *BranchNode) Copy() Node { + switch b.NodeType { + case NodeIf: + return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeRange: + return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeWith: + return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + default: + panic("unknown branch type") + } +} + +// IfNode represents an {{if}} action and its commands. +type IfNode struct { + BranchNode +} + +func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { + return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (i *IfNode) Copy() Node { + return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) +} + +// RangeNode represents a {{range}} action and its commands. +type RangeNode struct { + BranchNode +} + +func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { + return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (r *RangeNode) Copy() Node { + return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) +} + +// WithNode represents a {{with}} action and its commands. 
+type WithNode struct { + BranchNode +} + +func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { + return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (w *WithNode) Copy() Node { + return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) +} + +// TemplateNode represents a {{template}} action. +type TemplateNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Name string // The name of the template (unquoted). + Pipe *PipeNode // The command to evaluate as dot for the template. +} + +func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { + return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} +} + +func (t *TemplateNode) String() string { + if t.Pipe == nil { + return fmt.Sprintf("{{template %q}}", t.Name) + } + return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) +} + +func (t *TemplateNode) tree() *Tree { + return t.tr +} + +func (t *TemplateNode) Copy() Node { + return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) +} diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go new file mode 100644 index 0000000..0d77ade --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/parse.go @@ -0,0 +1,700 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package parse builds parse trees for templates as defined by text/template +// and html/template. Clients should use those packages to construct templates +// rather than this one, which provides shared internal data structures not +// intended for general use. +package parse + +import ( + "bytes" + "fmt" + "runtime" + "strconv" + "strings" +) + +// Tree is the representation of a single parsed template. +type Tree struct { + Name string // name of the template represented by the tree. + ParseName string // name of the top-level template during parsing, for error messages. + Root *ListNode // top-level root of the tree. + text string // text parsed to create the template (or its parent) + // Parsing only; cleared after parse. + funcs []map[string]interface{} + lex *lexer + token [3]item // three-token lookahead for parser. + peekCount int + vars []string // variables defined at the moment. +} + +// Copy returns a copy of the Tree. Any parsing state is discarded. +func (t *Tree) Copy() *Tree { + if t == nil { + return nil + } + return &Tree{ + Name: t.Name, + ParseName: t.ParseName, + Root: t.Root.CopyList(), + text: t.text, + } +} + +// Parse returns a map from template name to parse.Tree, created by parsing the +// templates described in the argument string. The top-level template will be +// given the specified name. If an error is encountered, parsing stops and an +// empty map is returned with the error. +func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) { + treeSet = make(map[string]*Tree) + t := New(name) + t.text = text + _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...) + return +} + +// next returns the next token. 
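+// (Editorial note: next, backup, backup2 and backup3 below implement the
+// parser's three-token lookahead; t.token acts as a small stack and
+// t.peekCount records how many entries are pending.)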
+func (t *Tree) next() item { + if t.peekCount > 0 { + t.peekCount-- + } else { + t.token[0] = t.lex.nextItem() + } + return t.token[t.peekCount] +} + +// backup backs the input stream up one token. +func (t *Tree) backup() { + t.peekCount++ +} + +// backup2 backs the input stream up two tokens. +// The zeroth token is already there. +func (t *Tree) backup2(t1 item) { + t.token[1] = t1 + t.peekCount = 2 +} + +// backup3 backs the input stream up three tokens +// The zeroth token is already there. +func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back. + t.token[1] = t1 + t.token[2] = t2 + t.peekCount = 3 +} + +// peek returns but does not consume the next token. +func (t *Tree) peek() item { + if t.peekCount > 0 { + return t.token[t.peekCount-1] + } + t.peekCount = 1 + t.token[0] = t.lex.nextItem() + return t.token[0] +} + +// nextNonSpace returns the next non-space token. +func (t *Tree) nextNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + return token +} + +// peekNonSpace returns but does not consume the next non-space token. +func (t *Tree) peekNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + t.backup() + return token +} + +// Parsing. + +// New allocates a new parse tree with the given name. +func New(name string, funcs ...map[string]interface{}) *Tree { + return &Tree{ + Name: name, + funcs: funcs, + } +} + +// ErrorContext returns a textual representation of the location of the node in the input text. +// The receiver is only used when the node does not have a pointer to the tree inside, +// which can occur in old code. +func (t *Tree) ErrorContext(n Node) (location, context string) { + pos := int(n.Position()) + tree := n.tree() + if tree == nil { + tree = t + } + text := tree.text[:pos] + byteNum := strings.LastIndex(text, "\n") + if byteNum == -1 { + byteNum = pos // On first line. + } else { + byteNum++ // After the newline. + byteNum = pos - byteNum + } + lineNum := 1 + strings.Count(text, "\n") + context = n.String() + if len(context) > 20 { + context = fmt.Sprintf("%.20s...", context) + } + return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context +} + +// errorf formats the error and terminates processing. +func (t *Tree) errorf(format string, args ...interface{}) { + t.Root = nil + format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format) + panic(fmt.Errorf(format, args...)) +} + +// error terminates processing. +func (t *Tree) error(err error) { + t.errorf("%s", err) +} + +// expect consumes the next token and guarantees it has the required type. +func (t *Tree) expect(expected itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected { + t.unexpected(token, context) + } + return token +} + +// expectOneOf consumes the next token and guarantees it has one of the required types. +func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected1 && token.typ != expected2 { + t.unexpected(token, context) + } + return token +} + +// unexpected complains about the token and terminates processing. +func (t *Tree) unexpected(token item, context string) { + t.errorf("unexpected %s in %s", token, context) +} + +// recover is the handler that turns panics into returns from the top level of Parse. 
+func (t *Tree) recover(errp *error) { + e := recover() + if e != nil { + if _, ok := e.(runtime.Error); ok { + panic(e) + } + if t != nil { + t.stopParse() + } + *errp = e.(error) + } + return +} + +// startParse initializes the parser, using the lexer. +func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) { + t.Root = nil + t.lex = lex + t.vars = []string{"$"} + t.funcs = funcs +} + +// stopParse terminates parsing. +func (t *Tree) stopParse() { + t.lex = nil + t.vars = nil + t.funcs = nil +} + +// Parse parses the template definition string to construct a representation of +// the template for execution. If either action delimiter string is empty, the +// default ("{{" or "}}") is used. Embedded template definitions are added to +// the treeSet map. +func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { + defer t.recover(&err) + t.ParseName = t.Name + t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim)) + t.text = text + t.parse(treeSet) + t.add(treeSet) + t.stopParse() + return t, nil +} + +// add adds tree to the treeSet. +func (t *Tree) add(treeSet map[string]*Tree) { + tree := treeSet[t.Name] + if tree == nil || IsEmptyTree(tree.Root) { + treeSet[t.Name] = t + return + } + if !IsEmptyTree(t.Root) { + t.errorf("template: multiple definition of template %q", t.Name) + } +} + +// IsEmptyTree reports whether this tree (node) is empty of everything but space. +func IsEmptyTree(n Node) bool { + switch n := n.(type) { + case nil: + return true + case *ActionNode: + case *IfNode: + case *ListNode: + for _, node := range n.Nodes { + if !IsEmptyTree(node) { + return false + } + } + return true + case *RangeNode: + case *TemplateNode: + case *TextNode: + return len(bytes.TrimSpace(n.Text)) == 0 + case *WithNode: + default: + panic("unknown node: " + n.String()) + } + return false +} + +// parse is the top-level parser for a template, essentially the same +// as itemList except it also parses {{define}} actions. +// It runs to EOF. +func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { + t.Root = t.newList(t.peek().pos) + for t.peek().typ != itemEOF { + if t.peek().typ == itemLeftDelim { + delim := t.next() + if t.nextNonSpace().typ == itemDefine { + newT := New("definition") // name will be updated once we know it. + newT.text = t.text + newT.ParseName = t.ParseName + newT.startParse(t.funcs, t.lex) + newT.parseDefinition(treeSet) + continue + } + t.backup2(delim) + } + n := t.textOrAction() + if n.Type() == nodeEnd { + t.errorf("unexpected %s", n) + } + t.Root.append(n) + } + return nil +} + +// parseDefinition parses a {{define}} ... {{end}} template definition and +// installs the definition in the treeSet map. The "define" keyword has already +// been scanned. +func (t *Tree) parseDefinition(treeSet map[string]*Tree) { + const context = "define clause" + name := t.expectOneOf(itemString, itemRawString, context) + var err error + t.Name, err = strconv.Unquote(name.val) + if err != nil { + t.error(err) + } + t.expect(itemRightDelim, context) + var end Node + t.Root, end = t.itemList() + if end.Type() != nodeEnd { + t.errorf("unexpected %s in %s", end, context) + } + t.add(treeSet) + t.stopParse() +} + +// itemList: +// textOrAction* +// Terminates at {{end}} or {{else}}, returned separately. 
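+// (Editorial example: for the body of "{{if .}}text{{end}}", itemList returns
+// a one-element list holding the TextNode, with a nodeEnd as next.)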
+func (t *Tree) itemList() (list *ListNode, next Node) { + list = t.newList(t.peekNonSpace().pos) + for t.peekNonSpace().typ != itemEOF { + n := t.textOrAction() + switch n.Type() { + case nodeEnd, nodeElse: + return list, n + } + list.append(n) + } + t.errorf("unexpected EOF") + return +} + +// textOrAction: +// text | action +func (t *Tree) textOrAction() Node { + switch token := t.nextNonSpace(); token.typ { + case itemElideNewline: + return t.elideNewline() + case itemText: + return t.newText(token.pos, token.val) + case itemLeftDelim: + return t.action() + default: + t.unexpected(token, "input") + } + return nil +} + +// elideNewline: +// Remove newlines trailing rightDelim if \\ is present. +func (t *Tree) elideNewline() Node { + token := t.peek() + if token.typ != itemText { + t.unexpected(token, "input") + return nil + } + + t.next() + stripped := strings.TrimLeft(token.val, "\n\r") + diff := len(token.val) - len(stripped) + if diff > 0 { + // This is a bit nasty. We mutate the token in-place to remove + // preceding newlines. + token.pos += Pos(diff) + token.val = stripped + } + return t.newText(token.pos, token.val) +} + +// Action: +// control +// command ("|" command)* +// Left delim is past. Now get actions. +// First word could be a keyword such as range. +func (t *Tree) action() (n Node) { + switch token := t.nextNonSpace(); token.typ { + case itemElse: + return t.elseControl() + case itemEnd: + return t.endControl() + case itemIf: + return t.ifControl() + case itemRange: + return t.rangeControl() + case itemTemplate: + return t.templateControl() + case itemWith: + return t.withControl() + } + t.backup() + // Do not pop variables; they persist until "end". + return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) +} + +// Pipeline: +// declarations? command ('|' command)* +func (t *Tree) pipeline(context string) (pipe *PipeNode) { + var decl []*VariableNode + pos := t.peekNonSpace().pos + // Are there declarations? + for { + if v := t.peekNonSpace(); v.typ == itemVariable { + t.next() + // Since space is a token, we need 3-token look-ahead here in the worst case: + // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an + // argument variable rather than a declaration. So remember the token + // adjacent to the variable so we can push it back if necessary. 
+ tokenAfterVariable := t.peek() + if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") { + t.nextNonSpace() + variable := t.newVariable(v.pos, v.val) + decl = append(decl, variable) + t.vars = append(t.vars, v.val) + if next.typ == itemChar && next.val == "," { + if context == "range" && len(decl) < 2 { + continue + } + t.errorf("too many declarations in %s", context) + } + } else if tokenAfterVariable.typ == itemSpace { + t.backup3(v, tokenAfterVariable) + } else { + t.backup2(v) + } + } + break + } + pipe = t.newPipeline(pos, t.lex.lineNumber(), decl) + for { + switch token := t.nextNonSpace(); token.typ { + case itemRightDelim, itemRightParen: + if len(pipe.Cmds) == 0 { + t.errorf("missing value for %s", context) + } + if token.typ == itemRightParen { + t.backup() + } + return + case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier, + itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen: + t.backup() + pipe.append(t.command()) + default: + t.unexpected(token, context) + } + } +} + +func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) { + defer t.popVars(len(t.vars)) + line = t.lex.lineNumber() + pipe = t.pipeline(context) + var next Node + list, next = t.itemList() + switch next.Type() { + case nodeEnd: //done + case nodeElse: + if allowElseIf { + // Special case for "else if". If the "else" is followed immediately by an "if", + // the elseControl will have left the "if" token pending. Treat + // {{if a}}_{{else if b}}_{{end}} + // as + // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}. + // To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}} + // is assumed. This technique works even for long if-else-if chains. + // TODO: Should we allow else-if in with and range? + if t.peek().typ == itemIf { + t.next() // Consume the "if" token. + elseList = t.newList(next.Position()) + elseList.append(t.ifControl()) + // Do not consume the next item - only one {{end}} required. + break + } + } + elseList, next = t.itemList() + if next.Type() != nodeEnd { + t.errorf("expected end; found %s", next) + } + } + return pipe.Position(), line, pipe, list, elseList +} + +// If: +// {{if pipeline}} itemList {{end}} +// {{if pipeline}} itemList {{else}} itemList {{end}} +// If keyword is past. +func (t *Tree) ifControl() Node { + return t.newIf(t.parseControl(true, "if")) +} + +// Range: +// {{range pipeline}} itemList {{end}} +// {{range pipeline}} itemList {{else}} itemList {{end}} +// Range keyword is past. +func (t *Tree) rangeControl() Node { + return t.newRange(t.parseControl(false, "range")) +} + +// With: +// {{with pipeline}} itemList {{end}} +// {{with pipeline}} itemList {{else}} itemList {{end}} +// If keyword is past. +func (t *Tree) withControl() Node { + return t.newWith(t.parseControl(false, "with")) +} + +// End: +// {{end}} +// End keyword is past. +func (t *Tree) endControl() Node { + return t.newEnd(t.expect(itemRightDelim, "end").pos) +} + +// Else: +// {{else}} +// Else keyword is past. +func (t *Tree) elseControl() Node { + // Special case for "else if". + peek := t.peekNonSpace() + if peek.typ == itemIf { + // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ". + return t.newElse(peek.pos, t.lex.lineNumber()) + } + return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber()) +} + +// Template: +// {{template stringValue pipeline}} +// Template keyword is past. 
The name must be something that can evaluate +// to a string. +func (t *Tree) templateControl() Node { + var name string + token := t.nextNonSpace() + switch token.typ { + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + name = s + default: + t.unexpected(token, "template invocation") + } + var pipe *PipeNode + if t.nextNonSpace().typ != itemRightDelim { + t.backup() + // Do not pop variables; they persist until "end". + pipe = t.pipeline("template") + } + return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) +} + +// command: +// operand (space operand)* +// space-separated arguments up to a pipeline character or right delimiter. +// we consume the pipe character but leave the right delim to terminate the action. +func (t *Tree) command() *CommandNode { + cmd := t.newCommand(t.peekNonSpace().pos) + for { + t.peekNonSpace() // skip leading spaces. + operand := t.operand() + if operand != nil { + cmd.append(operand) + } + switch token := t.next(); token.typ { + case itemSpace: + continue + case itemError: + t.errorf("%s", token.val) + case itemRightDelim, itemRightParen: + t.backup() + case itemPipe: + default: + t.errorf("unexpected %s in operand; missing space?", token) + } + break + } + if len(cmd.Args) == 0 { + t.errorf("empty command") + } + return cmd +} + +// operand: +// term .Field* +// An operand is a space-separated component of a command, +// a term possibly followed by field accesses. +// A nil return means the next item is not an operand. +func (t *Tree) operand() Node { + node := t.term() + if node == nil { + return nil + } + if t.peek().typ == itemField { + chain := t.newChain(t.peek().pos, node) + for t.peek().typ == itemField { + chain.Add(t.next().val) + } + // Compatibility with original API: If the term is of type NodeField + // or NodeVariable, just put more fields on the original. + // Otherwise, keep the Chain node. + // TODO: Switch to Chains always when we can. + switch node.Type() { + case NodeField: + node = t.newField(chain.Position(), chain.String()) + case NodeVariable: + node = t.newVariable(chain.Position(), chain.String()) + default: + node = chain + } + } + return node +} + +// term: +// literal (number, string, nil, boolean) +// function (identifier) +// . +// .Field +// $ +// '(' pipeline ')' +// A term is a simple "expression". +// A nil return means the next item is not a term. 
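+//
+// For example, in {{printf "%d" .Count}} the terms are the identifier printf,
+// the string constant "%d", and the field .Count.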
+func (t *Tree) term() Node { + switch token := t.nextNonSpace(); token.typ { + case itemError: + t.errorf("%s", token.val) + case itemIdentifier: + if !t.hasFunction(token.val) { + t.errorf("function %q not defined", token.val) + } + return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) + case itemDot: + return t.newDot(token.pos) + case itemNil: + return t.newNil(token.pos) + case itemVariable: + return t.useVar(token.pos, token.val) + case itemField: + return t.newField(token.pos, token.val) + case itemBool: + return t.newBool(token.pos, token.val == "true") + case itemCharConstant, itemComplex, itemNumber: + number, err := t.newNumber(token.pos, token.val, token.typ) + if err != nil { + t.error(err) + } + return number + case itemLeftParen: + pipe := t.pipeline("parenthesized pipeline") + if token := t.next(); token.typ != itemRightParen { + t.errorf("unclosed right paren: unexpected %s", token) + } + return pipe + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + return t.newString(token.pos, token.val, s) + } + t.backup() + return nil +} + +// hasFunction reports if a function name exists in the Tree's maps. +func (t *Tree) hasFunction(name string) bool { + for _, funcMap := range t.funcs { + if funcMap == nil { + continue + } + if funcMap[name] != nil { + return true + } + } + return false +} + +// popVars trims the variable list to the specified length +func (t *Tree) popVars(n int) { + t.vars = t.vars[:n] +} + +// useVar returns a node for a variable reference. It errors if the +// variable is not defined. +func (t *Tree) useVar(pos Pos, name string) Node { + v := t.newVariable(pos, name) + for _, varName := range t.vars { + if varName == v.Ident[0] { + return v + } + } + t.errorf("undefined variable %q", v.Ident[0]) + return nil +} diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go new file mode 100644 index 0000000..447ed2a --- /dev/null +++ b/vendor/github.com/alecthomas/template/template.go @@ -0,0 +1,218 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "fmt" + "reflect" + + "github.com/alecthomas/template/parse" +) + +// common holds the information shared by related templates. +type common struct { + tmpl map[string]*Template + // We use two maps, one for parsing and one for execution. + // This separation makes the API cleaner since it doesn't + // expose reflection to the client. + parseFuncs FuncMap + execFuncs map[string]reflect.Value +} + +// Template is the representation of a parsed template. The *parse.Tree +// field is exported only for use by html/template and should be treated +// as unexported by all other clients. +type Template struct { + name string + *parse.Tree + *common + leftDelim string + rightDelim string +} + +// New allocates a new template with the given name. +func New(name string) *Template { + return &Template{ + name: name, + } +} + +// Name returns the name of the template. +func (t *Template) Name() string { + return t.name +} + +// New allocates a new template associated with the given one and with the same +// delimiters. The association, which is transitive, allows one template to +// invoke another with a {{template}} action. 
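+//
+// For example (sketch):
+//
+//	root := New("root")
+//	partial := root.New("partial") // shares root's delimiters and funcs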
+func (t *Template) New(name string) *Template { + t.init() + return &Template{ + name: name, + common: t.common, + leftDelim: t.leftDelim, + rightDelim: t.rightDelim, + } +} + +func (t *Template) init() { + if t.common == nil { + t.common = new(common) + t.tmpl = make(map[string]*Template) + t.parseFuncs = make(FuncMap) + t.execFuncs = make(map[string]reflect.Value) + } +} + +// Clone returns a duplicate of the template, including all associated +// templates. The actual representation is not copied, but the name space of +// associated templates is, so further calls to Parse in the copy will add +// templates to the copy but not to the original. Clone can be used to prepare +// common templates and use them with variant definitions for other templates +// by adding the variants after the clone is made. +func (t *Template) Clone() (*Template, error) { + nt := t.copy(nil) + nt.init() + nt.tmpl[t.name] = nt + for k, v := range t.tmpl { + if k == t.name { // Already installed. + continue + } + // The associated templates share nt's common structure. + tmpl := v.copy(nt.common) + nt.tmpl[k] = tmpl + } + for k, v := range t.parseFuncs { + nt.parseFuncs[k] = v + } + for k, v := range t.execFuncs { + nt.execFuncs[k] = v + } + return nt, nil +} + +// copy returns a shallow copy of t, with common set to the argument. +func (t *Template) copy(c *common) *Template { + nt := New(t.name) + nt.Tree = t.Tree + nt.common = c + nt.leftDelim = t.leftDelim + nt.rightDelim = t.rightDelim + return nt +} + +// AddParseTree creates a new template with the name and parse tree +// and associates it with t. +func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { + if t.common != nil && t.tmpl[name] != nil { + return nil, fmt.Errorf("template: redefinition of template %q", name) + } + nt := t.New(name) + nt.Tree = tree + t.tmpl[name] = nt + return nt, nil +} + +// Templates returns a slice of the templates associated with t, including t +// itself. +func (t *Template) Templates() []*Template { + if t.common == nil { + return nil + } + // Return a slice so we don't expose the map. + m := make([]*Template, 0, len(t.tmpl)) + for _, v := range t.tmpl { + m = append(m, v) + } + return m +} + +// Delims sets the action delimiters to the specified strings, to be used in +// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template +// definitions will inherit the settings. An empty delimiter stands for the +// corresponding default: {{ or }}. +// The return value is the template, so calls can be chained. +func (t *Template) Delims(left, right string) *Template { + t.leftDelim = left + t.rightDelim = right + return t +} + +// Funcs adds the elements of the argument map to the template's function map. +// It panics if a value in the map is not a function with appropriate return +// type. However, it is legal to overwrite elements of the map. The return +// value is the template, so calls can be chained. +func (t *Template) Funcs(funcMap FuncMap) *Template { + t.init() + addValueFuncs(t.execFuncs, funcMap) + addFuncs(t.parseFuncs, funcMap) + return t +} + +// Lookup returns the template with the given name that is associated with t, +// or nil if there is no such template. +func (t *Template) Lookup(name string) *Template { + if t.common == nil { + return nil + } + return t.tmpl[name] +} + +// Parse parses a string into a template. Nested template definitions will be +// associated with the top-level template t. 
Parse may be called multiple times
+// to parse definitions of templates to associate with t. It is an error if a
+// resulting template is non-empty (contains content other than template
+// definitions) and would replace a non-empty template with the same name.
+// (In multiple calls to Parse with the same receiver template, only one call
+// can contain text other than space, comments, and template definitions.)
+func (t *Template) Parse(text string) (*Template, error) {
+	t.init()
+	trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
+	if err != nil {
+		return nil, err
+	}
+	// Add the newly parsed trees, including the one for t, into our common structure.
+	for name, tree := range trees {
+		// If the name we parsed is the name of this template, overwrite this template.
+		// The associate method checks it's not a redefinition.
+		tmpl := t
+		if name != t.name {
+			tmpl = t.New(name)
+		}
+		// Even if t == tmpl, we need to install it in the common.tmpl map.
+		if replace, err := t.associate(tmpl, tree); err != nil {
+			return nil, err
+		} else if replace {
+			tmpl.Tree = tree
+		}
+		tmpl.leftDelim = t.leftDelim
+		tmpl.rightDelim = t.rightDelim
+	}
+	return t, nil
+}
+
+// associate installs the new template into the group of templates associated
+// with t. It is an error to reuse a name except to overwrite an empty
+// template. The two are already known to share the common structure.
+// The boolean return value reports whether to store this tree as t.Tree.
+func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
+	if new.common != t.common {
+		panic("internal error: associate not common")
+	}
+	name := new.name
+	if old := t.tmpl[name]; old != nil {
+		oldIsEmpty := parse.IsEmptyTree(old.Root)
+		newIsEmpty := parse.IsEmptyTree(tree.Root)
+		if newIsEmpty {
+			// Whether old is empty or not, new is empty; no reason to replace old.
+			return false, nil
+		}
+		if !oldIsEmpty {
+			return false, fmt.Errorf("template: redefinition of template %q", name)
+		}
+	}
+	t.tmpl[name] = new
+	return true, nil
+}
diff --git a/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/alecthomas/units/COPYING
new file mode 100644
index 0000000..2993ec0
--- /dev/null
+++ b/vendor/github.com/alecthomas/units/COPYING
@@ -0,0 +1,19 @@
+Copyright (C) 2014 Alec Thomas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
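To make the Parse, Clone and associate semantics above concrete, here is a minimal, hypothetical sketch. It assumes the fork keeps text/template's Must, Clone and Execute helpers, which the vendored files mirror from the standard library; the names "base" and "content" are illustrative only:

```go
package main

import (
	"os"

	"github.com/alecthomas/template"
)

func main() {
	// The root template references a "content" block that is defined empty.
	base := template.Must(template.New("base").Parse(
		`[{{template "content" .}}]{{define "content"}}{{end}}`))

	// A clone may overwrite the empty definition; associate only rejects
	// redefining a template that already has a non-empty body.
	variant := template.Must(base.Clone())
	template.Must(variant.Parse(`{{define "content"}}hello {{.}}{{end}}`))

	base.Execute(os.Stdout, "x")    // prints: []
	variant.Execute(os.Stdout, "x") // prints: [hello x]
}
```

Parsing another non-empty `{{define "content"}}...{{end}}` on the same group would surface associate's "redefinition of template" error.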
diff --git a/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/alecthomas/units/README.md new file mode 100644 index 0000000..bee884e --- /dev/null +++ b/vendor/github.com/alecthomas/units/README.md @@ -0,0 +1,11 @@ +# Units - Helpful unit multipliers and functions for Go + +The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package. + +It allows for code like this: + +```go +n, err := ParseBase2Bytes("1KB") +// n == 1024 +n = units.Mebibyte * 512 +``` diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go new file mode 100644 index 0000000..eaadeb8 --- /dev/null +++ b/vendor/github.com/alecthomas/units/bytes.go @@ -0,0 +1,83 @@ +package units + +// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte, +// etc.). +type Base2Bytes int64 + +// Base-2 byte units. +const ( + Kibibyte Base2Bytes = 1024 + KiB = Kibibyte + Mebibyte = Kibibyte * 1024 + MiB = Mebibyte + Gibibyte = Mebibyte * 1024 + GiB = Gibibyte + Tebibyte = Gibibyte * 1024 + TiB = Tebibyte + Pebibyte = Tebibyte * 1024 + PiB = Pebibyte + Exbibyte = Pebibyte * 1024 + EiB = Exbibyte +) + +var ( + bytesUnitMap = MakeUnitMap("iB", "B", 1024) + oldBytesUnitMap = MakeUnitMap("B", "B", 1024) +) + +// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB +// and KiB are both 1024. +func ParseBase2Bytes(s string) (Base2Bytes, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, oldBytesUnitMap) + } + return Base2Bytes(n), err +} + +func (b Base2Bytes) String() string { + return ToString(int64(b), 1024, "iB", "B") +} + +var ( + metricBytesUnitMap = MakeUnitMap("B", "B", 1000) +) + +// MetricBytes are SI byte units (1000 bytes in a kilobyte). +type MetricBytes SI + +// SI base-10 byte units. +const ( + Kilobyte MetricBytes = 1000 + KB = Kilobyte + Megabyte = Kilobyte * 1000 + MB = Megabyte + Gigabyte = Megabyte * 1000 + GB = Gigabyte + Terabyte = Gigabyte * 1000 + TB = Terabyte + Petabyte = Terabyte * 1000 + PB = Petabyte + Exabyte = Petabyte * 1000 + EB = Exabyte +) + +// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes. +func ParseMetricBytes(s string) (MetricBytes, error) { + n, err := ParseUnit(s, metricBytesUnitMap) + return MetricBytes(n), err +} + +func (m MetricBytes) String() string { + return ToString(int64(m), 1000, "B", "B") +} + +// ParseStrictBytes supports both iB and B suffixes for base 2 and metric, +// respectively. That is, KiB represents 1024 and KB represents 1000. +func ParseStrictBytes(s string) (int64, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, metricBytesUnitMap) + } + return int64(n), err +} diff --git a/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/alecthomas/units/doc.go new file mode 100644 index 0000000..156ae38 --- /dev/null +++ b/vendor/github.com/alecthomas/units/doc.go @@ -0,0 +1,13 @@ +// Package units provides helpful unit multipliers and functions for Go. +// +// The goal of this package is to have functionality similar to the time [1] package. 
+// +// +// [1] http://golang.org/pkg/time/ +// +// It allows for code like this: +// +// n, err := ParseBase2Bytes("1KB") +// // n == 1024 +// n = units.Mebibyte * 512 +package units diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go new file mode 100644 index 0000000..8234a9d --- /dev/null +++ b/vendor/github.com/alecthomas/units/si.go @@ -0,0 +1,26 @@ +package units + +// SI units. +type SI int64 + +// SI unit multiples. +const ( + Kilo SI = 1000 + Mega = Kilo * 1000 + Giga = Mega * 1000 + Tera = Giga * 1000 + Peta = Tera * 1000 + Exa = Peta * 1000 +) + +func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 { + return map[string]float64{ + shortSuffix: 1, + "K" + suffix: float64(scale), + "M" + suffix: float64(scale * scale), + "G" + suffix: float64(scale * scale * scale), + "T" + suffix: float64(scale * scale * scale * scale), + "P" + suffix: float64(scale * scale * scale * scale * scale), + "E" + suffix: float64(scale * scale * scale * scale * scale * scale), + } +} diff --git a/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/alecthomas/units/util.go new file mode 100644 index 0000000..6527e92 --- /dev/null +++ b/vendor/github.com/alecthomas/units/util.go @@ -0,0 +1,138 @@ +package units + +import ( + "errors" + "fmt" + "strings" +) + +var ( + siUnits = []string{"", "K", "M", "G", "T", "P", "E"} +) + +func ToString(n int64, scale int64, suffix, baseSuffix string) string { + mn := len(siUnits) + out := make([]string, mn) + for i, m := range siUnits { + if n%scale != 0 || i == 0 && n == 0 { + s := suffix + if i == 0 { + s = baseSuffix + } + out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s) + } + n /= scale + if n == 0 { + break + } + } + return strings.Join(out, "") +} + +// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123 +var errLeadingInt = errors.New("units: bad [0-9]*") // never printed + +// leadingInt consumes the leading [0-9]* from s. +func leadingInt(s string) (x int64, rem string, err error) { + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + if x >= (1<<63-10)/10 { + // overflow + return 0, "", errLeadingInt + } + x = x*10 + int64(c) - '0' + } + return x, s[i:], nil +} + +func ParseUnit(s string, unitMap map[string]float64) (int64, error) { + // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+ + orig := s + f := float64(0) + neg := false + + // Consume [-+]? + if s != "" { + c := s[0] + if c == '-' || c == '+' { + neg = c == '-' + s = s[1:] + } + } + // Special case: if all that is left is "0", this is zero. + if s == "0" { + return 0, nil + } + if s == "" { + return 0, errors.New("units: invalid " + orig) + } + for s != "" { + g := float64(0) // this element of the sequence + + var x int64 + var err error + + // The next character must be [0-9.] + if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) { + return 0, errors.New("units: invalid " + orig) + } + // Consume [0-9]* + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + g = float64(x) + pre := pl != len(s) // whether we consumed anything before a period + + // Consume (\.[0-9]*)? + post := false + if s != "" && s[0] == '.' { + s = s[1:] + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + scale := 1.0 + for n := pl - len(s); n > 0; n-- { + scale *= 10 + } + g += float64(x) / scale + post = pl != len(s) + } + if !pre && !post { + // no digits (e.g. 
".s" or "-.s") + return 0, errors.New("units: invalid " + orig) + } + + // Consume unit. + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c == '.' || ('0' <= c && c <= '9') { + break + } + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, errors.New("units: unknown unit " + u + " in " + orig) + } + + f += g * unit + } + + if neg { + f = -f + } + if f < float64(-1<<63) || f > float64(1<<63-1) { + return 0, errors.New("units: overflow parsing unit") + } + return int64(f), nil +} diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/flate/copy.go b/vendor/github.com/klauspost/compress/flate/copy.go new file mode 100644 index 0000000..a3200a8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/copy.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// forwardCopy is like the built-in copy function except that it always goes +// forward from the start, even if the dst and src overlap. +// It is equivalent to: +// for i := 0; i < n; i++ { +// mem[dst+i] = mem[src+i] +// } +func forwardCopy(mem []byte, dst, src, n int) { + if dst <= src { + copy(mem[dst:dst+n], mem[src:src+n]) + return + } + for { + if dst >= src+n { + copy(mem[dst:dst+n], mem[src:src+n]) + return + } + // There is some forward overlap. The destination + // will be filled with a repeated pattern of mem[src:src+k]. + // We copy one instance of the pattern here, then repeat. + // Each time around this loop k will double. 
+		k := dst - src
+		copy(mem[dst:dst+k], mem[src:src+k])
+		n -= k
+		dst += k
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
new file mode 100644
index 0000000..70a6095
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
@@ -0,0 +1,41 @@
+//+build !noasm
+//+build !appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+package flate
+
+import (
+	"github.com/klauspost/cpuid"
+)
+
+// crc32sse returns a hash for the first 4 bytes of the slice
+// len(a) must be >= 4.
+//go:noescape
+func crc32sse(a []byte) uint32
+
+// crc32sseAll calculates hashes for each 4-byte set in a.
+// dst must be at least len(a) - 4 in size.
+// The size is not checked by the assembly.
+//go:noescape
+func crc32sseAll(a []byte, dst []uint32)
+
+// matchLenSSE4 returns the number of matching bytes in a and b
+// up to length 'max'. Both slices must be at least 'max'
+// bytes in size.
+//
+// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions.
+//
+//go:noescape
+func matchLenSSE4(a, b []byte, max int) int
+
+// histogram accumulates a histogram of b in h.
+// h must be at least 256 entries in length,
+// and must be cleared before calling this function.
+//go:noescape
+func histogram(b []byte, h []int32)
+
+// Detect SSE 4.2 feature.
+func init() {
+	useSSE42 = cpuid.CPU.SSE42()
+}
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
new file mode 100644
index 0000000..2fb2079
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
@@ -0,0 +1,213 @@
+//+build !noasm
+//+build !appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
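+//
+// Note: the CRC32 instructions below are emitted as raw BYTE sequences,
+// presumably because the Go assembler of the time had no mnemonic for them.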
+ +// func crc32sse(a []byte) uint32 +TEXT ·crc32sse(SB), 4, $0 + MOVQ a+0(FP), R10 + XORQ BX, BX + + // CRC32 dword (R10), EBX + BYTE $0xF2; BYTE $0x41; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0x1a + + MOVL BX, ret+24(FP) + RET + +// func crc32sseAll(a []byte, dst []uint32) +TEXT ·crc32sseAll(SB), 4, $0 + MOVQ a+0(FP), R8 // R8: src + MOVQ a_len+8(FP), R10 // input length + MOVQ dst+24(FP), R9 // R9: dst + SUBQ $4, R10 + JS end + JZ one_crc + MOVQ R10, R13 + SHRQ $2, R10 // len/4 + ANDQ $3, R13 // len&3 + XORQ BX, BX + ADDQ $1, R13 + TESTQ R10, R10 + JZ rem_loop + +crc_loop: + MOVQ (R8), R11 + XORQ BX, BX + XORQ DX, DX + XORQ DI, DI + MOVQ R11, R12 + SHRQ $8, R11 + MOVQ R12, AX + MOVQ R11, CX + SHRQ $16, R12 + SHRQ $16, R11 + MOVQ R12, SI + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + // CRC32 ECX, EDX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd1 + + // CRC32 ESI, EDI + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xfe + MOVL BX, (R9) + MOVL DX, 4(R9) + MOVL DI, 8(R9) + + XORQ BX, BX + MOVL R11, AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + MOVL BX, 12(R9) + + ADDQ $16, R9 + ADDQ $4, R8 + XORQ BX, BX + SUBQ $1, R10 + JNZ crc_loop + +rem_loop: + MOVL (R8), AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + MOVL BX, (R9) + ADDQ $4, R9 + ADDQ $1, R8 + XORQ BX, BX + SUBQ $1, R13 + JNZ rem_loop + +end: + RET + +one_crc: + MOVQ $1, R13 + XORQ BX, BX + JMP rem_loop + +// func matchLenSSE4(a, b []byte, max int) int +TEXT ·matchLenSSE4(SB), 4, $0 + MOVQ a_base+0(FP), SI + MOVQ b_base+24(FP), DI + MOVQ DI, DX + MOVQ max+48(FP), CX + +cmp8: + // As long as we are 8 or more bytes before the end of max, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ CX, $8 + JLT cmp1 + MOVQ (SI), AX + MOVQ (DI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, SI + ADDQ $8, DI + SUBQ $8, CX + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, DI + + // Subtract off &b[0] to convert from &b[ret] to ret, and return. + SUBQ DX, DI + MOVQ DI, ret+56(FP) + RET + +cmp1: + // In the slices' tail, compare 1 byte at a time. + CMPQ CX, $0 + JEQ matchLenEnd + MOVB (SI), AX + MOVB (DI), BX + CMPB AX, BX + JNE matchLenEnd + ADDQ $1, SI + ADDQ $1, DI + SUBQ $1, CX + JMP cmp1 + +matchLenEnd: + // Subtract off &b[0] to convert from &b[ret] to ret, and return. 
+ SUBQ DX, DI + MOVQ DI, ret+56(FP) + RET + +// func histogram(b []byte, h []int32) +TEXT ·histogram(SB), 4, $0 + MOVQ b+0(FP), SI // SI: &b + MOVQ b_len+8(FP), R9 // R9: len(b) + MOVQ h+24(FP), DI // DI: Histogram + MOVQ R9, R8 + SHRQ $3, R8 + JZ hist1 + XORQ R11, R11 + +loop_hist8: + MOVQ (SI), R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + INCL (DI)(R10*4) + + ADDQ $8, SI + DECQ R8 + JNZ loop_hist8 + +hist1: + ANDQ $7, R9 + JZ end_hist + XORQ R10, R10 + +loop_hist1: + MOVB (SI), R10 + INCL (DI)(R10*4) + INCQ SI + DECQ R9 + JNZ loop_hist1 + +end_hist: + RET diff --git a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go new file mode 100644 index 0000000..bd98bd5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go @@ -0,0 +1,35 @@ +//+build !amd64 noasm appengine + +// Copyright 2015, Klaus Post, see LICENSE for details. + +package flate + +func init() { + useSSE42 = false +} + +// crc32sse should never be called. +func crc32sse(a []byte) uint32 { + panic("no assembler") +} + +// crc32sseAll should never be called. +func crc32sseAll(a []byte, dst []uint32) { + panic("no assembler") +} + +// matchLenSSE4 should never be called. +func matchLenSSE4(a, b []byte, max int) int { + panic("no assembler") + return 0 +} + +// histogram accumulates a histogram of b in h. +// +// len(h) must be >= 256, and h's elements must be all zeroes. +func histogram(b []byte, h []int32) { + h = h[:256] + for _, t := range b { + h[t]++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go new file mode 100644 index 0000000..9e6e7ff --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -0,0 +1,1353 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "fmt" + "io" + "math" +) + +const ( + NoCompression = 0 + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = -1 + + // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman + // entropy encoding. This mode is useful in compressing data that has + // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) + // that lacks an entropy encoder. Compression gains are achieved when + // certain bytes in the input stream occur more frequently than others. + // + // Note that HuffmanOnly produces a compressed output that is + // RFC 1951 compliant. That is, any valid DEFLATE decompressor will + // continue to be able to decompress this output. + HuffmanOnly = -2 + ConstantCompression = HuffmanOnly // compatibility alias. 
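+
+	// A caller selects one of the levels above via NewWriter; for example
+	// (sketch):
+	//
+	//	zw, err := NewWriter(w, BestSpeed)
+	//	if err == nil {
+	//		zw.Write(data)
+	//		zw.Close()
+	//	}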
+
+	logWindowSize = 15
+	windowSize    = 1 << logWindowSize
+	windowMask    = windowSize - 1
+	logMaxOffsetSize = 15  // Standard DEFLATE
+	minMatchLength   = 4   // The smallest match that the compressor looks for
+	maxMatchLength   = 258 // The longest match for the compressor
+	minOffsetSize    = 1   // The shortest offset that makes any sense
+
+	// The maximum number of tokens we put into a single flate block, just to
+	// stop things from getting too large.
+	maxFlateBlockTokens = 1 << 14
+	maxStoreBlockSize   = 65535
+	hashBits            = 17 // After 17 performance degrades
+	hashSize            = 1 << hashBits
+	hashMask            = (1 << hashBits) - 1
+	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
+	maxHashOffset       = 1 << 24
+
+	skipNever = math.MaxInt32
+)
+
+var useSSE42 bool
+
+type compressionLevel struct {
+	good, lazy, nice, chain, fastSkipHashing, level int
+}
+
+// Compression levels have been rebalanced from zlib deflate defaults
+// to give a bigger spread in speed and compression.
+// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
+var levels = []compressionLevel{
+	{}, // 0
+	// Levels 1-4 use a specialized algorithm - values not used
+	{0, 0, 0, 0, 0, 1},
+	{0, 0, 0, 0, 0, 2},
+	{0, 0, 0, 0, 0, 3},
+	{0, 0, 0, 0, 0, 4},
+	// For levels 5-6 we don't bother trying with lazy matches.
+	// Lazy matching is at least 30% slower, for roughly a 1.5% compression increase.
+	{6, 0, 12, 8, 12, 5},
+	{8, 0, 24, 16, 16, 6},
+	// Levels 7-9 use increasingly more lazy matching
+	// and increasingly stringent conditions for "good enough".
+	{8, 8, 24, 16, skipNever, 7},
+	{10, 16, 24, 64, skipNever, 8},
+	{32, 258, 258, 4096, skipNever, 9},
+}
+
+type compressor struct {
+	compressionLevel
+
+	w          *huffmanBitWriter
+	bulkHasher func([]byte, []uint32)
+
+	// compression algorithm
+	fill func(*compressor, []byte) int // copy data to window
+	step func(*compressor)             // process window
+	sync bool                          // requesting flush
+
+	// Input hash chains
+	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
+	// If hashHead[hashValue] is within the current window, then
+	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+	// with the same hash value.
+	chainHead  int
+	hashHead   [hashSize]uint32
+	hashPrev   [windowSize]uint32
+	hashOffset int
+
+	// input window: unprocessed data is window[index:windowEnd]
+	index         int
+	window        []byte
+	windowEnd     int
+	blockStart    int  // window index where current tokens start
+	byteAvailable bool // if true, still need to process window[index-1].
+
+	// queued output tokens
+	tokens tokens
+
+	// deflate state
+	length         int
+	offset         int
+	hash           uint32
+	maxInsertIndex int
+	err            error
+	ii             uint16 // position of last match, intended to overflow to reset.
+
+	snap      snappyEnc
+	hashMatch [maxMatchLength + minMatchLength]uint32
+}
+
+func (d *compressor) fillDeflate(b []byte) int {
+	if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+		// shift the window by windowSize
+		copy(d.window[:], d.window[windowSize:2*windowSize])
+		d.index -= windowSize
+		d.windowEnd -= windowSize
+		if d.blockStart >= windowSize {
+			d.blockStart -= windowSize
+		} else {
+			d.blockStart = math.MaxInt32
+		}
+		d.hashOffset += windowSize
+		if d.hashOffset > maxHashOffset {
+			delta := d.hashOffset - 1
+			d.hashOffset -= delta
+			d.chainHead -= delta
+			// Iterate over slices instead of arrays to avoid copying
+			// the entire table onto the stack (Issue #18625).
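+			// Rebase each chain entry by delta; entries at or below delta
+			// point outside the window and are reset to 0 (unset).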
+ for i, v := range d.hashPrev[:] { + if int(v) > delta { + d.hashPrev[i] = uint32(int(v) - delta) + } else { + d.hashPrev[i] = 0 + } + } + for i, v := range d.hashHead[:] { + if int(v) > delta { + d.hashHead[i] = uint32(int(v) - delta) + } else { + d.hashHead[i] = 0 + } + } + } + } + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +func (d *compressor) writeBlock(tok tokens, index int, eof bool) error { + if index > 0 || eof { + var window []byte + if d.blockStart <= index { + window = d.window[d.blockStart:index] + } + d.blockStart = index + d.w.writeBlock(tok.tokens[:tok.n], eof, window) + return d.w.err + } + return nil +} + +// writeBlockSkip writes the current block and uses the number of tokens +// to determine if the block should be stored on no matches, or +// only huffman encoded. +func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error { + if index > 0 || eof { + if d.blockStart <= index { + window := d.window[d.blockStart:index] + // If we removed less than a 64th of all literals + // we huffman compress the block. + if int(tok.n) > len(window)-int(tok.n>>6) { + d.w.writeBlockHuff(eof, window) + } else { + // Write a dynamic huffman block. + d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window) + } + } else { + d.w.writeBlock(tok.tokens[:tok.n], eof, nil) + } + d.blockStart = index + return d.w.err + } + return nil +} + +// fillWindow will fill the current window with the supplied +// dictionary and calculate all hashes. +// This is much faster than doing a full encode. +// Should only be used after a start/reset. +func (d *compressor) fillWindow(b []byte) { + // Do not fill window if we are in store-only mode, + // use constant or Snappy compression. + switch d.compressionLevel.level { + case 0, 1, 2: + return + } + // If we are given too much, cut it. + if len(b) > windowSize { + b = b[len(b)-windowSize:] + } + // Add all to window. + n := copy(d.window[d.windowEnd:], b) + + // Calculate 256 hashes at the time (more L1 cache hits) + loops := (n + 256 - minMatchLength) / 256 + for j := 0; j < loops; j++ { + startindex := j * 256 + end := startindex + 256 + minMatchLength - 1 + if end > n { + end = n + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + + if dstSize <= 0 { + continue + } + + dst := d.hashMatch[:dstSize] + d.bulkHasher(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + // Update window information. + d.windowEnd += n + d.index = n +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. 
+ tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. + tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLenSSE4(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +func (d *compressor) writeStoredBlock(buf []byte) error { + if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { + return d.w.err + } + d.w.writeBytes(buf) + return d.w.err +} + +const hashmul = 0x1e35a7bd + +// hash4 returns a hash representation of the first 4 bytes +// of the supplied slice. +// The caller must ensure that len(b) >= 4. +func hash4(b []byte) uint32 { + return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits) +} + +// bulkHash4 will compute hashes using the same +// algorithm as hash4 +func bulkHash4(b []byte, dst []uint32) { + if len(b) < minMatchLength { + return + } + hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + dst[0] = (hb * hashmul) >> (32 - hashBits) + end := len(b) - minMatchLength + 1 + for i := 1; i < end; i++ { + hb = (hb << 8) | uint32(b[i+3]) + dst[i] = (hb * hashmul) >> (32 - hashBits) + } +} + +// matchLen returns the number of matching bytes in a and b +// up to length 'max'. Both slices must be at least 'max' +// bytes in size. 
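+//
+// For example, matchLen([]byte("abcd"), []byte("abcx"), 4) returns 3.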
+func matchLen(a, b []byte, max int) int { + a = a[:max] + b = b[:len(a)] + for i, av := range a { + if b[i] != av { + return i + } + } + return max +} + +func (d *compressor) initDeflate() { + d.window = make([]byte, 2*windowSize) + d.hashOffset = 1 + d.length = minMatchLength - 1 + d.offset = 0 + d.byteAvailable = false + d.index = 0 + d.hash = 0 + d.chainHead = -1 + d.bulkHasher = bulkHash4 + if useSSE42 { + d.bulkHasher = crc32sseAll + } +} + +// Assumes that d.fastSkipHashing != skipNever, +// otherwise use deflateLazy +func (d *compressor) deflate() { + + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + ch := d.hashHead[d.hash&hashMask] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. 
+ d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. + d.index += d.length + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + } + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + d.ii++ + end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1 + if end > d.windowEnd { + end = d.windowEnd + } + for i := d.index; i < end; i++ { + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + d.index = end + } + } +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazy() { + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + ch := d.hashHead[d.hash&hashMask] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) + } + prevLength := d.length + prevOffset := d.offset + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if prevLength >= minMatchLength && d.length <= prevLength { + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + d.tokens.n++ + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. 
+ var newIndex int + newIndex = d.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + + d.index = newIndex + d.byteAvailable = false + d.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + // Reset, if we got a match this run. + if d.length >= minMatchLength { + d.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + d.ii++ + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when d.ii overflows after 64KB. + if d.ii > 31 { + n := int(d.ii >> 5) + for j := 0; j < n; j++ { + if d.index >= d.windowEnd-1 { + break + } + + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + } + // Flush last byte + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + } else { + d.index++ + d.byteAvailable = true + } + } + } +} + +// Assumes that d.fastSkipHashing != skipNever, +// otherwise use deflateLazySSE +func (d *compressor) deflateSSE() { + + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. 
+ const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = uint32(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + + crc32sseAll(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. 
+ d.index += d.length + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + } + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + d.ii++ + end := d.index + int(d.ii>>5) + 1 + if end > d.windowEnd { + end = d.windowEnd + } + for i := d.index; i < end; i++ { + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + d.index = end + } + } +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazySSE() { + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = uint32(d.index + d.hashOffset) + } + prevLength := d.length + prevOffset := d.offset + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if prevLength >= minMatchLength && d.length <= prevLength { + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + d.tokens.n++ + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. 
+			var newIndex int
+			newIndex = d.index + prevLength - 1
+			// Calculate missing hashes
+			end := newIndex
+			if end > d.maxInsertIndex {
+				end = d.maxInsertIndex
+			}
+			end += minMatchLength - 1
+			startindex := d.index + 1
+			if startindex > d.maxInsertIndex {
+				startindex = d.maxInsertIndex
+			}
+			tocheck := d.window[startindex:end]
+			dstSize := len(tocheck) - minMatchLength + 1
+			if dstSize > 0 {
+				dst := d.hashMatch[:dstSize]
+				crc32sseAll(tocheck, dst)
+				var newH uint32
+				for i, val := range dst {
+					di := i + startindex
+					newH = val & hashMask
+					// Get previous value with the same hash.
+					// Our chain should point to the previous value.
+					d.hashPrev[di&windowMask] = d.hashHead[newH]
+					// Set the head of the hash chain to us.
+					d.hashHead[newH] = uint32(di + d.hashOffset)
+				}
+				d.hash = newH
+			}
+
+			d.index = newIndex
+			d.byteAvailable = false
+			d.length = minMatchLength - 1
+			if d.tokens.n == maxFlateBlockTokens {
+				// The block includes the current character
+				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+					return
+				}
+				d.tokens.n = 0
+			}
+		} else {
+			// Reset, if we got a match this run.
+			if d.length >= minMatchLength {
+				d.ii = 0
+			}
+			// We have a byte waiting. Emit it.
+			if d.byteAvailable {
+				d.ii++
+				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+				d.tokens.n++
+				if d.tokens.n == maxFlateBlockTokens {
+					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+				d.index++
+
+				// If we have a long run of no matches, skip additional bytes
+				// Resets when d.ii overflows after 64KB.
+				if d.ii > 31 {
+					n := int(d.ii >> 6)
+					for j := 0; j < n; j++ {
+						if d.index >= d.windowEnd-1 {
+							break
+						}
+
+						d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+						d.tokens.n++
+						if d.tokens.n == maxFlateBlockTokens {
+							if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+								return
+							}
+							d.tokens.n = 0
+						}
+						d.index++
+					}
+					// Flush last byte
+					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+					d.tokens.n++
+					d.byteAvailable = false
+					// d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
+					if d.tokens.n == maxFlateBlockTokens {
+						if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+							return
+						}
+						d.tokens.n = 0
+					}
+				}
+			} else {
+				d.index++
+				d.byteAvailable = true
+			}
+		}
+	}
+}
+
+func (d *compressor) store() {
+	if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+		d.windowEnd = 0
+	}
+}
+
+// fillBlock will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err.
+func (d *compressor) storeHuff() {
+	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+		return
+	}
+	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+	d.err = d.w.err
+	d.windowEnd = 0
+}
+
+// storeSnappy will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err.
+func (d *compressor) storeSnappy() {
+	// We only compress if we have maxStoreBlockSize.
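The body that follows picks one of three block encodings based on how much the Snappy-style matcher shrank the window. A compact sketch of that decision rule, assuming tokens.n counts the emitted literals plus matches:

package main

import "fmt"

// chooseBlockKind mirrors the selection below: store raw when no matches
// were found; Huffman-code the literals when matches removed less than
// 1/16th of the bytes; otherwise write a full dynamic DEFLATE block.
func chooseBlockKind(nTokens, windowEnd int) string {
	switch {
	case nTokens == windowEnd:
		return "stored"
	case nTokens > windowEnd-windowEnd>>4:
		return "huffman-only"
	default:
		return "dynamic"
	}
}

func main() {
	fmt.Println(chooseBlockKind(65536, 65536)) // stored: zero matches
	fmt.Println(chooseBlockKind(64000, 65536)) // huffman-only: tokens shrank by less than 1/16th
	fmt.Println(chooseBlockKind(30000, 65536)) // dynamic
}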
+	if d.windowEnd < maxStoreBlockSize {
+		if !d.sync {
+			return
+		}
+		// Handle extremely small sizes.
+		if d.windowEnd < 128 {
+			if d.windowEnd == 0 {
+				return
+			}
+			if d.windowEnd <= 32 {
+				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+				d.tokens.n = 0
+				d.windowEnd = 0
+			} else {
+				d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+				d.err = d.w.err
+			}
+			d.tokens.n = 0
+			d.windowEnd = 0
+			d.snap.Reset()
+			return
+		}
+	}
+
+	d.snap.Encode(&d.tokens, d.window[:d.windowEnd])
+	// If we made zero matches, store the block as is.
+	if int(d.tokens.n) == d.windowEnd {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+		// If we removed less than 1/16th, huffman compress the block.
+	} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
+		d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+		d.err = d.w.err
+	} else {
+		d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd])
+		d.err = d.w.err
+	}
+	d.tokens.n = 0
+	d.windowEnd = 0
+}
+
+// write will add input bytes to the stream.
+// Unless an error occurs, all bytes will be consumed.
+func (d *compressor) write(b []byte) (n int, err error) {
+	if d.err != nil {
+		return 0, d.err
+	}
+	n = len(b)
+	for len(b) > 0 {
+		d.step(d)
+		b = b[d.fill(d, b):]
+		if d.err != nil {
+			return 0, d.err
+		}
+	}
+	return n, d.err
+}
+
+func (d *compressor) syncFlush() error {
+	d.sync = true
+	if d.err != nil {
+		return d.err
+	}
+	d.step(d)
+	if d.err == nil {
+		d.w.writeStoredHeader(0, false)
+		d.w.flush()
+		d.err = d.w.err
+	}
+	d.sync = false
+	return d.err
+}
+
+func (d *compressor) init(w io.Writer, level int) (err error) {
+	d.w = newHuffmanBitWriter(w)
+
+	switch {
+	case level == NoCompression:
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).store
+	case level == ConstantCompression:
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).storeHuff
+	case level >= 1 && level <= 4:
+		d.snap = newSnappy(level)
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).storeSnappy
+	case level == DefaultCompression:
+		level = 5
+		fallthrough
+	case 5 <= level && level <= 9:
+		d.compressionLevel = levels[level]
+		d.initDeflate()
+		d.fill = (*compressor).fillDeflate
+		if d.fastSkipHashing == skipNever {
+			if useSSE42 {
+				d.step = (*compressor).deflateLazySSE
+			} else {
+				d.step = (*compressor).deflateLazy
+			}
+		} else {
+			if useSSE42 {
+				d.step = (*compressor).deflateSSE
+			} else {
+				d.step = (*compressor).deflate
+			}
+		}
+	default:
+		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+	}
+	return nil
+}
+
+// reset the state of the compressor.
+func (d *compressor) reset(w io.Writer) {
+	d.w.reset(w)
+	d.sync = false
+	d.err = nil
+	// We only need to reset a few things for Snappy.
+	if d.snap != nil {
+		d.snap.Reset()
+		d.windowEnd = 0
+		d.tokens.n = 0
+		return
+	}
+	switch d.compressionLevel.chain {
+	case 0:
+		// level was NoCompression or ConstantCompression.
+ d.windowEnd = 0 + default: + d.chainHead = -1 + for i := range d.hashHead { + d.hashHead[i] = 0 + } + for i := range d.hashPrev { + d.hashPrev[i] = 0 + } + d.hashOffset = 1 + d.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + d.tokens.n = 0 + d.length = minMatchLength - 1 + d.offset = 0 + d.hash = 0 + d.ii = 0 + d.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. +// Level 0 (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. +// Level -1 (DefaultCompression) uses the default compression level. +// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. +// +// If level is in the range [-2, 9] then the error returned will be nil. +// Otherwise the error returned will be non-nil. +func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. +func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + dw := &dictWriter{w} + zw, err := NewWriter(dw, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +type dictWriter struct { + w io.Writer +} + +func (w *dictWriter) Write(b []byte) (n int, err error) { + return w.w.Write(b) +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. +// Flush does not return until the data has been written. +// Calling Flush when there is no pending data still causes the Writer +// to emit a sync marker of at least 4 bytes. +// If the underlying writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. 
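For orientation, a minimal end-to-end use of the writer API documented above; the import path is assumed to be the vendored github.com/klauspost/compress/flate:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	zw, err := flate.NewWriter(&buf, flate.DefaultCompression)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write(bytes.Repeat([]byte("casecmp "), 64)); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil { // flushes pending data and writes the final block
		log.Fatal(err)
	}
	fmt.Println(buf.Len(), "compressed bytes from 512 input bytes")
}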
+func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. +func (w *Writer) Reset(dst io.Writer) { + if dw, ok := w.d.w.writer.(*dictWriter); ok { + // w was created with NewWriterDict + dw.w = dst + w.d.reset(dw) + w.d.fillWindow(w.dict) + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. +func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go new file mode 100644 index 0000000..71c75a0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// dictDecoder implements the LZ77 sliding dictionary as used in decompression. +// LZ77 decompresses data through sequences of two forms of commands: +// +// * Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. +// +// * Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. +// +// For performance reasons, this implementation performs little to no sanity +// checks about the arguments. As such, the invariants documented for each +// method call must be respected. +type dictDecoder struct { + hist []byte // Sliding window history + + // Invariant: 0 <= rdPos <= wrPos <= len(hist) + wrPos int // Current output position in buffer + rdPos int // Have emitted hist[:rdPos] already + full bool // Has a full window length been written yet? +} + +// init initializes dictDecoder to have a sliding window dictionary of the given +// size. If a preset dict is provided, it will initialize the dictionary with +// the contents of dict. +func (dd *dictDecoder) init(size int, dict []byte) { + *dd = dictDecoder{hist: dd.hist} + + if cap(dd.hist) < size { + dd.hist = make([]byte, size) + } + dd.hist = dd.hist[:size] + + if len(dict) > len(dd.hist) { + dict = dict[len(dict)-len(dd.hist):] + } + dd.wrPos = copy(dd.hist, dict) + if dd.wrPos == len(dd.hist) { + dd.wrPos = 0 + dd.full = true + } + dd.rdPos = dd.wrPos +} + +// histSize reports the total amount of historical data in the dictionary. +func (dd *dictDecoder) histSize() int { + if dd.full { + return len(dd.hist) + } + return dd.wrPos +} + +// availRead reports the number of bytes that can be flushed by readFlush. 
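The writeCopy/tryWriteCopy methods below exploit LZ77's rule that a copy length may exceed its backwards distance, turning a short history into run-length expansion. A self-contained illustration of that forward, possibly overlapping copy:

package main

import "fmt"

func main() {
	hist := []byte("abc")
	dist, length := 3, 8 // copy 8 bytes from 3 bytes back: length > dist is legal
	srcPos := len(hist) - dist
	for i := 0; i < length; i++ {
		hist = append(hist, hist[srcPos+i]) // the source may overlap bytes just written
	}
	fmt.Println(string(hist)) // abcabcabcab
}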
+func (dd *dictDecoder) availRead() int { + return dd.wrPos - dd.rdPos +} + +// availWrite reports the available amount of output buffer space. +func (dd *dictDecoder) availWrite() int { + return len(dd.hist) - dd.wrPos +} + +// writeSlice returns a slice of the available buffer to write data to. +// +// This invariant will be kept: len(s) <= availWrite() +func (dd *dictDecoder) writeSlice() []byte { + return dd.hist[dd.wrPos:] +} + +// writeMark advances the writer pointer by cnt. +// +// This invariant must be kept: 0 <= cnt <= availWrite() +func (dd *dictDecoder) writeMark(cnt int) { + dd.wrPos += cnt +} + +// writeByte writes a single byte to the dictionary. +// +// This invariant must be kept: 0 < availWrite() +func (dd *dictDecoder) writeByte(c byte) { + dd.hist[dd.wrPos] = c + dd.wrPos++ +} + +// writeCopy copies a string at a given (dist, length) to the output. +// This returns the number of bytes copied and may be less than the requested +// length if the available space in the output buffer is too small. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) writeCopy(dist, length int) int { + dstBase := dd.wrPos + dstPos := dstBase + srcPos := dstPos - dist + endPos := dstPos + length + if endPos > len(dd.hist) { + endPos = len(dd.hist) + } + + // Copy non-overlapping section after destination position. + // + // This section is non-overlapping in that the copy length for this section + // is always less than or equal to the backwards distance. This can occur + // if a distance refers to data that wraps-around in the buffer. + // Thus, a backwards copy is performed here; that is, the exact bytes in + // the source prior to the copy is placed in the destination. + if srcPos < 0 { + srcPos += len(dd.hist) + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) + srcPos = 0 + } + + // Copy possibly overlapping section before destination position. + // + // This section can overlap if the copy length for this section is larger + // than the backwards distance. This is allowed by LZ77 so that repeated + // strings can be succinctly represented using (dist, length) pairs. + // Thus, a forwards copy is performed here; that is, the bytes copied is + // possibly dependent on the resulting bytes in the destination as the copy + // progresses along. This is functionally equivalent to the following: + // + // for i := 0; i < endPos-dstPos; i++ { + // dd.hist[dstPos+i] = dd.hist[srcPos+i] + // } + // dstPos = endPos + // + for dstPos < endPos { + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// tryWriteCopy tries to copy a string at a given (distance, length) to the +// output. This specialized version is optimized for short distances. +// +// This method is designed to be inlined for performance reasons. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) tryWriteCopy(dist, length int) int { + dstPos := dd.wrPos + endPos := dstPos + length + if dstPos < dist || endPos > len(dd.hist) { + return 0 + } + dstBase := dstPos + srcPos := dstPos - dist + + // Copy possibly overlapping section before destination position. +loop: + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + if dstPos < endPos { + goto loop // Avoid for-loop so that this function can be inlined + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// readFlush returns a slice of the historical buffer that is ready to be +// emitted to the user. 
The data returned by readFlush must be fully consumed +// before calling any other dictDecoder methods. +func (dd *dictDecoder) readFlush() []byte { + toRead := dd.hist[dd.rdPos:dd.wrPos] + dd.rdPos = dd.wrPos + if dd.wrPos == len(dd.hist) { + dd.wrPos, dd.rdPos = 0, 0 + dd.full = true + } + return toRead +} diff --git a/vendor/github.com/klauspost/compress/flate/gen.go b/vendor/github.com/klauspost/compress/flate/gen.go new file mode 100644 index 0000000..154c89a --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/gen.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates fixedhuff.go +// Invoke as +// +// go run gen.go -output fixedhuff.go + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "log" +) + +var filename = flag.String("output", "fixedhuff.go", "output file name") + +const maxCodeLen = 16 + +// Note: the definition of the huffmanDecoder struct is copied from +// inflate.go, as it is private to the implementation. + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks [huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(bits []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.min != 0 { + *h = huffmanDecoder{} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range bits { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i] = code + code += count[i] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. 
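A worked instance of the nextcode loop above, for code lengths {1, 2, 3, 3}: the first code of each length is the previous first code shifted left, advanced by the number of codes consumed, and a complete coding finishes with code == 1<<max, which is exactly the check that follows:

package main

import "fmt"

func main() {
	count := map[int]int{1: 1, 2: 1, 3: 2} // one 1-bit code, one 2-bit code, two 3-bit codes
	code := 0
	for bits := 1; bits <= 3; bits++ {
		code <<= 1
		fmt.Printf("first %d-bit code: %0*b\n", bits, bits, code) // 0, 10, 110
		code += count[bits]
	}
	fmt.Println(code == 1<<3) // true: every 3-bit sequence is claimed, so the coding is complete
}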
+	if code != 1<<uint(max) && !(code == 1 && max == 1) {
+		return false
+	}
+
+	h.min = min
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		h.links = make([][]uint32, huffmanNumChunks-link)
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			h.links[off] = make([]uint32, numLinks)
+		}
+	}
+
+	for i, n := range bits {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint16(i<<huffmanValueShift | n)
+		reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk. Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+				if sanity && h.chunks[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				h.chunks[off] = chunk
+			}
+		} else {
+			j := reverse & (huffmanNumChunks - 1)
+			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+				// Longer codes should have been
+				// associated with a link table above.
+				panic("impossible: not an indirect chunk")
+			}
+			value := h.chunks[j] >> huffmanValueShift
+			linktab := h.links[value]
+			reverse >>= huffmanChunkBits
+			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+				if sanity && linktab[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				linktab[off] = chunk
+			}
+		}
+	}
+
+	if sanity {
+		// Above we've sanity checked that we never overwrote
+		// an existing entry. Here we additionally check that
+		// we filled the tables completely.
+		for i, chunk := range h.chunks {
+			if chunk == 0 {
+				// As an exception, in the degenerate
+				// single-code case, we allow odd
+				// chunks to be missing.
+				if code == 1 && i%2 == 1 {
+					continue
+				}
+				panic("impossible: missing chunk")
+			}
+		}
+		for _, linktab := range h.links {
+			for _, chunk := range linktab {
+				if chunk == 0 {
+					panic("impossible: missing chunk")
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+func main() {
+	flag.Parse()
+
+	var h huffmanDecoder
+	var bits [288]int
+	initReverseByte()
+	for i := 0; i < 144; i++ {
+		bits[i] = 8
+	}
+	for i := 144; i < 256; i++ {
+		bits[i] = 9
+	}
+	for i := 256; i < 280; i++ {
+		bits[i] = 7
+	}
+	for i := 280; i < 288; i++ {
+		bits[i] = 8
+	}
+	h.init(bits[:])
+	if h.links != nil {
+		log.Fatal("Unexpected links table in fixed Huffman decoder")
+	}
+
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.`+"\n\n") + + fmt.Fprintln(&buf, "package flate") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{") + fmt.Fprintf(&buf, "\t%d,\n", h.min) + fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{") + for i := 0; i < huffmanNumChunks; i++ { + if i&7 == 0 { + fmt.Fprintf(&buf, "\t\t") + } else { + fmt.Fprintf(&buf, " ") + } + fmt.Fprintf(&buf, "0x%04x,", h.chunks[i]) + if i&7 == 7 { + fmt.Fprintln(&buf) + } + } + fmt.Fprintln(&buf, "\t},") + fmt.Fprintln(&buf, "\tnil, 0,") + fmt.Fprintln(&buf, "}") + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) + if err != nil { + log.Fatal(err) + } +} + +var reverseByte [256]byte + +func initReverseByte() { + for x := 0; x < 256; x++ { + var result byte + for i := uint(0); i < 8; i++ { + result |= byte(((x >> i) & 1) << (7 - i)) + } + reverseByte[x] = result + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go new file mode 100644 index 0000000..f9b2a69 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -0,0 +1,701 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "io" +) + +const ( + // The largest offset code. + offsetCodeCount = 30 + + // The special code used to mark the end of a block. + endBlockMarker = 256 + + // The first length code. + lengthCodesStart = 257 + + // The number of codegen codes. + codegenCodeCount = 19 + badCode = 255 + + // bufferFlushSize indicates the buffer size + // after which bytes are flushed to the writer. + // Should preferably be a multiple of 6, since + // we accumulate 6 bytes between writes to the buffer. + bufferFlushSize = 240 + + // bufferSize is the actual output byte buffer size. + // It must have additional headroom for a flush + // which can contain up to 8 bytes. + bufferSize = bufferFlushSize + 8 +) + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = []int8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = []uint32{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// offset code word extra bits. 
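For a sense of how the two tables just defined are consumed (see writeTokens later in this file): a length symbol contributes lengthBase[code] plus however many extra bits lengthExtraBits[code] says to read, with values stored relative to DEFLATE's 3-byte minimum match. A small worked example using the first entries copied from the tables above:

package main

import "fmt"

func main() {
	// First 14 entries of the tables above; values are match length minus 3.
	lengthBase := []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20}
	lengthExtraBits := []int8{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2}

	code := 9                                 // length symbol 257+9 = 266
	fmt.Println(lengthExtraBits[code])        // 1: one extra bit separates lengths 13 and 14
	extra := uint32(1)                        // suppose the extra bit read from the stream is 1
	fmt.Println(3 + lengthBase[code] + extra) // 14
}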
+var offsetExtraBits = []int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, +} + +var offsetBase = []uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, + 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, + 0x100000, 0x180000, 0x200000, 0x300000, +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + // writer is the underlying writer. + // Do not use it directly; use the write method, which ensures + // that Write errors are sticky. + writer io.Writer + + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. + bits uint64 + nbits uint + bytes [bufferSize]byte + codegenFreq [codegenCodeCount]int32 + nbytes int + literalFreq []int32 + offsetFreq []int32 + codegen []uint8 + literalEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error +} + +func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { + return &huffmanBitWriter{ + writer: w, + literalFreq: make([]int32, maxNumLit), + offsetFreq: make([]int32, offsetCodeCount), + codegen: make([]uint8, maxNumLit+offsetCodeCount+1), + literalEncoding: newHuffmanEncoder(maxNumLit), + codegenEncoding: newHuffmanEncoder(codegenCodeCount), + offsetEncoding: newHuffmanEncoder(offsetCodeCount), + } +} + +func (w *huffmanBitWriter) reset(writer io.Writer) { + w.writer = writer + w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil + w.bytes = [bufferSize]byte{} +} + +func (w *huffmanBitWriter) flush() { + if w.err != nil { + w.nbits = 0 + return + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + n++ + } + w.bits = 0 + w.write(w.bytes[:n]) + w.nbytes = 0 +} + +func (w *huffmanBitWriter) write(b []byte) { + if w.err != nil { + return + } + _, w.err = w.writer.Write(b) +} + +func (w *huffmanBitWriter) writeBits(b int32, nb uint) { + if w.err != nil { + return + } + w.bits |= uint64(b) << w.nbits + w.nbits += nb + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes = n + } +} + +func (w *huffmanBitWriter) writeBytes(bytes []byte) { + if w.err != nil { + return + } + n := w.nbytes + if w.nbits&7 != 0 { + w.err = InternalError("writeBytes with unfinished bits") + return + } + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + w.nbits -= 8 + n++ + } + if n != 0 { + w.write(w.bytes[:n]) + } + w.nbytes = 0 + w.write(bytes) +} + +// RFC 1951 3.2.7 specifies a special run-length encoding for specifying +// the literal and offset lengths arrays (which are concatenated into a single +// array). 
This method generates that run-length encoding. +// +// The result is written into the codegen array, and the frequencies +// of each code is written into the codegenFreq array. +// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional +// information. Code badCode is an end marker +// +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use +func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { + for i := range w.codegenFreq { + w.codegenFreq[i] = 0 + } + // Note that we are using codegen both as a temporary variable for holding + // a copy of the frequencies, and as the place where we put the result. + // This is fine because the output is always shorter than the input used + // so far. + codegen := w.codegen // cache + // Copy the concatenated code sizes to codegen. Put a marker at the end. + cgnl := codegen[:numLiterals] + for i := range cgnl { + cgnl[i] = uint8(litEnc.codes[i].len) + } + + cgnl = codegen[numLiterals : numLiterals+numOffsets] + for i := range cgnl { + cgnl[i] = uint8(offEnc.codes[i].len) + } + codegen[numLiterals+numOffsets] = badCode + + size := codegen[0] + count := 1 + outIndex := 0 + for inIndex := 1; size != badCode; inIndex++ { + // INVARIANT: We have seen "count" copies of size that have not yet + // had output generated for them. + nextSize := codegen[inIndex] + if nextSize == size { + count++ + continue + } + // We need to generate codegen indicating "count" of size. + if size != 0 { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + count-- + for count >= 3 { + n := 6 + if n > count { + n = count + } + codegen[outIndex] = 16 + outIndex++ + codegen[outIndex] = uint8(n - 3) + outIndex++ + w.codegenFreq[16]++ + count -= n + } + } else { + for count >= 11 { + n := 138 + if n > count { + n = count + } + codegen[outIndex] = 18 + outIndex++ + codegen[outIndex] = uint8(n - 11) + outIndex++ + w.codegenFreq[18]++ + count -= n + } + if count >= 3 { + // count >= 3 && count <= 10 + codegen[outIndex] = 17 + outIndex++ + codegen[outIndex] = uint8(count - 3) + outIndex++ + w.codegenFreq[17]++ + count = 0 + } + } + count-- + for ; count >= 0; count-- { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + } + // Set up invariant for next time through the loop. + size = nextSize + count = 1 + } + // Marker indicating the end of the codegen. + codegen[outIndex] = badCode +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + header := 3 + 5 + 5 + 4 + (3 * numCodegens) + + w.codegenEncoding.bitLength(w.codegenFreq[:]) + + int(w.codegenFreq[16])*2 + + int(w.codegenFreq[17])*3 + + int(w.codegenFreq[18])*7 + size = header + + litEnc.bitLength(w.literalFreq) + + offEnc.bitLength(w.offsetFreq) + + extraBits + + return size, numCodegens +} + +// fixedSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) fixedSize(extraBits int) int { + return 3 + + fixedLiteralEncoding.bitLength(w.literalFreq) + + fixedOffsetEncoding.bitLength(w.offsetFreq) + + extraBits +} + +// storedSize calculates the stored size, including header. 
+// The function returns the size in bits and whether the block +// fits inside a single block. +func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { + if in == nil { + return 0, false + } + if len(in) <= maxStoreBlockSize { + return (len(in) + 5) * 8, true + } + return 0, false +} + +func (w *huffmanBitWriter) writeCode(c hcode) { + if w.err != nil { + return + } + w.bits |= uint64(c.code) << w.nbits + w.nbits += uint(c.len) + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes = n + } +} + +// Write the header of a dynamic Huffman block to the output stream. +// +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen +func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { + if w.err != nil { + return + } + var firstBits int32 = 4 + if isEof { + firstBits = 5 + } + w.writeBits(firstBits, 3) + w.writeBits(int32(numLiterals-257), 5) + w.writeBits(int32(numOffsets-1), 5) + w.writeBits(int32(numCodegens-4), 4) + + for i := 0; i < numCodegens; i++ { + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len) + w.writeBits(int32(value), 3) + } + + i := 0 + for { + var codeWord int = int(w.codegen[i]) + i++ + if codeWord == badCode { + break + } + w.writeCode(w.codegenEncoding.codes[uint32(codeWord)]) + + switch codeWord { + case 16: + w.writeBits(int32(w.codegen[i]), 2) + i++ + break + case 17: + w.writeBits(int32(w.codegen[i]), 3) + i++ + break + case 18: + w.writeBits(int32(w.codegen[i]), 7) + i++ + break + } + } +} + +func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +// writeBlock will write a block of tokens with the smallest encoding. +// The original input can be supplied, and if the huffman encoded data +// is larger than the original bytes, the data will be written as a +// stored block. +// If the input is nil, the tokens will always be Huffman encoded. +func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens = append(tokens, endBlockMarker) + numLiterals, numOffsets := w.indexTokens(tokens) + + var extraBits int + storedSize, storable := w.storedSize(input) + if storable { + // We only bother calculating the costs of the extra bits required by + // the length of offset fields (which will be the same for both fixed + // and dynamic encoding), if we need to compare those two encodings + // against stored encoding. + for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ { + // First eight length codes have extra size = 0. 
+ extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart]) + } + for offsetCode := 4; offsetCode < numOffsets; offsetCode++ { + // First four offset codes have extra size = 0. + extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode]) + } + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + var size = w.fixedSize(extraBits) + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storable && storedSize < size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + + // Write the tokens. + w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes) +} + +// writeBlockDynamic encodes a block using a dynamic Huffman table. +// This should be used if the symbols used have a disproportionate +// histogram distribution. +// If input is supplied and the compression savings are below 1/16th of the +// input size the block is stored. +func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens = append(tokens, endBlockMarker) + numLiterals, numOffsets := w.indexTokens(tokens) + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0) + + // Store bytes, if we don't get a reasonable improvement. + if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + + // Write the tokens. + w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes) +} + +// indexTokens indexes a slice of tokens, and updates +// literalFreq and offsetFreq, and generates literalEncoding +// and offsetEncoding. +// The number of literal and offset tokens is returned. 
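The "reasonable improvement" guard in writeBlockDynamic above (it appears again in writeBlockHuff below) compares sizes in bits with a one-sixteenth margin; in sketch form:

package main

import "fmt"

// preferStored expresses the guard ssize < (size + size>>4): the stored
// form wins unless dynamic Huffman coding is smaller than the stored size
// by more than dynamicBits/16.
func preferStored(storedBits, dynamicBits int) bool {
	return storedBits < dynamicBits+dynamicBits>>4
}

func main() {
	fmt.Println(preferStored(8000, 7800)) // true: a ~2.5% saving is not worth the table overhead
	fmt.Println(preferStored(8000, 7000)) // false: dynamic Huffman pays off
}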
+func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) { + for i := range w.literalFreq { + w.literalFreq[i] = 0 + } + for i := range w.offsetFreq { + w.offsetFreq[i] = 0 + } + + for _, t := range tokens { + if t < matchType { + w.literalFreq[t.literal()]++ + continue + } + length := t.length() + offset := t.offset() + w.literalFreq[lengthCodesStart+lengthCode(length)]++ + w.offsetFreq[offsetCode(offset)]++ + } + + // get the number of literals + numLiterals = len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets = len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. + w.offsetFreq[0] = 1 + numOffsets = 1 + } + w.literalEncoding.generate(w.literalFreq, 15) + w.offsetEncoding.generate(w.offsetFreq, 15) + return +} + +// writeTokens writes a slice of tokens to the output. +// codes for literal and offset encoding must be supplied. +func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { + if w.err != nil { + return + } + for _, t := range tokens { + if t < matchType { + w.writeCode(leCodes[t.literal()]) + continue + } + // Write the length + length := t.length() + lengthCode := lengthCode(length) + w.writeCode(leCodes[lengthCode+lengthCodesStart]) + extraLengthBits := uint(lengthExtraBits[lengthCode]) + if extraLengthBits > 0 { + extraLength := int32(length - lengthBase[lengthCode]) + w.writeBits(extraLength, extraLengthBits) + } + // Write the offset + offset := t.offset() + offsetCode := offsetCode(offset) + w.writeCode(oeCodes[offsetCode]) + extraOffsetBits := uint(offsetExtraBits[offsetCode]) + if extraOffsetBits > 0 { + extraOffset := int32(offset - offsetBase[offsetCode]) + w.writeBits(extraOffset, extraOffsetBits) + } + } +} + +// huffOffset is a static offset encoder used for huffman only encoding. +// It can be reused since we will not be encoding offset values. +var huffOffset *huffmanEncoder + +func init() { + w := newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + huffOffset = newHuffmanEncoder(offsetCodeCount) + huffOffset.generate(w.offsetFreq, 15) +} + +// writeBlockHuff encodes a block of bytes as either +// Huffman encoded literals or uncompressed bytes if the +// results only gains very little from compression. +func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) { + if w.err != nil { + return + } + + // Clear histogram + for i := range w.literalFreq { + w.literalFreq[i] = 0 + } + + // Add everything as literals + histogram(input, w.literalFreq) + + w.literalFreq[endBlockMarker] = 1 + + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + + w.literalEncoding.generate(w.literalFreq, 15) + + // Figure out smallest code. + // Always use dynamic Huffman or Store + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0) + + // Store bytes, if we don't get a reasonable improvement. 
+	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+		w.writeStoredHeader(len(input), eof)
+		w.writeBytes(input)
+		return
+	}
+
+	// Huffman.
+	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+	encoding := w.literalEncoding.codes[:257]
+	n := w.nbytes
+	for _, t := range input {
+		// Bitwriting inlined, ~30% speedup
+		c := encoding[t]
+		w.bits |= uint64(c.code) << w.nbits
+		w.nbits += uint(c.len)
+		if w.nbits < 48 {
+			continue
+		}
+		// Store 6 bytes
+		bits := w.bits
+		w.bits >>= 48
+		w.nbits -= 48
+		bytes := w.bytes[n : n+6]
+		bytes[0] = byte(bits)
+		bytes[1] = byte(bits >> 8)
+		bytes[2] = byte(bits >> 16)
+		bytes[3] = byte(bits >> 24)
+		bytes[4] = byte(bits >> 32)
+		bytes[5] = byte(bits >> 40)
+		n += 6
+		if n < bufferFlushSize {
+			continue
+		}
+		w.write(w.bytes[:n])
+		if w.err != nil {
+			return // Return early in the event of write failures
+		}
+		n = 0
+	}
+	w.nbytes = n
+	w.writeCode(encoding[endBlockMarker])
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 0000000..bdcbd82
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,344 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"math"
+	"sort"
+)
+
+// hcode is a huffman code with a bit code and bit length.
+type hcode struct {
+	code, len uint16
+}
+
+type huffmanEncoder struct {
+	codes     []hcode
+	freqcache []literalNode
+	bitCount  [17]int32
+	lns       byLiteral // stored to avoid repeated allocation in generate
+	lfs       byFreq    // stored to avoid repeated allocation in generate
+}
+
+type literalNode struct {
+	literal uint16
+	freq    int32
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+	// Our level. for better printing
+	level int32
+
+	// The frequency of the last node at this level
+	lastFreq int32
+
+	// The frequency of the next character to add to this level
+	nextCharFreq int32
+
+	// The frequency of the next pair (from level below) to add to this level.
+	// Only valid if the "needed" value of the next lower level is 0.
+	nextPairFreq int32
+
+	// The number of chains remaining to generate for this level before moving
+	// up to the next level
+	needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint16) {
+	h.len = length
+	h.code = code
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+	return &huffmanEncoder{codes: make([]hcode, size)}
+}
+
+// Generates a HuffmanCode corresponding to the fixed literal table
+func generateFixedLiteralEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(maxNumLit)
+	codes := h.codes
+	var ch uint16
+	for ch = 0; ch < maxNumLit; ch++ {
+		var bits uint16
+		var size uint16
+		switch {
+		case ch < 144:
+			// size 8, 00110000 .. 10111111
+			bits = ch + 48
+			size = 8
+			break
+		case ch < 256:
+			// size 9, 110010000 .. 111111111
+			bits = ch + 400 - 144
+			size = 9
+			break
+		case ch < 280:
+			// size 7, 0000000 .. 0010111
+			bits = ch - 256
+			size = 7
+			break
+		default:
+			// size 8, 11000000 .. 
11000111 + bits = ch + 192 - 280 + size = 8 + } + codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + } + return h +} + +func generateFixedOffsetEncoding() *huffmanEncoder { + h := newHuffmanEncoder(30) + codes := h.codes + for ch := range codes { + codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5} + } + return h +} + +var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() +var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() + +func (h *huffmanEncoder) bitLength(freq []int32) int { + var total int + for i, f := range freq { + if f != 0 { + total += int(f) * int(h.codes[i].len) + } + } + return total +} + +const maxBitsLimit = 16 + +// Return the number of literals assigned to each bit size in the Huffman encoding +// +// This method is only called when list.length >= 3 +// The cases of 0, 1, and 2 literals are handled by special case code. +// +// list An array of the literals with non-zero frequencies +// and their associated frequencies. The array is in order of increasing +// frequency, and has as its last element a special element with frequency +// MaxInt32 +// maxBits The maximum number of bits that should be used to encode any literal. +// Must be less than 16. +// return An integer array in which array[i] indicates the number of literals +// that should be encoded in i bits. +func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { + if maxBits >= maxBitsLimit { + panic("flate: maxBits too large") + } + n := int32(len(list)) + list = list[0 : n+1] + list[n] = maxNode() + + // The tree can't have greater depth than n - 1, no matter what. This + // saves a little bit of work in some small cases + if maxBits > n-1 { + maxBits = n - 1 + } + + // Create information about each of the levels. + // A bogus "Level 0" whose sole purpose is so that + // level1.prev.needed==0. This makes level1.nextPairFreq + // be a legitimate value that never gets chosen. + var levels [maxBitsLimit]levelInfo + // leafCounts[i] counts the number of literals at the left + // of ancestors of the rightmost node at level i. + // leafCounts[i][j] is the number of literals at the left + // of the level j ancestor. + var leafCounts [maxBitsLimit][maxBitsLimit]int32 + + for level := int32(1); level <= maxBits; level++ { + // For every level, the first two items are the first two characters. + // We initialize the levels as if we had already figured this out. + levels[level] = levelInfo{ + level: level, + lastFreq: list[1].freq, + nextCharFreq: list[2].freq, + nextPairFreq: list[0].freq + list[1].freq, + } + leafCounts[level][level] = 2 + if level == 1 { + levels[level].nextPairFreq = math.MaxInt32 + } + } + + // We need a total of 2*n - 2 items at top level and have already generated 2. + levels[maxBits].needed = 2*n - 4 + + level := maxBits + for { + l := &levels[level] + if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { + // We've run out of both leafs and pairs. + // End all calculations for this level. + // To make sure we never come back to this level or any lower level, + // set nextPairFreq impossibly large. + l.needed = 0 + levels[level+1].nextPairFreq = math.MaxInt32 + level++ + continue + } + + prevFreq := l.lastFreq + if l.nextCharFreq < l.nextPairFreq { + // The next item on this row is a leaf node. + n := leafCounts[level][level] + 1 + l.lastFreq = l.nextCharFreq + // Lower leafCounts are the same of the previous node. 
+			leafCounts[level][level] = n
+			l.nextCharFreq = list[n].freq
+		} else {
+			// The next item on this row is a pair from the previous row.
+			// nextPairFreq isn't valid until we generate two
+			// more values in the level below
+			l.lastFreq = l.nextPairFreq
+			// Take leaf counts from the lower level, except counts[level] remains the same.
+			copy(leafCounts[level][:level], leafCounts[level-1][:level])
+			levels[l.level-1].needed = 2
+		}
+
+		if l.needed--; l.needed == 0 {
+			// We've done everything we need to do for this level.
+			// Continue calculating one level up. Fill in nextPairFreq
+			// of that level with the sum of the two nodes we've just calculated on
+			// this level.
+			if l.level == maxBits {
+				// All done!
+				break
+			}
+			levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
+			level++
+		} else {
+			// If we stole from below, move down temporarily to replenish it.
+			for levels[level-1].needed > 0 {
+				level--
+			}
+		}
+	}
+
+	// Something is wrong if, at the end, the top level is null or hasn't used
+	// all of the leaves.
+	if leafCounts[maxBits][maxBits] != n {
+		panic("leafCounts[maxBits][maxBits] != n")
+	}
+
+	bitCount := h.bitCount[:maxBits+1]
+	bits := 1
+	counts := &leafCounts[maxBits]
+	for level := maxBits; level > 0; level-- {
+		// chain.leafCount gives the number of literals requiring at least "bits"
+		// bits to encode.
+		bitCount[bits] = counts[level] - counts[level-1]
+		bits++
+	}
+	return bitCount
+}
+
+// Look at the leaves and assign them a bit count and an encoding as specified
+// in RFC 1951 3.2.2.
+func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
+	code := uint16(0)
+	for n, bits := range bitCount {
+		code <<= 1
+		if n == 0 || bits == 0 {
+			continue
+		}
+		// The literals list[len(list)-bits] .. list[len(list)-1]
+		// are encoded using "bits" bits, and get the values
+		// code, code + 1, ....  The code values are
+		// assigned in literal order (not frequency order).
+		chunk := list[len(list)-int(bits):]
+
+		h.lns.sort(chunk)
+		for _, node := range chunk {
+			h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
+			code++
+		}
+		list = list[0 : len(list)-int(bits)]
+	}
+}
+
+// Update this Huffman Code object to be the minimum code for the specified frequency count.
+//
+// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
+// maxBits The maximum number of bits to use for any literal.
+func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
+	if h.freqcache == nil {
+		// Allocate a reusable buffer with the longest possible frequency table.
+		// Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
+		// The largest of these is maxNumLit, so we allocate for that case.
+		h.freqcache = make([]literalNode, maxNumLit+1)
+	}
+	list := h.freqcache[:len(freq)+1]
+	// Number of non-zero literals
+	count := 0
+	// Set list to be the set of all non-zero literals and their frequencies
+	for i, f := range freq {
+		if f != 0 {
+			list[count] = literalNode{uint16(i), f}
+			count++
+		} else {
+			list[count] = literalNode{}
+			h.codes[i].len = 0
+		}
+	}
+	list[len(freq)] = literalNode{}
+
+	list = list[:count]
+	if count <= 2 {
+		// Handle the small cases here, because they are awkward for the general case code. With
+		// two or fewer literals, everything has bit length 1.
+		for i, node := range list {
+			// "list" is in order of increasing literal value.
+			h.codes[node.literal].set(uint16(i), 1)
+		}
+		return
+	}
+	h.lfs.sort(list)
+
+	// Get the number of literals for each bit count
+	bitCount := h.bitCounts(list, maxBits)
+	// And do the assignment
+	h.assignEncodingAndSize(bitCount, list)
+}
+
+type byLiteral []literalNode
+
+func (s *byLiteral) sort(a []literalNode) {
+	*s = byLiteral(a)
+	sort.Sort(s)
+}
+
+func (s byLiteral) Len() int { return len(s) }
+
+func (s byLiteral) Less(i, j int) bool {
+	return s[i].literal < s[j].literal
+}
+
+func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+type byFreq []literalNode
+
+func (s *byFreq) sort(a []literalNode) {
+	*s = byFreq(a)
+	sort.Sort(s)
+}
+
+func (s byFreq) Len() int { return len(s) }
+
+func (s byFreq) Less(i, j int) bool {
+	if s[i].freq == s[j].freq {
+		return s[i].literal < s[j].literal
+	}
+	return s[i].freq < s[j].freq
+}
+
+func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
new file mode 100644
index 0000000..53b63d9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -0,0 +1,846 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flate implements the DEFLATE compressed data format, described in
+// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
+// formats.
+package flate
+
+import (
+	"bufio"
+	"io"
+	"strconv"
+	"sync"
+)
+
+const (
+	maxCodeLen = 16 // max length of Huffman code
+	// The next three numbers come from the RFC section 3.2.7, with the
+	// additional proviso in section 3.2.5 which implies that distance codes
+	// 30 and 31 should never occur in compressed data.
+	maxNumLit  = 286
+	maxNumDist = 30
+	numCodes   = 19 // number of codes in Huffman meta-code
+)
+
+// Initialize the fixedHuffmanDecoder only once upon first use.
+var fixedOnce sync.Once
+var fixedHuffmanDecoder huffmanDecoder
+
+// A CorruptInputError reports the presence of corrupt input at a given offset.
+type CorruptInputError int64
+
+func (e CorruptInputError) Error() string {
+	return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10)
+}
+
+// An InternalError reports an error in the flate code itself.
+type InternalError string
+
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
+
+// A ReadError reports an error encountered while reading input.
+//
+// Deprecated: No longer returned.
+type ReadError struct {
+	Offset int64 // byte offset where error occurred
+	Err    error // error returned by underlying Read
+}
+
+func (e *ReadError) Error() string {
+	return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
+}
+
+// A WriteError reports an error encountered while writing output.
+//
+// Deprecated: No longer returned.
+type WriteError struct {
+	Offset int64 // byte offset where error occurred
+	Err    error // error returned by underlying Write
+}
+
+func (e *WriteError) Error() string {
+	return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
+}
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to
+// switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// The data structure for decoding Huffman tables is based on that of +// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), +// For codes smaller than the table width, there are multiple entries +// (each combination of trailing bits has the same value). For codes +// larger than the table width, the table contains a link to an overflow +// table. The width of each entry in the link table is the maximum code +// size minus the chunk width. +// +// Note that you can do a lookup in the table even without all bits +// filled. Since the extra bits are zero, and the DEFLATE Huffman codes +// have the property that shorter codes come before longer ones, the +// bit length estimate in the result is a lower bound on the actual +// number of bits. +// +// See the following: +// http://www.gzip.org/algorithm.txt + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks [huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(bits []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.min != 0 { + *h = huffmanDecoder{} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range bits { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i] = code + code += count[i] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. 
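The table shape described above supports a two-level lookup. The sketch below mirrors the fast path of huffSym, which appears later in this file, minus its bit refilling and error handling; lookupSketch is a hypothetical helper, not part of the package:

// lookupSketch resolves one code from the low input bits: the bottom 9 bits
// index chunks, and entries whose stored bit length exceeds huffmanChunkBits
// redirect into the per-prefix links table.
func lookupSketch(h *huffmanDecoder, b uint32) (sym, width uint32) {
	chunk := h.chunks[b&(huffmanNumChunks-1)]
	n := chunk & huffmanCountMask // code length in bits
	if n > huffmanChunkBits {
		chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
		n = chunk & huffmanCountMask
	}
	return chunk >> huffmanValueShift, n
}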
+	if code != 1<<uint(max) && !(code == 1 && max == 1) {
+		return false
+	}
+
+	h.min = min
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		h.links = make([][]uint32, huffmanNumChunks-link)
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			h.links[off] = make([]uint32, numLinks)
+		}
+	}
+
+	for i, n := range bits {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint32(i<<huffmanValueShift | n)
+		reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk. Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+				if sanity && h.chunks[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				h.chunks[off] = chunk
+			}
+		} else {
+			j := reverse & (huffmanNumChunks - 1)
+			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+				// Longer codes should have been
+				// associated with a link table above.
+				panic("impossible: not an indirect chunk")
+			}
+			value := h.chunks[j] >> huffmanValueShift
+			linktab := h.links[value]
+			reverse >>= huffmanChunkBits
+			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+				if sanity && linktab[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				linktab[off] = chunk
+			}
+		}
+	}
+
+	if sanity {
+		// Above we've sanity checked that we never overwrote
+		// an existing entry. Here we additionally check that
+		// we filled the tables completely.
+		for i, chunk := range h.chunks {
+			if chunk == 0 {
+				// As an exception, in the degenerate
+				// single-code case, we allow odd
+				// chunks to be missing.
+				if code == 1 && i%2 == 1 {
+					continue
+				}
+				panic("impossible: missing chunk")
+			}
+		}
+		for _, linktab := range h.links {
+			for _, chunk := range linktab {
+				if chunk == 0 {
+					panic("impossible: missing chunk")
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+// The actual read interface needed by NewReader.
+// If the passed in io.Reader does not also have ReadByte,
+// the NewReader will introduce its own buffering.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+}
+
+// Decompress state.
+type decompressor struct {
+	// Input source.
+	r       Reader
+	roffset int64
+
+	// Input bits, in top of b.
+	b  uint32
+	nb uint
+
+	// Huffman decoders for literal/length, distance.
+	h1, h2 huffmanDecoder
+
+	// Length arrays used to define Huffman codes.
+	bits     *[maxNumLit + maxNumDist]int
+	codebits *[numCodes]int
+
+	// Output history, buffer.
+	dict dictDecoder
+
+	// Temporary buffer (avoids repeated allocation).
+	buf [4]byte
+
+	// Next step in the decompression,
+	// and decompression state.
+	step      func(*decompressor)
+	stepState int
+	final     bool
+	err       error
+	toRead    []byte
+	hl, hd    *huffmanDecoder
+	copyLen   int
+	copyDist  int
+}
+
+func (f *decompressor) nextBlock() {
+	for f.nb < 1+2 {
+		if f.err = f.moreBits(); f.err != nil {
+			return
+		}
+	}
+	f.final = f.b&1 == 1
+	f.b >>= 1
+	typ := f.b & 3
+	f.b >>= 2
+	f.nb -= 1 + 2
+	switch typ {
+	case 0:
+		f.dataBlock()
+	case 1:
+		// compressed, fixed Huffman tables
+		f.hl = &fixedHuffmanDecoder
+		f.hd = nil
+		f.huffmanBlock()
+	case 2:
+		// compressed, dynamic Huffman tables
+		if f.err = f.readHuffman(); f.err != nil {
+			break
+		}
+		f.hl = &f.h1
+		f.hd = &f.h2
+		f.huffmanBlock()
+	default:
+		// 3 is reserved.
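+		// (Editor's illustration, not in the vendored file: per RFC 1951
+		// section 3.2.3 the two BTYPE bits map 0 = stored, 1 = fixed
+		// Huffman, 2 = dynamic Huffman; 3 is the reserved value that
+		// triggers the error below.)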
+		f.err = CorruptInputError(f.roffset)
+	}
+}
+
+func (f *decompressor) Read(b []byte) (int, error) {
+	for {
+		if len(f.toRead) > 0 {
+			n := copy(b, f.toRead)
+			f.toRead = f.toRead[n:]
+			if len(f.toRead) == 0 {
+				return n, f.err
+			}
+			return n, nil
+		}
+		if f.err != nil {
+			return 0, f.err
+		}
+		f.step(f)
+		if f.err != nil && len(f.toRead) == 0 {
+			f.toRead = f.dict.readFlush() // Flush what's left in case of error
+		}
+	}
+}
+
+// Support the io.WriteTo interface for io.Copy and friends.
+func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
+	total := int64(0)
+	flushed := false
+	for {
+		if len(f.toRead) > 0 {
+			n, err := w.Write(f.toRead)
+			total += int64(n)
+			if err != nil {
+				f.err = err
+				return total, err
+			}
+			if n != len(f.toRead) {
+				return total, io.ErrShortWrite
+			}
+			f.toRead = f.toRead[:0]
+		}
+		if f.err != nil && flushed {
+			if f.err == io.EOF {
+				return total, nil
+			}
+			return total, f.err
+		}
+		if f.err == nil {
+			f.step(f)
+		}
+		if len(f.toRead) == 0 && f.err != nil && !flushed {
+			f.toRead = f.dict.readFlush() // Flush what's left in case of error
+			flushed = true
+		}
+	}
+}
+
+func (f *decompressor) Close() error {
+	if f.err == io.EOF {
+		return nil
+	}
+	return f.err
+}
+
+// RFC 1951 section 3.2.7.
+// Compression with dynamic Huffman codes
+
+var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+func (f *decompressor) readHuffman() error {
+	// HLIT[5], HDIST[5], HCLEN[4].
+	for f.nb < 5+5+4 {
+		if err := f.moreBits(); err != nil {
+			return err
+		}
+	}
+	nlit := int(f.b&0x1F) + 257
+	if nlit > maxNumLit {
+		return CorruptInputError(f.roffset)
+	}
+	f.b >>= 5
+	ndist := int(f.b&0x1F) + 1
+	if ndist > maxNumDist {
+		return CorruptInputError(f.roffset)
+	}
+	f.b >>= 5
+	nclen := int(f.b&0xF) + 4
+	// numCodes is 19, so nclen is always valid.
+	f.b >>= 4
+	f.nb -= 5 + 5 + 4
+
+	// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
+	for i := 0; i < nclen; i++ {
+		for f.nb < 3 {
+			if err := f.moreBits(); err != nil {
+				return err
+			}
+		}
+		f.codebits[codeOrder[i]] = int(f.b & 0x7)
+		f.b >>= 3
+		f.nb -= 3
+	}
+	for i := nclen; i < len(codeOrder); i++ {
+		f.codebits[codeOrder[i]] = 0
+	}
+	if !f.h1.init(f.codebits[0:]) {
+		return CorruptInputError(f.roffset)
+	}
+
+	// HLIT + 257 code lengths, HDIST + 1 code lengths,
+	// using the code length Huffman code.
+	for i, n := 0, nlit+ndist; i < n; {
+		x, err := f.huffSym(&f.h1)
+		if err != nil {
+			return err
+		}
+		if x < 16 {
+			// Actual length.
+			f.bits[i] = x
+			i++
+			continue
+		}
+		// Repeat previous length or zero.
+		var rep int
+		var nb uint
+		var b int
+		switch x {
+		default:
+			return InternalError("unexpected length code")
+		case 16:
+			rep = 3
+			nb = 2
+			if i == 0 {
+				return CorruptInputError(f.roffset)
+			}
+			b = f.bits[i-1]
+		case 17:
+			rep = 3
+			nb = 3
+			b = 0
+		case 18:
+			rep = 11
+			nb = 7
+			b = 0
+		}
+		for f.nb < nb {
+			if err := f.moreBits(); err != nil {
+				return err
+			}
+		}
+		rep += int(f.b & uint32(1<<nb-1))
+		f.b >>= nb
+		f.nb -= nb
+		if i+rep > n {
+			return CorruptInputError(f.roffset)
+		}
+		for j := 0; j < rep; j++ {
+			f.bits[i] = b
+			i++
+		}
+	}
+
+	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+		return CorruptInputError(f.roffset)
+	}
+
+	// As an optimization, we can initialize the min bits to read at a time
+	// for the HLIT tree to the length of the EOB marker since we know that
+	// every block must terminate with one. This preserves the property that
+	// we never read any extra bytes after the end of the DEFLATE stream.
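+	// (Editor's illustration, not in the vendored file: endBlockMarker
+	// is symbol 256, the end-of-block code; with the fixed tables its
+	// code length is 7, so huffSym can demand at least 7 buffered bits
+	// before attempting a lookup.)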
+	if f.h1.min < f.bits[endBlockMarker] {
+		f.h1.min = f.bits[endBlockMarker]
+	}
+
+	return nil
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBlock() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		v, err := f.huffSym(f.hl)
+		if err != nil {
+			f.err = err
+			return
+		}
+		var n uint // number of bits extra
+		var length int
+		switch {
+		case v < 256:
+			f.dict.writeByte(byte(v))
+			if f.dict.availWrite() == 0 {
+				f.toRead = f.dict.readFlush()
+				f.step = (*decompressor).huffmanBlock
+				f.stepState = stateInit
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+			n = 0
+		case v < 269:
+			length = v*2 - (265*2 - 11)
+			n = 1
+		case v < 273:
+			length = v*4 - (269*4 - 19)
+			n = 2
+		case v < 277:
+			length = v*8 - (273*8 - 35)
+			n = 3
+		case v < 281:
+			length = v*16 - (277*16 - 67)
+			n = 4
+		case v < 285:
+			length = v*32 - (281*32 - 131)
+			n = 5
+		case v < maxNumLit:
+			length = 258
+			n = 0
+		default:
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+		if n > 0 {
+			for f.nb < n {
+				if err = f.moreBits(); err != nil {
+					f.err = err
+					return
+				}
+			}
+			length += int(f.b & uint32(1<<n-1))
+			f.b >>= n
+			f.nb -= n
+		}
+
+		var dist int
+		if f.hd == nil {
+			for f.nb < 5 {
+				if err = f.moreBits(); err != nil {
+					f.err = err
+					return
+				}
+			}
+			dist = int(reverseByte[(f.b&0x1F)<<3])
+			f.b >>= 5
+			f.nb -= 5
+		} else {
+			if dist, err = f.huffSym(f.hd); err != nil {
+				f.err = err
+				return
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << nb
+			for f.nb < nb {
+				if err = f.moreBits(); err != nil {
+					f.err = err
+					return
+				}
+			}
+			extra |= int(f.b & uint32(1<<nb-1))
+			f.b >>= nb
+			f.nb -= nb
+			dist = 1<<(nb+1) + 1 + extra
+		default:
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > f.dict.histSize() {
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, dist
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if f.dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = f.dict.readFlush()
+			f.step = (*decompressor).huffmanBlock // We need to continue this work
+			f.stepState = stateDict
+			return
+		}
+		goto readLiteral
+	}
+}
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+	// Uncompressed.
+	// Discard current half-byte.
+	f.nb = 0
+	f.b = 0
+
+	// Length then ones-complement of length.
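+	// (Editor's illustration, not in the vendored file: a stored block
+	// begins with LEN and NLEN, two little-endian uint16 values where
+	// NLEN == ^LEN, e.g. LEN 0x0005 is paired with NLEN 0xfffa; the
+	// check below rejects anything else as corrupt.)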
+ nr, err := io.ReadFull(f.r, f.buf[0:4]) + f.roffset += int64(nr) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + f.err = err + return + } + n := int(f.buf[0]) | int(f.buf[1])<<8 + nn := int(f.buf[2]) | int(f.buf[3])<<8 + if uint16(nn) != uint16(^n) { + f.err = CorruptInputError(f.roffset) + return + } + + if n == 0 { + f.toRead = f.dict.readFlush() + f.finishBlock() + return + } + + f.copyLen = n + f.copyData() +} + +// copyData copies f.copyLen bytes from the underlying reader into f.hist. +// It pauses for reads when f.hist is full. +func (f *decompressor) copyData() { + buf := f.dict.writeSlice() + if len(buf) > f.copyLen { + buf = buf[:f.copyLen] + } + + cnt, err := io.ReadFull(f.r, buf) + f.roffset += int64(cnt) + f.copyLen -= cnt + f.dict.writeMark(cnt) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + f.err = err + return + } + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).copyData + return + } + f.finishBlock() +} + +func (f *decompressor) finishBlock() { + if f.final { + if f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() + } + f.err = io.EOF + } + f.step = (*decompressor).nextBlock +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.min) + for { + for f.nb < n { + if err := f.moreBits(); err != nil { + return 0, err + } + } + chunk := h.chunks[f.b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(f.b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= f.nb { + if n == 0 { + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b >>= n + f.nb -= n + return int(chunk >> huffmanValueShift), nil + } + } +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func fixedHuffmanDecoderInit() { + fixedOnce.Do(func() { + // These come from the RFC section 3.2.6. + var bits [288]int + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + fixedHuffmanDecoder.init(bits[:]) + }) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + dict: f.dict, + step: (*decompressor).nextBlock, + } + f.dict.init(maxMatchOffset, dict) + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. 
+func NewReader(r io.Reader) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, nil) + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, dict) + return &f +} diff --git a/vendor/github.com/klauspost/compress/flate/reverse_bits.go b/vendor/github.com/klauspost/compress/flate/reverse_bits.go new file mode 100644 index 0000000..c1a0272 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/reverse_bits.go @@ -0,0 +1,48 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +var reverseByte = [256]byte{ + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, +} + +func reverseUint16(v uint16) uint16 { + return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8 +} + +func reverseBits(number uint16, bitLength byte) uint16 { + return reverseUint16(number << uint8(16-bitLength)) +} diff --git a/vendor/github.com/klauspost/compress/flate/snappy.go b/vendor/github.com/klauspost/compress/flate/snappy.go new file mode 100644 index 0000000..d853320 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/snappy.go @@ -0,0 +1,900 @@ +// Copyright 2011 
The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst *tokens, lit []byte) { + ol := int(dst.n) + for i, v := range lit { + dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) + } + dst.n += uint16(len(lit)) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +func emitCopy(dst *tokens, offset, length int) { + dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize)) + dst.n++ +} + +type snappyEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newSnappy(level int) snappyEnc { + switch level { + case 1: + return &snappyL1{} + case 2: + return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} + case 3: + return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} + case 4: + return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 14 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset +) + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func hash(u uint32) uint32 { + return (u * 0x1e35a7bd) >> tableShift +} + +// snappyL1 encapsulates level 1 compression +type snappyL1 struct{} + +func (e *snappyL1) Reset() {} + +func (e *snappyL1) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 16 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Initialize the hash table. + // + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535. + var table [tableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. 
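+	// (Editor's illustration, not in the vendored file: with
+	// tableBits == 14, hash keeps the top 14 bits of u*0x1e35a7bd,
+	// indexing the 16384-entry table; the odd multiplier spreads
+	// similar 4-byte windows across the table.)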
+ s := 1 + nextHash := hash(load32(src, s)) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS)) + if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of Snappy's: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + s1 := base + maxMatchLength + if s1 > len(src) { + s1 = len(src) + } + a := src[s:s1] + b := src[candidate+4:] + b = b[:len(a)] + l := len(a) + for i := range a { + if a[i] != b[i] { + l = i + break + } + } + s += l + + // matchToken is flate's equivalent of Snappy's emitCopy. + dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset)) + dst.n++ + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
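+			// (Editor's illustration, not in the vendored file: load64
+			// reads little-endian, so uint32(x), uint32(x>>8) and
+			// uint32(x>>16) are exactly the 4-byte windows starting at
+			// s-1, s and s+1.)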
+			x := load64(src, s-1)
+			prevHash := hash(uint32(x >> 0))
+			table[prevHash&tableMask] = uint16(s - 1)
+			currHash := hash(uint32(x >> 8))
+			candidate = int(table[currHash&tableMask])
+			table[currHash&tableMask] = uint16(s)
+			if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
+				nextHash = hash(uint32(x >> 16))
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
+
+type tableEntry struct {
+	val    uint32
+	offset int32
+}
+
+func load3232(b []byte, i int32) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load6432(b []byte, i int32) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// snappyGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type snappyGen struct {
+	prev []byte
+	cur  int32
+}
+
+// snappyL2 extends snappyGen with the hash table used by level 2.
+type snappyL2 struct {
+	snappyGen
+	table [tableSize]tableEntry
+}
+
+// Encode uses a similar algorithm to level 1, but is capable
+// of matching across blocks giving better compression at a small slowdown.
+func (e *snappyL2) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 8 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+
+	// Protect against e.cur wraparound.
+	if e.cur > 1<<30 {
+		for i := range e.table[:] {
+			e.table[i] = tableEntry{}
+		}
+		e.cur = maxStoreBlockSize
+	}
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		e.cur += maxStoreBlockSize
+		e.prev = e.prev[:0]
+		return
+	}
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := int32(0)
+	s := int32(0)
+	cv := load3232(src, s)
+	nextHash := hash(cv)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc.. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (ie. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
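+		// (Worked example, editor's illustration: skip>>5 is 1 while
+		// skip is below 64, 2 until it reaches 96, 3 after that, and so
+		// on, so the scan accelerates through incompressible input.)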
+ skip := int32(32) + + nextS := s + var candidate tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv} + nextHash = hash(now) + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || cv != candidate.val { + // Out of range or not matched. + cv = now + continue + } + break + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv} + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-1) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)} + x >>= 8 + currHash := hash(uint32(x)) + candidate = e.table[currHash&tableMask] + e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != candidate.val { + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// snappyL3 +type snappyL3 struct { + snappyGen + table [tableSize]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *snappyL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. 
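+	// (Editor's illustration, not in the vendored file: candidate
+	// offsets are stored as s+e.cur in an int32, so once e.cur passes
+	// 1<<30 the table is cleared and e.cur rewound to keep that sum
+	// from overflowing.)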
+ if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + nextHash = hash(now) + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. 
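+			// (Editor's illustration, not in the vendored file: t may be
+			// negative here, meaning the candidate run starts in the
+			// previous block; matchlen below handles that case by
+			// indexing e.prev.)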
+ // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash = hash(cv) + e.table[nextHash&tableMask] = tableEntryPrev{ + Prev: e.table[nextHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-3) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + } + x >>= 8 + currHash := hash(uint32(x)) + candidates := e.table[currHash&tableMask] + cv = uint32(x) + e.table[currHash&tableMask] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur, val: cv}, + } + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } + } + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +// snappyL4 +type snappyL4 struct { + snappyL3 +} + +// Encode uses a similar algorithm to level 3, +// but will check up to two candidates if first isn't long enough. +func (e *snappyL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 3 + minNonLiteralBlockSize = 1 + 1 + inputMargin + matchLenGood = 12 + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. 
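+	// (Editor's illustration, not in the vendored file: matchLenGood
+	// above is 12, so the alternative candidate is consulted only when
+	// the first match extends fewer than matchLenGood-4 bytes past the
+	// initial four; see the l < matchLenGood-4 check further down.)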
+ sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + var candidateAlt tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + nextHash = hash(now) + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset { + offset = s - (candidates.Prev.offset - e.cur) + if cv == candidates.Prev.val && offset < maxMatchOffset { + candidateAlt = candidates.Prev + } + break + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset { + break + } + } + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + // Try alternative candidate if match length < matchLenGood. + if l < matchLenGood-4 && candidateAlt.offset != 0 { + t2 := candidateAlt.offset - e.cur + 4 + l2 := e.matchlen(s, t2, src) + if l2 > l { + l = l2 + t = t2 + } + } + // matchToken is flate's equivalent of Snappy's emitCopy. 
(length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash = hash(cv) + e.table[nextHash&tableMask] = tableEntryPrev{ + Prev: e.table[nextHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-3) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + } + x >>= 8 + currHash := hash(uint32(x)) + candidates := e.table[currHash&tableMask] + cv = uint32(x) + e.table[currHash&tableMask] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur, val: cv}, + } + + // Check both candidates + candidate = candidates.Cur + candidateAlt = tableEntry{} + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + offset = s - (candidates.Prev.offset - e.cur) + if cv == candidates.Prev.val && offset <= maxMatchOffset { + candidateAlt = candidates.Prev + } + continue + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } + } + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +func (e *snappyGen) matchlen(s, t int32, src []byte) int32 { + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // If we are inside the current block + if t >= 0 { + b := src[t:] + a := src[s:s1] + b = b[:len(a)] + // Extend the match to be as long as possible. + for i := range a { + if a[i] != b[i] { + return int32(i) + } + } + return int32(len(a)) + } + + // We found a match in the previous block. + tp := int32(len(e.prev)) + t + if tp < 0 { + return 0 + } + + // Extend the match to be as long as possible. + a := src[s:s1] + b := e.prev[tp:] + if len(b) > len(a) { + b = b[:len(a)] + } + a = a[:len(b)] + for i := range b { + if a[i] != b[i] { + return int32(i) + } + } + + // If we reached our limit, we matched everything we are + // allowed to in the previous block and we return. + n := int32(len(b)) + if int(s+n) == s1 { + return n + } + + // Continue looking for more matches in the current block. 
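+	// (Editor's illustration, not in the vendored file: at this point n
+	// bytes matched up to the end of e.prev, so the comparison resumes
+	// at src[0], because a copy may run off the previous block into the
+	// current one.)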
+	a = src[s+n : s1]
+	b = src[:len(a)]
+	for i := range a {
+		if a[i] != b[i] {
+			return int32(i) + n
+		}
+	}
+	return int32(len(a)) + n
+}
+
+// Reset the encoding table.
+func (e *snappyGen) Reset() {
+	e.prev = e.prev[:0]
+	e.cur += maxMatchOffset
+}
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
new file mode 100644
index 0000000..4f275ea
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -0,0 +1,115 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import "fmt"
+
+const (
+	// 2 bits:   type   0 = literal  1=EOF  2=Match  3=Unused
+	// 8 bits:   xlength = length - MIN_MATCH_LENGTH
+	// 22 bits   xoffset = offset - MIN_OFFSET_SIZE, or literal
+	lengthShift = 22
+	offsetMask  = 1<<lengthShift - 1
+	typeMask    = 3 << 30
+	literalType = 0 << 30
+	matchType   = 1 << 30
+)
+
+// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
+// is lengthCodes[length - MIN_MATCH_LENGTH]
+var lengthCodes = [...]uint32{
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
+	9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
+	13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
+	15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
+	17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
+	18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+	19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
+	20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+	21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+	22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+	23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 28,
+}
+
+var offsetCodes = [...]uint32{
+	0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
+	8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+	10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+	11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+}
+
+type token uint32
+
+// Convert a literal into a literal token.
+func literalToken(literal uint32) token { return token(literalType + literal) }
+
+// Convert a < xlength, xoffset > pair into a match token.
+func matchToken(xlength uint32, xoffset uint32) token {
+	return token(matchType + xlength<<lengthShift + xoffset)
+}
+
+func matchTokend(xlength uint32, xoffset uint32) token {
+	if xlength > maxMatchLength || xoffset > maxMatchOffset {
+		panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset))
+		return token(matchType)
+	}
+	return token(matchType + xlength<<lengthShift + xoffset)
+}
+
+// Returns the type of a token
+func (t token) typ() uint32 { return uint32(t) & typeMask }
+
+// Returns the literal of a literal token
+func (t token) literal() uint32 { return uint32(t - literalType) }
+
+// Returns the extra offset of a match token
+func (t token) offset() uint32 { return uint32(t) & offsetMask }
+
+func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }
+
+func lengthCode(len uint32) uint32 { return lengthCodes[len] }
+
+// Returns the offset code corresponding to a specific offset
+func offsetCode(off uint32) uint32 {
+	if off < uint32(len(offsetCodes)) {
+		return offsetCodes[off]
+	} else if off>>7 < uint32(len(offsetCodes)) {
+		return offsetCodes[off>>7] + 14
+	} else {
+		return offsetCodes[off>>14] + 28
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go
new file mode 100644
index 0000000..e73fab3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go
@@ -0,0 +1,344 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gzip implements reading and writing of gzip format compressed files,
+// as specified in RFC 1952.
+package gzip
+
+import (
+	"bufio"
+	"encoding/binary"
+	"errors"
+	"io"
+	"time"
+
+	"github.com/klauspost/compress/flate"
+	"github.com/klauspost/crc32"
+)
+
+const (
+	gzipID1     = 0x1f
+	gzipID2     = 0x8b
+	gzipDeflate = 8
+	flagText    = 1 << 0
+	flagHdrCrc  = 1 << 1
+	flagExtra   = 1 << 2
+	flagName    = 1 << 3
+	flagComment = 1 << 4
+)
+
+var (
+	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
+	ErrChecksum = errors.New("gzip: invalid checksum")
+	// ErrHeader is returned when reading GZIP data that has an invalid header.
+	ErrHeader = errors.New("gzip: invalid header")
+)
+
+var le = binary.LittleEndian
+
+// noEOF converts io.EOF to io.ErrUnexpectedEOF.
+func noEOF(err error) error {
+	if err == io.EOF {
+		return io.ErrUnexpectedEOF
+	}
+	return err
+}
+
+// The gzip file stores a header giving metadata about the compressed file.
+// That header is exposed as the fields of the Writer and Reader structs.
+//
+// Strings must be UTF-8 encoded and may only contain Unicode code points
+// U+0001 through U+00FF, due to limitations of the GZIP file format.
+type Header struct {
+	Comment string    // comment
+	Extra   []byte    // "extra data"
+	ModTime time.Time // modification time
+	Name    string    // file name
+	OS      byte      // operating system type
+}
+
+// A Reader is an io.Reader that can be read to retrieve
+// uncompressed data from a gzip-format compressed file.
+// +// In general, a gzip file can be a concatenation of gzip files, +// each with its own header. Reads from the Reader +// return the concatenation of the uncompressed data of each. +// Only the first header is recorded in the Reader fields. +// +// Gzip files store a length and checksum of the uncompressed data. +// The Reader will return a ErrChecksum when Read +// reaches the end of the uncompressed data if it does not +// have the expected length or checksum. Clients should treat data +// returned by Read as tentative until they receive the io.EOF +// marking the end of the data. +type Reader struct { + Header // valid after NewReader or Reader.Reset + r flate.Reader + decompressor io.ReadCloser + digest uint32 // CRC-32, IEEE polynomial (section 8) + size uint32 // Uncompressed size (section 2.3.1) + buf [512]byte + err error + multistream bool +} + +// NewReader creates a new Reader reading the given reader. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// +// It is the caller's responsibility to call Close on the Reader when done. +// +// The Reader.Header fields will be valid in the Reader returned. +func NewReader(r io.Reader) (*Reader, error) { + z := new(Reader) + if err := z.Reset(r); err != nil { + return nil, err + } + return z, nil +} + +// Reset discards the Reader z's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) error { + *z = Reader{ + decompressor: z.decompressor, + multistream: true, + } + if rr, ok := r.(flate.Reader); ok { + z.r = rr + } else { + z.r = bufio.NewReader(r) + } + z.Header, z.err = z.readHeader() + return z.err +} + +// Multistream controls whether the reader supports multistream files. +// +// If enabled (the default), the Reader expects the input to be a sequence +// of individually gzipped data streams, each with its own header and +// trailer, ending at EOF. The effect is that the concatenation of a sequence +// of gzipped files is treated as equivalent to the gzip of the concatenation +// of the sequence. This is standard behavior for gzip readers. +// +// Calling Multistream(false) disables this behavior; disabling the behavior +// can be useful when reading file formats that distinguish individual gzip +// data streams or mix gzip data streams with other data streams. +// In this mode, when the Reader reaches the end of the data stream, +// Read returns io.EOF. If the underlying reader implements io.ByteReader, +// it will be left positioned just after the gzip stream. +// To start the next stream, call z.Reset(r) followed by z.Multistream(false). +// If there is no next stream, z.Reset(r) will return io.EOF. +func (z *Reader) Multistream(ok bool) { + z.multistream = ok +} + +// readString reads a NUL-terminated string from z.r. +// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and +// will output a string encoded using UTF-8. +// This method always updates z.digest with the data read. +func (z *Reader) readString() (string, error) { + var err error + needConv := false + for i := 0; ; i++ { + if i >= len(z.buf) { + return "", ErrHeader + } + z.buf[i], err = z.r.ReadByte() + if err != nil { + return "", err + } + if z.buf[i] > 0x7f { + needConv = true + } + if z.buf[i] == 0 { + // Digest covers the NUL terminator. 
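+			// (Editor's illustration, not in the vendored file: a
+			// Latin-1 header byte such as 0xe9, 'é', widens to rune
+			// U+00E9 below, which string() encodes as the UTF-8 pair
+			// 0xc3 0xa9.)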
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1]) + + // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1). + if needConv { + s := make([]rune, 0, i) + for _, v := range z.buf[:i] { + s = append(s, rune(v)) + } + return string(s), nil + } + return string(z.buf[:i]), nil + } + } +} + +// readHeader reads the GZIP header according to section 2.3.1. +// This method does not set z.err. +func (z *Reader) readHeader() (hdr Header, err error) { + if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil { + // RFC 1952, section 2.2, says the following: + // A gzip file consists of a series of "members" (compressed data sets). + // + // Other than this, the specification does not clarify whether a + // "series" is defined as "one or more" or "zero or more". To err on the + // side of caution, Go interprets this to mean "zero or more". + // Thus, it is okay to return io.EOF here. + return hdr, err + } + if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { + return hdr, ErrHeader + } + flg := z.buf[3] + hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0) + // z.buf[8] is XFL and is currently ignored. + hdr.OS = z.buf[9] + z.digest = crc32.ChecksumIEEE(z.buf[:10]) + + if flg&flagExtra != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2]) + data := make([]byte, le.Uint16(z.buf[:2])) + if _, err = io.ReadFull(z.r, data); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, data) + hdr.Extra = data + } + + var s string + if flg&flagName != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Name = s + } + + if flg&flagComment != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Comment = s + } + + if flg&flagHdrCrc != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + digest := le.Uint16(z.buf[:2]) + if digest != uint16(z.digest) { + return hdr, ErrHeader + } + } + + z.digest = 0 + if z.decompressor == nil { + z.decompressor = flate.NewReader(z.r) + } else { + z.decompressor.(flate.Resetter).Reset(z.r, nil) + } + return hdr, nil +} + +// Read implements io.Reader, reading uncompressed bytes from its underlying Reader. +func (z *Reader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + + n, z.err = z.decompressor.Read(p) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) + z.size += uint32(n) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } + + // Finished file; check checksum and size. + if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { + z.err = noEOF(err) + return n, z.err + } + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return n, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. + if !z.multistream { + return n, io.EOF + } + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + return n, z.err + } + + // Read from next file, if necessary. + if n > 0 { + return n, nil + } + return z.Read(p) +} + +// Support the io.WriteTo interface for io.Copy and friends. +func (z *Reader) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + crcWriter := crc32.NewIEEE() + for { + if z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + + // We write both to output and digest. 
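+		// (Editor's illustration, not in the vendored file:
+		// io.MultiWriter duplicates every write, so the CRC-32
+		// accumulates in crcWriter while the payload streams to w in a
+		// single pass.)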
+ mw := io.MultiWriter(w, crcWriter) + n, err := z.decompressor.(io.WriterTo).WriteTo(mw) + total += n + z.size += uint32(n) + if err != nil { + z.err = err + return total, z.err + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + z.err = err + return total, err + } + z.digest = crcWriter.Sum32() + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return total, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. + if !z.multistream { + return total, nil + } + crcWriter.Reset() + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + } +} + +// Close closes the Reader. It does not close the underlying io.Reader. +// In order for the GZIP checksum to be verified, the reader must be +// fully consumed until the io.EOF. +func (z *Reader) Close() error { return z.decompressor.Close() } diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go new file mode 100644 index 0000000..a0f3ed0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gzip.go @@ -0,0 +1,251 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/crc32" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/gzip" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly +) + +// A Writer is an io.WriteCloser. +// Writes to a Writer are compressed and written to w. +type Writer struct { + Header // written at first call to Write, Flush, or Close + w io.Writer + level int + wroteHeader bool + compressor *flate.Writer + digest uint32 // CRC-32, IEEE polynomial (section 8) + size uint32 // Uncompressed size (section 2.3.1) + closed bool + buf [10]byte + err error +} + +// NewWriter returns a new Writer. +// Writes to the returned writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +// +// Callers that wish to set the fields in Writer.Header must do so before +// the first call to Write, Flush, or Close. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevel(w, DefaultCompression) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, or any +// integer value between BestSpeed and BestCompression inclusive. The error +// returned will be nil if the level is valid. 
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + if level < HuffmanOnly || level > BestCompression { + return nil, fmt.Errorf("gzip: invalid compression level: %d", level) + } + z := new(Writer) + z.init(w, level) + return z, nil +} + +func (z *Writer) init(w io.Writer, level int) { + compressor := z.compressor + if compressor != nil { + compressor.Reset(w) + } + *z = Writer{ + Header: Header{ + OS: 255, // unknown + }, + w: w, + level: level, + compressor: compressor, + } +} + +// Reset discards the Writer z's state and makes it equivalent to the +// result of its original state from NewWriter or NewWriterLevel, but +// writing to w instead. This permits reusing a Writer rather than +// allocating a new one. +func (z *Writer) Reset(w io.Writer) { + z.init(w, z.level) +} + +// writeBytes writes a length-prefixed byte slice to z.w. +func (z *Writer) writeBytes(b []byte) error { + if len(b) > 0xffff { + return errors.New("gzip.Write: Extra data is too large") + } + le.PutUint16(z.buf[:2], uint16(len(b))) + _, err := z.w.Write(z.buf[:2]) + if err != nil { + return err + } + _, err = z.w.Write(b) + return err +} + +// writeString writes a UTF-8 string s in GZIP's format to z.w. +// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). +func (z *Writer) writeString(s string) (err error) { + // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. + needconv := false + for _, v := range s { + if v == 0 || v > 0xff { + return errors.New("gzip.Write: non-Latin-1 header string") + } + if v > 0x7f { + needconv = true + } + } + if needconv { + b := make([]byte, 0, len(s)) + for _, v := range s { + b = append(b, byte(v)) + } + _, err = z.w.Write(b) + } else { + _, err = io.WriteString(z.w, s) + } + if err != nil { + return err + } + // GZIP strings are NUL-terminated. + z.buf[0] = 0 + _, err = z.w.Write(z.buf[:1]) + return err +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed. +func (z *Writer) Write(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + var n int + // Write the GZIP header lazily. + if !z.wroteHeader { + z.wroteHeader = true + z.buf[0] = gzipID1 + z.buf[1] = gzipID2 + z.buf[2] = gzipDeflate + z.buf[3] = 0 + if z.Extra != nil { + z.buf[3] |= 0x04 + } + if z.Name != "" { + z.buf[3] |= 0x08 + } + if z.Comment != "" { + z.buf[3] |= 0x10 + } + le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix())) + if z.level == BestCompression { + z.buf[8] = 2 + } else if z.level == BestSpeed { + z.buf[8] = 4 + } else { + z.buf[8] = 0 + } + z.buf[9] = z.OS + n, z.err = z.w.Write(z.buf[:10]) + if z.err != nil { + return n, z.err + } + if z.Extra != nil { + z.err = z.writeBytes(z.Extra) + if z.err != nil { + return n, z.err + } + } + if z.Name != "" { + z.err = z.writeString(z.Name) + if z.err != nil { + return n, z.err + } + } + if z.Comment != "" { + z.err = z.writeString(z.Comment) + if z.err != nil { + return n, z.err + } + } + if z.compressor == nil { + z.compressor, _ = flate.NewWriter(z.w, z.level) + } + } + z.size += uint32(len(p)) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p) + n, z.err = z.compressor.Write(p) + return n, z.err +} + +// Flush flushes any pending compressed data to the underlying writer. +// +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. Flush does +// not return until the data has been written. 
 If the underlying
+// writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (z *Writer) Flush() error {
+	if z.err != nil {
+		return z.err
+	}
+	if z.closed {
+		return nil
+	}
+	if !z.wroteHeader {
+		z.Write(nil)
+		if z.err != nil {
+			return z.err
+		}
+	}
+	z.err = z.compressor.Flush()
+	return z.err
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying
+// io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+	if z.err != nil {
+		return z.err
+	}
+	if z.closed {
+		return nil
+	}
+	z.closed = true
+	if !z.wroteHeader {
+		z.Write(nil)
+		if z.err != nil {
+			return z.err
+		}
+	}
+	z.err = z.compressor.Close()
+	if z.err != nil {
+		return z.err
+	}
+	le.PutUint32(z.buf[:4], z.digest)
+	le.PutUint32(z.buf[4:8], z.size)
+	_, z.err = z.w.Write(z.buf[:8])
+	return z.err
+}
diff --git a/vendor/github.com/klauspost/compress/zlib/reader.go b/vendor/github.com/klauspost/compress/zlib/reader.go
new file mode 100644
index 0000000..b78a2c1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zlib/reader.go
@@ -0,0 +1,178 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package zlib implements reading and writing of zlib format compressed data,
+as specified in RFC 1950.
+
+The implementation provides filters that uncompress during reading
+and compress during writing. For example, to write compressed data
+to a buffer:
+
+	var b bytes.Buffer
+	w := zlib.NewWriter(&b)
+	w.Write([]byte("hello, world\n"))
+	w.Close()
+
+and to read that data back:
+
+	r, err := zlib.NewReader(&b)
+	io.Copy(os.Stdout, r)
+	r.Close()
+*/
+package zlib
+
+import (
+	"bufio"
+	"errors"
+	"hash"
+	"hash/adler32"
+	"io"
+
+	"github.com/klauspost/compress/flate"
+)
+
+const zlibDeflate = 8
+
+var (
+	// ErrChecksum is returned when reading ZLIB data that has an invalid checksum.
+	ErrChecksum = errors.New("zlib: invalid checksum")
+	// ErrDictionary is returned when reading ZLIB data that has an invalid dictionary.
+	ErrDictionary = errors.New("zlib: invalid dictionary")
+	// ErrHeader is returned when reading ZLIB data that has an invalid header.
+	ErrHeader = errors.New("zlib: invalid header")
+)
+
+type reader struct {
+	r            flate.Reader
+	decompressor io.ReadCloser
+	digest       hash.Hash32
+	err          error
+	scratch      [4]byte
+}
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
type Resetter interface {
+	// Reset discards any buffered data and resets the Resetter as if it was
+	// newly initialized with the given reader.
+	Reset(r io.Reader, dict []byte) error
+}
+
+// NewReader creates a new ReadCloser.
+// Reads from the returned ReadCloser read and decompress data from r.
+// If r does not implement io.ByteReader, the decompressor may read more
+// data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser when done.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReader(r io.Reader) (io.ReadCloser, error) {
+	return NewReaderDict(r, nil)
+}
+
+// NewReaderDict is like NewReader but uses a preset dictionary.
+// NewReaderDict ignores the dictionary if the compressed data does not refer to it.
+// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary. +// +// The ReadCloser returned by NewReaderDict also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) { + z := new(reader) + err := z.Reset(r, dict) + if err != nil { + return nil, err + } + return z, nil +} + +func (z *reader) Read(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + + var n int + n, z.err = z.decompressor.Read(p) + z.digest.Write(p[0:n]) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } + + // Finished file; check checksum. + if _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + z.err = err + return n, z.err + } + // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). + checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + if checksum != z.digest.Sum32() { + z.err = ErrChecksum + return n, z.err + } + return n, io.EOF +} + +// Calling Close does not close the wrapped io.Reader originally passed to NewReader. +// In order for the ZLIB checksum to be verified, the reader must be +// fully consumed until the io.EOF. +func (z *reader) Close() error { + if z.err != nil && z.err != io.EOF { + return z.err + } + z.err = z.decompressor.Close() + return z.err +} + +func (z *reader) Reset(r io.Reader, dict []byte) error { + *z = reader{decompressor: z.decompressor} + if fr, ok := r.(flate.Reader); ok { + z.r = fr + } else { + z.r = bufio.NewReader(r) + } + + // Read the header (RFC 1950 section 2.2.). + _, z.err = io.ReadFull(z.r, z.scratch[0:2]) + if z.err != nil { + if z.err == io.EOF { + z.err = io.ErrUnexpectedEOF + } + return z.err + } + h := uint(z.scratch[0])<<8 | uint(z.scratch[1]) + if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) { + z.err = ErrHeader + return z.err + } + haveDict := z.scratch[1]&0x20 != 0 + if haveDict { + _, z.err = io.ReadFull(z.r, z.scratch[0:4]) + if z.err != nil { + if z.err == io.EOF { + z.err = io.ErrUnexpectedEOF + } + return z.err + } + checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + if checksum != adler32.Checksum(dict) { + z.err = ErrDictionary + return z.err + } + } + + if z.decompressor == nil { + if haveDict { + z.decompressor = flate.NewReaderDict(z.r, dict) + } else { + z.decompressor = flate.NewReader(z.r) + } + } else { + z.decompressor.(flate.Resetter).Reset(z.r, dict) + } + z.digest = adler32.New() + return nil +} diff --git a/vendor/github.com/klauspost/compress/zlib/writer.go b/vendor/github.com/klauspost/compress/zlib/writer.go new file mode 100644 index 0000000..605816b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/writer.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zlib + +import ( + "fmt" + "hash" + "hash/adler32" + "io" + + "github.com/klauspost/compress/flate" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/zlib" does not also have to import "compress/flate". 
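+//
+// For example, a caller can pick a non-default level using only this
+// package (a minimal sketch; buf and data are hypothetical stand-ins for
+// any io.Writer and []byte):
+//
+//	w, err := zlib.NewWriterLevel(&buf, zlib.BestSpeed)
+//	if err != nil {
+//		panic(err)
+//	}
+//	w.Write(data)
+//	w.Close()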
+const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly +) + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + w io.Writer + level int + dict []byte + compressor *flate.Writer + digest hash.Hash32 + err error + scratch [4]byte + wroteHeader bool +} + +// NewWriter creates a new Writer. +// Writes to the returned Writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevelDict(w, DefaultCompression, nil) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, HuffmanOnly +// or any integer value between BestSpeed and BestCompression inclusive. +// The error returned will be nil if the level is valid. +func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + return NewWriterLevelDict(w, level, nil) +} + +// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to +// compress with. +// +// The dictionary may be nil. If not, its contents should not be modified until +// the Writer is closed. +func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) { + if level < HuffmanOnly || level > BestCompression { + return nil, fmt.Errorf("zlib: invalid compression level: %d", level) + } + return &Writer{ + w: w, + level: level, + dict: dict, + }, nil +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriterLevel or NewWriterLevelDict, but instead writing +// to w. +func (z *Writer) Reset(w io.Writer) { + z.w = w + // z.level and z.dict left unchanged. + if z.compressor != nil { + z.compressor.Reset(w) + } + if z.digest != nil { + z.digest.Reset() + } + z.err = nil + z.scratch = [4]byte{} + z.wroteHeader = false +} + +// writeHeader writes the ZLIB header. +func (z *Writer) writeHeader() (err error) { + z.wroteHeader = true + // ZLIB has a two-byte header (as documented in RFC 1950). + // The first four bits is the CINFO (compression info), which is 7 for the default deflate window size. + // The next four bits is the CM (compression method), which is 8 for deflate. + z.scratch[0] = 0x78 + // The next two bits is the FLEVEL (compression level). The four values are: + // 0=fastest, 1=fast, 2=default, 3=best. + // The next bit, FDICT, is set if a dictionary is given. + // The final five FCHECK bits form a mod-31 checksum. + switch z.level { + case -2, 0, 1: + z.scratch[1] = 0 << 6 + case 2, 3, 4, 5: + z.scratch[1] = 1 << 6 + case 6, -1: + z.scratch[1] = 2 << 6 + case 7, 8, 9: + z.scratch[1] = 3 << 6 + default: + panic("unreachable") + } + if z.dict != nil { + z.scratch[1] |= 1 << 5 + } + z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31) + if _, err = z.w.Write(z.scratch[0:2]); err != nil { + return err + } + if z.dict != nil { + // The next four bytes are the Adler-32 checksum of the dictionary. 
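+		// This is the DICTID field (RFC 1950 section 2.2); like the rest
+		// of the zlib format it is big-endian, hence the shifts from the
+		// high byte down.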
+ checksum := adler32.Checksum(z.dict) + z.scratch[0] = uint8(checksum >> 24) + z.scratch[1] = uint8(checksum >> 16) + z.scratch[2] = uint8(checksum >> 8) + z.scratch[3] = uint8(checksum >> 0) + if _, err = z.w.Write(z.scratch[0:4]); err != nil { + return err + } + } + if z.compressor == nil { + // Initialize deflater unless the Writer is being reused + // after a Reset call. + z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict) + if err != nil { + return err + } + z.digest = adler32.New() + } + return nil +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed or +// explicitly flushed. +func (z *Writer) Write(p []byte) (n int, err error) { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return 0, z.err + } + if len(p) == 0 { + return 0, nil + } + n, err = z.compressor.Write(p) + if err != nil { + z.err = err + return + } + z.digest.Write(p) + return +} + +// Flush flushes the Writer to its underlying io.Writer. +func (z *Writer) Flush() error { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return z.err + } + z.err = z.compressor.Flush() + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return z.err + } + z.err = z.compressor.Close() + if z.err != nil { + return z.err + } + checksum := z.digest.Sum32() + // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). + z.scratch[0] = uint8(checksum >> 24) + z.scratch[1] = uint8(checksum >> 16) + z.scratch[2] = uint8(checksum >> 8) + z.scratch[3] = uint8(checksum >> 0) + _, z.err = z.w.Write(z.scratch[0:4]) + return z.err +} diff --git a/vendor/github.com/klauspost/cpuid/LICENSE b/vendor/github.com/klauspost/cpuid/LICENSE new file mode 100644 index 0000000..5cec7ee --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md new file mode 100644 index 0000000..b2b6bee --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/README.md @@ -0,0 +1,145 @@ +# cpuid +Package cpuid provides information about the CPU running the current program. 
+ +CPU features are detected on startup, and kept for fast access through the life of the application. +Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. + +You can access the CPU information by accessing the shared CPU variable of the cpuid library. + +Package home: https://github.com/klauspost/cpuid + +[![GoDoc][1]][2] [![Build Status][3]][4] + +[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg +[2]: https://godoc.org/github.com/klauspost/cpuid +[3]: https://travis-ci.org/klauspost/cpuid.svg +[4]: https://travis-ci.org/klauspost/cpuid + +# features +## CPU Instructions +* **CMOV** (i686 CMOV) +* **NX** (NX (No-Execute) bit) +* **AMD3DNOW** (AMD 3DNOW) +* **AMD3DNOWEXT** (AMD 3DNowExt) +* **MMX** (standard MMX) +* **MMXEXT** (SSE integer functions or AMD MMX ext) +* **SSE** (SSE functions) +* **SSE2** (P4 SSE functions) +* **SSE3** (Prescott SSE3 functions) +* **SSSE3** (Conroe SSSE3 functions) +* **SSE4** (Penryn SSE4.1 functions) +* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions) +* **SSE42** (Nehalem SSE4.2 functions) +* **AVX** (AVX functions) +* **AVX2** (AVX2 functions) +* **FMA3** (Intel FMA 3) +* **FMA4** (Bulldozer FMA4 functions) +* **XOP** (Bulldozer XOP functions) +* **F16C** (Half-precision floating-point conversion) +* **BMI1** (Bit Manipulation Instruction Set 1) +* **BMI2** (Bit Manipulation Instruction Set 2) +* **TBM** (AMD Trailing Bit Manipulation) +* **LZCNT** (LZCNT instruction) +* **POPCNT** (POPCNT instruction) +* **AESNI** (Advanced Encryption Standard New Instructions) +* **CLMUL** (Carry-less Multiplication) +* **HTT** (Hyperthreading (enabled)) +* **HLE** (Hardware Lock Elision) +* **RTM** (Restricted Transactional Memory) +* **RDRAND** (RDRAND instruction is available) +* **RDSEED** (RDSEED instruction is available) +* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) +* **SHA** (Intel SHA Extensions) +* **AVX512F** (AVX-512 Foundation) +* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions) +* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions) +* **AVX512PF** (AVX-512 Prefetch Instructions) +* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions) +* **AVX512CD** (AVX-512 Conflict Detection Instructions) +* **AVX512BW** (AVX-512 Byte and Word Instructions) +* **AVX512VL** (AVX-512 Vector Length Extensions) +* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions) +* **MPX** (Intel MPX (Memory Protection Extensions)) +* **ERMS** (Enhanced REP MOVSB/STOSB) +* **RDTSCP** (RDTSCP Instruction) +* **CX16** (CMPXCHG16B Instruction) +* **SGX** (Software Guard Extensions, with activation details) + +## Performance +* **RDTSCP()** Returns current cycle count. Can be used for benchmarking. +* **SSE2SLOW** (SSE2 is supported, but usually not faster) +* **SSE3SLOW** (SSE3 is supported, but usually not faster) +* **ATOM** (Atom processor, some SSSE3 instructions are slower) +* **Cache line** (Probable size of a cache line). +* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs. 
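+
+The cycle counter, for example, can back quick-and-dirty measurements. A minimal
+sketch (hypothetical usage; note the accessors return 0 without RDTSCP support,
+so the flag is checked first):
+
+```Go
+package main
+
+import (
+	"fmt"
+	"github.com/klauspost/cpuid"
+)
+
+func main() {
+	// RTCounter returns 0 when RDTSCP is unsupported, so check the flag first.
+	if cpuid.CPU.RDTSCP() {
+		start := cpuid.CPU.RTCounter()
+		// ... code under measurement ...
+		fmt.Println("cycles:", cpuid.CPU.RTCounter()-start)
+	}
+}
+```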
+
+## CPU Vendor/VM
+* **Intel**
+* **AMD**
+* **VIA**
+* **Transmeta**
+* **NSC**
+* **KVM** (Kernel-based Virtual Machine)
+* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
+* **VMware**
+* **XenHVM**
+
+# installing
+
+```go get github.com/klauspost/cpuid```
+
+# example
+
+```Go
+package main
+
+import (
+	"fmt"
+	"github.com/klauspost/cpuid"
+)
+
+func main() {
+	// Print basic CPU information:
+	fmt.Println("Name:", cpuid.CPU.BrandName)
+	fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
+	fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
+	fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
+	fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
+	fmt.Println("Features:", cpuid.CPU.Features)
+	fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
+	fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes")
+	fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1I, "bytes")
+	fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes")
+	fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes")
+
+	// Test if we have a specific feature:
+	if cpuid.CPU.SSE() {
+		fmt.Println("We have Streaming SIMD Extensions")
+	}
+}
+```
+
+Sample output:
+```
+>go run main.go
+Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
+PhysicalCores: 2
+ThreadsPerCore: 2
+LogicalCores: 4
+Family 6 Model: 42
+Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
+Cacheline bytes: 64
+We have Streaming SIMD Extensions
+```
+
+# private package
+
+In the "private" folder you can find an autogenerated version of the library that you can include in your own packages.
+
+For this purpose all exports are removed, and functions and constants are lowercased.
+
+This is not the recommended way of using the library, but it is provided for convenience if depending on external packages is difficult for you.
+
+# license
+
+This code is published under an MIT license. See LICENSE file for more information.
diff --git a/vendor/github.com/klauspost/cpuid/cpuid.go b/vendor/github.com/klauspost/cpuid/cpuid.go
new file mode 100644
index 0000000..9230ca5
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/cpuid.go
@@ -0,0 +1,1022 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// Package cpuid provides information about the CPU running the current program.
+//
+// CPU features are detected on startup, and kept for fast access through the life of the application.
+// Currently x86 / x64 (AMD64) is supported.
+//
+// You can access the CPU information by accessing the shared CPU variable of the cpuid library.
+//
+// Package home: https://github.com/klauspost/cpuid
+package cpuid
+
+import "strings"
+
+// Vendor is a representation of a CPU vendor.
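+// It is a plain integer enum, so a detected vendor compares cheaply
+// against the constants below; a hypothetical caller might write:
+//
+//	if cpuid.CPU.VendorID == cpuid.Intel {
+//		// Intel-specific tuning
+//	}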
+type Vendor int + +const ( + Other Vendor = iota + Intel + AMD + VIA + Transmeta + NSC + KVM // Kernel-based Virtual Machine + MSVM // Microsoft Hyper-V or Windows Virtual PC + VMware + XenHVM +) + +const ( + CMOV = 1 << iota // i686 CMOV + NX // NX (No-Execute) bit + AMD3DNOW // AMD 3DNOW + AMD3DNOWEXT // AMD 3DNowExt + MMX // standard MMX + MMXEXT // SSE integer functions or AMD MMX ext + SSE // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSSE3 // Conroe SSSE3 functions + SSE4 // Penryn SSE4.1 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSE42 // Nehalem SSE4.2 functions + AVX // AVX functions + AVX2 // AVX2 functions + FMA3 // Intel FMA 3 + FMA4 // Bulldozer FMA4 functions + XOP // Bulldozer XOP functions + F16C // Half-precision floating-point conversion + BMI1 // Bit Manipulation Instruction Set 1 + BMI2 // Bit Manipulation Instruction Set 2 + TBM // AMD Trailing Bit Manipulation + LZCNT // LZCNT instruction + POPCNT // POPCNT instruction + AESNI // Advanced Encryption Standard New Instructions + CLMUL // Carry-less Multiplication + HTT // Hyperthreading (enabled) + HLE // Hardware Lock Elision + RTM // Restricted Transactional Memory + RDRAND // RDRAND instruction is available + RDSEED // RDSEED instruction is available + ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + SHA // Intel SHA Extensions + AVX512F // AVX-512 Foundation + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512BW // AVX-512 Byte and Word Instructions + AVX512VL // AVX-512 Vector Length Extensions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + MPX // Intel MPX (Memory Protection Extensions) + ERMS // Enhanced REP MOVSB/STOSB + RDTSCP // RDTSCP Instruction + CX16 // CMPXCHG16B Instruction + SGX // Software Guard Extensions + + // Performance indicators + SSE2SLOW // SSE2 is supported, but usually not faster + SSE3SLOW // SSE3 is supported, but usually not faster + ATOM // Atom processor, some SSSE3 instructions are slower +) + +var flagNames = map[Flags]string{ + CMOV: "CMOV", // i686 CMOV + NX: "NX", // NX (No-Execute) bit + AMD3DNOW: "AMD3DNOW", // AMD 3DNOW + AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt + MMX: "MMX", // Standard MMX + MMXEXT: "MMXEXT", // SSE integer functions or AMD MMX ext + SSE: "SSE", // SSE functions + SSE2: "SSE2", // P4 SSE2 functions + SSE3: "SSE3", // Prescott SSE3 functions + SSSE3: "SSSE3", // Conroe SSSE3 functions + SSE4: "SSE4.1", // Penryn SSE4.1 functions + SSE4A: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions + SSE42: "SSE4.2", // Nehalem SSE4.2 functions + AVX: "AVX", // AVX functions + AVX2: "AVX2", // AVX functions + FMA3: "FMA3", // Intel FMA 3 + FMA4: "FMA4", // Bulldozer FMA4 functions + XOP: "XOP", // Bulldozer XOP functions + F16C: "F16C", // Half-precision floating-point conversion + BMI1: "BMI1", // Bit Manipulation Instruction Set 1 + BMI2: "BMI2", // Bit Manipulation Instruction Set 2 + TBM: "TBM", // AMD Trailing Bit Manipulation + LZCNT: "LZCNT", // LZCNT instruction + POPCNT: "POPCNT", // POPCNT instruction + AESNI: "AESNI", // Advanced Encryption Standard New Instructions + CLMUL: "CLMUL", // Carry-less Multiplication + HTT: "HTT", // Hyperthreading (enabled) + HLE: "HLE", // Hardware Lock Elision + RTM: "RTM", // 
Restricted Transactional Memory + RDRAND: "RDRAND", // RDRAND instruction is available + RDSEED: "RDSEED", // RDSEED instruction is available + ADX: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + SHA: "SHA", // Intel SHA Extensions + AVX512F: "AVX512F", // AVX-512 Foundation + AVX512DQ: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions + AVX512IFMA: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF: "AVX512PF", // AVX-512 Prefetch Instructions + AVX512ER: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions + AVX512CD: "AVX512CD", // AVX-512 Conflict Detection Instructions + AVX512BW: "AVX512BW", // AVX-512 Byte and Word Instructions + AVX512VL: "AVX512VL", // AVX-512 Vector Length Extensions + AVX512VBMI: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions + MPX: "MPX", // Intel MPX (Memory Protection Extensions) + ERMS: "ERMS", // Enhanced REP MOVSB/STOSB + RDTSCP: "RDTSCP", // RDTSCP Instruction + CX16: "CX16", // CMPXCHG16B Instruction + SGX: "SGX", // Software Guard Extensions + + // Performance indicators + SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster + SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster + ATOM: "ATOM", // Atom processor, some SSSE3 instructions are slower + +} + +// CPUInfo contains information about the detected system CPU. +type CPUInfo struct { + BrandName string // Brand name reported by the CPU + VendorID Vendor // Comparable CPU vendor ID + Features Flags // Features of the CPU + PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. + LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + Family int // CPU family number + Model int // CPU model number + CacheLine int // Cache line size in bytes. Will be 0 if undetectable. + Cache struct { + L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected + L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected + L2 int // L2 Cache (per core or shared). Will be -1 if undetected + L3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected + } + SGX SGXSupport + maxFunc uint32 + maxExFunc uint32 +} + +var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) +var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) +var xgetbv func(index uint32) (eax, edx uint32) +var rdtscpAsm func() (eax, ebx, ecx, edx uint32) + +// CPU contains information about the CPU as detected on startup, +// or when Detect last was called. +// +// Use this as the primary entry point to you data, +// this way queries are +var CPU CPUInfo + +func init() { + initCPU() + Detect() +} + +// Detect will re-detect current CPU info. +// This will replace the content of the exported CPU variable. +// +// Unless you expect the CPU to change while you are running your program +// you should not need to call this function. +// If you call this, you must ensure that no other goroutine is accessing the +// exported CPU variable. 
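+//
+// A minimal sketch of explicit re-detection (hypothetical usage; the
+// package-level detection performed in init is normally all that is needed):
+//
+//	cpuid.Detect()
+//	fmt.Println("Features:", cpuid.CPU.Features)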
+func Detect() { + CPU.maxFunc = maxFunctionID() + CPU.maxExFunc = maxExtendedFunction() + CPU.BrandName = brandName() + CPU.CacheLine = cacheLine() + CPU.Family, CPU.Model = familyModel() + CPU.Features = support() + CPU.SGX = sgx(CPU.Features&SGX != 0) + CPU.ThreadsPerCore = threadsPerCore() + CPU.LogicalCores = logicalCores() + CPU.PhysicalCores = physicalCores() + CPU.VendorID = vendorID() + CPU.cacheSize() +} + +// Generated here: http://play.golang.org/p/BxFH2Gdc0G + +// Cmov indicates support of CMOV instructions +func (c CPUInfo) Cmov() bool { + return c.Features&CMOV != 0 +} + +// Amd3dnow indicates support of AMD 3DNOW! instructions +func (c CPUInfo) Amd3dnow() bool { + return c.Features&AMD3DNOW != 0 +} + +// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions +func (c CPUInfo) Amd3dnowExt() bool { + return c.Features&AMD3DNOWEXT != 0 +} + +// MMX indicates support of MMX instructions +func (c CPUInfo) MMX() bool { + return c.Features&MMX != 0 +} + +// MMXExt indicates support of MMXEXT instructions +// (SSE integer functions or AMD MMX ext) +func (c CPUInfo) MMXExt() bool { + return c.Features&MMXEXT != 0 +} + +// SSE indicates support of SSE instructions +func (c CPUInfo) SSE() bool { + return c.Features&SSE != 0 +} + +// SSE2 indicates support of SSE 2 instructions +func (c CPUInfo) SSE2() bool { + return c.Features&SSE2 != 0 +} + +// SSE3 indicates support of SSE 3 instructions +func (c CPUInfo) SSE3() bool { + return c.Features&SSE3 != 0 +} + +// SSSE3 indicates support of SSSE 3 instructions +func (c CPUInfo) SSSE3() bool { + return c.Features&SSSE3 != 0 +} + +// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions +func (c CPUInfo) SSE4() bool { + return c.Features&SSE4 != 0 +} + +// SSE42 indicates support of SSE4.2 instructions +func (c CPUInfo) SSE42() bool { + return c.Features&SSE42 != 0 +} + +// AVX indicates support of AVX instructions +// and operating system support of AVX instructions +func (c CPUInfo) AVX() bool { + return c.Features&AVX != 0 +} + +// AVX2 indicates support of AVX2 instructions +func (c CPUInfo) AVX2() bool { + return c.Features&AVX2 != 0 +} + +// FMA3 indicates support of FMA3 instructions +func (c CPUInfo) FMA3() bool { + return c.Features&FMA3 != 0 +} + +// FMA4 indicates support of FMA4 instructions +func (c CPUInfo) FMA4() bool { + return c.Features&FMA4 != 0 +} + +// XOP indicates support of XOP instructions +func (c CPUInfo) XOP() bool { + return c.Features&XOP != 0 +} + +// F16C indicates support of F16C instructions +func (c CPUInfo) F16C() bool { + return c.Features&F16C != 0 +} + +// BMI1 indicates support of BMI1 instructions +func (c CPUInfo) BMI1() bool { + return c.Features&BMI1 != 0 +} + +// BMI2 indicates support of BMI2 instructions +func (c CPUInfo) BMI2() bool { + return c.Features&BMI2 != 0 +} + +// TBM indicates support of TBM instructions +// (AMD Trailing Bit Manipulation) +func (c CPUInfo) TBM() bool { + return c.Features&TBM != 0 +} + +// Lzcnt indicates support of LZCNT instruction +func (c CPUInfo) Lzcnt() bool { + return c.Features&LZCNT != 0 +} + +// Popcnt indicates support of POPCNT instruction +func (c CPUInfo) Popcnt() bool { + return c.Features&POPCNT != 0 +} + +// HTT indicates the processor has Hyperthreading enabled +func (c CPUInfo) HTT() bool { + return c.Features&HTT != 0 +} + +// SSE2Slow indicates that SSE2 may be slow on this processor +func (c CPUInfo) SSE2Slow() bool { + return c.Features&SSE2SLOW != 0 +} + +// SSE3Slow indicates that SSE3 may be slow on this processor 
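+// and, like the accessors above, is a plain bit test on c.Features.
+// A hypothetical caller can combine these flags for runtime dispatch:
+//
+//	if cpuid.CPU.SSE3() && !cpuid.CPU.SSE3Slow() {
+//		// take an SSE3-optimized code path
+//	}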
+func (c CPUInfo) SSE3Slow() bool { + return c.Features&SSE3SLOW != 0 +} + +// AesNi indicates support of AES-NI instructions +// (Advanced Encryption Standard New Instructions) +func (c CPUInfo) AesNi() bool { + return c.Features&AESNI != 0 +} + +// Clmul indicates support of CLMUL instructions +// (Carry-less Multiplication) +func (c CPUInfo) Clmul() bool { + return c.Features&CLMUL != 0 +} + +// NX indicates support of NX (No-Execute) bit +func (c CPUInfo) NX() bool { + return c.Features&NX != 0 +} + +// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions +func (c CPUInfo) SSE4A() bool { + return c.Features&SSE4A != 0 +} + +// HLE indicates support of Hardware Lock Elision +func (c CPUInfo) HLE() bool { + return c.Features&HLE != 0 +} + +// RTM indicates support of Restricted Transactional Memory +func (c CPUInfo) RTM() bool { + return c.Features&RTM != 0 +} + +// Rdrand indicates support of RDRAND instruction is available +func (c CPUInfo) Rdrand() bool { + return c.Features&RDRAND != 0 +} + +// Rdseed indicates support of RDSEED instruction is available +func (c CPUInfo) Rdseed() bool { + return c.Features&RDSEED != 0 +} + +// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions) +func (c CPUInfo) ADX() bool { + return c.Features&ADX != 0 +} + +// SHA indicates support of Intel SHA Extensions +func (c CPUInfo) SHA() bool { + return c.Features&SHA != 0 +} + +// AVX512F indicates support of AVX-512 Foundation +func (c CPUInfo) AVX512F() bool { + return c.Features&AVX512F != 0 +} + +// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions +func (c CPUInfo) AVX512DQ() bool { + return c.Features&AVX512DQ != 0 +} + +// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions +func (c CPUInfo) AVX512IFMA() bool { + return c.Features&AVX512IFMA != 0 +} + +// AVX512PF indicates support of AVX-512 Prefetch Instructions +func (c CPUInfo) AVX512PF() bool { + return c.Features&AVX512PF != 0 +} + +// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions +func (c CPUInfo) AVX512ER() bool { + return c.Features&AVX512ER != 0 +} + +// AVX512CD indicates support of AVX-512 Conflict Detection Instructions +func (c CPUInfo) AVX512CD() bool { + return c.Features&AVX512CD != 0 +} + +// AVX512BW indicates support of AVX-512 Byte and Word Instructions +func (c CPUInfo) AVX512BW() bool { + return c.Features&AVX512BW != 0 +} + +// AVX512VL indicates support of AVX-512 Vector Length Extensions +func (c CPUInfo) AVX512VL() bool { + return c.Features&AVX512VL != 0 +} + +// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions +func (c CPUInfo) AVX512VBMI() bool { + return c.Features&AVX512VBMI != 0 +} + +// MPX indicates support of Intel MPX (Memory Protection Extensions) +func (c CPUInfo) MPX() bool { + return c.Features&MPX != 0 +} + +// ERMS indicates support of Enhanced REP MOVSB/STOSB +func (c CPUInfo) ERMS() bool { + return c.Features&ERMS != 0 +} + +func (c CPUInfo) RDTSCP() bool { + return c.Features&RDTSCP != 0 +} + +func (c CPUInfo) CX16() bool { + return c.Features&CX16 != 0 +} + +// Atom indicates an Atom processor +func (c CPUInfo) Atom() bool { + return c.Features&ATOM != 0 +} + +// Intel returns true if vendor is recognized as Intel +func (c CPUInfo) Intel() bool { + return c.VendorID == Intel +} + +// AMD returns true if vendor is recognized as AMD +func (c CPUInfo) AMD() bool { + return c.VendorID == AMD +} + +// Transmeta returns true if vendor is 
 recognized as Transmeta
+func (c CPUInfo) Transmeta() bool {
+	return c.VendorID == Transmeta
+}
+
+// NSC returns true if vendor is recognized as National Semiconductor
+func (c CPUInfo) NSC() bool {
+	return c.VendorID == NSC
+}
+
+// VIA returns true if vendor is recognized as VIA
+func (c CPUInfo) VIA() bool {
+	return c.VendorID == VIA
+}
+
+// RTCounter returns the 64-bit time-stamp counter.
+// Uses the RDTSCP instruction. The value 0 is returned
+// if the CPU does not support the instruction.
+func (c CPUInfo) RTCounter() uint64 {
+	if !c.RDTSCP() {
+		return 0
+	}
+	a, _, _, d := rdtscpAsm()
+	return uint64(a) | (uint64(d) << 32)
+}
+
+// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP result.
+// This variable is OS dependent, but on Linux contains information
+// about the current cpu/core the code is running on.
+// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
+func (c CPUInfo) Ia32TscAux() uint32 {
+	if !c.RDTSCP() {
+		return 0
+	}
+	_, _, ecx, _ := rdtscpAsm()
+	return ecx
+}
+
+// LogicalCPU will return the Logical CPU the code is currently executing on.
+// This is likely to change when the OS re-schedules the running thread
+// to another CPU.
+// If the current core cannot be detected, -1 will be returned.
+func (c CPUInfo) LogicalCPU() int {
+	if c.maxFunc < 1 {
+		return -1
+	}
+	_, ebx, _, _ := cpuid(1)
+	return int(ebx >> 24)
+}
+
+// VM will return true if the CPU ID indicates we are in
+// a virtual machine. This is only a hint, and will very likely
+// have many false negatives.
+func (c CPUInfo) VM() bool {
+	switch c.VendorID {
+	case MSVM, KVM, VMware, XenHVM:
+		return true
+	}
+	return false
+}
+
+// Flags contains detected CPU features and characteristics
+type Flags uint64
+
+// String returns a string representation of the detected
+// CPU features.
+func (f Flags) String() string {
+	return strings.Join(f.Strings(), ",")
+}
+
+// Strings returns an array of the detected features.
+func (f Flags) Strings() []string {
+	s := support()
+	r := make([]string, 0, 20)
+	for i := uint(0); i < 64; i++ {
+		key := Flags(1 << i)
+		val := flagNames[key]
+		if s&key != 0 {
+			r = append(r, val)
+		}
+	}
+	return r
+}
+
+func maxExtendedFunction() uint32 {
+	eax, _, _, _ := cpuid(0x80000000)
+	return eax
+}
+
+func maxFunctionID() uint32 {
+	a, _, _, _ := cpuid(0)
+	return a
+}
+
+func brandName() string {
+	if maxExtendedFunction() >= 0x80000004 {
+		v := make([]uint32, 0, 48)
+		for i := uint32(0); i < 3; i++ {
+			a, b, c, d := cpuid(0x80000002 + i)
+			v = append(v, a, b, c, d)
+		}
+		return strings.Trim(string(valAsString(v...)), " ")
+	}
+	return "unknown"
+}
+
+func threadsPerCore() int {
+	mfi := maxFunctionID()
+	if mfi < 0x4 || vendorID() != Intel {
+		return 1
+	}
+
+	if mfi < 0xb {
+		_, b, _, d := cpuid(1)
+		if (d & (1 << 28)) != 0 {
+			// v will contain logical core count
+			v := (b >> 16) & 255
+			if v > 1 {
+				a4, _, _, _ := cpuid(4)
+				// physical cores
+				v2 := (a4 >> 26) + 1
+				if v2 > 0 {
+					return int(v) / int(v2)
+				}
+			}
+		}
+		return 1
+	}
+	_, b, _, _ := cpuidex(0xb, 0)
+	if b&0xffff == 0 {
+		return 1
+	}
+	return int(b & 0xffff)
+}
+
+func logicalCores() int {
+	mfi := maxFunctionID()
+	switch vendorID() {
+	case Intel:
+		// Use this on old Intel processors
+		if mfi < 0xb {
+			if mfi < 1 {
+				return 0
+			}
+			// CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID)
+			// that can be assigned to logical processors in a physical package.
+
+			// The value may not be the same as the number of logical processors that are present in the hardware of a physical package.
+			_, ebx, _, _ := cpuid(1)
+			logical := (ebx >> 16) & 0xff
+			return int(logical)
+		}
+		_, b, _, _ := cpuidex(0xb, 1)
+		return int(b & 0xffff)
+	case AMD:
+		_, b, _, _ := cpuid(1)
+		return int((b >> 16) & 0xff)
+	default:
+		return 0
+	}
+}
+
+func familyModel() (int, int) {
+	if maxFunctionID() < 0x1 {
+		return 0, 0
+	}
+	eax, _, _, _ := cpuid(1)
+	family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
+	model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
+	return int(family), int(model)
+}
+
+func physicalCores() int {
+	switch vendorID() {
+	case Intel:
+		return logicalCores() / threadsPerCore()
+	case AMD:
+		if maxExtendedFunction() >= 0x80000008 {
+			_, _, c, _ := cpuid(0x80000008)
+			return int(c&0xff) + 1
+		}
+	}
+	return 0
+}
+
+// Excerpt from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
+var vendorMapping = map[string]Vendor{
+	"AMDisbetter!": AMD,
+	"AuthenticAMD": AMD,
+	"CentaurHauls": VIA,
+	"GenuineIntel": Intel,
+	"TransmetaCPU": Transmeta,
+	"GenuineTMx86": Transmeta,
+	"Geode by NSC": NSC,
+	"VIA VIA VIA ": VIA,
+	"KVMKVMKVMKVM": KVM,
+	"Microsoft Hv": MSVM,
+	"VMwareVMware": VMware,
+	"XenVMMXenVMM": XenHVM,
+}
+
+func vendorID() Vendor {
+	_, b, c, d := cpuid(0)
+	v := valAsString(b, d, c)
+	vend, ok := vendorMapping[string(v)]
+	if !ok {
+		return Other
+	}
+	return vend
+}
+
+func cacheLine() int {
+	if maxFunctionID() < 0x1 {
+		return 0
+	}
+
+	_, ebx, _, _ := cpuid(1)
+	cache := (ebx & 0xff00) >> 5 // CLFLUSH line size: EBX[15:8] * 8 bytes
+	if cache == 0 && maxExtendedFunction() >= 0x80000006 {
+		_, _, ecx, _ := cpuid(0x80000006)
+		cache = ecx & 0xff // cacheline size
+	}
+	// TODO: Read from Cache and TLB Information
+	return int(cache)
+}
+
+func (c *CPUInfo) cacheSize() {
+	c.Cache.L1D = -1
+	c.Cache.L1I = -1
+	c.Cache.L2 = -1
+	c.Cache.L3 = -1
+	vendor := vendorID()
+	switch vendor {
+	case Intel:
+		if maxFunctionID() < 4 {
+			return
+		}
+		for i := uint32(0); ; i++ {
+			eax, ebx, ecx, _ := cpuidex(4, i)
+			cacheType := eax & 15
+			if cacheType == 0 {
+				break
+			}
+			cacheLevel := (eax >> 5) & 7
+			coherency := int(ebx&0xfff) + 1
+			partitions := int((ebx>>12)&0x3ff) + 1
+			associativity := int((ebx>>22)&0x3ff) + 1
+			sets := int(ecx) + 1
+			size := associativity * partitions * coherency * sets
+			switch cacheLevel {
+			case 1:
+				if cacheType == 1 {
+					// 1 = Data Cache
+					c.Cache.L1D = size
+				} else if cacheType == 2 {
+					// 2 = Instruction Cache
+					c.Cache.L1I = size
+				} else {
+					// Unified L1 cache: count it as both data and instruction cache.
+					if c.Cache.L1D < 0 {
+						c.Cache.L1D = size
+					}
+					if c.Cache.L1I < 0 {
+						c.Cache.L1I = size
+					}
+				}
+			case 2:
+				c.Cache.L2 = size
+			case 3:
+				c.Cache.L3 = size
+			}
+		}
+	case AMD:
+		// Untested.
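+		// CPUID leaf 0x80000005 reports the L1 data/instruction cache
+		// sizes in KB in the top bytes of ECX/EDX; leaf 0x80000006
+		// reports the L2 size in KB in ECX[31:16].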
+ if maxExtendedFunction() < 0x80000005 { + return + } + _, _, ecx, edx := cpuid(0x80000005) + c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) + c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) + + if maxExtendedFunction() < 0x80000006 { + return + } + _, _, ecx, _ = cpuid(0x80000006) + c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) + } + + return +} + +type SGXSupport struct { + Available bool + SGX1Supported bool + SGX2Supported bool + MaxEnclaveSizeNot64 int64 + MaxEnclaveSize64 int64 +} + +func sgx(available bool) (rval SGXSupport) { + rval.Available = available + + if !available { + return + } + + a, _, _, d := cpuidex(0x12, 0) + rval.SGX1Supported = a&0x01 != 0 + rval.SGX2Supported = a&0x02 != 0 + rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 + rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 + + return +} + +func support() Flags { + mfi := maxFunctionID() + vend := vendorID() + if mfi < 0x1 { + return 0 + } + rval := uint64(0) + _, _, c, d := cpuid(1) + if (d & (1 << 15)) != 0 { + rval |= CMOV + } + if (d & (1 << 23)) != 0 { + rval |= MMX + } + if (d & (1 << 25)) != 0 { + rval |= MMXEXT + } + if (d & (1 << 25)) != 0 { + rval |= SSE + } + if (d & (1 << 26)) != 0 { + rval |= SSE2 + } + if (c & 1) != 0 { + rval |= SSE3 + } + if (c & 0x00000200) != 0 { + rval |= SSSE3 + } + if (c & 0x00080000) != 0 { + rval |= SSE4 + } + if (c & 0x00100000) != 0 { + rval |= SSE42 + } + if (c & (1 << 25)) != 0 { + rval |= AESNI + } + if (c & (1 << 1)) != 0 { + rval |= CLMUL + } + if c&(1<<23) != 0 { + rval |= POPCNT + } + if c&(1<<30) != 0 { + rval |= RDRAND + } + if c&(1<<29) != 0 { + rval |= F16C + } + if c&(1<<13) != 0 { + rval |= CX16 + } + if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { + if threadsPerCore() > 1 { + rval |= HTT + } + } + + // Check XGETBV, OXSAVE and AVX bits + if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + rval |= AVX + if (c & 0x00001000) != 0 { + rval |= FMA3 + } + } + } + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. + if mfi >= 7 { + _, ebx, ecx, _ := cpuidex(7, 0) + if (rval&AVX) != 0 && (ebx&0x00000020) != 0 { + rval |= AVX2 + } + if (ebx & 0x00000008) != 0 { + rval |= BMI1 + if (ebx & 0x00000100) != 0 { + rval |= BMI2 + } + } + if ebx&(1<<2) != 0 { + rval |= SGX + } + if ebx&(1<<4) != 0 { + rval |= HLE + } + if ebx&(1<<9) != 0 { + rval |= ERMS + } + if ebx&(1<<11) != 0 { + rval |= RTM + } + if ebx&(1<<14) != 0 { + rval |= MPX + } + if ebx&(1<<18) != 0 { + rval |= RDSEED + } + if ebx&(1<<19) != 0 { + rval |= ADX + } + if ebx&(1<<29) != 0 { + rval |= SHA + } + + // Only detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, _ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). 
+ if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { + if ebx&(1<<16) != 0 { + rval |= AVX512F + } + if ebx&(1<<17) != 0 { + rval |= AVX512DQ + } + if ebx&(1<<21) != 0 { + rval |= AVX512IFMA + } + if ebx&(1<<26) != 0 { + rval |= AVX512PF + } + if ebx&(1<<27) != 0 { + rval |= AVX512ER + } + if ebx&(1<<28) != 0 { + rval |= AVX512CD + } + if ebx&(1<<30) != 0 { + rval |= AVX512BW + } + if ebx&(1<<31) != 0 { + rval |= AVX512VL + } + // ecx + if ecx&(1<<1) != 0 { + rval |= AVX512VBMI + } + } + } + } + + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + rval |= LZCNT + rval |= POPCNT + } + if (d & (1 << 31)) != 0 { + rval |= AMD3DNOW + } + if (d & (1 << 30)) != 0 { + rval |= AMD3DNOWEXT + } + if (d & (1 << 23)) != 0 { + rval |= MMX + } + if (d & (1 << 22)) != 0 { + rval |= MMXEXT + } + if (c & (1 << 6)) != 0 { + rval |= SSE4A + } + if d&(1<<20) != 0 { + rval |= NX + } + if d&(1<<27) != 0 { + rval |= RDTSCP + } + + /* Allow for selectively disabling SSE2 functions on AMD processors + with SSE2 support but not SSE4a. This includes Athlon64, some + Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster + than SSE2 often enough to utilize this special-case flag. + AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case + so that SSE2 is used unless explicitly disabled by checking + AV_CPU_FLAG_SSE2SLOW. */ + if vendorID() != Intel && + rval&SSE2 != 0 && (c&0x00000040) == 0 { + rval |= SSE2SLOW + } + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. */ + if (rval & AVX) != 0 { + if (c & 0x00000800) != 0 { + rval |= XOP + } + if (c & 0x00010000) != 0 { + rval |= FMA4 + } + } + + if vendorID() == Intel { + family, model := familyModel() + if family == 6 && (model == 9 || model == 13 || model == 14) { + /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and + * 6/14 (core1 "yonah") theoretically support sse2, but it's + * usually slower than mmx. */ + if (rval & SSE2) != 0 { + rval |= SSE2SLOW + } + if (rval & SSE3) != 0 { + rval |= SSE3SLOW + } + } + /* The Atom processor has SSSE3 support, which is useful in many cases, + * but sometimes the SSSE3 version is slower than the SSE2 equivalent + * on the Atom, but is generally faster on other processors supporting + * SSSE3. This flag allows for selectively disabling certain SSSE3 + * functions on the Atom. */ + if family == 6 && model == 28 { + rval |= ATOM + } + } + } + return Flags(rval) +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/vendor/github.com/klauspost/cpuid/cpuid_386.s b/vendor/github.com/klauspost/cpuid/cpuid_386.s new file mode 100644 index 0000000..4d73171 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid_386.s @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
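+//
+// This file holds the 32-bit (GOARCH=386) assembly implementations of
+// the detection hooks declared in detect_intel.go: asmCpuid, asmCpuidex,
+// asmXgetbv and asmRdtscpAsm.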
+ +// +build 386,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s new file mode 100644 index 0000000..3c1d60e --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build amd64,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/detect_intel.go b/vendor/github.com/klauspost/cpuid/detect_intel.go new file mode 100644 index 0000000..a5f04dd --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/detect_intel.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build 386,!gccgo amd64,!gccgo + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm +} diff --git a/vendor/github.com/klauspost/cpuid/detect_ref.go b/vendor/github.com/klauspost/cpuid/detect_ref.go new file mode 100644 index 0000000..909c5d9 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/detect_ref.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
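+//
+// This file is the pure-Go fallback used on non-x86 platforms and under
+// gccgo: every detection hook reports zeroes, so no features are found.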
+ +// +build !amd64,!386 gccgo + +package cpuid + +func initCPU() { + cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + xgetbv = func(index uint32) (eax, edx uint32) { + return 0, 0 + } + + rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } +} diff --git a/vendor/github.com/klauspost/cpuid/generate.go b/vendor/github.com/klauspost/cpuid/generate.go new file mode 100644 index 0000000..c060b81 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/generate.go @@ -0,0 +1,3 @@ +package cpuid + +//go:generate go run private-gen.go diff --git a/vendor/github.com/klauspost/cpuid/private-gen.go b/vendor/github.com/klauspost/cpuid/private-gen.go new file mode 100644 index 0000000..437333d --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private-gen.go @@ -0,0 +1,476 @@ +// +build ignore + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "io" + "io/ioutil" + "log" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +var inFiles = []string{"cpuid.go", "cpuid_test.go"} +var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"} +var fileSet = token.NewFileSet() +var reWrites = []rewrite{ + initRewrite("CPUInfo -> cpuInfo"), + initRewrite("Vendor -> vendor"), + initRewrite("Flags -> flags"), + initRewrite("Detect -> detect"), + initRewrite("CPU -> cpu"), +} +var excludeNames = map[string]bool{"string": true, "join": true, "trim": true, + // cpuid_test.go + "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true, +} + +var excludePrefixes = []string{"test", "benchmark"} + +func main() { + Package := "private" + parserMode := parser.ParseComments + exported := make(map[string]rewrite) + for _, file := range inFiles { + in, err := os.Open(file) + if err != nil { + log.Fatalf("opening input", err) + } + + src, err := ioutil.ReadAll(in) + if err != nil { + log.Fatalf("reading input", err) + } + + astfile, err := parser.ParseFile(fileSet, file, src, parserMode) + if err != nil { + log.Fatalf("parsing input", err) + } + + for _, rw := range reWrites { + astfile = rw(astfile) + } + + // Inspect the AST and print all identifiers and literals. 
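+	// Every exported identifier that survives the exclusion lists below is
+	// recorded as a "Name -> name" rewrite rule, lowercasing the generated
+	// package's entire public API.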
+ var startDecl token.Pos + var endDecl token.Pos + ast.Inspect(astfile, func(n ast.Node) bool { + var s string + switch x := n.(type) { + case *ast.Ident: + if x.IsExported() { + t := strings.ToLower(x.Name) + for _, pre := range excludePrefixes { + if strings.HasPrefix(t, pre) { + return true + } + } + if excludeNames[t] != true { + //if x.Pos() > startDecl && x.Pos() < endDecl { + exported[x.Name] = initRewrite(x.Name + " -> " + t) + } + } + + case *ast.GenDecl: + if x.Tok == token.CONST && x.Lparen > 0 { + startDecl = x.Lparen + endDecl = x.Rparen + // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl)) + } + } + if s != "" { + fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s) + } + return true + }) + + for _, rw := range exported { + astfile = rw(astfile) + } + + var buf bytes.Buffer + + printer.Fprint(&buf, fileSet, astfile) + + // Remove package documentation and insert information + s := buf.String() + ind := strings.Index(buf.String(), "\npackage cpuid") + s = s[ind:] + s = "// Generated, DO NOT EDIT,\n" + + "// but copy it to your own project and rename the package.\n" + + "// See more at http://github.com/klauspost/cpuid\n" + + s + + outputName := Package + string(os.PathSeparator) + file + + err = ioutil.WriteFile(outputName, []byte(s), 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } + log.Println("Generated", outputName) + } + + for _, file := range copyFiles { + dst := "" + if strings.HasPrefix(file, "cpuid") { + dst = Package + string(os.PathSeparator) + file + } else { + dst = Package + string(os.PathSeparator) + "cpuid_" + file + } + err := copyFile(file, dst) + if err != nil { + log.Fatalf("copying file: %s", err) + } + log.Println("Copied", dst) + } +} + +// CopyFile copies a file from src to dst. If src and dst files exist, and are +// the same, then return success. Copy the file contents from src to dst. +func copyFile(src, dst string) (err error) { + sfi, err := os.Stat(src) + if err != nil { + return + } + if !sfi.Mode().IsRegular() { + // cannot copy non-regular files (e.g., directories, + // symlinks, devices, etc.) + return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String()) + } + dfi, err := os.Stat(dst) + if err != nil { + if !os.IsNotExist(err) { + return + } + } else { + if !(dfi.Mode().IsRegular()) { + return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String()) + } + if os.SameFile(sfi, dfi) { + return + } + } + err = copyFileContents(src, dst) + return +} + +// copyFileContents copies the contents of the file named src to the file named +// by dst. The file will be created if it does not already exist. If the +// destination file exists, all it's contents will be replaced by the contents +// of the source file. 
+func copyFileContents(src, dst string) (err error) { + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + cerr := out.Close() + if err == nil { + err = cerr + } + }() + if _, err = io.Copy(out, in); err != nil { + return + } + err = out.Sync() + return +} + +type rewrite func(*ast.File) *ast.File + +// Mostly copied from gofmt +func initRewrite(rewriteRule string) rewrite { + f := strings.Split(rewriteRule, "->") + if len(f) != 2 { + fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") + os.Exit(2) + } + pattern := parseExpr(f[0], "pattern") + replace := parseExpr(f[1], "replacement") + return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } +} + +// parseExpr parses s as an expression. +// It might make sense to expand this to allow statement patterns, +// but there are problems with preserving formatting and also +// with what a wildcard for a statement looks like. +func parseExpr(s, what string) ast.Expr { + x, err := parser.ParseExpr(s) + if err != nil { + fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) + os.Exit(2) + } + return x +} + +// Keep this function for debugging. +/* +func dump(msg string, val reflect.Value) { + fmt.Printf("%s:\n", msg) + ast.Print(fileSet, val.Interface()) + fmt.Println() +} +*/ + +// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. +func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { + cmap := ast.NewCommentMap(fileSet, p, p.Comments) + m := make(map[string]reflect.Value) + pat := reflect.ValueOf(pattern) + repl := reflect.ValueOf(replace) + + var rewriteVal func(val reflect.Value) reflect.Value + rewriteVal = func(val reflect.Value) reflect.Value { + // don't bother if val is invalid to start with + if !val.IsValid() { + return reflect.Value{} + } + for k := range m { + delete(m, k) + } + val = apply(rewriteVal, val) + if match(m, pat, val) { + val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) + } + return val + } + + r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) + r.Comments = cmap.Filter(r).Comments() // recreate comments list + return r +} + +// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. +func set(x, y reflect.Value) { + // don't bother if x cannot be set or y is invalid + if !x.CanSet() || !y.IsValid() { + return + } + defer func() { + if x := recover(); x != nil { + if s, ok := x.(string); ok && + (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { + // x cannot be set to y - ignore this rewrite + return + } + panic(x) + } + }() + x.Set(y) +} + +// Values/types for special cases. +var ( + objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) + scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) + + identType = reflect.TypeOf((*ast.Ident)(nil)) + objectPtrType = reflect.TypeOf((*ast.Object)(nil)) + positionType = reflect.TypeOf(token.NoPos) + callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) + scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) +) + +// apply replaces each AST field x in val with f(x), returning val. +// To avoid extra conversions, f operates on the reflect.Value form. 
+func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { + if !val.IsValid() { + return reflect.Value{} + } + + // *ast.Objects introduce cycles and are likely incorrect after + // rewrite; don't follow them but replace with nil instead + if val.Type() == objectPtrType { + return objectPtrNil + } + + // similarly for scopes: they are likely incorrect after a rewrite; + // replace them with nil + if val.Type() == scopePtrType { + return scopePtrNil + } + + switch v := reflect.Indirect(val); v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + e := v.Index(i) + set(e, f(e)) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + e := v.Field(i) + set(e, f(e)) + } + case reflect.Interface: + e := v.Elem() + set(v, f(e)) + } + return val +} + +func isWildcard(s string) bool { + rune, size := utf8.DecodeRuneInString(s) + return size == len(s) && unicode.IsLower(rune) +} + +// match returns true if pattern matches val, +// recording wildcard submatches in m. +// If m == nil, match checks whether pattern == val. +func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { + // Wildcard matches any expression. If it appears multiple + // times in the pattern, it must match the same expression + // each time. + if m != nil && pattern.IsValid() && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) && val.IsValid() { + // wildcards only match valid (non-nil) expressions. + if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { + if old, ok := m[name]; ok { + return match(nil, old, val) + } + m[name] = val + return true + } + } + } + + // Otherwise, pattern and val must match recursively. + if !pattern.IsValid() || !val.IsValid() { + return !pattern.IsValid() && !val.IsValid() + } + if pattern.Type() != val.Type() { + return false + } + + // Special cases. + switch pattern.Type() { + case identType: + // For identifiers, only the names need to match + // (and none of the other *ast.Object information). + // This is a common case, handle it all here instead + // of recursing down any further via reflection. + p := pattern.Interface().(*ast.Ident) + v := val.Interface().(*ast.Ident) + return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name + case objectPtrType, positionType: + // object pointers and token positions always match + return true + case callExprType: + // For calls, the Ellipsis fields (token.Position) must + // match since that is how f(x) and f(x...) are different. + // Check them here but fall through for the remaining fields. + p := pattern.Interface().(*ast.CallExpr) + v := val.Interface().(*ast.CallExpr) + if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { + return false + } + } + + p := reflect.Indirect(pattern) + v := reflect.Indirect(val) + if !p.IsValid() || !v.IsValid() { + return !p.IsValid() && !v.IsValid() + } + + switch p.Kind() { + case reflect.Slice: + if p.Len() != v.Len() { + return false + } + for i := 0; i < p.Len(); i++ { + if !match(m, p.Index(i), v.Index(i)) { + return false + } + } + return true + + case reflect.Struct: + for i := 0; i < p.NumField(); i++ { + if !match(m, p.Field(i), v.Field(i)) { + return false + } + } + return true + + case reflect.Interface: + return match(m, p.Elem(), v.Elem()) + } + + // Handle token integers, etc. + return p.Interface() == v.Interface() +} + +// subst returns a copy of pattern with values from m substituted in place +// of wildcards and pos used as the position of tokens from the pattern. 
+// if m == nil, subst returns a copy of pattern and doesn't change the line +// number information. +func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { + if !pattern.IsValid() { + return reflect.Value{} + } + + // Wildcard gets replaced with map value. + if m != nil && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) { + if old, ok := m[name]; ok { + return subst(nil, old, reflect.Value{}) + } + } + } + + if pos.IsValid() && pattern.Type() == positionType { + // use new position only if old position was valid in the first place + if old := pattern.Interface().(token.Pos); !old.IsValid() { + return pattern + } + return pos + } + + // Otherwise copy. + switch p := pattern; p.Kind() { + case reflect.Slice: + v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) + for i := 0; i < p.Len(); i++ { + v.Index(i).Set(subst(m, p.Index(i), pos)) + } + return v + + case reflect.Struct: + v := reflect.New(p.Type()).Elem() + for i := 0; i < p.NumField(); i++ { + v.Field(i).Set(subst(m, p.Field(i), pos)) + } + return v + + case reflect.Ptr: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos).Addr()) + } + return v + + case reflect.Interface: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos)) + } + return v + } + + return pattern +} diff --git a/vendor/github.com/klauspost/crc32/LICENSE b/vendor/github.com/klauspost/crc32/LICENSE new file mode 100644 index 0000000..4fd5963 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2015 Klaus Post + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/klauspost/crc32/README.md b/vendor/github.com/klauspost/crc32/README.md
new file mode 100644
index 0000000..029625d
--- /dev/null
+++ b/vendor/github.com/klauspost/crc32/README.md
@@ -0,0 +1,87 @@
+# crc32
+CRC32 hash with x64 optimizations
+
+This package is a drop-in replacement for the standard library `hash/crc32` package that features SSE 4.2 optimizations on x64 platforms, giving roughly a 10x speedup.
+
+[![Build Status](https://travis-ci.org/klauspost/crc32.svg?branch=master)](https://travis-ci.org/klauspost/crc32)
+
+# usage
+
+Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer.
+
+Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go.
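+
+For example, a minimal sketch that computes both an IEEE and a Castagnoli (CRC-32C) checksum, using only this package's exported API:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/crc32"
+)
+
+func main() {
+	data := []byte("hello world")
+
+	// IEEE is the polynomial used by ethernet, gzip, zip, png, ...
+	fmt.Printf("%08x\n", crc32.ChecksumIEEE(data))
+
+	// Castagnoli (CRC-32C) goes through an explicit table.
+	fmt.Printf("%08x\n", crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli)))
+}
+```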
+
+# changes
+* Oct 20, 2016: Changes have been merged to upstream Go. Package updated to match.
+* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable.
+
+
+# performance
+
+For *Go 1.7* performance is equivalent to the standard library, so if you are on Go 1.7 or newer you can switch back to `hash/crc32`.
+
+
+For IEEE tables (the most common), there is approximately a factor 10 speedup with the "CLMUL" (carry-less multiplication) instruction:
+```
+benchmark            old ns/op     new ns/op     delta
+BenchmarkCrc32KB     99955         10258         -89.74%
+
+benchmark            old MB/s     new MB/s     speedup
+BenchmarkCrc32KB     327.83       3194.20      9.74x
+```
+
+For other tables and "CLMUL"-capable machines the performance is the same as the standard library.
+
+Here are some detailed benchmarks, comparing to the Go 1.5 standard library with and without assembler enabled.
+
+```
+Std:   Standard Go 1.5 library
+Crc:   Indicates IEEE type CRC.
+40B:   Size of each slice encoded.
+NoAsm: Assembler was disabled (i.e. not an AMD64 or SSE 4.2+ capable machine).
+Castagnoli: Castagnoli CRC type.
+
+BenchmarkStdCrc40B-4              10000000     158 ns/op       252.88 MB/s
+BenchmarkCrc40BNoAsm-4            20000000     105 ns/op       377.38 MB/s (slice8)
+BenchmarkCrc40B-4                 20000000     105 ns/op       378.77 MB/s (slice8)
+
+BenchmarkStdCrc1KB-4                500000     3604 ns/op      284.10 MB/s
+BenchmarkCrc1KBNoAsm-4             1000000     1463 ns/op      699.79 MB/s (slice8)
+BenchmarkCrc1KB-4                  3000000     396 ns/op       2583.69 MB/s (asm)
+
+BenchmarkStdCrc8KB-4                200000     11417 ns/op     717.48 MB/s (slice8)
+BenchmarkCrc8KBNoAsm-4              200000     11317 ns/op     723.85 MB/s (slice8)
+BenchmarkCrc8KB-4                   500000     2919 ns/op      2805.73 MB/s (asm)
+
+BenchmarkStdCrc32KB-4                30000     45749 ns/op     716.24 MB/s (slice8)
+BenchmarkCrc32KBNoAsm-4              30000     45109 ns/op     726.42 MB/s (slice8)
+BenchmarkCrc32KB-4                  100000     11497 ns/op     2850.09 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnol40B-4   10000000     161 ns/op       246.94 MB/s
+BenchmarkStdCastagnoli40B-4       50000000     28.4 ns/op      1410.69 MB/s (asm)
+BenchmarkCastagnoli40BNoAsm-4     20000000     100 ns/op       398.01 MB/s (slice8)
+BenchmarkCastagnoli40B-4          50000000     28.2 ns/op      1419.54 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnoli1KB-4    500000     3622 ns/op      282.67 MB/s
+BenchmarkStdCastagnoli1KB-4       10000000     144 ns/op       7099.78 MB/s (asm)
+BenchmarkCastagnoli1KBNoAsm-4      1000000     1475 ns/op      694.14 MB/s (slice8)
+BenchmarkCastagnoli1KB-4          10000000     146 ns/op       6993.35 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnoli8KB-4     50000     28781 ns/op     284.63 MB/s
+BenchmarkStdCastagnoli8KB-4        1000000     1029 ns/op      7957.89 MB/s (asm)
+BenchmarkCastagnoli8KBNoAsm-4       200000     11410 ns/op     717.94 MB/s (slice8)
+BenchmarkCastagnoli8KB-4           1000000     1000 ns/op      8188.71 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnoli32KB-4    10000     115426 ns/op    283.89 MB/s
+BenchmarkStdCastagnoli32KB-4        300000     4065 ns/op      8059.13 MB/s (asm)
+BenchmarkCastagnoli32KBNoAsm-4       30000     45171 ns/op     725.41 MB/s (slice8)
+BenchmarkCastagnoli32KB-4           500000     4077 ns/op      8035.89 MB/s (asm)
+```
+
+The IEEE assembler optimizations have been submitted and will be part of the Go 1.6 standard library.
+
+However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7.
+
+# license
+
+Standard Go license. Changes are Copyright (c) 2015 Klaus Post under the same conditions.
diff --git a/vendor/github.com/klauspost/crc32/crc32.go b/vendor/github.com/klauspost/crc32/crc32.go
new file mode 100644
index 0000000..8aa91b1
--- /dev/null
+++ b/vendor/github.com/klauspost/crc32/crc32.go
@@ -0,0 +1,207 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32,
+// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for
+// information.
+//
+// Polynomials are represented in LSB-first form, also known as reversed representation.
+//
+// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials
+// for information.
+package crc32
+
+import (
+	"hash"
+	"sync"
+)
+
+// The size of a CRC-32 checksum in bytes.
+const Size = 4
+
+// Predefined polynomials.
+const (
+	// IEEE is by far and away the most common CRC-32 polynomial.
+	// Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ...
+	IEEE = 0xedb88320
+
+	// Castagnoli's polynomial, used in iSCSI.
+	// Has better error detection characteristics than IEEE.
+	// http://dx.doi.org/10.1109/26.231911
+	Castagnoli = 0x82f63b78
+
+	// Koopman's polynomial.
+	// Also has better error detection characteristics than IEEE.
+ // http://dx.doi.org/10.1109/DSN.2002.1028931 + Koopman = 0xeb31d82e +) + +// Table is a 256-word table representing the polynomial for efficient processing. +type Table [256]uint32 + +// This file makes use of functions implemented in architecture-specific files. +// The interface that they implement is as follows: +// +// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE +// // algorithm is available. +// archAvailableIEEE() bool +// +// // archInitIEEE initializes the architecture-specific CRC3-IEEE algorithm. +// // It can only be called if archAvailableIEEE() returns true. +// archInitIEEE() +// +// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if +// // archInitIEEE() was previously called. +// archUpdateIEEE(crc uint32, p []byte) uint32 +// +// // archAvailableCastagnoli reports whether an architecture-specific +// // CRC32-C algorithm is available. +// archAvailableCastagnoli() bool +// +// // archInitCastagnoli initializes the architecture-specific CRC32-C +// // algorithm. It can only be called if archAvailableCastagnoli() returns +// // true. +// archInitCastagnoli() +// +// // archUpdateCastagnoli updates the given CRC32-C. It can only be called +// // if archInitCastagnoli() was previously called. +// archUpdateCastagnoli(crc uint32, p []byte) uint32 + +// castagnoliTable points to a lazily initialized Table for the Castagnoli +// polynomial. MakeTable will always return this value when asked to make a +// Castagnoli table so we can compare against it to find when the caller is +// using this polynomial. +var castagnoliTable *Table +var castagnoliTable8 *slicing8Table +var castagnoliArchImpl bool +var updateCastagnoli func(crc uint32, p []byte) uint32 +var castagnoliOnce sync.Once + +func castagnoliInit() { + castagnoliTable = simpleMakeTable(Castagnoli) + castagnoliArchImpl = archAvailableCastagnoli() + + if castagnoliArchImpl { + archInitCastagnoli() + updateCastagnoli = archUpdateCastagnoli + } else { + // Initialize the slicing-by-8 table. + castagnoliTable8 = slicingMakeTable(Castagnoli) + updateCastagnoli = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, castagnoliTable8, p) + } + } +} + +// IEEETable is the table for the IEEE polynomial. +var IEEETable = simpleMakeTable(IEEE) + +// ieeeTable8 is the slicing8Table for IEEE +var ieeeTable8 *slicing8Table +var ieeeArchImpl bool +var updateIEEE func(crc uint32, p []byte) uint32 +var ieeeOnce sync.Once + +func ieeeInit() { + ieeeArchImpl = archAvailableIEEE() + + if ieeeArchImpl { + archInitIEEE() + updateIEEE = archUpdateIEEE + } else { + // Initialize the slicing-by-8 table. + ieeeTable8 = slicingMakeTable(IEEE) + updateIEEE = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, ieeeTable8, p) + } + } +} + +// MakeTable returns a Table constructed from the specified polynomial. +// The contents of this Table must not be modified. +func MakeTable(poly uint32) *Table { + switch poly { + case IEEE: + ieeeOnce.Do(ieeeInit) + return IEEETable + case Castagnoli: + castagnoliOnce.Do(castagnoliInit) + return castagnoliTable + } + return simpleMakeTable(poly) +} + +// digest represents the partial evaluation of a checksum. +type digest struct { + crc uint32 + tab *Table +} + +// New creates a new hash.Hash32 computing the CRC-32 checksum +// using the polynomial represented by the Table. +// Its Sum method will lay the value out in big-endian byte order. 
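+//
+// For example (a minimal sketch; MakeTable, Castagnoli and Sum32 are all part
+// of this package's exported API):
+//
+//	h := New(MakeTable(Castagnoli))
+//	h.Write([]byte("hello"))
+//	sum := h.Sum32()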
+func New(tab *Table) hash.Hash32 { + if tab == IEEETable { + ieeeOnce.Do(ieeeInit) + } + return &digest{0, tab} +} + +// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum +// using the IEEE polynomial. +// Its Sum method will lay the value out in big-endian byte order. +func NewIEEE() hash.Hash32 { return New(IEEETable) } + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return 1 } + +func (d *digest) Reset() { d.crc = 0 } + +// Update returns the result of adding the bytes in p to the crc. +func Update(crc uint32, tab *Table, p []byte) uint32 { + switch tab { + case castagnoliTable: + return updateCastagnoli(crc, p) + case IEEETable: + // Unfortunately, because IEEETable is exported, IEEE may be used without a + // call to MakeTable. We have to make sure it gets initialized in that case. + ieeeOnce.Do(ieeeInit) + return updateIEEE(crc, p) + default: + return simpleUpdate(crc, tab, p) + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + switch d.tab { + case castagnoliTable: + d.crc = updateCastagnoli(d.crc, p) + case IEEETable: + // We only create digest objects through New() which takes care of + // initialization in this case. + d.crc = updateIEEE(d.crc, p) + default: + d.crc = simpleUpdate(d.crc, d.tab, p) + } + return len(p), nil +} + +func (d *digest) Sum32() uint32 { return d.crc } + +func (d *digest) Sum(in []byte) []byte { + s := d.Sum32() + return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// Checksum returns the CRC-32 checksum of data +// using the polynomial represented by the Table. +func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) } + +// ChecksumIEEE returns the CRC-32 checksum of data +// using the IEEE polynomial. +func ChecksumIEEE(data []byte) uint32 { + ieeeOnce.Do(ieeeInit) + return updateIEEE(0, data) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.go b/vendor/github.com/klauspost/crc32/crc32_amd64.go new file mode 100644 index 0000000..af2a0b8 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.go @@ -0,0 +1,230 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!gccgo + +// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a +// description of the interface that each architecture-specific file +// implements. + +package crc32 + +import "unsafe" + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// and IEEE CRC. + +// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and use +// CPUID to test for SSE 4.1, 4.2 and CLMUL support. +func haveSSE41() bool +func haveSSE42() bool +func haveCLMUL() bool + +// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42(crc uint32, p []byte) uint32 + +// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42Triple( + crcA, crcB, crcC uint32, + a, b, c []byte, + rounds uint32, +) (retA uint32, retB uint32, retC uint32) + +// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ +// instruction as well as SSE 4.1. 
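+// The assembly requires len(p) to be at least 64 and a multiple of 16 (see
+// crc32_amd64.s); archUpdateIEEE below trims the input to satisfy this.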
+//go:noescape +func ieeeCLMUL(crc uint32, p []byte) uint32 + +var sse42 = haveSSE42() +var useFastIEEE = haveCLMUL() && haveSSE41() + +const castagnoliK1 = 168 +const castagnoliK2 = 1344 + +type sse42Table [4]Table + +var castagnoliSSE42TableK1 *sse42Table +var castagnoliSSE42TableK2 *sse42Table + +func archAvailableCastagnoli() bool { + return sse42 +} + +func archInitCastagnoli() { + if !sse42 { + panic("arch-specific Castagnoli not available") + } + castagnoliSSE42TableK1 = new(sse42Table) + castagnoliSSE42TableK2 = new(sse42Table) + // See description in updateCastagnoli. + // t[0][i] = CRC(i000, O) + // t[1][i] = CRC(0i00, O) + // t[2][i] = CRC(00i0, O) + // t[3][i] = CRC(000i, O) + // where O is a sequence of K zeros. + var tmp [castagnoliK2]byte + for b := 0; b < 4; b++ { + for i := 0; i < 256; i++ { + val := uint32(i) << uint32(b*8) + castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1]) + castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:]) + } + } +} + +// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the +// table given) with the given initial crc value. This corresponds to +// CRC(crc, O) in the description in updateCastagnoli. +func castagnoliShift(table *sse42Table, crc uint32) uint32 { + return table[3][crc>>24] ^ + table[2][(crc>>16)&0xFF] ^ + table[1][(crc>>8)&0xFF] ^ + table[0][crc&0xFF] +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !sse42 { + panic("not available") + } + + // This method is inspired from the algorithm in Intel's white paper: + // "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction" + // The same strategy of splitting the buffer in three is used but the + // combining calculation is different; the complete derivation is explained + // below. + // + // -- The basic idea -- + // + // The CRC32 instruction (available in SSE4.2) can process 8 bytes at a + // time. In recent Intel architectures the instruction takes 3 cycles; + // however the processor can pipeline up to three instructions if they + // don't depend on each other. + // + // Roughly this means that we can process three buffers in about the same + // time we can process one buffer. + // + // The idea is then to split the buffer in three, CRC the three pieces + // separately and then combine the results. + // + // Combining the results requires precomputed tables, so we must choose a + // fixed buffer length to optimize. The longer the length, the faster; but + // only buffers longer than this length will use the optimization. We choose + // two cutoffs and compute tables for both: + // - one around 512: 168*3=504 + // - one around 4KB: 1344*3=4032 + // + // -- The nitty gritty -- + // + // Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with + // initial non-inverted CRC I). This function has the following properties: + // (a) CRC(I, AB) = CRC(CRC(I, A), B) + // (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B) + // + // Say we want to compute CRC(I, ABC) where A, B, C are three sequences of + // K bytes each, where K is a fixed constant. Let O be the sequence of K zero + // bytes. + // + // CRC(I, ABC) = CRC(I, ABO xor C) + // = CRC(I, ABO) xor CRC(0, C) + // = CRC(CRC(I, AB), O) xor CRC(0, C) + // = CRC(CRC(I, AO xor B), O) xor CRC(0, C) + // = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C) + // = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C) + // + // The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B), + // and CRC(0, C) efficiently. 
We just need to find a way to quickly compute + // CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these + // values; since we can't have a 32-bit table, we break it up into four + // 8-bit tables: + // + // CRC(uvwx, O) = CRC(u000, O) xor + // CRC(0v00, O) xor + // CRC(00w0, O) xor + // CRC(000x, O) + // + // We can compute tables corresponding to the four terms for all 8-bit + // values. + + crc = ^crc + + // If a buffer is long enough to use the optimization, process the first few + // bytes to align the buffer to an 8 byte boundary (if necessary). + if len(p) >= castagnoliK1*3 { + delta := int(uintptr(unsafe.Pointer(&p[0])) & 7) + if delta != 0 { + delta = 8 - delta + crc = castagnoliSSE42(crc, p[:delta]) + p = p[delta:] + } + } + + // Process 3*K2 at a time. + for len(p) >= castagnoliK2*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). + crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK2:], p[castagnoliK2*2:], + castagnoliK2/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC + p = p[castagnoliK2*3:] + } + + // Process 3*K1 at a time. + for len(p) >= castagnoliK1*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). + crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK1:], p[castagnoliK1*2:], + castagnoliK1/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC + p = p[castagnoliK1*3:] + } + + // Use the simple implementation for what's left. + crc = castagnoliSSE42(crc, p) + return ^crc +} + +func archAvailableIEEE() bool { + return useFastIEEE +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !useFastIEEE { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !useFastIEEE { + panic("not available") + } + + if len(p) >= 64 { + left := len(p) & 15 + do := len(p) - left + crc = ^ieeeCLMUL(^crc, p[:do]) + p = p[do:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.s b/vendor/github.com/klauspost/crc32/crc32_amd64.s new file mode 100644 index 0000000..e8a7941 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.s @@ -0,0 +1,319 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +#define NOSPLIT 4 +#define RODATA 8 + +// castagnoliSSE42 updates the (non-inverted) crc with the given buffer. +// +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + // If there are fewer than 8 bytes to process, skip alignment. + CMPQ CX, $8 + JL less_than_8 + + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + // Process the first few bytes to 8-byte align the input. + + // BX = 8 - BX. We need to process this many bytes to align. 
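+	// ((BX - 1) XOR 7) equals 8 - BX for any BX in [1, 7]: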
+ SUBQ $1, BX + XORQ $7, BX + + BTQ $0, BX + JNC align_2 + + CRC32B (SI), AX + DECQ CX + INCQ SI + +align_2: + BTQ $1, BX + JNC align_4 + + // CRC32W (SI), AX + BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + + SUBQ $2, CX + ADDQ $2, SI + +align_4: + BTQ $2, BX + JNC aligned + + // CRC32L (SI), AX + BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + + SUBQ $4, CX + ADDQ $4, SI + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + CMPQ CX, $8 + JL less_than_8 + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +less_than_8: + // We may have some bytes left over; process 4 bytes, then 2, then 1. + BTQ $2, CX + JNC less_than_4 + + // CRC32L (SI), AX + BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + ADDQ $4, SI + +less_than_4: + BTQ $1, CX + JNC less_than_2 + + // CRC32W (SI), AX + BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + ADDQ $2, SI + +less_than_2: + BTQ $0, CX + JNC done + + CRC32B (SI), AX + +done: + MOVL AX, ret+32(FP) + RET + +// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds) +// bytes from each buffer. +// +// func castagnoliSSE42Triple( +// crc1, crc2, crc3 uint32, +// a, b, c []byte, +// rounds uint32, +// ) (retA uint32, retB uint32, retC uint32) +TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0 + MOVL crcA+0(FP), AX + MOVL crcB+4(FP), CX + MOVL crcC+8(FP), DX + + MOVQ a+16(FP), R8 // data pointer + MOVQ b+40(FP), R9 // data pointer + MOVQ c+64(FP), R10 // data pointer + + MOVL rounds+88(FP), R11 + +loop: + CRC32Q (R8), AX + CRC32Q (R9), CX + CRC32Q (R10), DX + + CRC32Q 8(R8), AX + CRC32Q 8(R9), CX + CRC32Q 8(R10), DX + + CRC32Q 16(R8), AX + CRC32Q 16(R9), CX + CRC32Q 16(R10), DX + + ADDQ $24, R8 + ADDQ $24, R9 + ADDQ $24, R10 + + DECQ R11 + JNZ loop + + MOVL AX, retA+96(FP) + MOVL CX, retB+100(FP) + MOVL DX, retC+104(FP) + RET + +// func haveSSE42() bool +TEXT ·haveSSE42(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $20, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// func haveCLMUL() bool +TEXT ·haveCLMUL(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $1, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// func haveSSE41() bool +TEXT ·haveSSE41(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $19, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// CRC32 polynomial data +// +// These constants are lifted from the +// Linux kernel, since they avoid the costly +// PSHUFB 16 byte reversal proposed in the +// original Intel paper. +DATA r2r1kp<>+0(SB)/8, $0x154442bd4 +DATA r2r1kp<>+8(SB)/8, $0x1c6e41596 +DATA r4r3kp<>+0(SB)/8, $0x1751997d0 +DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e +DATA rupolykp<>+0(SB)/8, $0x1db710641 +DATA rupolykp<>+8(SB)/8, $0x1f7011641 +DATA r5kp<>+0(SB)/8, $0x163cd6124 + +GLOBL r2r1kp<>(SB), RODATA, $16 +GLOBL r4r3kp<>(SB), RODATA, $16 +GLOBL rupolykp<>(SB), RODATA, $16 +GLOBL r5kp<>(SB), RODATA, $8 + +// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +// len(p) must be at least 64, and must be a multiple of 16. 
+ +// func ieeeCLMUL(crc uint32, p []byte) uint32 +TEXT ·ieeeCLMUL(SB), NOSPLIT, $0 + MOVL crc+0(FP), X0 // Initial CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + MOVOU (SI), X1 + MOVOU 16(SI), X2 + MOVOU 32(SI), X3 + MOVOU 48(SI), X4 + PXOR X0, X1 + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left + JB remain64 + + MOVOA r2r1kp<>+0(SB), X0 + +loopback64: + MOVOA X1, X5 + MOVOA X2, X6 + MOVOA X3, X7 + MOVOA X4, X8 + + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0, X0, X2 + PCLMULQDQ $0, X0, X3 + PCLMULQDQ $0, X0, X4 + + // Load next early + MOVOU (SI), X11 + MOVOU 16(SI), X12 + MOVOU 32(SI), X13 + MOVOU 48(SI), X14 + + PCLMULQDQ $0x11, X0, X5 + PCLMULQDQ $0x11, X0, X6 + PCLMULQDQ $0x11, X0, X7 + PCLMULQDQ $0x11, X0, X8 + + PXOR X5, X1 + PXOR X6, X2 + PXOR X7, X3 + PXOR X8, X4 + + PXOR X11, X1 + PXOR X12, X2 + PXOR X13, X3 + PXOR X14, X4 + + ADDQ $0x40, DI + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left? + JGE loopback64 + + // Fold result into a single register (X1) +remain64: + MOVOA r4r3kp<>+0(SB), X0 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X2, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X3, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X4, X1 + + // If there is less than 16 bytes left we are done + CMPQ CX, $16 + JB finish + + // Encode 16 bytes +remain16: + MOVOU (SI), X10 + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X10, X1 + SUBQ $16, CX + ADDQ $16, SI + CMPQ CX, $16 + JGE remain16 + +finish: + // Fold final result into 32 bits and return it + PCMPEQB X3, X3 + PCLMULQDQ $1, X1, X0 + PSRLDQ $8, X1 + PXOR X0, X1 + + MOVOA X1, X2 + MOVQ r5kp<>+0(SB), X0 + + // Creates 32 bit mask. Note that we don't care about upper half. + PSRLQ $32, X3 + + PSRLDQ $4, X2 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + MOVOA rupolykp<>+0(SB), X0 + + MOVOA X1, X2 + PAND X3, X1 + PCLMULQDQ $0x10, X0, X1 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + // PEXTRD $1, X1, AX (SSE 4.1) + BYTE $0x66; BYTE $0x0f; BYTE $0x3a + BYTE $0x16; BYTE $0xc8; BYTE $0x01 + MOVL AX, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.go b/vendor/github.com/klauspost/crc32/crc32_amd64p32.go new file mode 100644 index 0000000..3222b06 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64p32.go @@ -0,0 +1,43 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!gccgo + +package crc32 + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// CRC. + +// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for SSE 4.2 +// support. +func haveSSE42() bool + +// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42(crc uint32, p []byte) uint32 + +var sse42 = haveSSE42() + +func archAvailableCastagnoli() bool { + return sse42 +} + +func archInitCastagnoli() { + if !sse42 { + panic("not available") + } + // No initialization necessary. 
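+	// (The SSE 4.2 CRC32 instruction computes the checksum directly and
+	// needs no lookup tables.)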
+} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !sse42 { + panic("not available") + } + return castagnoliSSE42(crc, p) +} + +func archAvailableIEEE() bool { return false } +func archInitIEEE() { panic("not available") } +func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.s b/vendor/github.com/klauspost/crc32/crc32_amd64p32.s new file mode 100644 index 0000000..a578d68 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64p32.s @@ -0,0 +1,67 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +#define NOSPLIT 4 +#define RODATA 8 + +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVL p+4(FP), SI // data pointer + MOVL p_len+8(FP), CX // len(p) + + NOTL AX + + // If there's less than 8 bytes to process, we do it byte-by-byte. + CMPQ CX, $8 + JL cleanup + + // Process individual bytes until the input is 8-byte aligned. +startup: + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + CRC32B (SI), AX + DECQ CX + INCQ SI + JMP startup + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + CMPQ CX, $8 + JL cleanup + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +cleanup: + // We may have some bytes left over that we process one at a time. + CMPQ CX, $0 + JE done + + CRC32B (SI), AX + INCQ SI + DECQ CX + JMP cleanup + +done: + NOTL AX + MOVL AX, ret+16(FP) + RET + +// func haveSSE42() bool +TEXT ·haveSSE42(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $20, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + diff --git a/vendor/github.com/klauspost/crc32/crc32_generic.go b/vendor/github.com/klauspost/crc32/crc32_generic.go new file mode 100644 index 0000000..abacbb6 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_generic.go @@ -0,0 +1,89 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains CRC32 algorithms that are not specific to any architecture +// and don't use hardware acceleration. +// +// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table. +// +// The slicing-by-8 algorithm is a faster implementation that uses a bigger +// table (8*256*4 bytes). + +package crc32 + +// simpleMakeTable allocates and constructs a Table for the specified +// polynomial. The table is suitable for use with the simple algorithm +// (simpleUpdate). +func simpleMakeTable(poly uint32) *Table { + t := new(Table) + simplePopulateTable(poly, t) + return t +} + +// simplePopulateTable constructs a Table for the specified polynomial, suitable +// for use with simpleUpdate. +func simplePopulateTable(poly uint32, t *Table) { + for i := 0; i < 256; i++ { + crc := uint32(i) + for j := 0; j < 8; j++ { + if crc&1 == 1 { + crc = (crc >> 1) ^ poly + } else { + crc >>= 1 + } + } + t[i] = crc + } +} + +// simpleUpdate uses the simple algorithm to update the CRC, given a table that +// was previously computed using simpleMakeTable. +func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 { + crc = ^crc + for _, v := range p { + crc = tab[byte(crc)^v] ^ (crc >> 8) + } + return ^crc +} + +// Use slicing-by-8 when payload >= this value. 
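+// Shorter inputs fall back to the byte-at-a-time simpleUpdate (see
+// slicingUpdate below).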
+const slicing8Cutoff = 16 + +// slicing8Table is array of 8 Tables, used by the slicing-by-8 algorithm. +type slicing8Table [8]Table + +// slicingMakeTable constructs a slicing8Table for the specified polynomial. The +// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate). +func slicingMakeTable(poly uint32) *slicing8Table { + t := new(slicing8Table) + simplePopulateTable(poly, &t[0]) + for i := 0; i < 256; i++ { + crc := t[0][i] + for j := 1; j < 8; j++ { + crc = t[0][crc&0xFF] ^ (crc >> 8) + t[j][i] = crc + } + } + return t +} + +// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a +// table that was previously computed using slicingMakeTable. +func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 { + if len(p) >= slicing8Cutoff { + crc = ^crc + for len(p) > 8 { + crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 + crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^ + tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^ + tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF] + p = p[8:] + } + crc = ^crc + } + if len(p) == 0 { + return crc + } + return simpleUpdate(crc, &tab[0], p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_otherarch.go b/vendor/github.com/klauspost/crc32/crc32_otherarch.go new file mode 100644 index 0000000..cc96076 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_otherarch.go @@ -0,0 +1,15 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!amd64p32,!s390x + +package crc32 + +func archAvailableIEEE() bool { return false } +func archInitIEEE() { panic("not available") } +func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } + +func archAvailableCastagnoli() bool { return false } +func archInitCastagnoli() { panic("not available") } +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.go b/vendor/github.com/klauspost/crc32/crc32_s390x.go new file mode 100644 index 0000000..ce96f03 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.go @@ -0,0 +1,91 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x + +package crc32 + +const ( + vxMinLen = 64 + vxAlignMask = 15 // align to 16 bytes +) + +// hasVectorFacility reports whether the machine has the z/Architecture +// vector facility installed and enabled. +func hasVectorFacility() bool + +var hasVX = hasVectorFacility() + +// vectorizedCastagnoli implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +//go:noescape +func vectorizedCastagnoli(crc uint32, p []byte) uint32 + +// vectorizedIEEE implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +//go:noescape +func vectorizedIEEE(crc uint32, p []byte) uint32 + +func archAvailableCastagnoli() bool { + return hasVX +} + +var archCastagnoliTable8 *slicing8Table + +func archInitCastagnoli() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archCastagnoliTable8 = slicingMakeTable(Castagnoli) +} + +// archUpdateCastagnoli calculates the checksum of p using +// vectorizedCastagnoli. 
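+// Only an aligned multiple of 16 bytes (and at least vxMinLen bytes in total)
+// is handed to the vector routine; any tail is finished with slicing-by-8.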
+func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. + if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedCastagnoli(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archCastagnoliTable8, p) +} + +func archAvailableIEEE() bool { + return hasVX +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +// archUpdateIEEE calculates the checksum of p using vectorizedIEEE. +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. + if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedIEEE(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.s b/vendor/github.com/klauspost/crc32/crc32_s390x.s new file mode 100644 index 0000000..e980ca2 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.s @@ -0,0 +1,249 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x + +#include "textflag.h" + +// Vector register range containing CRC-32 constants + +#define CONST_PERM_LE2BE V9 +#define CONST_R2R1 V10 +#define CONST_R4R3 V11 +#define CONST_R5 V12 +#define CONST_RU_POLY V13 +#define CONST_CRC_POLY V14 + +// The CRC-32 constant block contains reduction constants to fold and +// process particular chunks of the input data stream in parallel. +// +// Note that the constant definitions below are extended in order to compute +// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction. +// The rightmost doubleword can be 0 to prevent contribution to the result or +// can be multiplied by 1 to perform an XOR without the need for a separate +// VECTOR EXCLUSIVE OR instruction. 
+// +// The polynomials used are bit-reflected: +// +// IEEE: P'(x) = 0x0edb88320 +// Castagnoli: P'(x) = 0x082f63b78 + +// IEEE polynomial constants +DATA ·crcleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crcleconskp+8(SB)/8, $0x0706050403020100 +DATA ·crcleconskp+16(SB)/8, $0x00000001c6e41596 // R2 +DATA ·crcleconskp+24(SB)/8, $0x0000000154442bd4 // R1 +DATA ·crcleconskp+32(SB)/8, $0x00000000ccaa009e // R4 +DATA ·crcleconskp+40(SB)/8, $0x00000001751997d0 // R3 +DATA ·crcleconskp+48(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+56(SB)/8, $0x0000000163cd6124 // R5 +DATA ·crcleconskp+64(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+72(SB)/8, $0x00000001F7011641 // u' +DATA ·crcleconskp+80(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+88(SB)/8, $0x00000001DB710641 // P'(x) << 1 + +GLOBL ·crcleconskp(SB), RODATA, $144 + +// Castagonli Polynomial constants +DATA ·crccleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crccleconskp+8(SB)/8, $0x0706050403020100 +DATA ·crccleconskp+16(SB)/8, $0x000000009e4addf8 // R2 +DATA ·crccleconskp+24(SB)/8, $0x00000000740eef02 // R1 +DATA ·crccleconskp+32(SB)/8, $0x000000014cd00bd6 // R4 +DATA ·crccleconskp+40(SB)/8, $0x00000000f20c0dfe // R3 +DATA ·crccleconskp+48(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+56(SB)/8, $0x00000000dd45aab8 // R5 +DATA ·crccleconskp+64(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+72(SB)/8, $0x00000000dea713f1 // u' +DATA ·crccleconskp+80(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1 + +GLOBL ·crccleconskp(SB), RODATA, $144 + +// func hasVectorFacility() bool +TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1 + MOVD $x-24(SP), R1 + XC $24, 0(R1), 0(R1) // clear the storage + MOVD $2, R0 // R0 is the number of double words stored -1 + WORD $0xB2B01000 // STFLE 0(R1) + XOR R0, R0 // reset the value of R0 + MOVBZ z-8(SP), R1 + AND $0x40, R1 + BEQ novector + +vectorinstalled: + // check if the vector instruction has been enabled + VLEIB $0, $0xF, V16 + VLGVB $0, V16, R1 + CMPBNE R1, $0xF, novector + MOVB $1, ret+0(FP) // have vx + RET + +novector: + MOVB $0, ret+0(FP) // no vx + RET + +// The CRC-32 function(s) use these calling conventions: +// +// Parameters: +// +// R2: Initial CRC value, typically ~0; and final CRC (return) value. +// R3: Input buffer pointer, performance might be improved if the +// buffer is on a doubleword boundary. +// R4: Length of the buffer, must be 64 bytes or greater. +// +// Register usage: +// +// R5: CRC-32 constant pool base pointer. +// V0: Initial CRC value and intermediate constants and results. +// V1..V4: Data for CRC computation. +// V5..V8: Next data chunks that are fetched from the input buffer. +// +// V9..V14: CRC-32 constants. 
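+//
+// Both vectorizedIEEE and vectorizedCastagnoli below load the base address of
+// their polynomial's constant block into R5 and branch into the shared
+// vectorizedBody routine.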
+ +// func vectorizedIEEE(crc uint32, p []byte) uint32 +TEXT ·vectorizedIEEE(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + MOVD $·crcleconskp(SB), R5 + BR vectorizedBody<>(SB) + +// func vectorizedCastagnoli(crc uint32, p []byte) uint32 +TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + // R5: crc-32 constant pool base pointer, constant is used to reduce crc + MOVD $·crccleconskp(SB), R5 + BR vectorizedBody<>(SB) + +TEXT vectorizedBody<>(SB), NOSPLIT, $0 + XOR $0xffffffff, R2 // NOTW R2 + VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY + + // Load the initial CRC value into the rightmost word of V0 + VZERO V0 + VLVGF $3, R2, V0 + + // Crash if the input size is less than 64-bytes. + CMP R4, $64 + BLT crash + + // Load a 64-byte data chunk and XOR with CRC + VLM 0(R3), V1, V4 // 64-bytes into V1..V4 + + // Reflect the data if the CRC operation is in the bit-reflected domain + VPERM V1, V1, CONST_PERM_LE2BE, V1 + VPERM V2, V2, CONST_PERM_LE2BE, V2 + VPERM V3, V3, CONST_PERM_LE2BE, V3 + VPERM V4, V4, CONST_PERM_LE2BE, V4 + + VX V0, V1, V1 // V1 ^= CRC + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 + + // Check remaining buffer size and jump to proper folding method + CMP R4, $64 + BLT less_than_64bytes + +fold_64bytes_loop: + // Load the next 64-byte data chunk into V5 to V8 + VLM 0(R3), V5, V8 + VPERM V5, V5, CONST_PERM_LE2BE, V5 + VPERM V6, V6, CONST_PERM_LE2BE, V6 + VPERM V7, V7, CONST_PERM_LE2BE, V7 + VPERM V8, V8, CONST_PERM_LE2BE, V8 + + // Perform a GF(2) multiplication of the doublewords in V1 with + // the reduction constants in V0. The intermediate result is + // then folded (accumulated) with the next data chunk in V5 and + // stored in V1. Repeat this step for the register contents + // in V2, V3, and V4 respectively. + + VGFMAG CONST_R2R1, V1, V5, V1 + VGFMAG CONST_R2R1, V2, V6, V2 + VGFMAG CONST_R2R1, V3, V7, V3 + VGFMAG CONST_R2R1, V4, V8, V4 + + // Adjust buffer pointer and length for next loop + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 // LEN = LEN - 64 + + CMP R4, $64 + BGE fold_64bytes_loop + +less_than_64bytes: + // Fold V1 to V4 into a single 128-bit value in V1 + VGFMAG CONST_R4R3, V1, V2, V1 + VGFMAG CONST_R4R3, V1, V3, V1 + VGFMAG CONST_R4R3, V1, V4, V1 + + // Check whether to continue with 64-bit folding + CMP R4, $16 + BLT final_fold + +fold_16bytes_loop: + VL 0(R3), V2 // Load next data chunk + VPERM V2, V2, CONST_PERM_LE2BE, V2 + + VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk + + // Adjust buffer pointer and size for folding next data chunk + ADD $16, R3 + ADD $-16, R4 + + // Process remaining data chunks + CMP R4, $16 + BGE fold_16bytes_loop + +final_fold: + VLEIB $7, $0x40, V9 + VSRLB V9, CONST_R4R3, V0 + VLEIG $0, $1, V0 + + VGFMG V0, V1, V1 + + VLEIB $7, $0x20, V9 // Shift by words + VSRLB V9, V1, V2 // Store remaining bits in V2 + VUPLLF V1, V1 // Split rightmost doubleword + VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2 + + // The input values to the Barret reduction are the degree-63 polynomial + // in V1 (R(x)), degree-32 generator polynomial, and the reduction + // constant u. The Barret reduction result is the CRC value of R(x) mod + // P(x). + // + // The Barret reduction algorithm is defined as: + // + // 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u + // 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) + // 3. 
C(x) = R(x) XOR T2(x) mod x^32 + // + // Note: To compensate the division by x^32, use the vector unpack + // instruction to move the leftmost word into the leftmost doubleword + // of the vector register. The rightmost doubleword is multiplied + // with zero to not contribute to the intermedate results. + + // T1(x) = floor( R(x) / x^32 ) GF2MUL u + VUPLLF V1, V2 + VGFMG CONST_RU_POLY, V2, V2 + + // Compute the GF(2) product of the CRC polynomial in VO with T1(x) in + // V2 and XOR the intermediate result, T2(x), with the value in V1. + // The final result is in the rightmost word of V2. + + VUPLLF V2, V2 + VGFMAG CONST_CRC_POLY, V2, V1, V2 + +done: + VLGVF $2, V2, R2 + XOR $0xffffffff, R2 // NOTW R2 + MOVWZ R2, ret + 32(FP) + RET + +crash: + MOVD $0, (R0) // input size is less than 64-bytes diff --git a/vendor/github.com/qiangxue/fasthttp-routing/LICENSE b/vendor/github.com/qiangxue/fasthttp-routing/LICENSE new file mode 100644 index 0000000..15264f9 --- /dev/null +++ b/vendor/github.com/qiangxue/fasthttp-routing/LICENSE @@ -0,0 +1,22 @@ +The BSD 3-Clause License + +Copyright (c) 2016, Qiang Xue +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided +that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions + and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and + the following disclaimer in the documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or + promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/qiangxue/fasthttp-routing/README.md b/vendor/github.com/qiangxue/fasthttp-routing/README.md new file mode 100644 index 0000000..d5fe426 --- /dev/null +++ b/vendor/github.com/qiangxue/fasthttp-routing/README.md @@ -0,0 +1,244 @@ +# fasthttp-routing + +[![GoDoc](https://godoc.org/github.com/qiangxue/fasthttp-routing?status.png)](http://godoc.org/github.com/qiangxue/fasthttp-routing) +[![Go Report](http://goreportcard.com/badge/qiangxue/fasthttp-routing)](http://goreportcard.com/report/qiangxue/fasthttp-routing) + +## Description + +fasthttp-routing is a Go package that is adapted from [ozzo-routing](https://github.com/go-ozzo/ozzo-routing) to provide +fast and powerful routing features for the high-performance [fasthttp](https://github.com/valyala/fasthttp) server. +The package has the following features: + +* middleware pipeline architecture, similar to that of the [Express framework](http://expressjs.com). 
+* extremely fast request routing with zero dynamic memory allocation
+* modular code organization through route grouping
+* flexible URL path matching, supporting URL parameters and regular expressions
+* URL creation according to the predefined routes
+
+## Requirements
+
+Go 1.5 or above.
+
+## Installation
+
+Run the following command to install the package:
+
+```
+go get github.com/qiangxue/fasthttp-routing
+```
+
+## Getting Started
+
+Create a `server.go` file with the following content:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/qiangxue/fasthttp-routing"
+	"github.com/valyala/fasthttp"
+)
+
+func main() {
+	router := routing.New()
+
+	router.Get("/", func(c *routing.Context) error {
+		fmt.Fprintf(c, "Hello, world!")
+		return nil
+	})
+
+	panic(fasthttp.ListenAndServe(":8080", router.HandleRequest))
+}
+```
+
+Now run the following command to start the Web server:
+
+```
+go run server.go
+```
+
+You should be able to access URLs such as `http://localhost:8080`.
+
+
+### Routes
+
+fasthttp-routing works by building a routing table in a router and then dispatching HTTP requests to the matching handlers
+found in the routing table. An intuitive illustration of a routing table is as follows:
+
+
+Routes              | Handlers
+--------------------|-----------------
+`GET /users`        | m1, m2, h1, ...
+`POST /users`       | m1, m2, h2, ...
+`PUT /users/<id>`   | m1, m2, h3, ...
+`DELETE /users/<id>`| m1, m2, h4, ...
+
+
+For an incoming request `GET /users`, the first route would match and the handlers m1, m2, and h1 would be executed.
+If the request is `PUT /users/123`, the third route would match and the corresponding handlers would be executed.
+Note that the token `<id>` can match any number of non-slash characters and the matching part can be accessed as
+a path parameter value in the handlers.
+
+**If an incoming request matches multiple routes in the table, the route added first to the table will take precedence.
+All other matching routes will be ignored.**
+
+The actual implementation of the routing table uses a variant of the radix tree data structure, which makes the routing
+process as fast as working with a hash table, thanks to the inspiration from [httprouter](https://github.com/julienschmidt/httprouter).
+
+To add a new route and its handlers to the routing table, call the `To` method like the following:
+
+```go
+router := routing.New()
+router.To("GET", "/users", m1, m2, h1)
+router.To("POST", "/users", m1, m2, h2)
+```
+
+You can also use shortcut methods, such as `Get`, `Post`, `Put`, etc., which are named after the HTTP method names:
+
+```go
+router.Get("/users", m1, m2, h1)
+router.Post("/users", m1, m2, h2)
+```
+
+If you have multiple routes with the same URL path but different HTTP methods, like the above example, you can
+chain them together as follows:
+
+```go
+router.Get("/users", m1, m2, h1).Post(m1, m2, h2)
+```
+
+If you want to use the same set of handlers to handle the same URL path but different HTTP methods, you can take
+the following shortcut:
+
+```go
+router.To("GET,POST", "/users", m1, m2, h)
+```
+
+A route may contain parameter tokens which are in the format of `<name:pattern>`, where `name` stands for the parameter
+name, and `pattern` is a regular expression which the parameter value should match. A token `<name>` is equivalent
+to `<name:[^/]*>`, i.e., it matches any number of non-slash characters. At the end of a route, an asterisk character
+can be used to match any number of arbitrary characters. Below are some examples:
+
+* `/users/<username>`: matches `/users/admin`
+* `/users/accnt-<id:\d+>`: matches `/users/accnt-123`, but not `/users/accnt-admin`
+* `/users/<username>/*`: matches `/users/admin/profile/address`
+
+When a URL path matches a route, the matching parameters on the URL path can be accessed via `Context.Param()`:
+
+```go
+router := routing.New()
+
+router.Get("/users/<username>", func(c *routing.Context) error {
+	fmt.Fprintf(c, "Name: %v", c.Param("username"))
+	return nil
+})
+```
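+
+A token and the trailing wildcard can also be combined. A hypothetical route as
+a sketch (the raw string literal avoids having to escape the `\d` in the pattern):
+
+```go
+router.Get(`/files/<id:\d+>/*`, func(c *routing.Context) error {
+	fmt.Fprintf(c, "File ID: %v", c.Param("id"))
+	return nil
+})
+```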
+Below are some examples:
+
+* `/users/<username>`: matches `/users/admin`
+* `/users/accnt-<id:\d+>`: matches `/users/accnt-123`, but not `/users/accnt-admin`
+* `/users/<username>/*`: matches `/users/admin/profile/address`
+
+When a URL path matches a route, the matching parameters on the URL path can be accessed via `Context.Param()`:
+
+```go
+router := routing.New()
+
+router.Get("/users/<username>", func (c *routing.Context) error {
+	fmt.Fprintf(c, "Name: %v", c.Param("username"))
+	return nil
+})
+```
+
+
+### Route Groups
+
+A route group is a way of grouping together the routes which have the same route prefix. The routes in a group also
+share the same handlers that are registered with the group via its `Use` method. For example,
+
+```go
+router := routing.New()
+api := router.Group("/api")
+api.Use(m1, m2)
+api.Get("/users", h1).Post(h2)
+api.Put("/users/<id>", h3).Delete(h4)
+```
+
+The above `/api` route group establishes the following routing table:
+
+
+Routes                  | Handlers
+------------------------|-------------
+`GET /api/users`        | m1, m2, h1, ...
+`POST /api/users`       | m1, m2, h2, ...
+`PUT /api/users/<id>`   | m1, m2, h3, ...
+`DELETE /api/users/<id>`| m1, m2, h4, ...
+
+
+As you can see, all these routes have the same route prefix `/api` and the handlers `m1` and `m2`. In other similar
+routing frameworks, the handlers registered with a route group are also called *middlewares*.
+
+Route groups can be nested. That is, a route group can create a child group by calling the `Group()` method. The router
+serves as the top level route group. A child group inherits the handlers registered with its parent group. For example,
+
+```go
+router := routing.New()
+router.Use(m1)
+
+api := router.Group("/api")
+api.Use(m2)
+
+users := api.Group("/users")
+users.Use(m3)
+users.Put("/<id>", h1)
+```
+
+Because the router serves as the parent of the `api` group which is the parent of the `users` group,
+the `PUT /api/users/<id>` route is associated with the handlers `m1`, `m2`, `m3`, and `h1`.
+
+
+### Router
+
+The router manages the routing table and dispatches incoming requests to the appropriate handlers. A router instance is created
+by calling the `routing.New()` method.
+
+To hook up the router with fasthttp, use the following code:
+
+```go
+router := routing.New()
+fasthttp.ListenAndServe(":8080", router.HandleRequest)
+```
+
+
+### Handlers
+
+A handler is a function with the signature `func(*routing.Context) error`. A handler is executed by the router if
+the incoming request URL path matches the route that the handler is associated with. Through the `routing.Context`
+parameter, you can access the request information in handlers.
+
+A route may be associated with multiple handlers. These handlers will be executed in the order that they are registered
+to the route. The execution sequence can be terminated in the middle using one of the following two methods:
+
+* A handler returns an error: the router will skip the rest of the handlers and handle the returned error.
+* A handler calls `Context.Abort()`: the router will simply skip the rest of the handlers. There is no error to be handled.
+
+A handler can call `Context.Next()` to explicitly execute the rest of the unexecuted handlers and take actions after
+they finish execution. For example, a response compression handler may start the output buffer, call `Context.Next()`,
+and then compress and send the output to the response.
+
+
+### Context
+
+For each incoming request, a `routing.Context` object is passed through the relevant handlers.
+Because `routing.Context` embeds `fasthttp.RequestCtx`, you can access all properties and methods provided by the latter.
+
+Additionally, the `Context.Param()` method allows handlers to access the URL path parameters that match the current route.
+Using `Context.Get()` and `Context.Set()`, handlers can share data with each other. For example, an authentication
+handler can store the authenticated user identity by calling `Context.Set()`, and other handlers can retrieve
+the identity information by calling `Context.Get()`.
+
+Context also provides a handy `WriteData()` method that can be used to write data of arbitrary type to the response.
+The `WriteData()` method can also be overridden (by replacement) to achieve more versatile response data writing.
+
+
+### Error Handling
+
+A handler may return an error indicating some erroneous condition. Sometimes, a handler or the code it calls may cause
+a panic. Both should be handled properly to ensure the best user experience. It is recommended that you use
+the `fault.Recover` handler or a similar error handler to handle these errors.
+
+If an error is not handled by any handler, the router will handle it by calling its `handleError()` method which
+simply sets an appropriate HTTP status code and writes the error message to the response.
+
+When an incoming request has no matching route, the router will call the handlers registered via the `Router.NotFound()`
+method. All the handlers registered via `Router.Use()` will also be called in advance. By default, the following two
+handlers are registered with `Router.NotFound()`:
+
+* `routing.MethodNotAllowedHandler`: a handler that sends an `Allow` HTTP header indicating the allowed HTTP methods for a requested URL
+* `routing.NotFoundHandler`: a handler triggering a 404 HTTP error
diff --git a/vendor/github.com/qiangxue/fasthttp-routing/context.go b/vendor/github.com/qiangxue/fasthttp-routing/context.go
new file mode 100644
index 0000000..552d001
--- /dev/null
+++ b/vendor/github.com/qiangxue/fasthttp-routing/context.go
@@ -0,0 +1,126 @@
+// Copyright 2016 Qiang Xue. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package routing
+
+import (
+	"fmt"
+
+	"github.com/valyala/fasthttp"
+)
+
+// SerializeFunc serializes the given data of arbitrary type into a byte array.
+type SerializeFunc func(data interface{}) ([]byte, error)
+
+// Context represents the contextual data and environment while processing an incoming HTTP request.
+type Context struct {
+	*fasthttp.RequestCtx
+
+	Serialize SerializeFunc // the function serializing the given data of arbitrary type into a byte array.
+
+	router   *Router
+	pnames   []string               // list of route parameter names
+	pvalues  []string               // list of parameter values corresponding to pnames
+	data     map[string]interface{} // data items managed by Get and Set
+	index    int                    // the index of the currently executing handler in handlers
+	handlers []Handler              // the handlers associated with the current route
+}
+
+// Router returns the Router that is handling the incoming HTTP request.
+func (c *Context) Router() *Router {
+	return c.router
+}
+
+// Param returns the named parameter value that is found in the URL path matching the current route.
+// If the named parameter cannot be found, an empty string will be returned.
+func (c *Context) Param(name string) string { + for i, n := range c.pnames { + if n == name { + return c.pvalues[i] + } + } + return "" +} + +// Get returns the named data item previously registered with the context by calling Set. +// If the named data item cannot be found, nil will be returned. +func (c *Context) Get(name string) interface{} { + return c.data[name] +} + +// Set stores the named data item in the context so that it can be retrieved later. +func (c *Context) Set(name string, value interface{}) { + if c.data == nil { + c.data = make(map[string]interface{}) + } + c.data[name] = value +} + +// Next calls the rest of the handlers associated with the current route. +// If any of these handlers returns an error, Next will return the error and skip the following handlers. +// Next is normally used when a handler needs to do some postprocessing after the rest of the handlers +// are executed. +func (c *Context) Next() error { + c.index++ + for n := len(c.handlers); c.index < n; c.index++ { + if err := c.handlers[c.index](c); err != nil { + return err + } + } + return nil +} + +// Abort skips the rest of the handlers associated with the current route. +// Abort is normally used when a handler handles the request normally and wants to skip the rest of the handlers. +// If a handler wants to indicate an error condition, it should simply return the error without calling Abort. +func (c *Context) Abort() { + c.index = len(c.handlers) +} + +// URL creates a URL using the named route and the parameter values. +// The parameters should be given in the sequence of name1, value1, name2, value2, and so on. +// If a parameter in the route is not provided a value, the parameter token will remain in the resulting URL. +// Parameter values will be properly URL encoded. +// The method returns an empty string if the URL creation fails. +func (c *Context) URL(route string, pairs ...interface{}) string { + if r := c.router.routes[route]; r != nil { + return r.URL(pairs...) + } + return "" +} + +// WriteData writes the given data of arbitrary type to the response. +// The method calls the Serialize() method to convert the data into a byte array and then writes +// the byte array to the response. +func (c *Context) WriteData(data interface{}) (err error) { + var bytes []byte + if bytes, err = c.Serialize(data); err == nil { + _, err = c.Write(bytes) + } + return +} + +// init sets the request and response of the context and resets all other properties. +func (c *Context) init(ctx *fasthttp.RequestCtx) { + c.RequestCtx = ctx + c.data = nil + c.index = -1 + c.Serialize = Serialize +} + +// Serialize converts the given data into a byte array. +// If the data is neither a byte array nor a string, it will call fmt.Sprint to convert it into a string. +func Serialize(data interface{}) (bytes []byte, err error) { + switch data.(type) { + case []byte: + return data.([]byte), nil + case string: + return []byte(data.(string)), nil + default: + if data != nil { + return []byte(fmt.Sprint(data)), nil + } + } + return nil, nil +} diff --git a/vendor/github.com/qiangxue/fasthttp-routing/error.go b/vendor/github.com/qiangxue/fasthttp-routing/error.go new file mode 100644 index 0000000..8a57b99 --- /dev/null +++ b/vendor/github.com/qiangxue/fasthttp-routing/error.go @@ -0,0 +1,40 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package routing + +import "net/http" + +// HTTPError represents an HTTP error with HTTP status code and error message +type HTTPError interface { + error + // StatusCode returns the HTTP status code of the error + StatusCode() int +} + +// Error contains the error information reported by calling Context.Error(). +type httpError struct { + Status int `json:"status" xml:"status"` + Message string `json:"message" xml:"message"` +} + +// NewHTTPError creates a new HttpError instance. +// If the error message is not given, http.StatusText() will be called +// to generate the message based on the status code. +func NewHTTPError(status int, message ...string) HTTPError { + if len(message) > 0 { + return &httpError{status, message[0]} + } + return &httpError{status, http.StatusText(status)} +} + +// Error returns the error message. +func (e *httpError) Error() string { + return e.Message +} + +// StatusCode returns the HTTP status code. +func (e *httpError) StatusCode() int { + return e.Status +} diff --git a/vendor/github.com/qiangxue/fasthttp-routing/group.go b/vendor/github.com/qiangxue/fasthttp-routing/group.go new file mode 100644 index 0000000..6125d74 --- /dev/null +++ b/vendor/github.com/qiangxue/fasthttp-routing/group.go @@ -0,0 +1,107 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package routing + +import ( + "strings" +) + +// RouteGroup represents a group of routes that share the same path prefix. +type RouteGroup struct { + prefix string + router *Router + handlers []Handler +} + +// newRouteGroup creates a new RouteGroup with the given path prefix, router, and handlers. +func newRouteGroup(prefix string, router *Router, handlers []Handler) *RouteGroup { + return &RouteGroup{ + prefix: prefix, + router: router, + handlers: handlers, + } +} + +// Get adds a GET route to the router with the given route path and handlers. +func (r *RouteGroup) Get(path string, handlers ...Handler) *Route { + return newRoute(path, r).Get(handlers...) +} + +// Post adds a POST route to the router with the given route path and handlers. +func (r *RouteGroup) Post(path string, handlers ...Handler) *Route { + return newRoute(path, r).Post(handlers...) +} + +// Put adds a PUT route to the router with the given route path and handlers. +func (r *RouteGroup) Put(path string, handlers ...Handler) *Route { + return newRoute(path, r).Put(handlers...) +} + +// Patch adds a PATCH route to the router with the given route path and handlers. +func (r *RouteGroup) Patch(path string, handlers ...Handler) *Route { + return newRoute(path, r).Patch(handlers...) +} + +// Delete adds a DELETE route to the router with the given route path and handlers. +func (r *RouteGroup) Delete(path string, handlers ...Handler) *Route { + return newRoute(path, r).Delete(handlers...) +} + +// Connect adds a CONNECT route to the router with the given route path and handlers. +func (r *RouteGroup) Connect(path string, handlers ...Handler) *Route { + return newRoute(path, r).Connect(handlers...) +} + +// Head adds a HEAD route to the router with the given route path and handlers. +func (r *RouteGroup) Head(path string, handlers ...Handler) *Route { + return newRoute(path, r).Head(handlers...) +} + +// Options adds an OPTIONS route to the router with the given route path and handlers. +func (r *RouteGroup) Options(path string, handlers ...Handler) *Route { + return newRoute(path, r).Options(handlers...) 
+} + +// Trace adds a TRACE route to the router with the given route path and handlers. +func (r *RouteGroup) Trace(path string, handlers ...Handler) *Route { + return newRoute(path, r).Trace(handlers...) +} + +// Any adds a route with the given route, handlers, and the HTTP methods as listed in routing.Methods. +func (r *RouteGroup) Any(path string, handlers ...Handler) *Route { + route := newRoute(path, r) + for _, method := range Methods { + route.add(method, handlers) + } + return route +} + +// To adds a route to the router with the given HTTP methods, route path, and handlers. +// Multiple HTTP methods should be separated by commas (without any surrounding spaces). +func (r *RouteGroup) To(methods, path string, handlers ...Handler) *Route { + route := newRoute(path, r) + for _, method := range strings.Split(methods, ",") { + route.add(method, handlers) + } + return route +} + +// Group creates a RouteGroup with the given route path prefix and handlers. +// The new group will combine the existing path prefix with the new one. +// If no handler is provided, the new group will inherit the handlers registered +// with the current group. +func (r *RouteGroup) Group(prefix string, handlers ...Handler) *RouteGroup { + if len(handlers) == 0 { + handlers = make([]Handler, len(r.handlers)) + copy(handlers, r.handlers) + } + return newRouteGroup(r.prefix+prefix, r.router, handlers) +} + +// Use registers one or multiple handlers to the current route group. +// These handlers will be shared by all routes belong to this group and its subgroups. +func (r *RouteGroup) Use(handlers ...Handler) { + r.handlers = append(r.handlers, handlers...) +} diff --git a/vendor/github.com/qiangxue/fasthttp-routing/route.go b/vendor/github.com/qiangxue/fasthttp-routing/route.go new file mode 100644 index 0000000..5ca3738 --- /dev/null +++ b/vendor/github.com/qiangxue/fasthttp-routing/route.go @@ -0,0 +1,161 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package routing + +import ( + "fmt" + "net/url" + "strings" +) + +// Route represents a URL path pattern that can be used to match requested URLs. +type Route struct { + group *RouteGroup + name, path string + template string +} + +// newRoute creates a new Route with the given route path and route group. +func newRoute(path string, group *RouteGroup) *Route { + path = group.prefix + path + name := path + + // an asterisk at the end matches any number of characters + if strings.HasSuffix(path, "*") { + path = path[:len(path)-1] + "<:.*>" + } + + route := &Route{ + group: group, + name: name, + path: path, + template: buildURLTemplate(path), + } + group.router.routes[name] = route + + return route +} + +// Name sets the name of the route. +// This method will update the registration of the route in the router as well. +func (r *Route) Name(name string) *Route { + r.name = name + r.group.router.routes[name] = r + return r +} + +// Get adds the route to the router using the GET HTTP method. +func (r *Route) Get(handlers ...Handler) *Route { + return r.add("GET", handlers) +} + +// Post adds the route to the router using the POST HTTP method. +func (r *Route) Post(handlers ...Handler) *Route { + return r.add("POST", handlers) +} + +// Put adds the route to the router using the PUT HTTP method. +func (r *Route) Put(handlers ...Handler) *Route { + return r.add("PUT", handlers) +} + +// Patch adds the route to the router using the PATCH HTTP method. 
+func (r *Route) Patch(handlers ...Handler) *Route { + return r.add("PATCH", handlers) +} + +// Delete adds the route to the router using the DELETE HTTP method. +func (r *Route) Delete(handlers ...Handler) *Route { + return r.add("DELETE", handlers) +} + +// Connect adds the route to the router using the CONNECT HTTP method. +func (r *Route) Connect(handlers ...Handler) *Route { + return r.add("CONNECT", handlers) +} + +// Head adds the route to the router using the HEAD HTTP method. +func (r *Route) Head(handlers ...Handler) *Route { + return r.add("HEAD", handlers) +} + +// Options adds the route to the router using the OPTIONS HTTP method. +func (r *Route) Options(handlers ...Handler) *Route { + return r.add("OPTIONS", handlers) +} + +// Trace adds the route to the router using the TRACE HTTP method. +func (r *Route) Trace(handlers ...Handler) *Route { + return r.add("TRACE", handlers) +} + +// To adds the route to the router with the given HTTP methods and handlers. +// Multiple HTTP methods should be separated by commas (without any surrounding spaces). +func (r *Route) To(methods string, handlers ...Handler) *Route { + for _, method := range strings.Split(methods, ",") { + r.add(method, handlers) + } + return r +} + +// URL creates a URL using the current route and the given parameters. +// The parameters should be given in the sequence of name1, value1, name2, value2, and so on. +// If a parameter in the route is not provided a value, the parameter token will remain in the resulting URL. +// The method will perform URL encoding for all given parameter values. +func (r *Route) URL(pairs ...interface{}) (s string) { + s = r.template + for i := 0; i < len(pairs); i++ { + name := fmt.Sprintf("<%v>", pairs[i]) + value := "" + if i < len(pairs)-1 { + value = url.QueryEscape(fmt.Sprint(pairs[i+1])) + } + s = strings.Replace(s, name, value, -1) + } + return +} + +// add registers the route, the specified HTTP method and the handlers to the router. +// The handlers will be combined with the handlers of the route group. +func (r *Route) add(method string, handlers []Handler) *Route { + hh := combineHandlers(r.group.handlers, handlers) + r.group.router.add(method, r.path, hh) + return r +} + +// buildURLTemplate converts a route pattern into a URL template by removing regular expressions in parameter tokens. +func buildURLTemplate(path string) string { + template, start, end := "", -1, -1 + for i := 0; i < len(path); i++ { + if path[i] == '<' && start < 0 { + start = i + } else if path[i] == '>' && start >= 0 { + name := path[start+1 : i] + for j := start + 1; j < i; j++ { + if path[j] == ':' { + name = path[start+1 : j] + break + } + } + template += path[end+1:start] + "<" + name + ">" + end = i + start = -1 + } + } + if end < 0 { + template = path + } else if end < len(path)-1 { + template += path[end+1:] + } + return template +} + +// combineHandlers merges two lists of handlers into a new list. +func combineHandlers(h1 []Handler, h2 []Handler) []Handler { + hh := make([]Handler, len(h1)+len(h2)) + copy(hh, h1) + copy(hh[len(h1):], h2) + return hh +} diff --git a/vendor/github.com/qiangxue/fasthttp-routing/router.go b/vendor/github.com/qiangxue/fasthttp-routing/router.go new file mode 100644 index 0000000..f6bccbd --- /dev/null +++ b/vendor/github.com/qiangxue/fasthttp-routing/router.go @@ -0,0 +1,169 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package routing provides high performance and powerful HTTP routing capabilities. +package routing + +import ( + "net/http" + "sort" + "strings" + "sync" + + "github.com/valyala/fasthttp" +) + +type ( + // Handler is the function for handling HTTP requests. + Handler func(*Context) error + + // Router manages routes and dispatches HTTP requests to the handlers of the matching routes. + Router struct { + RouteGroup + pool sync.Pool + routes map[string]*Route + stores map[string]routeStore + maxParams int + notFound []Handler + notFoundHandlers []Handler + } + + // routeStore stores route paths and the corresponding handlers. + routeStore interface { + Add(key string, data interface{}) int + Get(key string, pvalues []string) (data interface{}, pnames []string) + String() string + } +) + +// Methods lists all supported HTTP methods by Router. +var Methods = []string{ + "CONNECT", + "DELETE", + "GET", + "HEAD", + "OPTIONS", + "PATCH", + "POST", + "PUT", + "TRACE", +} + +// New creates a new Router object. +func New() *Router { + r := &Router{ + routes: make(map[string]*Route), + stores: make(map[string]routeStore), + } + r.RouteGroup = *newRouteGroup("", r, make([]Handler, 0)) + r.NotFound(MethodNotAllowedHandler, NotFoundHandler) + r.pool.New = func() interface{} { + return &Context{ + pvalues: make([]string, r.maxParams), + router: r, + } + } + return r +} + +// HandleRequest handles the HTTP request. +func (r *Router) HandleRequest(ctx *fasthttp.RequestCtx) { + c := r.pool.Get().(*Context) + c.init(ctx) + c.handlers, c.pnames = r.find(string(ctx.Method()), string(ctx.Path()), c.pvalues) + if err := c.Next(); err != nil { + r.handleError(c, err) + } + r.pool.Put(c) +} + +// Route returns the named route. +// Nil is returned if the named route cannot be found. +func (r *Router) Route(name string) *Route { + return r.routes[name] +} + +// Use appends the specified handlers to the router and shares them with all routes. +func (r *Router) Use(handlers ...Handler) { + r.RouteGroup.Use(handlers...) + r.notFoundHandlers = combineHandlers(r.handlers, r.notFound) +} + +// NotFound specifies the handlers that should be invoked when the router cannot find any route matching a request. +// Note that the handlers registered via Use will be invoked first in this case. +func (r *Router) NotFound(handlers ...Handler) { + r.notFound = handlers + r.notFoundHandlers = combineHandlers(r.handlers, r.notFound) +} + +// handleError is the error handler for handling any unhandled errors. 
+func (r *Router) handleError(c *Context, err error) { + if httpError, ok := err.(HTTPError); ok { + c.Error(httpError.Error(), httpError.StatusCode()) + } else { + c.Error(err.Error(), http.StatusInternalServerError) + } +} + +func (r *Router) add(method, path string, handlers []Handler) { + store := r.stores[method] + if store == nil { + store = newStore() + r.stores[method] = store + } + if n := store.Add(path, handlers); n > r.maxParams { + r.maxParams = n + } +} + +func (r *Router) find(method, path string, pvalues []string) (handlers []Handler, pnames []string) { + var hh interface{} + if store := r.stores[method]; store != nil { + hh, pnames = store.Get(path, pvalues) + } + if hh != nil { + return hh.([]Handler), pnames + } + return r.notFoundHandlers, pnames +} + +func (r *Router) findAllowedMethods(path string) map[string]bool { + methods := make(map[string]bool) + pvalues := make([]string, r.maxParams) + for m, store := range r.stores { + if handlers, _ := store.Get(path, pvalues); handlers != nil { + methods[m] = true + } + } + return methods +} + +// NotFoundHandler returns a 404 HTTP error indicating a request has no matching route. +func NotFoundHandler(*Context) error { + return NewHTTPError(http.StatusNotFound) +} + +// MethodNotAllowedHandler handles the situation when a request has matching route without matching HTTP method. +// In this case, the handler will respond with an Allow HTTP header listing the allowed HTTP methods. +// Otherwise, the handler will do nothing and let the next handler (usually a NotFoundHandler) to handle the problem. +func MethodNotAllowedHandler(c *Context) error { + methods := c.Router().findAllowedMethods(string(c.Path())) + if len(methods) == 0 { + return nil + } + methods["OPTIONS"] = true + ms := make([]string, len(methods)) + i := 0 + for method := range methods { + ms[i] = method + i++ + } + sort.Strings(ms) + c.Response.Header.Set("Allow", strings.Join(ms, ", ")) + if string(c.Method()) != "OPTIONS" { + c.Response.SetStatusCode(http.StatusMethodNotAllowed) + } + c.Abort() + return nil +} diff --git a/vendor/github.com/qiangxue/fasthttp-routing/store.go b/vendor/github.com/qiangxue/fasthttp-routing/store.go new file mode 100644 index 0000000..119810c --- /dev/null +++ b/vendor/github.com/qiangxue/fasthttp-routing/store.go @@ -0,0 +1,317 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package routing + +import ( + "fmt" + "math" + "regexp" + "strings" +) + +// store is a radix tree that supports storing data with parametric keys and retrieving them back with concrete keys. +// When retrieving a data item with a concrete key, the matching parameter names and values will be returned as well. +// A parametric key is a string containing tokens in the format of "", "", or "<:pattern>". +// Each token represents a single parameter. +type store struct { + root *node // the root node of the radix tree + count int // the number of data nodes in the tree +} + +// newStore creates a new store. +func newStore() *store { + return &store{ + root: &node{ + static: true, + children: make([]*node, 256), + pchildren: make([]*node, 0), + pindex: -1, + pnames: []string{}, + }, + } +} + +// Add adds a new data item with the given parametric key. +// The number of parameters in the key is returned. 
+func (s *store) Add(key string, data interface{}) int { + s.count++ + return s.root.add(key, data, s.count) +} + +// Get returns the data item matching the given concrete key. +// If the data item was added to the store with a parametric key before, the matching +// parameter names and values will be returned as well. +func (s *store) Get(path string, pvalues []string) (data interface{}, pnames []string) { + data, pnames, _ = s.root.get(path, pvalues) + return +} + +// String dumps the radix tree kept in the store as a string. +func (s *store) String() string { + return s.root.print(0) +} + +// node represents a radix trie node +type node struct { + static bool // whether the node is a static node or param node + + key string // the key identifying this node + data interface{} // the data associated with this node. nil if not a data node. + + order int // the order at which the data was added. used to be pick the first one when matching multiple + minOrder int // minimum order among all the child nodes and this node + + children []*node // child static nodes, indexed by the first byte of each child key + pchildren []*node // child param nodes + + regex *regexp.Regexp // regular expression for a param node containing regular expression key + pindex int // the parameter index, meaningful only for param node + pnames []string // the parameter names collected from the root till this node +} + +// add adds a new data item to the tree rooted at the current node. +// The number of parameters in the key is returned. +func (n *node) add(key string, data interface{}, order int) int { + matched := 0 + + // find the common prefix + for ; matched < len(key) && matched < len(n.key); matched++ { + if key[matched] != n.key[matched] { + break + } + } + + if matched == len(n.key) { + if matched == len(key) { + // the node key is the same as the key: make the current node as data node + // if the node is already a data node, ignore the new data since we only care the first matched node + if n.data == nil { + n.data = data + n.order = order + } + return n.pindex + 1 + } + + // the node key is a prefix of the key: create a child node + newKey := key[matched:] + + // try adding to a static child + if child := n.children[newKey[0]]; child != nil { + if pn := child.add(newKey, data, order); pn >= 0 { + return pn + } + } + // try adding to a param child + for _, child := range n.pchildren { + if pn := child.add(newKey, data, order); pn >= 0 { + return pn + } + } + + return n.addChild(newKey, data, order) + } + + if matched == 0 || !n.static { + // no common prefix, or partial common prefix with a non-static node: should skip this node + return -1 + } + + // the node key shares a partial prefix with the key: split the node key + n1 := &node{ + static: true, + key: n.key[matched:], + data: n.data, + order: n.order, + minOrder: n.minOrder, + pchildren: n.pchildren, + children: n.children, + pindex: n.pindex, + pnames: n.pnames, + } + + n.key = key[0:matched] + n.data = nil + n.pchildren = make([]*node, 0) + n.children = make([]*node, 256) + n.children[n1.key[0]] = n1 + + return n.add(key, data, order) +} + +// addChild creates static and param nodes to store the given data +func (n *node) addChild(key string, data interface{}, order int) int { + // find the first occurrence of a param token + p0, p1 := -1, -1 + for i := 0; i < len(key); i++ { + if p0 < 0 && key[i] == '<' { + p0 = i + } + if p0 >= 0 && key[i] == '>' { + p1 = i + break + } + } + + if p0 > 0 && p1 > 0 || p1 < 0 { + // param token occurs after a static 
string, or no param token: create a static node + child := &node{ + static: true, + key: key, + minOrder: order, + children: make([]*node, 256), + pchildren: make([]*node, 0), + pindex: n.pindex, + pnames: n.pnames, + } + n.children[key[0]] = child + if p1 > 0 { + // param token occurs after a static string + child.key = key[:p0] + n = child + } else { + // no param token: done adding the child + child.data = data + child.order = order + return child.pindex + 1 + } + } + + // add param node + child := &node{ + static: false, + key: key[p0 : p1+1], + minOrder: order, + children: make([]*node, 256), + pchildren: make([]*node, 0), + pindex: n.pindex, + pnames: n.pnames, + } + pattern := "" + pname := key[p0+1 : p1] + for i := p0 + 1; i < p1; i++ { + if key[i] == ':' { + pname = key[p0+1 : i] + pattern = key[i+1 : p1] + break + } + } + if pattern != "" { + // the param token contains a regular expression + child.regex = regexp.MustCompile("^" + pattern) + } + pnames := make([]string, len(n.pnames)+1) + copy(pnames, n.pnames) + pnames[len(n.pnames)] = pname + child.pnames = pnames + child.pindex = len(pnames) - 1 + n.pchildren = append(n.pchildren, child) + + if p1 == len(key)-1 { + // the param token is at the end of the key + child.data = data + child.order = order + return child.pindex + 1 + } + + // process the rest of the key + return child.addChild(key[p1+1:], data, order) +} + +// get returns the data item with the key matching the tree rooted at the current node +func (n *node) get(key string, pvalues []string) (data interface{}, pnames []string, order int) { + order = math.MaxInt32 + +repeat: + if n.static { + // check if the node key is a prefix of the given key + // a slightly optimized version of strings.HasPrefix + nkl := len(n.key) + if nkl > len(key) { + return + } + for i := nkl - 1; i >= 0; i-- { + if n.key[i] != key[i] { + return + } + } + key = key[nkl:] + } else if n.regex != nil { + // param node with regular expression + if n.regex.String() == "^.*" { + pvalues[n.pindex] = key + key = "" + } else if match := n.regex.FindStringIndex(key); match != nil { + pvalues[n.pindex] = key[0:match[1]] + key = key[match[1]:] + } else { + return + } + } else { + // param node matching non-"/" characters + i, kl := 0, len(key) + for ; i < kl; i++ { + if key[i] == '/' { + pvalues[n.pindex] = key[0:i] + key = key[i:] + break + } + } + if i == kl { + pvalues[n.pindex] = key + key = "" + } + } + + if len(key) > 0 { + // find a static child that can match the rest of the key + if child := n.children[key[0]]; child != nil { + if len(n.pchildren) == 0 { + // use goto to avoid recursion when no param children + n = child + goto repeat + } + data, pnames, order = child.get(key, pvalues) + } + } else if n.data != nil { + // do not return yet: a param node may match an empty string with smaller order + data, pnames, order = n.data, n.pnames, n.order + } + + // try matching param children + tvalues := pvalues + allocated := false + for _, child := range n.pchildren { + if child.minOrder >= order { + continue + } + if data != nil && !allocated { + tvalues = make([]string, len(pvalues)) + allocated = true + } + if d, p, s := child.get(key, tvalues); d != nil && s < order { + if allocated { + for i := child.pindex; i < len(p); i++ { + pvalues[i] = tvalues[i] + } + } + data, pnames, order = d, p, s + } + } + + return +} + +func (n *node) print(level int) string { + r := fmt.Sprintf("%v{key: %v, regex: %v, data: %v, order: %v, minOrder: %v, pindex: %v, pnames: %v}\n", strings.Repeat(" ", level<<2), 
n.key, n.regex, n.data, n.order, n.minOrder, n.pindex, n.pnames)
+	for _, child := range n.children {
+		if child != nil {
+			r += child.print(level + 1)
+		}
+	}
+	for _, child := range n.pchildren {
+		r += child.print(level + 1)
+	}
+	return r
+}
diff --git a/vendor/github.com/valyala/bytebufferpool/LICENSE b/vendor/github.com/valyala/bytebufferpool/LICENSE
new file mode 100644
index 0000000..f7c935c
--- /dev/null
+++ b/vendor/github.com/valyala/bytebufferpool/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/valyala/bytebufferpool/README.md b/vendor/github.com/valyala/bytebufferpool/README.md
new file mode 100644
index 0000000..061357e
--- /dev/null
+++ b/vendor/github.com/valyala/bytebufferpool/README.md
@@ -0,0 +1,21 @@
+[![Build Status](https://travis-ci.org/valyala/bytebufferpool.svg)](https://travis-ci.org/valyala/bytebufferpool)
+[![GoDoc](https://godoc.org/github.com/valyala/bytebufferpool?status.svg)](http://godoc.org/github.com/valyala/bytebufferpool)
+[![Go Report](http://goreportcard.com/badge/valyala/bytebufferpool)](http://goreportcard.com/report/valyala/bytebufferpool)
+
+# bytebufferpool
+
+An implementation of a pool of byte buffers with anti-memory-waste protection.
+
+The pool may waste a limited amount of memory due to fragmentation.
+This amount equals the maximum total size of the byte buffers
+in concurrent use.
+
+# Benchmark results
+Currently bytebufferpool is the fastest and most effective buffer pool written in Go.
+
+You can find results [here](https://omgnull.github.io/go-benchmark/buffer/).
+
+# bytebufferpool users
+
+* [fasthttp](https://github.com/valyala/fasthttp)
+* [quicktemplate](https://github.com/valyala/quicktemplate)
diff --git a/vendor/github.com/valyala/bytebufferpool/bytebuffer.go b/vendor/github.com/valyala/bytebufferpool/bytebuffer.go
new file mode 100644
index 0000000..07a055a
--- /dev/null
+++ b/vendor/github.com/valyala/bytebufferpool/bytebuffer.go
@@ -0,0 +1,111 @@
+package bytebufferpool
+
+import "io"
+
+// ByteBuffer provides a byte buffer, which can be used for minimizing
+// memory allocations.
+//
+// ByteBuffer may be used with functions appending data to the given []byte
+// slice. See example code for details.
+//
+// Use Get for obtaining an empty byte buffer.
+type ByteBuffer struct {
+
+	// B is a byte buffer to use in append-like workloads.
+	// See example code for details.
+ B []byte +} + +// Len returns the size of the byte buffer. +func (b *ByteBuffer) Len() int { + return len(b.B) +} + +// ReadFrom implements io.ReaderFrom. +// +// The function appends all the data read from r to b. +func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) { + p := b.B + nStart := int64(len(p)) + nMax := int64(cap(p)) + n := nStart + if nMax == 0 { + nMax = 64 + p = make([]byte, nMax) + } else { + p = p[:nMax] + } + for { + if n == nMax { + nMax *= 2 + bNew := make([]byte, nMax) + copy(bNew, p) + p = bNew + } + nn, err := r.Read(p[n:]) + n += int64(nn) + if err != nil { + b.B = p[:n] + n -= nStart + if err == io.EOF { + return n, nil + } + return n, err + } + } +} + +// WriteTo implements io.WriterTo. +func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(b.B) + return int64(n), err +} + +// Bytes returns b.B, i.e. all the bytes accumulated in the buffer. +// +// The purpose of this function is bytes.Buffer compatibility. +func (b *ByteBuffer) Bytes() []byte { + return b.B +} + +// Write implements io.Writer - it appends p to ByteBuffer.B +func (b *ByteBuffer) Write(p []byte) (int, error) { + b.B = append(b.B, p...) + return len(p), nil +} + +// WriteByte appends the byte c to the buffer. +// +// The purpose of this function is bytes.Buffer compatibility. +// +// The function always returns nil. +func (b *ByteBuffer) WriteByte(c byte) error { + b.B = append(b.B, c) + return nil +} + +// WriteString appends s to ByteBuffer.B. +func (b *ByteBuffer) WriteString(s string) (int, error) { + b.B = append(b.B, s...) + return len(s), nil +} + +// Set sets ByteBuffer.B to p. +func (b *ByteBuffer) Set(p []byte) { + b.B = append(b.B[:0], p...) +} + +// SetString sets ByteBuffer.B to s. +func (b *ByteBuffer) SetString(s string) { + b.B = append(b.B[:0], s...) +} + +// String returns string representation of ByteBuffer.B. +func (b *ByteBuffer) String() string { + return string(b.B) +} + +// Reset makes ByteBuffer.B empty. +func (b *ByteBuffer) Reset() { + b.B = b.B[:0] +} diff --git a/vendor/github.com/valyala/bytebufferpool/doc.go b/vendor/github.com/valyala/bytebufferpool/doc.go new file mode 100644 index 0000000..e511b7c --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/doc.go @@ -0,0 +1,7 @@ +// Package bytebufferpool implements a pool of byte buffers +// with anti-fragmentation protection. +// +// The pool may waste limited amount of memory due to fragmentation. +// This amount equals to the maximum total size of the byte buffers +// in concurrent use. +package bytebufferpool diff --git a/vendor/github.com/valyala/bytebufferpool/pool.go b/vendor/github.com/valyala/bytebufferpool/pool.go new file mode 100644 index 0000000..8bb4134 --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/pool.go @@ -0,0 +1,151 @@ +package bytebufferpool + +import ( + "sort" + "sync" + "sync/atomic" +) + +const ( + minBitSize = 6 // 2**6=64 is a CPU cache line size + steps = 20 + + minSize = 1 << minBitSize + maxSize = 1 << (minBitSize + steps - 1) + + calibrateCallsThreshold = 42000 + maxPercentile = 0.95 +) + +// Pool represents byte buffer pool. +// +// Distinct pools may be used for distinct types of byte buffers. +// Properly determined byte buffer types with their own pools may help reducing +// memory waste. +type Pool struct { + calls [steps]uint64 + calibrating uint64 + + defaultSize uint64 + maxSize uint64 + + pool sync.Pool +} + +var defaultPool Pool + +// Get returns an empty byte buffer from the pool. 
+// +// Got byte buffer may be returned to the pool via Put call. +// This reduces the number of memory allocations required for byte buffer +// management. +func Get() *ByteBuffer { return defaultPool.Get() } + +// Get returns new byte buffer with zero length. +// +// The byte buffer may be returned to the pool via Put after the use +// in order to minimize GC overhead. +func (p *Pool) Get() *ByteBuffer { + v := p.pool.Get() + if v != nil { + return v.(*ByteBuffer) + } + return &ByteBuffer{ + B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)), + } +} + +// Put returns byte buffer to the pool. +// +// ByteBuffer.B mustn't be touched after returning it to the pool. +// Otherwise data races will occur. +func Put(b *ByteBuffer) { defaultPool.Put(b) } + +// Put releases byte buffer obtained via Get to the pool. +// +// The buffer mustn't be accessed after returning to the pool. +func (p *Pool) Put(b *ByteBuffer) { + idx := index(len(b.B)) + + if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold { + p.calibrate() + } + + maxSize := int(atomic.LoadUint64(&p.maxSize)) + if maxSize == 0 || cap(b.B) <= maxSize { + b.Reset() + p.pool.Put(b) + } +} + +func (p *Pool) calibrate() { + if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) { + return + } + + a := make(callSizes, 0, steps) + var callsSum uint64 + for i := uint64(0); i < steps; i++ { + calls := atomic.SwapUint64(&p.calls[i], 0) + callsSum += calls + a = append(a, callSize{ + calls: calls, + size: minSize << i, + }) + } + sort.Sort(a) + + defaultSize := a[0].size + maxSize := defaultSize + + maxSum := uint64(float64(callsSum) * maxPercentile) + callsSum = 0 + for i := 0; i < steps; i++ { + if callsSum > maxSum { + break + } + callsSum += a[i].calls + size := a[i].size + if size > maxSize { + maxSize = size + } + } + + atomic.StoreUint64(&p.defaultSize, defaultSize) + atomic.StoreUint64(&p.maxSize, maxSize) + + atomic.StoreUint64(&p.calibrating, 0) +} + +type callSize struct { + calls uint64 + size uint64 +} + +type callSizes []callSize + +func (ci callSizes) Len() int { + return len(ci) +} + +func (ci callSizes) Less(i, j int) bool { + return ci[i].calls > ci[j].calls +} + +func (ci callSizes) Swap(i, j int) { + ci[i], ci[j] = ci[j], ci[i] +} + +func index(n int) int { + n-- + n >>= minBitSize + idx := 0 + for n > 0 { + n >>= 1 + idx++ + } + if idx >= steps { + idx = steps - 1 + } + return idx +} diff --git a/vendor/github.com/valyala/fasthttp/LICENSE b/vendor/github.com/valyala/fasthttp/LICENSE new file mode 100644 index 0000000..22bf00c --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015-2016 Aliaksandr Valialkin, VertaMedia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/valyala/fasthttp/README.md b/vendor/github.com/valyala/fasthttp/README.md
new file mode 100644
index 0000000..7688005
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/README.md
@@ -0,0 +1,579 @@
+[![Build Status](https://travis-ci.org/valyala/fasthttp.svg)](https://travis-ci.org/valyala/fasthttp)
+[![GoDoc](https://godoc.org/github.com/valyala/fasthttp?status.svg)](http://godoc.org/github.com/valyala/fasthttp)
+[![Go Report](https://goreportcard.com/badge/github.com/valyala/fasthttp)](https://goreportcard.com/report/github.com/valyala/fasthttp)
+
+# fasthttp
+Fast HTTP implementation for Go.
+
+Currently fasthttp is successfully used by [VertaMedia](https://vertamedia.com/)
+in production serving up to 200K rps from more than 1.5M concurrent keep-alive
+connections per physical server.
+
+[TechEmpower Benchmark round 12 results](https://www.techempower.com/benchmarks/#section=data-r12&hw=peak&test=plaintext)
+
+[Server Benchmarks](#http-server-performance-comparison-with-nethttp)
+
+[Client Benchmarks](#http-client-comparison-with-nethttp)
+
+[Install](#install)
+
+[Documentation](https://godoc.org/github.com/valyala/fasthttp)
+
+[Examples from docs](https://godoc.org/github.com/valyala/fasthttp#pkg-examples)
+
+[Code examples](examples)
+
+[Switching from net/http to fasthttp](#switching-from-nethttp-to-fasthttp)
+
+[Fasthttp best practices](#fasthttp-best-practices)
+
+[Tricks with byte buffers](#tricks-with-byte-buffers)
+
+[Related projects](#related-projects)
+
+[FAQ](#faq)
+
+# HTTP server performance comparison with [net/http](https://golang.org/pkg/net/http/)
+
+In short, fasthttp server is up to 10 times faster than net/http.
+Below are benchmark results.
+ +*GOMAXPROCS=1* + +net/http server: +``` +$ GOMAXPROCS=1 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s +BenchmarkNetHTTPServerGet1ReqPerConn 1000000 12052 ns/op 2297 B/op 29 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn 1000000 12278 ns/op 2327 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn 2000000 8903 ns/op 2112 B/op 19 allocs/op +BenchmarkNetHTTPServerGet10KReqPerConn 2000000 8451 ns/op 2058 B/op 18 allocs/op +BenchmarkNetHTTPServerGet1ReqPerConn10KClients 500000 26733 ns/op 3229 B/op 29 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn10KClients 1000000 23351 ns/op 3211 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn10KClients 1000000 13390 ns/op 2483 B/op 19 allocs/op +BenchmarkNetHTTPServerGet100ReqPerConn10KClients 1000000 13484 ns/op 2171 B/op 18 allocs/op +``` + +fasthttp server: +``` +$ GOMAXPROCS=1 go test -bench=kServerGet -benchmem -benchtime=10s +BenchmarkServerGet1ReqPerConn 10000000 1559 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn 10000000 1248 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn 20000000 797 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10KReqPerConn 20000000 716 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet1ReqPerConn10KClients 10000000 1974 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn10KClients 10000000 1352 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn10KClients 20000000 789 ns/op 2 B/op 0 allocs/op +BenchmarkServerGet100ReqPerConn10KClients 20000000 604 ns/op 0 B/op 0 allocs/op +``` + +*GOMAXPROCS=4* + +net/http server: +``` +$ GOMAXPROCS=4 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s +BenchmarkNetHTTPServerGet1ReqPerConn-4 3000000 4529 ns/op 2389 B/op 29 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn-4 5000000 3896 ns/op 2418 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn-4 5000000 3145 ns/op 2160 B/op 19 allocs/op +BenchmarkNetHTTPServerGet10KReqPerConn-4 5000000 3054 ns/op 2065 B/op 18 allocs/op +BenchmarkNetHTTPServerGet1ReqPerConn10KClients-4 1000000 10321 ns/op 3710 B/op 30 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn10KClients-4 2000000 7556 ns/op 3296 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn10KClients-4 5000000 3905 ns/op 2349 B/op 19 allocs/op +BenchmarkNetHTTPServerGet100ReqPerConn10KClients-4 5000000 3435 ns/op 2130 B/op 18 allocs/op +``` + +fasthttp server: +``` +$ GOMAXPROCS=4 go test -bench=kServerGet -benchmem -benchtime=10s +BenchmarkServerGet1ReqPerConn-4 10000000 1141 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn-4 20000000 707 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn-4 30000000 341 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10KReqPerConn-4 50000000 310 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet1ReqPerConn10KClients-4 10000000 1119 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn10KClients-4 20000000 644 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn10KClients-4 30000000 346 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet100ReqPerConn10KClients-4 50000000 282 ns/op 0 B/op 0 allocs/op +``` + +# HTTP client comparison with net/http + +In short, fasthttp client is up to 10 times faster than net/http. +Below are benchmark results. 
+ +*GOMAXPROCS=1* + +net/http client: +``` +$ GOMAXPROCS=1 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkNetHTTPClientDoFastServer 1000000 12567 ns/op 2616 B/op 35 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1TCP 200000 67030 ns/op 5028 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10TCP 300000 51098 ns/op 5031 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100TCP 300000 45096 ns/op 5026 B/op 55 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1Inmemory 500000 24779 ns/op 5035 B/op 57 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10Inmemory 1000000 26425 ns/op 5035 B/op 57 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100Inmemory 500000 28515 ns/op 5045 B/op 57 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1000Inmemory 500000 39511 ns/op 5096 B/op 56 allocs/op +``` + +fasthttp client: +``` +$ GOMAXPROCS=1 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkClientDoFastServer 20000000 865 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1TCP 1000000 18711 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10TCP 1000000 14664 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100TCP 1000000 14043 ns/op 1 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1Inmemory 5000000 3965 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10Inmemory 3000000 4060 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100Inmemory 5000000 3396 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1000Inmemory 5000000 3306 ns/op 2 B/op 0 allocs/op +``` + +*GOMAXPROCS=4* + +net/http client: +``` +$ GOMAXPROCS=4 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkNetHTTPClientDoFastServer-4 2000000 8774 ns/op 2619 B/op 35 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1TCP-4 500000 22951 ns/op 5047 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10TCP-4 1000000 19182 ns/op 5037 B/op 55 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100TCP-4 1000000 16535 ns/op 5031 B/op 55 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1Inmemory-4 1000000 14495 ns/op 5038 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10Inmemory-4 1000000 10237 ns/op 5034 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100Inmemory-4 1000000 10125 ns/op 5045 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1000Inmemory-4 1000000 11132 ns/op 5136 B/op 56 allocs/op +``` + +fasthttp client: +``` +$ GOMAXPROCS=4 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkClientDoFastServer-4 50000000 397 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1TCP-4 2000000 7388 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10TCP-4 2000000 6689 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100TCP-4 3000000 4927 ns/op 1 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1Inmemory-4 10000000 1604 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10Inmemory-4 10000000 1458 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100Inmemory-4 10000000 1329 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1000Inmemory-4 10000000 1316 ns/op 5 B/op 0 allocs/op +``` + + +# Install + +``` +go get -u github.com/valyala/fasthttp +``` + + +# Switching from net/http to fasthttp + +Unfortunately, fasthttp doesn't provide API identical to net/http. +See the [FAQ](#faq) for details. +There is [net/http -> fasthttp handler converter](https://godoc.org/github.com/valyala/fasthttp/fasthttpadaptor), +but it is better to write fasthttp request handlers by hand in order to use +all of the fasthttp advantages (especially high performance :) ). 
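+If an existing net/http handler must stay in place during a gradual migration, it can
+be mounted through the converter. A minimal sketch, assuming the
+`fasthttpadaptor.NewFastHTTPHandler` API (illustrative only, not a drop-in recipe):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/valyala/fasthttp"
+	"github.com/valyala/fasthttp/fasthttpadaptor"
+)
+
+func main() {
+	// An existing net/http handler, left unchanged.
+	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintf(w, "Hello from net/http, path %q", r.URL.Path)
+	})
+
+	// Assumed API: fasthttpadaptor.NewFastHTTPHandler wraps an http.Handler
+	// into a fasthttp.RequestHandler. The adaptor costs extra allocations per
+	// request, which is why handwritten fasthttp handlers remain preferable.
+	fasthttp.ListenAndServe(":8080", fasthttpadaptor.NewFastHTTPHandler(h))
+}
+```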
+ +Important points: + +* Fasthttp works with [RequestHandler functions](https://godoc.org/github.com/valyala/fasthttp#RequestHandler) +instead of objects implementing [Handler interface](https://golang.org/pkg/net/http/#Handler). +Fortunately, it is easy to pass bound struct methods to fasthttp: + + ```go + type MyHandler struct { + foobar string + } + + // request handler in net/http style, i.e. method bound to MyHandler struct. + func (h *MyHandler) HandleFastHTTP(ctx *fasthttp.RequestCtx) { + // notice that we may access MyHandler properties here - see h.foobar. + fmt.Fprintf(ctx, "Hello, world! Requested path is %q. Foobar is %q", + ctx.Path(), h.foobar) + } + + // request handler in fasthttp style, i.e. just plain function. + func fastHTTPHandler(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "Hi there! RequestURI is %q", ctx.RequestURI()) + } + + // pass bound struct method to fasthttp + myHandler := &MyHandler{ + foobar: "foobar", + } + fasthttp.ListenAndServe(":8080", myHandler.HandleFastHTTP) + + // pass plain function to fasthttp + fasthttp.ListenAndServe(":8081", fastHTTPHandler) + ``` + +* The [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler) +accepts only one argument - [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx). +It contains all the functionality required for http request processing +and response writing. Below is an example of a simple request handler conversion +from net/http to fasthttp. + + ```go + // net/http request handler + requestHandler := func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/foo": + fooHandler(w, r) + case "/bar": + barHandler(w, r) + default: + http.Error(w, "Unsupported path", http.StatusNotFound) + } + } + ``` + + ```go + // the corresponding fasthttp request handler + requestHandler := func(ctx *fasthttp.RequestCtx) { + switch string(ctx.Path()) { + case "/foo": + fooHandler(ctx) + case "/bar": + barHandler(ctx) + default: + ctx.Error("Unsupported path", fasthttp.StatusNotFound) + } + } + ``` + +* Fasthttp allows setting response headers and writing response body +in an arbitrary order. There is no 'headers first, then body' restriction +like in net/http. The following code is valid for fasthttp: + + ```go + requestHandler := func(ctx *fasthttp.RequestCtx) { + // set some headers and status code first + ctx.SetContentType("foo/bar") + ctx.SetStatusCode(fasthttp.StatusOK) + + // then write the first part of body + fmt.Fprintf(ctx, "this is the first part of body\n") + + // then set more headers + ctx.Response.Header.Set("Foo-Bar", "baz") + + // then write more body + fmt.Fprintf(ctx, "this is the second part of body\n") + + // then override already written body + ctx.SetBody([]byte("this is completely new body contents")) + + // then update status code + ctx.SetStatusCode(fasthttp.StatusNotFound) + + // basically, anything may be updated many times before + // returning from RequestHandler. + // + // Unlike net/http fasthttp doesn't put response to the wire until + // returning from RequestHandler. 
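+        //
+        // Since nothing is flushed until then, the accumulated body may even
+        // be dropped and rebuilt from scratch, e.g. via ctx.ResetBody()
+        // (assumed API, mentioned here only as an illustration).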
+ } + ``` + +* Fasthttp doesn't provide [ServeMux](https://golang.org/pkg/net/http/#ServeMux), +but there are more powerful third-party routers and web frameworks +with fasthttp support: + + * [Iris](https://github.com/kataras/iris) + * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) + * [fasthttprouter](https://github.com/buaazp/fasthttprouter) + * [lu](https://github.com/vincentLiuxiang/lu) + + Net/http code with simple ServeMux is trivially converted to fasthttp code: + + ```go + // net/http code + + m := &http.ServeMux{} + m.HandleFunc("/foo", fooHandlerFunc) + m.HandleFunc("/bar", barHandlerFunc) + m.Handle("/baz", bazHandler) + + http.ListenAndServe(":80", m) + ``` + + ```go + // the corresponding fasthttp code + m := func(ctx *fasthttp.RequestCtx) { + switch string(ctx.Path()) { + case "/foo": + fooHandlerFunc(ctx) + case "/bar": + barHandlerFunc(ctx) + case "/baz": + bazHandler.HandlerFunc(ctx) + default: + ctx.Error("not found", fasthttp.StatusNotFound) + } + } + + fasthttp.ListenAndServe(":80", m) + ``` + +* net/http -> fasthttp conversion table: + + * All the pseudocode below assumes w, r and ctx have these types: + ```go + var ( + w http.ResponseWriter + r *http.Request + ctx *fasthttp.RequestCtx + ) + ``` + * r.Body -> [ctx.PostBody()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostBody) + * r.URL.Path -> [ctx.Path()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Path) + * r.URL -> [ctx.URI()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.URI) + * r.Method -> [ctx.Method()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Method) + * r.Header -> [ctx.Request.Header](https://godoc.org/github.com/valyala/fasthttp#RequestHeader) + * r.Header.Get() -> [ctx.Request.Header.Peek()](https://godoc.org/github.com/valyala/fasthttp#RequestHeader.Peek) + * r.Host -> [ctx.Host()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Host) + * r.Form -> [ctx.QueryArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.QueryArgs) + + [ctx.PostArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostArgs) + * r.PostForm -> [ctx.PostArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostArgs) + * r.FormValue() -> [ctx.FormValue()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.FormValue) + * r.FormFile() -> [ctx.FormFile()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.FormFile) + * r.MultipartForm -> [ctx.MultipartForm()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.MultipartForm) + * r.RemoteAddr -> [ctx.RemoteAddr()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.RemoteAddr) + * r.RequestURI -> [ctx.RequestURI()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.RequestURI) + * r.TLS -> [ctx.IsTLS()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.IsTLS) + * r.Cookie() -> [ctx.Request.Header.Cookie()](https://godoc.org/github.com/valyala/fasthttp#RequestHeader.Cookie) + * r.Referer() -> [ctx.Referer()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Referer) + * r.UserAgent() -> [ctx.UserAgent()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.UserAgent) + * w.Header() -> [ctx.Response.Header](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader) + * w.Header().Set() -> [ctx.Response.Header.Set()](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader.Set) + * w.Header().Set("Content-Type") -> [ctx.SetContentType()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetContentType) + * 
w.Header().Set("Set-Cookie") -> [ctx.Response.Header.SetCookie()](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader.SetCookie) + * w.Write() -> [ctx.Write()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Write), + [ctx.SetBody()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBody), + [ctx.SetBodyStream()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBodyStream), + [ctx.SetBodyStreamWriter()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBodyStreamWriter) + * w.WriteHeader() -> [ctx.SetStatusCode()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetStatusCode) + * w.(http.Hijacker).Hijack() -> [ctx.Hijack()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack) + * http.Error() -> [ctx.Error()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Error) + * http.FileServer() -> [fasthttp.FSHandler()](https://godoc.org/github.com/valyala/fasthttp#FSHandler), + [fasthttp.FS](https://godoc.org/github.com/valyala/fasthttp#FS) + * http.ServeFile() -> [fasthttp.ServeFile()](https://godoc.org/github.com/valyala/fasthttp#ServeFile) + * http.Redirect() -> [ctx.Redirect()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Redirect) + * http.NotFound() -> [ctx.NotFound()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.NotFound) + * http.StripPrefix() -> [fasthttp.PathRewriteFunc](https://godoc.org/github.com/valyala/fasthttp#PathRewriteFunc) + +* *VERY IMPORTANT!* Fasthttp disallows holding references +to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx) or to its' +members after returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler). +Otherwise [data races](http://blog.golang.org/race-detector) are inevitable. +Carefully inspect all the net/http request handlers converted to fasthttp whether +they retain references to RequestCtx or to its' members after returning. +RequestCtx provides the following _band aids_ for this case: + + * Wrap RequestHandler into [TimeoutHandler](https://godoc.org/github.com/valyala/fasthttp#TimeoutHandler). + * Call [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError) + before returning from RequestHandler if there are references to RequestCtx or to its' members. + See [the example](https://godoc.org/github.com/valyala/fasthttp#example-RequestCtx-TimeoutError) + for more details. + +Use this brilliant tool - [race detector](http://blog.golang.org/race-detector) - +for detecting and eliminating data races in your program. If you detected +data race related to fasthttp in your program, then there is high probability +you forgot calling [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError) +before returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler). + +* Blind switching from net/http to fasthttp won't give you performance boost. +While fasthttp is optimized for speed, its' performance may be easily saturated +by slow [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler). +So [profile](http://blog.golang.org/profiling-go-programs) and optimize your +code after switching to fasthttp. For instance, use [quicktemplate](https://github.com/valyala/quicktemplate) +instead of [html/template](https://golang.org/pkg/html/template/). 
+
+* See also [fasthttputil](https://godoc.org/github.com/valyala/fasthttp/fasthttputil),
+[fasthttpadaptor](https://godoc.org/github.com/valyala/fasthttp/fasthttpadaptor) and
+[expvarhandler](https://godoc.org/github.com/valyala/fasthttp/expvarhandler).
+
+
+# Performance optimization tips for multi-core systems
+
+* Use [reuseport](https://godoc.org/github.com/valyala/fasthttp/reuseport) listener.
+* Run a separate server instance per CPU core with GOMAXPROCS=1.
+* Pin each server instance to a separate CPU core using [taskset](http://linux.die.net/man/1/taskset).
+* Ensure the interrupts of the multiqueue network card are evenly distributed between CPU cores.
+  See [this article](https://blog.cloudflare.com/how-to-achieve-low-latency/) for details.
+* Use Go 1.6, as it provides some considerable performance improvements.
+
+
+# Fasthttp best practices
+
+* Do not allocate objects and `[]byte` buffers - just reuse them as much
+  as possible. Fasthttp API design encourages this.
+* [sync.Pool](https://golang.org/pkg/sync/#Pool) is your best friend.
+* [Profile your program](http://blog.golang.org/profiling-go-programs)
+  in production.
+  `go tool pprof --alloc_objects your-program mem.pprof` usually gives better
+  insights into optimization opportunities than `go tool pprof your-program cpu.pprof`.
+* Write [tests and benchmarks](https://golang.org/pkg/testing/) for hot paths.
+* Avoid conversions between `[]byte` and `string`, since they may result in
+  a memory allocation plus a copy. The fasthttp API provides functions for both
+  `[]byte` and `string` - use them instead of converting manually.
+  There are some exceptions - see [this wiki page](https://github.com/golang/go/wiki/CompilerOptimizations#string-and-byte)
+  for more details.
+* Verify your tests and production code under the
+  [race detector](https://golang.org/doc/articles/race_detector.html) on a regular basis.
+* Prefer [quicktemplate](https://github.com/valyala/quicktemplate) over
+  [html/template](https://golang.org/pkg/html/template/) in your webserver.
+
+
+# Tricks with `[]byte` buffers
+
+The following tricks are used by fasthttp. Use them in your code too.
+
+* Standard Go functions accept nil buffers
+```go
+var (
+	// both buffers are uninitialized
+	dst []byte
+	src []byte
+)
+dst = append(dst, src...) // is legal if dst is nil and/or src is nil
+copy(dst, src) // is legal if dst is nil and/or src is nil
+(string(src) == "") // is true if src is nil
+(len(src) == 0) // is true if src is nil
+src = src[:0] // works like a charm with nil src
+
+// this for loop doesn't panic if src is nil
+for i, ch := range src {
+	doSomething(i, ch)
+}
+```
+
+So throw away nil checks for `[]byte` buffers from your code. For example,
+```go
+srcLen := 0
+if src != nil {
+	srcLen = len(src)
+}
+```
+
+becomes
+
+```go
+srcLen := len(src)
+```
+
+* A string may be appended to a `[]byte` buffer with `append`
+```go
+dst = append(dst, "foobar"...)
+```
+
+* A `[]byte` buffer may be extended up to its capacity.
+```go
+buf := make([]byte, 100)
+a := buf[:10] // len(a) == 10, cap(a) == 100.
+b := a[:100] // is valid, since cap(a) == 100.
+```
+
+* All fasthttp functions accept nil `[]byte` buffers
+```go
+statusCode, body, err := fasthttp.Get(nil, "http://google.com/")
+uintBuf := fasthttp.AppendUint(nil, 1234)
+```
+
+# Related projects
+
+  * [fasthttp-contrib](https://github.com/fasthttp-contrib) - various useful
+    helpers for projects based on fasthttp.
+  * [iris](https://github.com/kataras/iris) - web application framework built
+    on top of fasthttp. Features speed and functionality.
+  * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) - fast and
+    powerful routing package for fasthttp servers.
+  * [fasthttprouter](https://github.com/buaazp/fasthttprouter) - a high-performance
+    fasthttp request router that scales well.
+  * [lu](https://github.com/vincentLiuxiang/lu) - a high-performance Go
+    middleware web framework based on fasthttp.
+  * [websocket](https://github.com/leavengood/websocket) - Gorilla-based
+    websocket implementation for fasthttp.
+
+
+# FAQ
+
+* *Why create yet another http package instead of optimizing net/http?*
+
+  Because the net/http API limits many optimization opportunities.
+  For example:
+  * The net/http Request object's lifetime isn't limited by the request handler's
+    execution time, so the server must create a new request object for each
+    request instead of reusing existing objects as fasthttp does.
+  * net/http headers are stored in a `map[string][]string`. So the server
+    must parse all the headers, convert them from `[]byte` to `string` and put
+    them into the map before calling the user-provided request handler.
+    All this requires unnecessary memory allocations, which fasthttp avoids.
+  * The net/http client API requires creating a new response object for each request.
+
+* *Why is the fasthttp API incompatible with net/http?*
+
+  Because the net/http API limits many optimization opportunities. See the answer
+  above for more details. Also, certain parts of the net/http API are suboptimal:
+  * Compare [net/http connection hijacking](https://golang.org/pkg/net/http/#Hijacker)
+    to [fasthttp connection hijacking](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack).
+  * Compare [net/http Request.Body reading](https://golang.org/pkg/net/http/#Request)
+    to [fasthttp request body reading](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostBody).
+
+* *Why doesn't fasthttp support HTTP/2.0 and WebSockets?*
+
+  There are [plans](TODO) for adding HTTP/2.0 and WebSockets support
+  in the future.
+  In the meantime, third parties may use [RequestCtx.Hijack](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack)
+  for implementing these goodies. See [the first third-party websocket implementation on top of fasthttp](https://github.com/leavengood/websocket).
+
+* *Are there known net/http advantages compared to fasthttp?*
+
+  Yes:
+  * net/http supports [HTTP/2.0 starting from go1.6](https://http2.golang.org/).
+  * The net/http API is stable, while the fasthttp API constantly evolves.
+  * net/http handles more HTTP corner cases.
+  * net/http should contain fewer bugs, since it is used and tested by a much
+    wider audience.
+  * net/http works on Go older than 1.5.
+
+* *Why does the fasthttp API prefer returning `[]byte` instead of `string`?*
+
+  Because `[]byte` to `string` conversion isn't free - it requires a memory
+  allocation and a copy. Feel free to wrap a returned `[]byte` result in
+  `string()` if you prefer working with strings instead of byte slices.
+  But be aware that this has non-zero overhead.
+
+* *Which Go versions are supported by fasthttp?*
+
+  Go 1.5+. Older versions won't be supported, since their standard library
+  [misses useful functions](https://github.com/valyala/fasthttp/issues/5).
+
+* *Please provide real benchmark data and server information*
+
+  See [this issue](https://github.com/valyala/fasthttp/issues/4).
+
+* *Are there plans to add request routing to fasthttp?*
+
+  There are no plans to add request routing to fasthttp.
+  Use third-party routers and web frameworks with fasthttp support:
+
+  * [Iris](https://github.com/kataras/iris)
+  * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing)
+  * [fasthttprouter](https://github.com/buaazp/fasthttprouter)
+  * [lu](https://github.com/vincentLiuxiang/lu)
+
+  See also [this issue](https://github.com/valyala/fasthttp/issues/9) for more info.
+
+* *I detected a data race in fasthttp!*
+
+  Cool! [File a bug](https://github.com/valyala/fasthttp/issues/new). But before
+  doing so, check the following in your code:
+
+  * Make sure there are no references to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx)
+    or to its members after returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
+  * Make sure you call [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError)
+    before returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler)
+    if there are references to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx)
+    or to its members, which may be accessed by other goroutines.
+
+* *I didn't find an answer to my question here*
+
+  Try exploring [these questions](https://github.com/valyala/fasthttp/issues?q=label%3Aquestion).
diff --git a/vendor/github.com/valyala/fasthttp/TODO b/vendor/github.com/valyala/fasthttp/TODO
new file mode 100644
index 0000000..ce7505f
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/TODO
@@ -0,0 +1,4 @@
+- SessionClient with referer and cookies support.
+- ProxyHandler similar to FSHandler.
+- WebSockets. See https://tools.ietf.org/html/rfc6455 .
+- HTTP/2.0. See https://tools.ietf.org/html/rfc7540 .
diff --git a/vendor/github.com/valyala/fasthttp/args.go b/vendor/github.com/valyala/fasthttp/args.go
new file mode 100644
index 0000000..951028b
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/args.go
@@ -0,0 +1,482 @@
+package fasthttp
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"sync"
+)
+
+// AcquireArgs returns an empty Args object from the pool.
+//
+// The returned Args may be returned to the pool with ReleaseArgs
+// when no longer needed. This allows reducing GC load.
+func AcquireArgs() *Args {
+	return argsPool.Get().(*Args)
+}
+
+// ReleaseArgs returns the object acquired via AcquireArgs to the pool.
+//
+// Do not access the released Args object, otherwise data races may occur.
+func ReleaseArgs(a *Args) {
+	a.Reset()
+	argsPool.Put(a)
+}
+
+var argsPool = &sync.Pool{
+	New: func() interface{} {
+		return &Args{}
+	},
+}
+
+// Args represents query arguments.
+//
+// Copying Args instances is forbidden. Create new instances
+// and use CopyTo() instead.
+//
+// Args instances MUST NOT be used from concurrently running goroutines.
+type Args struct {
+	noCopy noCopy
+
+	args []argsKV
+	buf  []byte
+}
+
+type argsKV struct {
+	key   []byte
+	value []byte
+}
+
+// Reset clears query args.
+func (a *Args) Reset() {
+	a.args = a.args[:0]
+}
+
+// CopyTo copies all args to dst.
+func (a *Args) CopyTo(dst *Args) {
+	dst.Reset()
+	dst.args = copyArgs(dst.args, a.args)
+}
+
+// VisitAll calls f for each existing arg.
+//
+// f must not retain references to key and value after returning.
+// Make key and/or value copies if you need to store them after returning.
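+//
+// A usage sketch (illustrative addition, not part of the original source):
+//
+//	var totalValueLen int
+//	a.VisitAll(func(key, value []byte) {
+//		totalValueLen += len(value) // only lengths are stored, no references retained
+//	})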
+func (a *Args) VisitAll(f func(key, value []byte)) { + visitArgs(a.args, f) +} + +// Len returns the number of query args. +func (a *Args) Len() int { + return len(a.args) +} + +// Parse parses the given string containing query args. +func (a *Args) Parse(s string) { + a.buf = append(a.buf[:0], s...) + a.ParseBytes(a.buf) +} + +// ParseBytes parses the given b containing query args. +func (a *Args) ParseBytes(b []byte) { + a.Reset() + + var s argsScanner + s.b = b + + var kv *argsKV + a.args, kv = allocArg(a.args) + for s.next(kv) { + if len(kv.key) > 0 || len(kv.value) > 0 { + a.args, kv = allocArg(a.args) + } + } + a.args = releaseArg(a.args) +} + +// String returns string representation of query args. +func (a *Args) String() string { + return string(a.QueryString()) +} + +// QueryString returns query string for the args. +// +// The returned value is valid until the next call to Args methods. +func (a *Args) QueryString() []byte { + a.buf = a.AppendBytes(a.buf[:0]) + return a.buf +} + +// AppendBytes appends query string to dst and returns the extended dst. +func (a *Args) AppendBytes(dst []byte) []byte { + for i, n := 0, len(a.args); i < n; i++ { + kv := &a.args[i] + dst = AppendQuotedArg(dst, kv.key) + if len(kv.value) > 0 { + dst = append(dst, '=') + dst = AppendQuotedArg(dst, kv.value) + } + if i+1 < n { + dst = append(dst, '&') + } + } + return dst +} + +// WriteTo writes query string to w. +// +// WriteTo implements io.WriterTo interface. +func (a *Args) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(a.QueryString()) + return int64(n), err +} + +// Del deletes argument with the given key from query args. +func (a *Args) Del(key string) { + a.args = delAllArgs(a.args, key) +} + +// DelBytes deletes argument with the given key from query args. +func (a *Args) DelBytes(key []byte) { + a.args = delAllArgs(a.args, b2s(key)) +} + +// Add adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) Add(key, value string) { + a.args = appendArg(a.args, key, value) +} + +// AddBytesK adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) AddBytesK(key []byte, value string) { + a.args = appendArg(a.args, b2s(key), value) +} + +// AddBytesV adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) AddBytesV(key string, value []byte) { + a.args = appendArg(a.args, key, b2s(value)) +} + +// AddBytesKV adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) AddBytesKV(key, value []byte) { + a.args = appendArg(a.args, b2s(key), b2s(value)) +} + +// Set sets 'key=value' argument. +func (a *Args) Set(key, value string) { + a.args = setArg(a.args, key, value) +} + +// SetBytesK sets 'key=value' argument. +func (a *Args) SetBytesK(key []byte, value string) { + a.args = setArg(a.args, b2s(key), value) +} + +// SetBytesV sets 'key=value' argument. +func (a *Args) SetBytesV(key string, value []byte) { + a.args = setArg(a.args, key, b2s(value)) +} + +// SetBytesKV sets 'key=value' argument. +func (a *Args) SetBytesKV(key, value []byte) { + a.args = setArgBytes(a.args, key, value) +} + +// Peek returns query arg value for the given key. +// +// Returned value is valid until the next Args call. +func (a *Args) Peek(key string) []byte { + return peekArgStr(a.args, key) +} + +// PeekBytes returns query arg value for the given key. +// +// Returned value is valid until the next Args call. 
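+//
+// A usage sketch (illustrative addition, not part of the original source):
+//
+//	v := a.PeekBytes([]byte("key")) // v is nil if "key" is missing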
+func (a *Args) PeekBytes(key []byte) []byte { + return peekArgBytes(a.args, key) +} + +// PeekMulti returns all the arg values for the given key. +func (a *Args) PeekMulti(key string) [][]byte { + var values [][]byte + a.VisitAll(func(k, v []byte) { + if string(k) == key { + values = append(values, v) + } + }) + return values +} + +// PeekMultiBytes returns all the arg values for the given key. +func (a *Args) PeekMultiBytes(key []byte) [][]byte { + return a.PeekMulti(b2s(key)) +} + +// Has returns true if the given key exists in Args. +func (a *Args) Has(key string) bool { + return hasArg(a.args, key) +} + +// HasBytes returns true if the given key exists in Args. +func (a *Args) HasBytes(key []byte) bool { + return hasArg(a.args, b2s(key)) +} + +// ErrNoArgValue is returned when Args value with the given key is missing. +var ErrNoArgValue = errors.New("no Args value for the given key") + +// GetUint returns uint value for the given key. +func (a *Args) GetUint(key string) (int, error) { + value := a.Peek(key) + if len(value) == 0 { + return -1, ErrNoArgValue + } + return ParseUint(value) +} + +// SetUint sets uint value for the given key. +func (a *Args) SetUint(key string, value int) { + bb := AcquireByteBuffer() + bb.B = AppendUint(bb.B[:0], value) + a.SetBytesV(key, bb.B) + ReleaseByteBuffer(bb) +} + +// SetUintBytes sets uint value for the given key. +func (a *Args) SetUintBytes(key []byte, value int) { + a.SetUint(b2s(key), value) +} + +// GetUintOrZero returns uint value for the given key. +// +// Zero (0) is returned on error. +func (a *Args) GetUintOrZero(key string) int { + n, err := a.GetUint(key) + if err != nil { + n = 0 + } + return n +} + +// GetUfloat returns ufloat value for the given key. +func (a *Args) GetUfloat(key string) (float64, error) { + value := a.Peek(key) + if len(value) == 0 { + return -1, ErrNoArgValue + } + return ParseUfloat(value) +} + +// GetUfloatOrZero returns ufloat value for the given key. +// +// Zero (0) is returned on error. +func (a *Args) GetUfloatOrZero(key string) float64 { + f, err := a.GetUfloat(key) + if err != nil { + f = 0 + } + return f +} + +// GetBool returns boolean value for the given key. +// +// true is returned for '1', 'y' and 'yes' values, +// otherwise false is returned. +func (a *Args) GetBool(key string) bool { + switch string(a.Peek(key)) { + case "1", "y", "yes": + return true + default: + return false + } +} + +func visitArgs(args []argsKV, f func(k, v []byte)) { + for i, n := 0, len(args); i < n; i++ { + kv := &args[i] + f(kv.key, kv.value) + } +} + +func copyArgs(dst, src []argsKV) []argsKV { + if cap(dst) < len(src) { + tmp := make([]argsKV, len(src)) + copy(tmp, dst) + dst = tmp + } + n := len(src) + dst = dst[:n] + for i := 0; i < n; i++ { + dstKV := &dst[i] + srcKV := &src[i] + dstKV.key = append(dstKV.key[:0], srcKV.key...) + dstKV.value = append(dstKV.value[:0], srcKV.value...) 
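+		// Both appends above reuse dstKV's existing backing arrays
+		// (appending into key[:0] and value[:0]), so recycled dst
+		// entries are copied without fresh allocations.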
+ } + return dst +} + +func delAllArgsBytes(args []argsKV, key []byte) []argsKV { + return delAllArgs(args, b2s(key)) +} + +func delAllArgs(args []argsKV, key string) []argsKV { + for i, n := 0, len(args); i < n; i++ { + kv := &args[i] + if key == string(kv.key) { + tmp := *kv + copy(args[i:], args[i+1:]) + n-- + args[n] = tmp + args = args[:n] + } + } + return args +} + +func setArgBytes(h []argsKV, key, value []byte) []argsKV { + return setArg(h, b2s(key), b2s(value)) +} + +func setArg(h []argsKV, key, value string) []argsKV { + n := len(h) + for i := 0; i < n; i++ { + kv := &h[i] + if key == string(kv.key) { + kv.value = append(kv.value[:0], value...) + return h + } + } + return appendArg(h, key, value) +} + +func appendArgBytes(h []argsKV, key, value []byte) []argsKV { + return appendArg(h, b2s(key), b2s(value)) +} + +func appendArg(args []argsKV, key, value string) []argsKV { + var kv *argsKV + args, kv = allocArg(args) + kv.key = append(kv.key[:0], key...) + kv.value = append(kv.value[:0], value...) + return args +} + +func allocArg(h []argsKV) ([]argsKV, *argsKV) { + n := len(h) + if cap(h) > n { + h = h[:n+1] + } else { + h = append(h, argsKV{}) + } + return h, &h[n] +} + +func releaseArg(h []argsKV) []argsKV { + return h[:len(h)-1] +} + +func hasArg(h []argsKV, key string) bool { + for i, n := 0, len(h); i < n; i++ { + kv := &h[i] + if key == string(kv.key) { + return true + } + } + return false +} + +func peekArgBytes(h []argsKV, k []byte) []byte { + for i, n := 0, len(h); i < n; i++ { + kv := &h[i] + if bytes.Equal(kv.key, k) { + return kv.value + } + } + return nil +} + +func peekArgStr(h []argsKV, k string) []byte { + for i, n := 0, len(h); i < n; i++ { + kv := &h[i] + if string(kv.key) == k { + return kv.value + } + } + return nil +} + +type argsScanner struct { + b []byte +} + +func (s *argsScanner) next(kv *argsKV) bool { + if len(s.b) == 0 { + return false + } + + isKey := true + k := 0 + for i, c := range s.b { + switch c { + case '=': + if isKey { + isKey = false + kv.key = decodeArg(kv.key, s.b[:i], true) + k = i + 1 + } + case '&': + if isKey { + kv.key = decodeArg(kv.key, s.b[:i], true) + kv.value = kv.value[:0] + } else { + kv.value = decodeArg(kv.value, s.b[k:i], true) + } + s.b = s.b[i+1:] + return true + } + } + + if isKey { + kv.key = decodeArg(kv.key, s.b, true) + kv.value = kv.value[:0] + } else { + kv.value = decodeArg(kv.value, s.b[k:], true) + } + s.b = s.b[len(s.b):] + return true +} + +func decodeArg(dst, src []byte, decodePlus bool) []byte { + return decodeArgAppend(dst[:0], src, decodePlus) +} + +func decodeArgAppend(dst, src []byte, decodePlus bool) []byte { + for i, n := 0, len(src); i < n; i++ { + c := src[i] + if c == '%' { + if i+2 >= n { + return append(dst, src[i:]...) + } + x1 := hexbyte2int(src[i+1]) + x2 := hexbyte2int(src[i+2]) + if x1 < 0 || x2 < 0 { + dst = append(dst, c) + } else { + dst = append(dst, byte(x1<<4|x2)) + i += 2 + } + } else if decodePlus && c == '+' { + dst = append(dst, ' ') + } else { + dst = append(dst, c) + } + } + return dst +} diff --git a/vendor/github.com/valyala/fasthttp/bytebuffer.go b/vendor/github.com/valyala/fasthttp/bytebuffer.go new file mode 100644 index 0000000..f965172 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/bytebuffer.go @@ -0,0 +1,64 @@ +package fasthttp + +import ( + "github.com/valyala/bytebufferpool" +) + +// ByteBuffer provides byte buffer, which can be used with fasthttp API +// in order to minimize memory allocations. 
+//
+// ByteBuffer may be used with functions appending data to the given []byte
+// slice. See example code for details.
+//
+// Use AcquireByteBuffer for obtaining an empty byte buffer.
+//
+// ByteBuffer is deprecated. Use github.com/valyala/bytebufferpool instead.
+type ByteBuffer bytebufferpool.ByteBuffer
+
+// Write implements io.Writer - it appends p to ByteBuffer.B
+func (b *ByteBuffer) Write(p []byte) (int, error) {
+	return bb(b).Write(p)
+}
+
+// WriteString appends s to ByteBuffer.B
+func (b *ByteBuffer) WriteString(s string) (int, error) {
+	return bb(b).WriteString(s)
+}
+
+// Set sets ByteBuffer.B to p
+func (b *ByteBuffer) Set(p []byte) {
+	bb(b).Set(p)
+}
+
+// SetString sets ByteBuffer.B to s
+func (b *ByteBuffer) SetString(s string) {
+	bb(b).SetString(s)
+}
+
+// Reset makes ByteBuffer.B empty.
+func (b *ByteBuffer) Reset() {
+	bb(b).Reset()
+}
+
+// AcquireByteBuffer returns an empty byte buffer from the pool.
+//
+// Acquired byte buffer may be returned to the pool via ReleaseByteBuffer call.
+// This reduces the number of memory allocations required for byte buffer
+// management.
+func AcquireByteBuffer() *ByteBuffer {
+	return (*ByteBuffer)(defaultByteBufferPool.Get())
+}
+
+// ReleaseByteBuffer returns byte buffer to the pool.
+//
+// ByteBuffer.B mustn't be touched after returning it to the pool.
+// Otherwise data races occur.
+func ReleaseByteBuffer(b *ByteBuffer) {
+	defaultByteBufferPool.Put(bb(b))
+}
+
+func bb(b *ByteBuffer) *bytebufferpool.ByteBuffer {
+	return (*bytebufferpool.ByteBuffer)(b)
+}
+
+var defaultByteBufferPool bytebufferpool.Pool
diff --git a/vendor/github.com/valyala/fasthttp/bytesconv.go b/vendor/github.com/valyala/fasthttp/bytesconv.go
new file mode 100644
index 0000000..13ce724
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/bytesconv.go
@@ -0,0 +1,422 @@
+package fasthttp
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"reflect"
+	"sync"
+	"time"
+	"unsafe"
+)
+
+// AppendHTMLEscape appends html-escaped s to dst and returns the extended dst.
+func AppendHTMLEscape(dst []byte, s string) []byte {
+	var prev int
+	var sub string
+	for i, n := 0, len(s); i < n; i++ {
+		sub = ""
+		switch s[i] {
+		case '<':
+			sub = "&lt;"
+		case '>':
+			sub = "&gt;"
+		case '"':
+			sub = "&quot;"
+		case '\'':
+			sub = "&#39;"
+		}
+		if len(sub) > 0 {
+			dst = append(dst, s[prev:i]...)
+			dst = append(dst, sub...)
+			prev = i + 1
+		}
+	}
+	return append(dst, s[prev:]...)
+}
+
+// AppendHTMLEscapeBytes appends html-escaped s to dst and returns
+// the extended dst.
+func AppendHTMLEscapeBytes(dst, s []byte) []byte {
+	return AppendHTMLEscape(dst, b2s(s))
+}
+
+// AppendIPv4 appends the string representation of the given IPv4 address
+// to dst and returns the extended dst.
+func AppendIPv4(dst []byte, ip net.IP) []byte {
+	ip = ip.To4()
+	if ip == nil {
+		return append(dst, "non-v4 ip passed to AppendIPv4"...)
+	}
+
+	dst = AppendUint(dst, int(ip[0]))
+	for i := 1; i < 4; i++ {
+		dst = append(dst, '.')
+		dst = AppendUint(dst, int(ip[i]))
+	}
+	return dst
+}
+
+var errEmptyIPStr = errors.New("empty ip address string")
+
+// ParseIPv4 parses an ip address from ipStr into dst and returns the extended dst.
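+//
+// A usage sketch (illustrative addition, not part of the original source):
+//
+//	ip, err := ParseIPv4(nil, []byte("127.0.0.1"))
+//	if err == nil {
+//		fmt.Println(ip) // prints "127.0.0.1"
+//	}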
+func ParseIPv4(dst net.IP, ipStr []byte) (net.IP, error) { + if len(ipStr) == 0 { + return dst, errEmptyIPStr + } + if len(dst) < net.IPv4len { + dst = make([]byte, net.IPv4len) + } + copy(dst, net.IPv4zero) + dst = dst.To4() + if dst == nil { + panic("BUG: dst must not be nil") + } + + b := ipStr + for i := 0; i < 3; i++ { + n := bytes.IndexByte(b, '.') + if n < 0 { + return dst, fmt.Errorf("cannot find dot in ipStr %q", ipStr) + } + v, err := ParseUint(b[:n]) + if err != nil { + return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err) + } + if v > 255 { + return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v) + } + dst[i] = byte(v) + b = b[n+1:] + } + v, err := ParseUint(b) + if err != nil { + return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err) + } + if v > 255 { + return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v) + } + dst[3] = byte(v) + + return dst, nil +} + +// AppendHTTPDate appends HTTP-compliant (RFC1123) representation of date +// to dst and returns the extended dst. +func AppendHTTPDate(dst []byte, date time.Time) []byte { + dst = date.In(time.UTC).AppendFormat(dst, time.RFC1123) + copy(dst[len(dst)-3:], strGMT) + return dst +} + +// ParseHTTPDate parses HTTP-compliant (RFC1123) date. +func ParseHTTPDate(date []byte) (time.Time, error) { + return time.Parse(time.RFC1123, b2s(date)) +} + +// AppendUint appends n to dst and returns the extended dst. +func AppendUint(dst []byte, n int) []byte { + if n < 0 { + panic("BUG: int must be positive") + } + + var b [20]byte + buf := b[:] + i := len(buf) + var q int + for n >= 10 { + i-- + q = n / 10 + buf[i] = '0' + byte(n-q*10) + n = q + } + i-- + buf[i] = '0' + byte(n) + + dst = append(dst, buf[i:]...) + return dst +} + +// ParseUint parses uint from buf. +func ParseUint(buf []byte) (int, error) { + v, n, err := parseUintBuf(buf) + if n != len(buf) { + return -1, errUnexpectedTrailingChar + } + return v, err +} + +var ( + errEmptyInt = errors.New("empty integer") + errUnexpectedFirstChar = errors.New("unexpected first char found. Expecting 0-9") + errUnexpectedTrailingChar = errors.New("unexpected traling char found. Expecting 0-9") + errTooLongInt = errors.New("too long int") +) + +func parseUintBuf(b []byte) (int, int, error) { + n := len(b) + if n == 0 { + return -1, 0, errEmptyInt + } + v := 0 + for i := 0; i < n; i++ { + c := b[i] + k := c - '0' + if k > 9 { + if i == 0 { + return -1, i, errUnexpectedFirstChar + } + return v, i, nil + } + if i >= maxIntChars { + return -1, i, errTooLongInt + } + v = 10*v + int(k) + } + return v, n, nil +} + +var ( + errEmptyFloat = errors.New("empty float number") + errDuplicateFloatPoint = errors.New("duplicate point found in float number") + errUnexpectedFloatEnd = errors.New("unexpected end of float number") + errInvalidFloatExponent = errors.New("invalid float number exponent") + errUnexpectedFloatChar = errors.New("unexpected char found in float number") +) + +// ParseUfloat parses unsigned float from buf. +func ParseUfloat(buf []byte) (float64, error) { + if len(buf) == 0 { + return -1, errEmptyFloat + } + b := buf + var v uint64 + var offset = 1.0 + var pointFound bool + for i, c := range b { + if c < '0' || c > '9' { + if c == '.' 
{ + if pointFound { + return -1, errDuplicateFloatPoint + } + pointFound = true + continue + } + if c == 'e' || c == 'E' { + if i+1 >= len(b) { + return -1, errUnexpectedFloatEnd + } + b = b[i+1:] + minus := -1 + switch b[0] { + case '+': + b = b[1:] + minus = 1 + case '-': + b = b[1:] + default: + minus = 1 + } + vv, err := ParseUint(b) + if err != nil { + return -1, errInvalidFloatExponent + } + return float64(v) * offset * math.Pow10(minus*int(vv)), nil + } + return -1, errUnexpectedFloatChar + } + v = 10*v + uint64(c-'0') + if pointFound { + offset /= 10 + } + } + return float64(v) * offset, nil +} + +var ( + errEmptyHexNum = errors.New("empty hex number") + errTooLargeHexNum = errors.New("too large hex number") +) + +func readHexInt(r *bufio.Reader) (int, error) { + n := 0 + i := 0 + var k int + for { + c, err := r.ReadByte() + if err != nil { + if err == io.EOF && i > 0 { + return n, nil + } + return -1, err + } + k = hexbyte2int(c) + if k < 0 { + if i == 0 { + return -1, errEmptyHexNum + } + r.UnreadByte() + return n, nil + } + if i >= maxHexIntChars { + return -1, errTooLargeHexNum + } + n = (n << 4) | k + i++ + } +} + +var hexIntBufPool sync.Pool + +func writeHexInt(w *bufio.Writer, n int) error { + if n < 0 { + panic("BUG: int must be positive") + } + + v := hexIntBufPool.Get() + if v == nil { + v = make([]byte, maxHexIntChars+1) + } + buf := v.([]byte) + i := len(buf) - 1 + for { + buf[i] = int2hexbyte(n & 0xf) + n >>= 4 + if n == 0 { + break + } + i-- + } + _, err := w.Write(buf[i:]) + hexIntBufPool.Put(v) + return err +} + +func int2hexbyte(n int) byte { + if n < 10 { + return '0' + byte(n) + } + return 'a' + byte(n) - 10 +} + +func hexCharUpper(c byte) byte { + if c < 10 { + return '0' + c + } + return c - 10 + 'A' +} + +var hex2intTable = func() []byte { + b := make([]byte, 255) + for i := byte(0); i < 255; i++ { + c := byte(0) + if i >= '0' && i <= '9' { + c = 1 + i - '0' + } else if i >= 'a' && i <= 'f' { + c = 1 + i - 'a' + 10 + } else if i >= 'A' && i <= 'F' { + c = 1 + i - 'A' + 10 + } + b[i] = c + } + return b +}() + +func hexbyte2int(c byte) int { + return int(hex2intTable[c]) - 1 +} + +const toLower = 'a' - 'A' + +func uppercaseByte(p *byte) { + c := *p + if c >= 'a' && c <= 'z' { + *p = c - toLower + } +} + +func lowercaseByte(p *byte) { + c := *p + if c >= 'A' && c <= 'Z' { + *p = c + toLower + } +} + +func lowercaseBytes(b []byte) { + for i, n := 0, len(b); i < n; i++ { + lowercaseByte(&b[i]) + } +} + +// b2s converts byte slice to a string without memory allocation. +// See https://groups.google.com/forum/#!msg/Golang-Nuts/ENgbUzYvCuU/90yGx7GUAgAJ . +// +// Note it may break if string and/or slice header will change +// in the future go versions. +func b2s(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// s2b converts string to a byte slice without memory allocation. +// +// Note it may break if string and/or slice header will change +// in the future go versions. +func s2b(s string) []byte { + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh := reflect.SliceHeader{ + Data: sh.Data, + Len: sh.Len, + Cap: sh.Len, + } + return *(*[]byte)(unsafe.Pointer(&bh)) +} + +// AppendQuotedArg appends url-encoded src to dst and returns appended dst. +func AppendQuotedArg(dst, src []byte) []byte { + for _, c := range src { + // See http://www.w3.org/TR/html5/forms.html#form-submission-algorithm + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || + c == '*' || c == '-' || c == '.' 
|| c == '_' {
+			dst = append(dst, c)
+		} else {
+			dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
+		}
+	}
+	return dst
+}
+
+func appendQuotedPath(dst, src []byte) []byte {
+	for _, c := range src {
+		if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
+			c == '/' || c == '.' || c == ',' || c == '=' || c == ':' || c == '&' || c == '~' || c == '-' || c == '_' {
+			dst = append(dst, c)
+		} else {
+			dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
+		}
+	}
+	return dst
+}
+
+// EqualBytesStr returns true if string(b) == s.
+//
+// This function has no performance benefits compared to string(b) == s.
+// It is left here for backwards compatibility only.
+//
+// This function is deprecated and may be deleted soon.
+func EqualBytesStr(b []byte, s string) bool {
+	return string(b) == s
+}
+
+// AppendBytesStr appends src to dst and returns the extended dst.
+//
+// This function has no performance benefits compared to append(dst, src...).
+// It is left here for backwards compatibility only.
+//
+// This function is deprecated and may be deleted soon.
+func AppendBytesStr(dst []byte, src string) []byte {
+	return append(dst, src...)
+}
diff --git a/vendor/github.com/valyala/fasthttp/bytesconv_32.go b/vendor/github.com/valyala/fasthttp/bytesconv_32.go
new file mode 100644
index 0000000..1437754
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/bytesconv_32.go
@@ -0,0 +1,8 @@
+// +build !amd64,!arm64,!ppc64
+
+package fasthttp
+
+const (
+	maxIntChars    = 9
+	maxHexIntChars = 7
+)
diff --git a/vendor/github.com/valyala/fasthttp/bytesconv_64.go b/vendor/github.com/valyala/fasthttp/bytesconv_64.go
new file mode 100644
index 0000000..09d07ef
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/bytesconv_64.go
@@ -0,0 +1,8 @@
+// +build amd64 arm64 ppc64
+
+package fasthttp
+
+const (
+	maxIntChars    = 18
+	maxHexIntChars = 15
+)
diff --git a/vendor/github.com/valyala/fasthttp/client.go b/vendor/github.com/valyala/fasthttp/client.go
new file mode 100644
index 0000000..3f49e9e
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/client.go
@@ -0,0 +1,2160 @@
+package fasthttp
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Do performs the given http request and fills the given http response.
+//
+// Request must contain at least non-zero RequestURI with full url (including
+// scheme and host) or non-zero Host header + RequestURI.
+//
+// Client determines the server to be requested in the following order:
+//
+// - from RequestURI if it contains full url with scheme and host;
+// - from Host header otherwise.
+//
+// The function doesn't follow redirects. Use Get* for following redirects.
+//
+// Response is ignored if resp is nil.
+//
+// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections
+// to the requested host are busy.
+//
+// It is recommended obtaining req and resp via AcquireRequest
+// and AcquireResponse in performance-critical code.
+func Do(req *Request, resp *Response) error {
+	return defaultClient.Do(req, resp)
+}
+
+// DoTimeout performs the given request and waits for response during
+// the given timeout duration.
+//
+// Request must contain at least non-zero RequestURI with full url (including
+// scheme and host) or non-zero Host header + RequestURI.
+// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned during +// the given timeout. +// +// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + return defaultClient.DoTimeout(req, resp, timeout) +} + +// DoDeadline performs the given request and waits for response until +// the given deadline. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned until +// the given deadline. +// +// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return defaultClient.DoDeadline(req, resp, deadline) +} + +// Get appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +func Get(dst []byte, url string) (statusCode int, body []byte, err error) { + return defaultClient.Get(dst, url) +} + +// GetTimeout appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// during the given timeout. +func GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) { + return defaultClient.GetTimeout(dst, url, timeout) +} + +// GetDeadline appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// until the given deadline. +func GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) { + return defaultClient.GetDeadline(dst, url, deadline) +} + +// Post sends POST request to the given url with the given POST arguments. +// +// Response body is appended to dst, which is returned as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// Empty POST body is sent if postArgs is nil. 
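+//
+// A usage sketch (illustrative addition; the url is hypothetical):
+//
+//	var args Args
+//	args.Set("a", "Foo Bar")
+//	statusCode, body, err := Post(nil, "http://example.com/", &args)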
+func Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) { + return defaultClient.Post(dst, url, postArgs) +} + +var defaultClient Client + +// Client implements http client. +// +// Copying Client by value is prohibited. Create new instance instead. +// +// It is safe calling Client methods from concurrently running goroutines. +type Client struct { + noCopy noCopy + + // Client name. Used in User-Agent request header. + // + // Default client name is used if not set. + Name string + + // Callback for establishing new connections to hosts. + // + // Default Dial is used if not set. + Dial DialFunc + + // Attempt to connect to both ipv4 and ipv6 addresses if set to true. + // + // This option is used only if default TCP dialer is used, + // i.e. if Dial is blank. + // + // By default client connects only to ipv4 addresses, + // since unfortunately ipv6 remains broken in many networks worldwide :) + DialDualStack bool + + // TLS config for https connections. + // + // Default TLS config is used if not set. + TLSConfig *tls.Config + + // Maximum number of connections per each host which may be established. + // + // DefaultMaxConnsPerHost is used if not set. + MaxConnsPerHost int + + // Idle keep-alive connections are closed after this duration. + // + // By default idle connections are closed + // after DefaultMaxIdleConnDuration. + MaxIdleConnDuration time.Duration + + // Per-connection buffer size for responses' reading. + // This also limits the maximum header size. + // + // Default buffer size is used if 0. + ReadBufferSize int + + // Per-connection buffer size for requests' writing. + // + // Default buffer size is used if 0. + WriteBufferSize int + + // Maximum duration for full response reading (including body). + // + // By default response read timeout is unlimited. + ReadTimeout time.Duration + + // Maximum duration for full request writing (including body). + // + // By default request write timeout is unlimited. + WriteTimeout time.Duration + + // Maximum response body size. + // + // The client returns ErrBodyTooLarge if this limit is greater than 0 + // and response body is greater than the limit. + // + // By default response body size is unlimited. + MaxResponseBodySize int + + // Header names are passed as-is without normalization + // if this option is set. + // + // Disabled header names' normalization may be useful only for proxying + // responses to other clients expecting case-sensitive + // header names. See https://github.com/valyala/fasthttp/issues/57 + // for details. + // + // By default request and response header names are normalized, i.e. + // The first letter and the first letters following dashes + // are uppercased, while all the other letters are lowercased. + // Examples: + // + // * HOST -> Host + // * content-type -> Content-Type + // * cONTENT-lenGTH -> Content-Length + DisableHeaderNamesNormalizing bool + + mLock sync.Mutex + m map[string]*HostClient + ms map[string]*HostClient +} + +// Get appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +func (c *Client) Get(dst []byte, url string) (statusCode int, body []byte, err error) { + return clientGetURL(dst, url, c) +} + +// GetTimeout appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. 
+// +// ErrTimeout error is returned if url contents couldn't be fetched +// during the given timeout. +func (c *Client) GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) { + return clientGetURLTimeout(dst, url, timeout, c) +} + +// GetDeadline appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// until the given deadline. +func (c *Client) GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) { + return clientGetURLDeadline(dst, url, deadline, c) +} + +// Post sends POST request to the given url with the given POST arguments. +// +// Response body is appended to dst, which is returned as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// Empty POST body is sent if postArgs is nil. +func (c *Client) Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) { + return clientPostURL(dst, url, postArgs, c) +} + +// DoTimeout performs the given request and waits for response during +// the given timeout duration. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned during +// the given timeout. +// +// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *Client) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + return clientDoTimeout(req, resp, timeout, c) +} + +// DoDeadline performs the given request and waits for response until +// the given deadline. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned until +// the given deadline. +// +// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections +// to the requested host are busy. +// +/// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *Client) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return clientDoDeadline(req, resp, deadline, c) +} + +// Do performs the given http request and fills the given http response. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. 
+// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// Response is ignored if resp is nil. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *Client) Do(req *Request, resp *Response) error { + uri := req.URI() + host := uri.Host() + + isTLS := false + scheme := uri.Scheme() + if bytes.Equal(scheme, strHTTPS) { + isTLS = true + } else if !bytes.Equal(scheme, strHTTP) { + return fmt.Errorf("unsupported protocol %q. http and https are supported", scheme) + } + + startCleaner := false + + c.mLock.Lock() + m := c.m + if isTLS { + m = c.ms + } + if m == nil { + m = make(map[string]*HostClient) + if isTLS { + c.ms = m + } else { + c.m = m + } + } + hc := m[string(host)] + if hc == nil { + hc = &HostClient{ + Addr: addMissingPort(string(host), isTLS), + Name: c.Name, + Dial: c.Dial, + DialDualStack: c.DialDualStack, + IsTLS: isTLS, + TLSConfig: c.TLSConfig, + MaxConns: c.MaxConnsPerHost, + MaxIdleConnDuration: c.MaxIdleConnDuration, + ReadBufferSize: c.ReadBufferSize, + WriteBufferSize: c.WriteBufferSize, + ReadTimeout: c.ReadTimeout, + WriteTimeout: c.WriteTimeout, + MaxResponseBodySize: c.MaxResponseBodySize, + DisableHeaderNamesNormalizing: c.DisableHeaderNamesNormalizing, + } + m[string(host)] = hc + if len(m) == 1 { + startCleaner = true + } + } + c.mLock.Unlock() + + if startCleaner { + go c.mCleaner(m) + } + + return hc.Do(req, resp) +} + +func (c *Client) mCleaner(m map[string]*HostClient) { + mustStop := false + for { + t := time.Now() + c.mLock.Lock() + for k, v := range m { + if t.Sub(v.LastUseTime()) > time.Minute { + delete(m, k) + } + } + if len(m) == 0 { + mustStop = true + } + c.mLock.Unlock() + + if mustStop { + break + } + time.Sleep(10 * time.Second) + } +} + +// DefaultMaxConnsPerHost is the maximum number of concurrent connections +// http client may establish per host by default (i.e. if +// Client.MaxConnsPerHost isn't set). +const DefaultMaxConnsPerHost = 512 + +// DefaultMaxIdleConnDuration is the default duration before idle keep-alive +// connection is closed. +const DefaultMaxIdleConnDuration = 10 * time.Second + +// DialFunc must establish connection to addr. +// +// There is no need in establishing TLS (SSL) connection for https. +// The client automatically converts connection to TLS +// if HostClient.IsTLS is set. +// +// TCP address passed to DialFunc always contains host and port. +// Example TCP addr values: +// +// - foobar.com:80 +// - foobar.com:443 +// - foobar.com:8080 +type DialFunc func(addr string) (net.Conn, error) + +// HostClient balances http requests among hosts listed in Addr. +// +// HostClient may be used for balancing load among multiple upstream hosts. +// While multiple addresses passed to HostClient.Addr may be used for balancing +// load among them, it would be better using LBClient instead, since HostClient +// may unevenly balance load among upstream hosts. +// +// It is forbidden copying HostClient instances. Create new instances instead. +// +// It is safe calling HostClient methods from concurrently running goroutines. 
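+//
+// A minimal usage sketch (illustrative addition; the address is hypothetical):
+//
+//	hc := &HostClient{
+//		Addr: "example.com:80",
+//	}
+//	statusCode, body, err := hc.Get(nil, "http://example.com/foo")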
+type HostClient struct { + noCopy noCopy + + // Comma-separated list of upstream HTTP server host addresses, + // which are passed to Dial in a round-robin manner. + // + // Each address may contain port if default dialer is used. + // For example, + // + // - foobar.com:80 + // - foobar.com:443 + // - foobar.com:8080 + Addr string + + // Client name. Used in User-Agent request header. + Name string + + // Callback for establishing new connection to the host. + // + // Default Dial is used if not set. + Dial DialFunc + + // Attempt to connect to both ipv4 and ipv6 host addresses + // if set to true. + // + // This option is used only if default TCP dialer is used, + // i.e. if Dial is blank. + // + // By default client connects only to ipv4 addresses, + // since unfortunately ipv6 remains broken in many networks worldwide :) + DialDualStack bool + + // Whether to use TLS (aka SSL or HTTPS) for host connections. + IsTLS bool + + // Optional TLS config. + TLSConfig *tls.Config + + // Maximum number of connections which may be established to all hosts + // listed in Addr. + // + // DefaultMaxConnsPerHost is used if not set. + MaxConns int + + // Keep-alive connections are closed after this duration. + // + // By default connection duration is unlimited. + MaxConnDuration time.Duration + + // Idle keep-alive connections are closed after this duration. + // + // By default idle connections are closed + // after DefaultMaxIdleConnDuration. + MaxIdleConnDuration time.Duration + + // Per-connection buffer size for responses' reading. + // This also limits the maximum header size. + // + // Default buffer size is used if 0. + ReadBufferSize int + + // Per-connection buffer size for requests' writing. + // + // Default buffer size is used if 0. + WriteBufferSize int + + // Maximum duration for full response reading (including body). + // + // By default response read timeout is unlimited. + ReadTimeout time.Duration + + // Maximum duration for full request writing (including body). + // + // By default request write timeout is unlimited. + WriteTimeout time.Duration + + // Maximum response body size. + // + // The client returns ErrBodyTooLarge if this limit is greater than 0 + // and response body is greater than the limit. + // + // By default response body size is unlimited. + MaxResponseBodySize int + + // Header names are passed as-is without normalization + // if this option is set. + // + // Disabled header names' normalization may be useful only for proxying + // responses to other clients expecting case-sensitive + // header names. See https://github.com/valyala/fasthttp/issues/57 + // for details. + // + // By default request and response header names are normalized, i.e. + // The first letter and the first letters following dashes + // are uppercased, while all the other letters are lowercased. 
+ // Examples: + // + // * HOST -> Host + // * content-type -> Content-Type + // * cONTENT-lenGTH -> Content-Length + DisableHeaderNamesNormalizing bool + + clientName atomic.Value + lastUseTime uint32 + + connsLock sync.Mutex + connsCount int + conns []*clientConn + + addrsLock sync.Mutex + addrs []string + addrIdx uint32 + + tlsConfigMap map[string]*tls.Config + tlsConfigMapLock sync.Mutex + + readerPool sync.Pool + writerPool sync.Pool + + pendingRequests uint64 + + connsCleanerRun bool +} + +type clientConn struct { + c net.Conn + + createdTime time.Time + lastUseTime time.Time + + lastReadDeadlineTime time.Time + lastWriteDeadlineTime time.Time +} + +var startTimeUnix = time.Now().Unix() + +// LastUseTime returns time the client was last used +func (c *HostClient) LastUseTime() time.Time { + n := atomic.LoadUint32(&c.lastUseTime) + return time.Unix(startTimeUnix+int64(n), 0) +} + +// Get appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +func (c *HostClient) Get(dst []byte, url string) (statusCode int, body []byte, err error) { + return clientGetURL(dst, url, c) +} + +// GetTimeout appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// during the given timeout. +func (c *HostClient) GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) { + return clientGetURLTimeout(dst, url, timeout, c) +} + +// GetDeadline appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// until the given deadline. +func (c *HostClient) GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) { + return clientGetURLDeadline(dst, url, deadline, c) +} + +// Post sends POST request to the given url with the given POST arguments. +// +// Response body is appended to dst, which is returned as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// Empty POST body is sent if postArgs is nil. 
+func (c *HostClient) Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) { + return clientPostURL(dst, url, postArgs, c) +} + +type clientDoer interface { + Do(req *Request, resp *Response) error +} + +func clientGetURL(dst []byte, url string, c clientDoer) (statusCode int, body []byte, err error) { + req := AcquireRequest() + + statusCode, body, err = doRequestFollowRedirects(req, dst, url, c) + + ReleaseRequest(req) + return statusCode, body, err +} + +func clientGetURLTimeout(dst []byte, url string, timeout time.Duration, c clientDoer) (statusCode int, body []byte, err error) { + deadline := time.Now().Add(timeout) + return clientGetURLDeadline(dst, url, deadline, c) +} + +type clientURLResponse struct { + statusCode int + body []byte + err error +} + +func clientGetURLDeadline(dst []byte, url string, deadline time.Time, c clientDoer) (statusCode int, body []byte, err error) { + timeout := -time.Since(deadline) + if timeout <= 0 { + return 0, dst, ErrTimeout + } + + var ch chan clientURLResponse + chv := clientURLResponseChPool.Get() + if chv == nil { + chv = make(chan clientURLResponse, 1) + } + ch = chv.(chan clientURLResponse) + + req := AcquireRequest() + + // Note that the request continues execution on ErrTimeout until + // client-specific ReadTimeout exceeds. This helps limiting load + // on slow hosts by MaxConns* concurrent requests. + // + // Without this 'hack' the load on slow host could exceed MaxConns* + // concurrent requests, since timed out requests on client side + // usually continue execution on the host. + go func() { + statusCodeCopy, bodyCopy, errCopy := doRequestFollowRedirects(req, dst, url, c) + ch <- clientURLResponse{ + statusCode: statusCodeCopy, + body: bodyCopy, + err: errCopy, + } + }() + + tc := acquireTimer(timeout) + select { + case resp := <-ch: + ReleaseRequest(req) + clientURLResponseChPool.Put(chv) + statusCode = resp.statusCode + body = resp.body + err = resp.err + case <-tc.C: + body = dst + err = ErrTimeout + } + releaseTimer(tc) + + return statusCode, body, err +} + +var clientURLResponseChPool sync.Pool + +func clientPostURL(dst []byte, url string, postArgs *Args, c clientDoer) (statusCode int, body []byte, err error) { + req := AcquireRequest() + req.Header.SetMethodBytes(strPost) + req.Header.SetContentTypeBytes(strPostArgsContentType) + if postArgs != nil { + postArgs.WriteTo(req.BodyWriter()) + } + + statusCode, body, err = doRequestFollowRedirects(req, dst, url, c) + + ReleaseRequest(req) + return statusCode, body, err +} + +var ( + errMissingLocation = errors.New("missing Location header for http redirect") + errTooManyRedirects = errors.New("too many redirects detected when doing the request") +) + +const maxRedirectsCount = 16 + +func doRequestFollowRedirects(req *Request, dst []byte, url string, c clientDoer) (statusCode int, body []byte, err error) { + resp := AcquireResponse() + bodyBuf := resp.bodyBuffer() + resp.keepBodyBuffer = true + oldBody := bodyBuf.B + bodyBuf.B = dst + + redirectsCount := 0 + for { + req.parsedURI = false + req.Header.host = req.Header.host[:0] + req.SetRequestURI(url) + + if err = c.Do(req, resp); err != nil { + break + } + statusCode = resp.Header.StatusCode() + if statusCode != StatusMovedPermanently && statusCode != StatusFound && statusCode != StatusSeeOther { + break + } + + redirectsCount++ + if redirectsCount > maxRedirectsCount { + err = errTooManyRedirects + break + } + location := resp.Header.peek(strLocation) + if len(location) == 0 { + err = errMissingLocation 
+			break
+		}
+		url = getRedirectURL(url, location)
+	}
+
+	body = bodyBuf.B
+	bodyBuf.B = oldBody
+	resp.keepBodyBuffer = false
+	ReleaseResponse(resp)
+
+	return statusCode, body, err
+}
+
+func getRedirectURL(baseURL string, location []byte) string {
+	u := AcquireURI()
+	u.Update(baseURL)
+	u.UpdateBytes(location)
+	redirectURL := u.String()
+	ReleaseURI(u)
+	return redirectURL
+}
+
+var (
+	requestPool  sync.Pool
+	responsePool sync.Pool
+)
+
+// AcquireRequest returns an empty Request instance from the request pool.
+//
+// The returned Request instance may be passed to ReleaseRequest when it is
+// no longer needed. This allows Request recycling, reduces GC pressure
+// and usually improves performance.
+func AcquireRequest() *Request {
+	v := requestPool.Get()
+	if v == nil {
+		return &Request{}
+	}
+	return v.(*Request)
+}
+
+// ReleaseRequest returns req acquired via AcquireRequest to the request pool.
+//
+// It is forbidden to access req and/or its members after returning
+// it to the request pool.
+func ReleaseRequest(req *Request) {
+	req.Reset()
+	requestPool.Put(req)
+}
+
+// AcquireResponse returns an empty Response instance from the response pool.
+//
+// The returned Response instance may be passed to ReleaseResponse when it is
+// no longer needed. This allows Response recycling, reduces GC pressure
+// and usually improves performance.
+func AcquireResponse() *Response {
+	v := responsePool.Get()
+	if v == nil {
+		return &Response{}
+	}
+	return v.(*Response)
+}
+
+// ReleaseResponse returns resp acquired via AcquireResponse to the response pool.
+//
+// It is forbidden to access resp and/or its members after returning
+// it to the response pool.
+func ReleaseResponse(resp *Response) {
+	resp.Reset()
+	responsePool.Put(resp)
+}
+
+// DoTimeout performs the given request and waits for response during
+// the given timeout duration.
+//
+// Request must contain at least non-zero RequestURI with full url (including
+// scheme and host) or non-zero Host header + RequestURI.
+//
+// The function doesn't follow redirects. Use Get* for following redirects.
+//
+// Response is ignored if resp is nil.
+//
+// ErrTimeout is returned if the response wasn't returned during
+// the given timeout.
+//
+// ErrNoFreeConns is returned if all HostClient.MaxConns connections
+// to the host are busy.
+//
+// It is recommended obtaining req and resp via AcquireRequest
+// and AcquireResponse in performance-critical code.
+func (c *HostClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error {
+	return clientDoTimeout(req, resp, timeout, c)
+}
+
+// DoDeadline performs the given request and waits for response until
+// the given deadline.
+//
+// Request must contain at least non-zero RequestURI with full url (including
+// scheme and host) or non-zero Host header + RequestURI.
+//
+// The function doesn't follow redirects. Use Get* for following redirects.
+//
+// Response is ignored if resp is nil.
+//
+// ErrTimeout is returned if the response wasn't returned until
+// the given deadline.
+//
+// ErrNoFreeConns is returned if all HostClient.MaxConns connections
+// to the host are busy.
+//
+// It is recommended obtaining req and resp via AcquireRequest
+// and AcquireResponse in performance-critical code.
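+//
+// A minimal usage sketch (illustrative only; "example.com" is a
+// placeholder host, not part of this package):
+//
+//	req := AcquireRequest()
+//	resp := AcquireResponse()
+//	req.SetRequestURI("http://example.com/")
+//	err := c.DoDeadline(req, resp, time.Now().Add(time.Second))
+//	// on err == nil, inspect resp.StatusCode() and resp.Body() here
+//	ReleaseRequest(req)
+//	ReleaseResponse(resp)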
+func (c *HostClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return clientDoDeadline(req, resp, deadline, c) +} + +func clientDoTimeout(req *Request, resp *Response, timeout time.Duration, c clientDoer) error { + deadline := time.Now().Add(timeout) + return clientDoDeadline(req, resp, deadline, c) +} + +func clientDoDeadline(req *Request, resp *Response, deadline time.Time, c clientDoer) error { + timeout := -time.Since(deadline) + if timeout <= 0 { + return ErrTimeout + } + + var ch chan error + chv := errorChPool.Get() + if chv == nil { + chv = make(chan error, 1) + } + ch = chv.(chan error) + + // Make req and resp copies, since on timeout they no longer + // may be accessed. + reqCopy := AcquireRequest() + req.copyToSkipBody(reqCopy) + swapRequestBody(req, reqCopy) + respCopy := AcquireResponse() + + // Note that the request continues execution on ErrTimeout until + // client-specific ReadTimeout exceeds. This helps limiting load + // on slow hosts by MaxConns* concurrent requests. + // + // Without this 'hack' the load on slow host could exceed MaxConns* + // concurrent requests, since timed out requests on client side + // usually continue execution on the host. + go func() { + ch <- c.Do(reqCopy, respCopy) + }() + + tc := acquireTimer(timeout) + var err error + select { + case err = <-ch: + if resp != nil { + respCopy.copyToSkipBody(resp) + swapResponseBody(resp, respCopy) + } + swapRequestBody(reqCopy, req) + ReleaseResponse(respCopy) + ReleaseRequest(reqCopy) + errorChPool.Put(chv) + case <-tc.C: + err = ErrTimeout + } + releaseTimer(tc) + + return err +} + +var errorChPool sync.Pool + +// Do performs the given http request and sets the corresponding response. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrNoFreeConns is returned if all HostClient.MaxConns connections +// to the host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *HostClient) Do(req *Request, resp *Response) error { + var err error + var retry bool + const maxAttempts = 5 + attempts := 0 + + atomic.AddUint64(&c.pendingRequests, 1) + for { + retry, err = c.do(req, resp) + if err == nil || !retry { + break + } + + if !isIdempotent(req) { + // Retry non-idempotent requests if the server closes + // the connection before sending the response. + // + // This case is possible if the server closes the idle + // keep-alive connection on timeout. + // + // Apache and nginx usually do this. + if err != io.EOF { + break + } + } + attempts++ + if attempts >= maxAttempts { + break + } + } + atomic.AddUint64(&c.pendingRequests, ^uint64(0)) + + if err == io.EOF { + err = ErrConnectionClosed + } + return err +} + +// PendingRequests returns the current number of requests the client +// is executing. +// +// This function may be used for balancing load among multiple HostClient +// instances. 
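+//
+// A minimal balancing sketch (illustrative only; clients is an assumed
+// non-empty []*HostClient maintained by the application):
+//
+//	best := clients[0]
+//	for _, hc := range clients[1:] {
+//		if hc.PendingRequests() < best.PendingRequests() {
+//			best = hc
+//		}
+//	}
+//	err := best.Do(req, resp)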
+func (c *HostClient) PendingRequests() int { + return int(atomic.LoadUint64(&c.pendingRequests)) +} + +func isIdempotent(req *Request) bool { + return req.Header.IsGet() || req.Header.IsHead() || req.Header.IsPut() +} + +func (c *HostClient) do(req *Request, resp *Response) (bool, error) { + nilResp := false + if resp == nil { + nilResp = true + resp = AcquireResponse() + } + + ok, err := c.doNonNilReqResp(req, resp) + + if nilResp { + ReleaseResponse(resp) + } + + return ok, err +} + +func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error) { + if req == nil { + panic("BUG: req cannot be nil") + } + if resp == nil { + panic("BUG: resp cannot be nil") + } + + atomic.StoreUint32(&c.lastUseTime, uint32(CoarseTimeNow().Unix()-startTimeUnix)) + + // Free up resources occupied by response before sending the request, + // so the GC may reclaim these resources (e.g. response body). + resp.Reset() + + cc, err := c.acquireConn() + if err != nil { + return false, err + } + conn := cc.c + + if c.WriteTimeout > 0 { + // Optimization: update write deadline only if more than 25% + // of the last write deadline exceeded. + // See https://github.com/golang/go/issues/15133 for details. + currentTime := CoarseTimeNow() + if currentTime.Sub(cc.lastWriteDeadlineTime) > (c.WriteTimeout >> 2) { + if err = conn.SetWriteDeadline(currentTime.Add(c.WriteTimeout)); err != nil { + c.closeConn(cc) + return true, err + } + cc.lastWriteDeadlineTime = currentTime + } + } + + resetConnection := false + if c.MaxConnDuration > 0 && time.Since(cc.createdTime) > c.MaxConnDuration && !req.ConnectionClose() { + req.SetConnectionClose() + resetConnection = true + } + + userAgentOld := req.Header.UserAgent() + if len(userAgentOld) == 0 { + req.Header.userAgent = c.getClientName() + } + bw := c.acquireWriter(conn) + err = req.Write(bw) + if len(userAgentOld) == 0 { + req.Header.userAgent = userAgentOld + } + + if resetConnection { + req.Header.ResetConnectionClose() + } + + if err == nil { + err = bw.Flush() + } + if err != nil { + c.releaseWriter(bw) + c.closeConn(cc) + return true, err + } + c.releaseWriter(bw) + + if c.ReadTimeout > 0 { + // Optimization: update read deadline only if more than 25% + // of the last read deadline exceeded. + // See https://github.com/golang/go/issues/15133 for details. + currentTime := CoarseTimeNow() + if currentTime.Sub(cc.lastReadDeadlineTime) > (c.ReadTimeout >> 2) { + if err = conn.SetReadDeadline(currentTime.Add(c.ReadTimeout)); err != nil { + c.closeConn(cc) + return true, err + } + cc.lastReadDeadlineTime = currentTime + } + } + + if !req.Header.IsGet() && req.Header.IsHead() { + resp.SkipBody = true + } + if c.DisableHeaderNamesNormalizing { + resp.Header.DisableNormalizing() + } + + br := c.acquireReader(conn) + if err = resp.ReadLimitBody(br, c.MaxResponseBodySize); err != nil { + c.releaseReader(br) + c.closeConn(cc) + return true, err + } + c.releaseReader(br) + + if resetConnection || req.ConnectionClose() || resp.ConnectionClose() { + c.closeConn(cc) + } else { + c.releaseConn(cc) + } + + return false, err +} + +var ( + // ErrNoFreeConns is returned when no free connections available + // to the given host. + // + // Increase the allowed number of connections per host if you + // see this error. + ErrNoFreeConns = errors.New("no free connections available to host") + + // ErrTimeout is returned from timed out calls. 
+ ErrTimeout = errors.New("timeout") + + // ErrConnectionClosed may be returned from client methods if the server + // closes connection before returning the first response byte. + // + // If you see this error, then either fix the server by returning + // 'Connection: close' response header before closing the connection + // or add 'Connection: close' request header before sending requests + // to broken server. + ErrConnectionClosed = errors.New("the server closed connection before returning the first response byte. " + + "Make sure the server returns 'Connection: close' response header before closing the connection") +) + +func (c *HostClient) acquireConn() (*clientConn, error) { + var cc *clientConn + createConn := false + startCleaner := false + + var n int + c.connsLock.Lock() + n = len(c.conns) + if n == 0 { + maxConns := c.MaxConns + if maxConns <= 0 { + maxConns = DefaultMaxConnsPerHost + } + if c.connsCount < maxConns { + c.connsCount++ + createConn = true + if !c.connsCleanerRun { + startCleaner = true + c.connsCleanerRun = true + } + } + } else { + n-- + cc = c.conns[n] + c.conns[n] = nil + c.conns = c.conns[:n] + } + c.connsLock.Unlock() + + if cc != nil { + return cc, nil + } + if !createConn { + return nil, ErrNoFreeConns + } + + if startCleaner { + go c.connsCleaner() + } + + conn, err := c.dialHostHard() + if err != nil { + c.decConnsCount() + return nil, err + } + cc = acquireClientConn(conn) + + return cc, nil +} + +func (c *HostClient) connsCleaner() { + var ( + scratch []*clientConn + maxIdleConnDuration = c.MaxIdleConnDuration + ) + if maxIdleConnDuration <= 0 { + maxIdleConnDuration = DefaultMaxIdleConnDuration + } + for { + currentTime := time.Now() + + // Determine idle connections to be closed. + c.connsLock.Lock() + conns := c.conns + n := len(conns) + i := 0 + for i < n && currentTime.Sub(conns[i].lastUseTime) > maxIdleConnDuration { + i++ + } + scratch = append(scratch[:0], conns[:i]...) + if i > 0 { + m := copy(conns, conns[i:]) + for i = m; i < n; i++ { + conns[i] = nil + } + c.conns = conns[:m] + } + c.connsLock.Unlock() + + // Close idle connections. + for i, cc := range scratch { + c.closeConn(cc) + scratch[i] = nil + } + + // Determine whether to stop the connsCleaner. 
+ c.connsLock.Lock() + mustStop := c.connsCount == 0 + if mustStop { + c.connsCleanerRun = false + } + c.connsLock.Unlock() + if mustStop { + break + } + + time.Sleep(maxIdleConnDuration) + } +} + +func (c *HostClient) closeConn(cc *clientConn) { + c.decConnsCount() + cc.c.Close() + releaseClientConn(cc) +} + +func (c *HostClient) decConnsCount() { + c.connsLock.Lock() + c.connsCount-- + c.connsLock.Unlock() +} + +func acquireClientConn(conn net.Conn) *clientConn { + v := clientConnPool.Get() + if v == nil { + v = &clientConn{} + } + cc := v.(*clientConn) + cc.c = conn + cc.createdTime = CoarseTimeNow() + return cc +} + +func releaseClientConn(cc *clientConn) { + cc.c = nil + clientConnPool.Put(cc) +} + +var clientConnPool sync.Pool + +func (c *HostClient) releaseConn(cc *clientConn) { + cc.lastUseTime = CoarseTimeNow() + c.connsLock.Lock() + c.conns = append(c.conns, cc) + c.connsLock.Unlock() +} + +func (c *HostClient) acquireWriter(conn net.Conn) *bufio.Writer { + v := c.writerPool.Get() + if v == nil { + n := c.WriteBufferSize + if n <= 0 { + n = defaultWriteBufferSize + } + return bufio.NewWriterSize(conn, n) + } + bw := v.(*bufio.Writer) + bw.Reset(conn) + return bw +} + +func (c *HostClient) releaseWriter(bw *bufio.Writer) { + c.writerPool.Put(bw) +} + +func (c *HostClient) acquireReader(conn net.Conn) *bufio.Reader { + v := c.readerPool.Get() + if v == nil { + n := c.ReadBufferSize + if n <= 0 { + n = defaultReadBufferSize + } + return bufio.NewReaderSize(conn, n) + } + br := v.(*bufio.Reader) + br.Reset(conn) + return br +} + +func (c *HostClient) releaseReader(br *bufio.Reader) { + c.readerPool.Put(br) +} + +func newClientTLSConfig(c *tls.Config, addr string) *tls.Config { + if c == nil { + c = &tls.Config{} + } else { + // TODO: substitute this with c.Clone() after go1.8 becomes mainstream :) + c = &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + + // Do not copy ClientAuth, since it is server-related stuff + // Do not copy ClientCAs, since it is server-related stuff + + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + + // Do not copy PreferServerCipherSuites - this is server stuff + + SessionTicketsDisabled: c.SessionTicketsDisabled, + + // Do not copy SessionTicketKey - this is server stuff + + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } + } + + if c.ClientSessionCache == nil { + c.ClientSessionCache = tls.NewLRUClientSessionCache(0) + } + + if len(c.ServerName) == 0 { + serverName := tlsServerName(addr) + if serverName == "*" { + c.InsecureSkipVerify = true + } else { + c.ServerName = serverName + } + } + return c +} + +func tlsServerName(addr string) string { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return "*" + } + return host +} + +func (c *HostClient) nextAddr() string { + c.addrsLock.Lock() + if c.addrs == nil { + c.addrs = strings.Split(c.Addr, ",") + } + addr := c.addrs[0] + if len(c.addrs) > 1 { + addr = c.addrs[c.addrIdx%uint32(len(c.addrs))] + c.addrIdx++ + } + c.addrsLock.Unlock() + return addr +} + +func (c *HostClient) dialHostHard() (conn net.Conn, err error) { + // attempt to dial all the available hosts before giving up. 
+
+	c.addrsLock.Lock()
+	n := len(c.addrs)
+	c.addrsLock.Unlock()
+
+	if n == 0 {
+		// It looks like c.addrs isn't initialized yet.
+		n = 1
+	}
+
+	timeout := c.ReadTimeout + c.WriteTimeout
+	if timeout <= 0 {
+		timeout = DefaultDialTimeout
+	}
+	deadline := time.Now().Add(timeout)
+	for n > 0 {
+		addr := c.nextAddr()
+		tlsConfig := c.cachedTLSConfig(addr)
+		conn, err = dialAddr(addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig)
+		if err == nil {
+			return conn, nil
+		}
+		if time.Since(deadline) >= 0 {
+			break
+		}
+		n--
+	}
+	return nil, err
+}
+
+func (c *HostClient) cachedTLSConfig(addr string) *tls.Config {
+	if !c.IsTLS {
+		return nil
+	}
+
+	c.tlsConfigMapLock.Lock()
+	if c.tlsConfigMap == nil {
+		c.tlsConfigMap = make(map[string]*tls.Config)
+	}
+	cfg := c.tlsConfigMap[addr]
+	if cfg == nil {
+		cfg = newClientTLSConfig(c.TLSConfig, addr)
+		c.tlsConfigMap[addr] = cfg
+	}
+	c.tlsConfigMapLock.Unlock()
+
+	return cfg
+}
+
+func dialAddr(addr string, dial DialFunc, dialDualStack, isTLS bool, tlsConfig *tls.Config) (net.Conn, error) {
+	if dial == nil {
+		if dialDualStack {
+			dial = DialDualStack
+		} else {
+			dial = Dial
+		}
+		addr = addMissingPort(addr, isTLS)
+	}
+	conn, err := dial(addr)
+	if err != nil {
+		return nil, err
+	}
+	if conn == nil {
+		panic("BUG: DialFunc returned (nil, nil)")
+	}
+	if isTLS {
+		conn = tls.Client(conn, tlsConfig)
+	}
+	return conn, nil
+}
+
+func (c *HostClient) getClientName() []byte {
+	v := c.clientName.Load()
+	var clientName []byte
+	if v == nil {
+		clientName = []byte(c.Name)
+		if len(clientName) == 0 {
+			clientName = defaultUserAgent
+		}
+		c.clientName.Store(clientName)
+	} else {
+		clientName = v.([]byte)
+	}
+	return clientName
+}
+
+func addMissingPort(addr string, isTLS bool) string {
+	n := strings.Index(addr, ":")
+	if n >= 0 {
+		return addr
+	}
+	port := 80
+	if isTLS {
+		port = 443
+	}
+	return fmt.Sprintf("%s:%d", addr, port)
+}
+
+// PipelineClient pipelines requests over a limited set of concurrent
+// connections to the given Addr.
+//
+// This client may be used in highly loaded HTTP-based RPC systems for reducing
+// context switches and network level overhead.
+// See https://en.wikipedia.org/wiki/HTTP_pipelining for details.
+//
+// It is forbidden to copy PipelineClient instances. Create new instances
+// instead.
+//
+// It is safe to call PipelineClient methods from concurrently running
+// goroutines.
+type PipelineClient struct {
+	noCopy noCopy
+
+	// Address of the host to connect to.
+	Addr string
+
+	// The maximum number of concurrent connections to the Addr.
+	//
+	// A single connection is used by default.
+	MaxConns int
+
+	// The maximum number of pending pipelined requests over
+	// a single connection to Addr.
+	//
+	// DefaultMaxPendingRequests is used by default.
+	MaxPendingRequests int
+
+	// The maximum delay before sending pipelined requests as a batch
+	// to the server.
+	//
+	// By default requests are sent immediately to the server.
+	MaxBatchDelay time.Duration
+
+	// Callback for establishing connections to the host.
+	//
+	// Default Dial is used if not set.
+	Dial DialFunc
+
+	// Attempt to connect to both ipv4 and ipv6 host addresses
+	// if set to true.
+	//
+	// This option is used only if default TCP dialer is used,
+	// i.e. if Dial is blank.
+	//
+	// By default the client connects only to ipv4 addresses,
+	// since unfortunately ipv6 remains broken in many networks worldwide :)
+	DialDualStack bool
+
+	// Whether to use TLS (aka SSL or HTTPS) for host connections.
+	IsTLS bool
+
+	// Optional TLS config.
+ TLSConfig *tls.Config + + // Idle connection to the host is closed after this duration. + // + // By default idle connection is closed after + // DefaultMaxIdleConnDuration. + MaxIdleConnDuration time.Duration + + // Buffer size for responses' reading. + // This also limits the maximum header size. + // + // Default buffer size is used if 0. + ReadBufferSize int + + // Buffer size for requests' writing. + // + // Default buffer size is used if 0. + WriteBufferSize int + + // Maximum duration for full response reading (including body). + // + // By default response read timeout is unlimited. + ReadTimeout time.Duration + + // Maximum duration for full request writing (including body). + // + // By default request write timeout is unlimited. + WriteTimeout time.Duration + + // Logger for logging client errors. + // + // By default standard logger from log package is used. + Logger Logger + + connClients []*pipelineConnClient + connClientsLock sync.Mutex +} + +type pipelineConnClient struct { + noCopy noCopy + + Addr string + MaxPendingRequests int + MaxBatchDelay time.Duration + Dial DialFunc + DialDualStack bool + IsTLS bool + TLSConfig *tls.Config + MaxIdleConnDuration time.Duration + ReadBufferSize int + WriteBufferSize int + ReadTimeout time.Duration + WriteTimeout time.Duration + Logger Logger + + workPool sync.Pool + + chLock sync.Mutex + chW chan *pipelineWork + chR chan *pipelineWork + + tlsConfigLock sync.Mutex + tlsConfig *tls.Config +} + +type pipelineWork struct { + reqCopy Request + respCopy Response + req *Request + resp *Response + t *time.Timer + deadline time.Time + err error + done chan struct{} +} + +// DoTimeout performs the given request and waits for response during +// the given timeout duration. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned during +// the given timeout. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *PipelineClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + return c.DoDeadline(req, resp, time.Now().Add(timeout)) +} + +// DoDeadline performs the given request and waits for response until +// the given deadline. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned until +// the given deadline. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. 
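+//
+// A minimal usage sketch (illustrative only; pc is an assumed
+// *PipelineClient with Addr already set, "example.com" is a placeholder):
+//
+//	req := AcquireRequest()
+//	resp := AcquireResponse()
+//	req.SetRequestURI("http://example.com/")
+//	err := pc.DoDeadline(req, resp, time.Now().Add(50*time.Millisecond))
+//	// handle err / resp here
+//	ReleaseRequest(req)
+//	ReleaseResponse(resp)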
+func (c *PipelineClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return c.getConnClient().DoDeadline(req, resp, deadline) +} + +func (c *pipelineConnClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + c.init() + + timeout := -time.Since(deadline) + if timeout < 0 { + return ErrTimeout + } + + w := acquirePipelineWork(&c.workPool, timeout) + w.req = &w.reqCopy + w.resp = &w.respCopy + + // Make a copy of the request in order to avoid data races on timeouts + req.copyToSkipBody(&w.reqCopy) + swapRequestBody(req, &w.reqCopy) + + // Put the request to outgoing queue + select { + case c.chW <- w: + // Fast path: len(c.ch) < cap(c.ch) + default: + // Slow path + select { + case c.chW <- w: + case <-w.t.C: + releasePipelineWork(&c.workPool, w) + return ErrTimeout + } + } + + // Wait for the response + var err error + select { + case <-w.done: + if resp != nil { + w.respCopy.copyToSkipBody(resp) + swapResponseBody(resp, &w.respCopy) + } + err = w.err + releasePipelineWork(&c.workPool, w) + case <-w.t.C: + err = ErrTimeout + } + + return err +} + +// Do performs the given http request and sets the corresponding response. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *PipelineClient) Do(req *Request, resp *Response) error { + return c.getConnClient().Do(req, resp) +} + +func (c *pipelineConnClient) Do(req *Request, resp *Response) error { + c.init() + + w := acquirePipelineWork(&c.workPool, 0) + w.req = req + if resp != nil { + w.resp = resp + } else { + w.resp = &w.respCopy + } + + // Put the request to outgoing queue + select { + case c.chW <- w: + default: + // Try substituting the oldest w with the current one. + select { + case wOld := <-c.chW: + wOld.err = ErrPipelineOverflow + wOld.done <- struct{}{} + default: + } + select { + case c.chW <- w: + default: + releasePipelineWork(&c.workPool, w) + return ErrPipelineOverflow + } + } + + // Wait for the response + <-w.done + err := w.err + + releasePipelineWork(&c.workPool, w) + + return err +} + +func (c *PipelineClient) getConnClient() *pipelineConnClient { + c.connClientsLock.Lock() + cc := c.getConnClientUnlocked() + c.connClientsLock.Unlock() + return cc +} + +func (c *PipelineClient) getConnClientUnlocked() *pipelineConnClient { + if len(c.connClients) == 0 { + return c.newConnClient() + } + + // Return the client with the minimum number of pending requests. 
+	minCC := c.connClients[0]
+	minReqs := minCC.PendingRequests()
+	if minReqs == 0 {
+		return minCC
+	}
+	for i := 1; i < len(c.connClients); i++ {
+		cc := c.connClients[i]
+		reqs := cc.PendingRequests()
+		if reqs == 0 {
+			return cc
+		}
+		if reqs < minReqs {
+			minCC = cc
+			minReqs = reqs
+		}
+	}
+
+	maxConns := c.MaxConns
+	if maxConns <= 0 {
+		maxConns = 1
+	}
+	if len(c.connClients) < maxConns {
+		return c.newConnClient()
+	}
+	return minCC
+}
+
+func (c *PipelineClient) newConnClient() *pipelineConnClient {
+	cc := &pipelineConnClient{
+		Addr:                c.Addr,
+		MaxPendingRequests:  c.MaxPendingRequests,
+		MaxBatchDelay:       c.MaxBatchDelay,
+		Dial:                c.Dial,
+		DialDualStack:       c.DialDualStack,
+		IsTLS:               c.IsTLS,
+		TLSConfig:           c.TLSConfig,
+		MaxIdleConnDuration: c.MaxIdleConnDuration,
+		ReadBufferSize:      c.ReadBufferSize,
+		WriteBufferSize:     c.WriteBufferSize,
+		ReadTimeout:         c.ReadTimeout,
+		WriteTimeout:        c.WriteTimeout,
+		Logger:              c.Logger,
+	}
+	c.connClients = append(c.connClients, cc)
+	return cc
+}
+
+// ErrPipelineOverflow may be returned from PipelineClient.Do*
+// if the requests' queue overflows.
+var ErrPipelineOverflow = errors.New("pipelined requests' queue has been overflown. Increase MaxConns and/or MaxPendingRequests")
+
+// DefaultMaxPendingRequests is the default value
+// for PipelineClient.MaxPendingRequests.
+const DefaultMaxPendingRequests = 1024
+
+func (c *pipelineConnClient) init() {
+	c.chLock.Lock()
+	if c.chR == nil {
+		maxPendingRequests := c.MaxPendingRequests
+		if maxPendingRequests <= 0 {
+			maxPendingRequests = DefaultMaxPendingRequests
+		}
+		c.chR = make(chan *pipelineWork, maxPendingRequests)
+		if c.chW == nil {
+			c.chW = make(chan *pipelineWork, maxPendingRequests)
+		}
+		go func() {
+			if err := c.worker(); err != nil {
+				c.logger().Printf("error in PipelineClient(%q): %s", c.Addr, err)
+				if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
+					// Throttle client reconnections on temporary errors
+					time.Sleep(time.Second)
+				}
+			}
+
+			c.chLock.Lock()
+			// Do not reset c.chW to nil, since it may contain
+			// pending requests, which could be served on the next
+			// connection to the host.
+ c.chR = nil + c.chLock.Unlock() + }() + } + c.chLock.Unlock() +} + +func (c *pipelineConnClient) worker() error { + tlsConfig := c.cachedTLSConfig() + conn, err := dialAddr(c.Addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig) + if err != nil { + return err + } + + // Start reader and writer + stopW := make(chan struct{}) + doneW := make(chan error) + go func() { + doneW <- c.writer(conn, stopW) + }() + stopR := make(chan struct{}) + doneR := make(chan error) + go func() { + doneR <- c.reader(conn, stopR) + }() + + // Wait until reader and writer are stopped + select { + case err = <-doneW: + conn.Close() + close(stopR) + <-doneR + case err = <-doneR: + conn.Close() + close(stopW) + <-doneW + } + + // Notify pending readers + for len(c.chR) > 0 { + w := <-c.chR + w.err = errPipelineConnStopped + w.done <- struct{}{} + } + + return err +} + +func (c *pipelineConnClient) cachedTLSConfig() *tls.Config { + if !c.IsTLS { + return nil + } + + c.tlsConfigLock.Lock() + cfg := c.tlsConfig + if cfg == nil { + cfg = newClientTLSConfig(c.TLSConfig, c.Addr) + c.tlsConfig = cfg + } + c.tlsConfigLock.Unlock() + + return cfg +} + +func (c *pipelineConnClient) writer(conn net.Conn, stopCh <-chan struct{}) error { + writeBufferSize := c.WriteBufferSize + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + bw := bufio.NewWriterSize(conn, writeBufferSize) + defer bw.Flush() + chR := c.chR + chW := c.chW + writeTimeout := c.WriteTimeout + + maxIdleConnDuration := c.MaxIdleConnDuration + if maxIdleConnDuration <= 0 { + maxIdleConnDuration = DefaultMaxIdleConnDuration + } + maxBatchDelay := c.MaxBatchDelay + + var ( + stopTimer = time.NewTimer(time.Hour) + flushTimer = time.NewTimer(time.Hour) + flushTimerCh <-chan time.Time + instantTimerCh = make(chan time.Time) + + w *pipelineWork + err error + + lastWriteDeadlineTime time.Time + ) + close(instantTimerCh) + for { + againChW: + select { + case w = <-chW: + // Fast path: len(chW) > 0 + default: + // Slow path + stopTimer.Reset(maxIdleConnDuration) + select { + case w = <-chW: + case <-stopTimer.C: + return nil + case <-stopCh: + return nil + case <-flushTimerCh: + if err = bw.Flush(); err != nil { + return err + } + flushTimerCh = nil + goto againChW + } + } + + if !w.deadline.IsZero() && time.Since(w.deadline) >= 0 { + w.err = ErrTimeout + w.done <- struct{}{} + continue + } + + if writeTimeout > 0 { + // Optimization: update write deadline only if more than 25% + // of the last write deadline exceeded. + // See https://github.com/golang/go/issues/15133 for details. 
+ currentTime := CoarseTimeNow() + if currentTime.Sub(lastWriteDeadlineTime) > (writeTimeout >> 2) { + if err = conn.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + lastWriteDeadlineTime = currentTime + } + } + if err = w.req.Write(bw); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + if flushTimerCh == nil && (len(chW) == 0 || len(chR) == cap(chR)) { + if maxBatchDelay > 0 { + flushTimer.Reset(maxBatchDelay) + flushTimerCh = flushTimer.C + } else { + flushTimerCh = instantTimerCh + } + } + + againChR: + select { + case chR <- w: + // Fast path: len(chR) < cap(chR) + default: + // Slow path + select { + case chR <- w: + case <-stopCh: + w.err = errPipelineConnStopped + w.done <- struct{}{} + return nil + case <-flushTimerCh: + if err = bw.Flush(); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + flushTimerCh = nil + goto againChR + } + } + } +} + +func (c *pipelineConnClient) reader(conn net.Conn, stopCh <-chan struct{}) error { + readBufferSize := c.ReadBufferSize + if readBufferSize <= 0 { + readBufferSize = defaultReadBufferSize + } + br := bufio.NewReaderSize(conn, readBufferSize) + chR := c.chR + readTimeout := c.ReadTimeout + + var ( + w *pipelineWork + err error + + lastReadDeadlineTime time.Time + ) + for { + select { + case w = <-chR: + // Fast path: len(chR) > 0 + default: + // Slow path + select { + case w = <-chR: + case <-stopCh: + return nil + } + } + + if readTimeout > 0 { + // Optimization: update read deadline only if more than 25% + // of the last read deadline exceeded. + // See https://github.com/golang/go/issues/15133 for details. + currentTime := CoarseTimeNow() + if currentTime.Sub(lastReadDeadlineTime) > (readTimeout >> 2) { + if err = conn.SetReadDeadline(currentTime.Add(readTimeout)); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + lastReadDeadlineTime = currentTime + } + } + if err = w.resp.Read(br); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + + w.done <- struct{}{} + } +} + +func (c *pipelineConnClient) logger() Logger { + if c.Logger != nil { + return c.Logger + } + return defaultLogger +} + +// PendingRequests returns the current number of pending requests pipelined +// to the server. +// +// This number may exceed MaxPendingRequests*MaxConns by up to two times, since +// each connection to the server may keep up to MaxPendingRequests requests +// in the queue before sending them to the server. +// +// This function may be used for balancing load among multiple PipelineClient +// instances. 
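+//
+// A minimal backpressure sketch (illustrative only; maxBacklog and
+// errTooBusy are assumed application-level definitions):
+//
+//	if pc.PendingRequests() > maxBacklog {
+//		return errTooBusy // shed load instead of growing the backlog
+//	}
+//	return pc.DoTimeout(req, resp, time.Second)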
+func (c *PipelineClient) PendingRequests() int { + c.connClientsLock.Lock() + n := 0 + for _, cc := range c.connClients { + n += cc.PendingRequests() + } + c.connClientsLock.Unlock() + return n +} + +func (c *pipelineConnClient) PendingRequests() int { + c.init() + + c.chLock.Lock() + n := len(c.chR) + len(c.chW) + c.chLock.Unlock() + return n +} + +var errPipelineConnStopped = errors.New("pipeline connection has been stopped") + +func acquirePipelineWork(pool *sync.Pool, timeout time.Duration) *pipelineWork { + v := pool.Get() + if v == nil { + v = &pipelineWork{ + done: make(chan struct{}, 1), + } + } + w := v.(*pipelineWork) + if timeout > 0 { + if w.t == nil { + w.t = time.NewTimer(timeout) + } else { + w.t.Reset(timeout) + } + w.deadline = time.Now().Add(timeout) + } else { + w.deadline = zeroTime + } + return w +} + +func releasePipelineWork(pool *sync.Pool, w *pipelineWork) { + if w.t != nil { + w.t.Stop() + } + w.reqCopy.Reset() + w.respCopy.Reset() + w.req = nil + w.resp = nil + w.err = nil + pool.Put(w) +} diff --git a/vendor/github.com/valyala/fasthttp/coarseTime.go b/vendor/github.com/valyala/fasthttp/coarseTime.go new file mode 100644 index 0000000..03c14f3 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/coarseTime.go @@ -0,0 +1,28 @@ +package fasthttp + +import ( + "sync/atomic" + "time" +) + +// CoarseTimeNow returns the current time truncated to the nearest second. +// +// This is a faster alternative to time.Now(). +func CoarseTimeNow() time.Time { + tp := coarseTime.Load().(*time.Time) + return *tp +} + +func init() { + t := time.Now().Truncate(time.Second) + coarseTime.Store(&t) + go func() { + for { + time.Sleep(time.Second) + t := time.Now().Truncate(time.Second) + coarseTime.Store(&t) + } + }() +} + +var coarseTime atomic.Value diff --git a/vendor/github.com/valyala/fasthttp/compress.go b/vendor/github.com/valyala/fasthttp/compress.go new file mode 100644 index 0000000..b6869c6 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/compress.go @@ -0,0 +1,291 @@ +package fasthttp + +import ( + "fmt" + "io" + "os" + "sync" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/compress/gzip" + "github.com/klauspost/compress/zlib" + "github.com/valyala/fasthttp/stackless" +) + +// Supported compression levels. 
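+//
+// These levels mirror compress/flate. A usage sketch passing one of them
+// to the gzip helpers defined below (illustrative only; payload is an
+// assumed []byte):
+//
+//	var buf bytes.Buffer
+//	n, err := WriteGzipLevel(&buf, payload, CompressBestSpeed)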
+const ( + CompressNoCompression = flate.NoCompression + CompressBestSpeed = flate.BestSpeed + CompressBestCompression = flate.BestCompression + CompressDefaultCompression = flate.DefaultCompression +) + +func acquireGzipReader(r io.Reader) (*gzip.Reader, error) { + v := gzipReaderPool.Get() + if v == nil { + return gzip.NewReader(r) + } + zr := v.(*gzip.Reader) + if err := zr.Reset(r); err != nil { + return nil, err + } + return zr, nil +} + +func releaseGzipReader(zr *gzip.Reader) { + zr.Close() + gzipReaderPool.Put(zr) +} + +var gzipReaderPool sync.Pool + +func acquireFlateReader(r io.Reader) (io.ReadCloser, error) { + v := flateReaderPool.Get() + if v == nil { + zr, err := zlib.NewReader(r) + if err != nil { + return nil, err + } + return zr, nil + } + zr := v.(io.ReadCloser) + if err := resetFlateReader(zr, r); err != nil { + return nil, err + } + return zr, nil +} + +func releaseFlateReader(zr io.ReadCloser) { + zr.Close() + flateReaderPool.Put(zr) +} + +func resetFlateReader(zr io.ReadCloser, r io.Reader) error { + zrr, ok := zr.(zlib.Resetter) + if !ok { + panic("BUG: zlib.Reader doesn't implement zlib.Resetter???") + } + return zrr.Reset(r, nil) +} + +var flateReaderPool sync.Pool + +func acquireGzipWriter(w io.Writer, level int) *gzipWriter { + p := gzipWriterPoolMap[level] + if p == nil { + panic(fmt.Sprintf("BUG: unexpected compression level passed: %d. See compress/gzip for supported levels", level)) + } + + v := p.Get() + if v == nil { + sw := stackless.NewWriter(w, func(w io.Writer) stackless.Writer { + zw, err := gzip.NewWriterLevel(w, level) + if err != nil { + panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err)) + } + return zw + }) + return &gzipWriter{ + Writer: sw, + p: p, + } + } + zw := v.(*gzipWriter) + zw.Reset(w) + return zw +} + +func releaseGzipWriter(zw *gzipWriter) { + zw.Close() + zw.p.Put(zw) +} + +type gzipWriter struct { + stackless.Writer + p *sync.Pool +} + +var gzipWriterPoolMap = func() map[int]*sync.Pool { + // Initialize pools for all the compression levels defined + // in https://golang.org/pkg/compress/gzip/#pkg-constants . + m := make(map[int]*sync.Pool, 11) + m[-1] = &sync.Pool{} + for i := 0; i < 10; i++ { + m[i] = &sync.Pool{} + } + return m +}() + +// AppendGzipBytesLevel appends gzipped src to dst using the given +// compression level and returns the resulting dst. +// +// Supported compression levels are: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +func AppendGzipBytesLevel(dst, src []byte, level int) []byte { + w := &byteSliceWriter{dst} + WriteGzipLevel(w, src, level) + return w.b +} + +// WriteGzipLevel writes gzipped p to w using the given compression level +// and returns the number of compressed bytes written to w. +// +// Supported compression levels are: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) { + zw := acquireGzipWriter(w, level) + n, err := zw.Write(p) + releaseGzipWriter(zw) + return n, err +} + +// WriteGzip writes gzipped p to w and returns the number of compressed +// bytes written to w. +func WriteGzip(w io.Writer, p []byte) (int, error) { + return WriteGzipLevel(w, p, CompressDefaultCompression) +} + +// AppendGzipBytes appends gzipped src to dst and returns the resulting dst. 
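+//
+// A round-trip sketch using the append-style helpers (illustrative only):
+//
+//	gz := AppendGzipBytes(nil, []byte("hello"))
+//	plain, err := AppendGunzipBytes(nil, gz)
+//	// on success, string(plain) == "hello"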
+func AppendGzipBytes(dst, src []byte) []byte {
+	return AppendGzipBytesLevel(dst, src, CompressDefaultCompression)
+}
+
+// WriteGunzip writes ungzipped p to w and returns the number of uncompressed
+// bytes written to w.
+func WriteGunzip(w io.Writer, p []byte) (int, error) {
+	r := &byteSliceReader{p}
+	zr, err := acquireGzipReader(r)
+	if err != nil {
+		return 0, err
+	}
+	n, err := copyZeroAlloc(w, zr)
+	releaseGzipReader(zr)
+	nn := int(n)
+	if int64(nn) != n {
+		return 0, fmt.Errorf("too much data gunzipped: %d", n)
+	}
+	return nn, err
+}
+
+// WriteInflate writes inflated p to w and returns the number of uncompressed
+// bytes written to w.
+func WriteInflate(w io.Writer, p []byte) (int, error) {
+	r := &byteSliceReader{p}
+	zr, err := acquireFlateReader(r)
+	if err != nil {
+		return 0, err
+	}
+	n, err := copyZeroAlloc(w, zr)
+	releaseFlateReader(zr)
+	nn := int(n)
+	if int64(nn) != n {
+		return 0, fmt.Errorf("too much data inflated: %d", n)
+	}
+	return nn, err
+}
+
+// AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst.
+func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
+	w := &byteSliceWriter{dst}
+	_, err := WriteGunzip(w, src)
+	return w.b, err
+}
+
+type byteSliceWriter struct {
+	b []byte
+}
+
+func (w *byteSliceWriter) Write(p []byte) (int, error) {
+	w.b = append(w.b, p...)
+	return len(p), nil
+}
+
+type byteSliceReader struct {
+	b []byte
+}
+
+func (r *byteSliceReader) Read(p []byte) (int, error) {
+	if len(r.b) == 0 {
+		return 0, io.EOF
+	}
+	n := copy(p, r.b)
+	r.b = r.b[n:]
+	return n, nil
+}
+
+func acquireFlateWriter(w io.Writer, level int) *flateWriter {
+	p := flateWriterPoolMap[level]
+	if p == nil {
+		panic(fmt.Sprintf("BUG: unexpected compression level passed: %d. See compress/flate for supported levels", level))
+	}
+
+	v := p.Get()
+	if v == nil {
+		sw := stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
+			zw, err := zlib.NewWriterLevel(w, level)
+			if err != nil {
+				panic(fmt.Sprintf("BUG: unexpected error in zlib.NewWriterLevel(%d): %s", level, err))
+			}
+			return zw
+		})
+		return &flateWriter{
+			Writer: sw,
+			p:      p,
+		}
+	}
+	zw := v.(*flateWriter)
+	zw.Reset(w)
+	return zw
+}
+
+func releaseFlateWriter(zw *flateWriter) {
+	zw.Close()
+	zw.p.Put(zw)
+}
+
+type flateWriter struct {
+	stackless.Writer
+	p *sync.Pool
+}
+
+var flateWriterPoolMap = func() map[int]*sync.Pool {
+	// Initialize pools for all the compression levels defined
+	// in https://golang.org/pkg/compress/flate/#pkg-constants .
+	m := make(map[int]*sync.Pool, 11)
+	m[-1] = &sync.Pool{}
+	for i := 0; i < 10; i++ {
+		m[i] = &sync.Pool{}
+	}
+	return m
+}()
+
+func isFileCompressible(f *os.File, minCompressRatio float64) bool {
+	// Try compressing the first 4kb of the file
+	// and see if it can be compressed by more than
+	// the given minCompressRatio.
+ b := AcquireByteBuffer() + zw := acquireGzipWriter(b, CompressDefaultCompression) + lr := &io.LimitedReader{ + R: f, + N: 4096, + } + _, err := copyZeroAlloc(zw, lr) + releaseGzipWriter(zw) + f.Seek(0, 0) + if err != nil { + return false + } + + n := 4096 - lr.N + zn := len(b.B) + ReleaseByteBuffer(b) + return float64(zn) < float64(n)*minCompressRatio +} diff --git a/vendor/github.com/valyala/fasthttp/cookie.go b/vendor/github.com/valyala/fasthttp/cookie.go new file mode 100644 index 0000000..17ecc62 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/cookie.go @@ -0,0 +1,396 @@ +package fasthttp + +import ( + "bytes" + "errors" + "io" + "sync" + "time" +) + +var zeroTime time.Time + +var ( + // CookieExpireDelete may be set on Cookie.Expire for expiring the given cookie. + CookieExpireDelete = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + + // CookieExpireUnlimited indicates that the cookie doesn't expire. + CookieExpireUnlimited = zeroTime +) + +// AcquireCookie returns an empty Cookie object from the pool. +// +// The returned object may be returned back to the pool with ReleaseCookie. +// This allows reducing GC load. +func AcquireCookie() *Cookie { + return cookiePool.Get().(*Cookie) +} + +// ReleaseCookie returns the Cookie object acquired with AcquireCookie back +// to the pool. +// +// Do not access released Cookie object, otherwise data races may occur. +func ReleaseCookie(c *Cookie) { + c.Reset() + cookiePool.Put(c) +} + +var cookiePool = &sync.Pool{ + New: func() interface{} { + return &Cookie{} + }, +} + +// Cookie represents HTTP response cookie. +// +// Do not copy Cookie objects. Create new object and use CopyTo instead. +// +// Cookie instance MUST NOT be used from concurrently running goroutines. +type Cookie struct { + noCopy noCopy + + key []byte + value []byte + expire time.Time + domain []byte + path []byte + + httpOnly bool + secure bool + + bufKV argsKV + buf []byte +} + +// CopyTo copies src cookie to c. +func (c *Cookie) CopyTo(src *Cookie) { + c.Reset() + c.key = append(c.key[:0], src.key...) + c.value = append(c.value[:0], src.value...) + c.expire = src.expire + c.domain = append(c.domain[:0], src.domain...) + c.path = append(c.path[:0], src.path...) + c.httpOnly = src.httpOnly + c.secure = src.secure +} + +// HTTPOnly returns true if the cookie is http only. +func (c *Cookie) HTTPOnly() bool { + return c.httpOnly +} + +// SetHTTPOnly sets cookie's httpOnly flag to the given value. +func (c *Cookie) SetHTTPOnly(httpOnly bool) { + c.httpOnly = httpOnly +} + +// Secure returns true if the cookie is secure. +func (c *Cookie) Secure() bool { + return c.secure +} + +// SetSecure sets cookie's secure flag to the given value. +func (c *Cookie) SetSecure(secure bool) { + c.secure = secure +} + +// Path returns cookie path. +func (c *Cookie) Path() []byte { + return c.path +} + +// SetPath sets cookie path. +func (c *Cookie) SetPath(path string) { + c.buf = append(c.buf[:0], path...) + c.path = normalizePath(c.path, c.buf) +} + +// SetPathBytes sets cookie path. +func (c *Cookie) SetPathBytes(path []byte) { + c.buf = append(c.buf[:0], path...) + c.path = normalizePath(c.path, c.buf) +} + +// Domain returns cookie domain. +// +// The returned domain is valid until the next Cookie modification method call. +func (c *Cookie) Domain() []byte { + return c.domain +} + +// SetDomain sets cookie domain. +func (c *Cookie) SetDomain(domain string) { + c.domain = append(c.domain[:0], domain...) +} + +// SetDomainBytes sets cookie domain. 
+func (c *Cookie) SetDomainBytes(domain []byte) { + c.domain = append(c.domain[:0], domain...) +} + +// Expire returns cookie expiration time. +// +// CookieExpireUnlimited is returned if cookie doesn't expire +func (c *Cookie) Expire() time.Time { + expire := c.expire + if expire.IsZero() { + expire = CookieExpireUnlimited + } + return expire +} + +// SetExpire sets cookie expiration time. +// +// Set expiration time to CookieExpireDelete for expiring (deleting) +// the cookie on the client. +// +// By default cookie lifetime is limited by browser session. +func (c *Cookie) SetExpire(expire time.Time) { + c.expire = expire +} + +// Value returns cookie value. +// +// The returned value is valid until the next Cookie modification method call. +func (c *Cookie) Value() []byte { + return c.value +} + +// SetValue sets cookie value. +func (c *Cookie) SetValue(value string) { + c.value = append(c.value[:0], value...) +} + +// SetValueBytes sets cookie value. +func (c *Cookie) SetValueBytes(value []byte) { + c.value = append(c.value[:0], value...) +} + +// Key returns cookie name. +// +// The returned value is valid until the next Cookie modification method call. +func (c *Cookie) Key() []byte { + return c.key +} + +// SetKey sets cookie name. +func (c *Cookie) SetKey(key string) { + c.key = append(c.key[:0], key...) +} + +// SetKeyBytes sets cookie name. +func (c *Cookie) SetKeyBytes(key []byte) { + c.key = append(c.key[:0], key...) +} + +// Reset clears the cookie. +func (c *Cookie) Reset() { + c.key = c.key[:0] + c.value = c.value[:0] + c.expire = zeroTime + c.domain = c.domain[:0] + c.path = c.path[:0] + c.httpOnly = false + c.secure = false +} + +// AppendBytes appends cookie representation to dst and returns +// the extended dst. +func (c *Cookie) AppendBytes(dst []byte) []byte { + if len(c.key) > 0 { + dst = append(dst, c.key...) + dst = append(dst, '=') + } + dst = append(dst, c.value...) + + if !c.expire.IsZero() { + c.bufKV.value = AppendHTTPDate(c.bufKV.value[:0], c.expire) + dst = append(dst, ';', ' ') + dst = append(dst, strCookieExpires...) + dst = append(dst, '=') + dst = append(dst, c.bufKV.value...) + } + if len(c.domain) > 0 { + dst = appendCookiePart(dst, strCookieDomain, c.domain) + } + if len(c.path) > 0 { + dst = appendCookiePart(dst, strCookiePath, c.path) + } + if c.httpOnly { + dst = append(dst, ';', ' ') + dst = append(dst, strCookieHTTPOnly...) + } + if c.secure { + dst = append(dst, ';', ' ') + dst = append(dst, strCookieSecure...) + } + return dst +} + +// Cookie returns cookie representation. +// +// The returned value is valid until the next call to Cookie methods. +func (c *Cookie) Cookie() []byte { + c.buf = c.AppendBytes(c.buf[:0]) + return c.buf +} + +// String returns cookie representation. +func (c *Cookie) String() string { + return string(c.Cookie()) +} + +// WriteTo writes cookie representation to w. +// +// WriteTo implements io.WriterTo interface. +func (c *Cookie) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(c.Cookie()) + return int64(n), err +} + +var errNoCookies = errors.New("no cookies found") + +// Parse parses Set-Cookie header. +func (c *Cookie) Parse(src string) error { + c.buf = append(c.buf[:0], src...) + return c.ParseBytes(c.buf) +} + +// ParseBytes parses Set-Cookie header. +func (c *Cookie) ParseBytes(src []byte) error { + c.Reset() + + var s cookieScanner + s.b = src + + kv := &c.bufKV + if !s.next(kv) { + return errNoCookies + } + + c.key = append(c.key[:0], kv.key...) + c.value = append(c.value[:0], kv.value...) 
+ + for s.next(kv) { + if len(kv.key) == 0 && len(kv.value) == 0 { + continue + } + switch string(kv.key) { + case "expires": + v := b2s(kv.value) + exptime, err := time.ParseInLocation(time.RFC1123, v, time.UTC) + if err != nil { + return err + } + c.expire = exptime + case "domain": + c.domain = append(c.domain[:0], kv.value...) + case "path": + c.path = append(c.path[:0], kv.value...) + case "": + switch string(kv.value) { + case "HttpOnly": + c.httpOnly = true + case "secure": + c.secure = true + } + } + } + return nil +} + +func appendCookiePart(dst, key, value []byte) []byte { + dst = append(dst, ';', ' ') + dst = append(dst, key...) + dst = append(dst, '=') + return append(dst, value...) +} + +func getCookieKey(dst, src []byte) []byte { + n := bytes.IndexByte(src, '=') + if n >= 0 { + src = src[:n] + } + return decodeCookieArg(dst, src, false) +} + +func appendRequestCookieBytes(dst []byte, cookies []argsKV) []byte { + for i, n := 0, len(cookies); i < n; i++ { + kv := &cookies[i] + if len(kv.key) > 0 { + dst = append(dst, kv.key...) + dst = append(dst, '=') + } + dst = append(dst, kv.value...) + if i+1 < n { + dst = append(dst, ';', ' ') + } + } + return dst +} + +func parseRequestCookies(cookies []argsKV, src []byte) []argsKV { + var s cookieScanner + s.b = src + var kv *argsKV + cookies, kv = allocArg(cookies) + for s.next(kv) { + if len(kv.key) > 0 || len(kv.value) > 0 { + cookies, kv = allocArg(cookies) + } + } + return releaseArg(cookies) +} + +type cookieScanner struct { + b []byte +} + +func (s *cookieScanner) next(kv *argsKV) bool { + b := s.b + if len(b) == 0 { + return false + } + + isKey := true + k := 0 + for i, c := range b { + switch c { + case '=': + if isKey { + isKey = false + kv.key = decodeCookieArg(kv.key, b[:i], false) + k = i + 1 + } + case ';': + if isKey { + kv.key = kv.key[:0] + } + kv.value = decodeCookieArg(kv.value, b[k:i], true) + s.b = b[i+1:] + return true + } + } + + if isKey { + kv.key = kv.key[:0] + } + kv.value = decodeCookieArg(kv.value, b[k:], true) + s.b = b[len(b):] + return true +} + +func decodeCookieArg(dst, src []byte, skipQuotes bool) []byte { + for len(src) > 0 && src[0] == ' ' { + src = src[1:] + } + for len(src) > 0 && src[len(src)-1] == ' ' { + src = src[:len(src)-1] + } + if skipQuotes { + if len(src) > 1 && src[0] == '"' && src[len(src)-1] == '"' { + src = src[1 : len(src)-1] + } + } + return append(dst[:0], src...) +} diff --git a/vendor/github.com/valyala/fasthttp/doc.go b/vendor/github.com/valyala/fasthttp/doc.go new file mode 100644 index 0000000..501eff6 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/doc.go @@ -0,0 +1,40 @@ +/* +Package fasthttp provides fast HTTP server and client API. + +Fasthttp provides the following features: + + * Optimized for speed. Easily handles more than 100K qps and more than 1M + concurrent keep-alive connections on modern hardware. + * Optimized for low memory usage. + * Easy 'Connection: Upgrade' support via RequestCtx.Hijack. + * Server supports requests' pipelining. Multiple requests may be read from + a single network packet and multiple responses may be sent in a single + network packet. This may be useful for highly loaded REST services. + * Server provides the following anti-DoS limits: + + * The number of concurrent connections. + * The number of concurrent connections per client IP. + * The number of requests per connection. + * Request read timeout. + * Response write timeout. + * Maximum request header size. + * Maximum request body size. + * Maximum request execution time. 
+        * Maximum keep-alive connection lifetime.
+        * Early filtering out non-GET requests.
+
+    * A lot of additional useful info is exposed to the request handler:
+
+        * Server and client address.
+        * Per-request logger.
+        * Unique request id.
+        * Request start time.
+        * Connection start time.
+        * Request sequence number for the current connection.
+
+    * Client supports automatic retry on idempotent requests' failure.
+    * Fasthttp API is designed with the ability to extend existing client
+      and server implementations or to write custom client and server
+      implementations from scratch.
+*/
+package fasthttp
diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/doc.go b/vendor/github.com/valyala/fasthttp/fasthttputil/doc.go
new file mode 100644
index 0000000..9cf69e7
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/fasthttputil/doc.go
@@ -0,0 +1,2 @@
+// Package fasthttputil provides utility functions for fasthttp.
+package fasthttputil
diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go b/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go
new file mode 100644
index 0000000..d6bcca4
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go
@@ -0,0 +1,84 @@
+package fasthttputil
+
+import (
+	"fmt"
+	"net"
+	"sync"
+)
+
+// InmemoryListener provides an in-memory dialer<->net.Listener implementation.
+//
+// It may be used either for fast in-process client<->server communications
+// without network stack overhead or for client<->server tests.
+type InmemoryListener struct {
+	lock   sync.Mutex
+	closed bool
+	conns  chan net.Conn
+}
+
+// NewInmemoryListener returns a new in-memory dialer<->net.Listener.
+func NewInmemoryListener() *InmemoryListener {
+	return &InmemoryListener{
+		conns: make(chan net.Conn, 1024),
+	}
+}
+
+// Accept implements net.Listener's Accept.
+//
+// It is safe to call Accept from concurrently running goroutines.
+//
+// Accept returns a new connection for each Dial call.
+func (ln *InmemoryListener) Accept() (net.Conn, error) {
+	c, ok := <-ln.conns
+	if !ok {
+		return nil, fmt.Errorf("InmemoryListener is already closed: use of closed network connection")
+	}
+	return c, nil
+}
+
+// Close implements net.Listener's Close.
+func (ln *InmemoryListener) Close() error {
+	var err error
+
+	ln.lock.Lock()
+	if !ln.closed {
+		close(ln.conns)
+		ln.closed = true
+	} else {
+		err = fmt.Errorf("InmemoryListener is already closed")
+	}
+	ln.lock.Unlock()
+	return err
+}
+
+// Addr implements net.Listener's Addr.
+func (ln *InmemoryListener) Addr() net.Addr {
+	return &net.UnixAddr{
+		Name: "InmemoryListener",
+		Net:  "memory",
+	}
+}
+
+// Dial creates a new client<->server connection, enqueues the server side
+// of the connection to Accept and returns the client side of the connection.
+//
+// It is safe to call Dial from concurrently running goroutines.
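+//
+// A minimal test sketch (illustrative only; s is an assumed
+// *fasthttp.Server set up by the caller):
+//
+//	ln := NewInmemoryListener()
+//	go s.Serve(ln)
+//	conn, err := ln.Dial()
+//	// write a raw HTTP/1.1 request to conn and read the response back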
+func (ln *InmemoryListener) Dial() (net.Conn, error) {
+	pc := NewPipeConns()
+	cConn := pc.Conn1()
+	sConn := pc.Conn2()
+	ln.lock.Lock()
+	if !ln.closed {
+		ln.conns <- sConn
+	} else {
+		sConn.Close()
+		cConn.Close()
+		cConn = nil
+	}
+	ln.lock.Unlock()
+
+	if cConn == nil {
+		return nil, fmt.Errorf("InmemoryListener is already closed")
+	}
+	return cConn, nil
+}
diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go b/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go
new file mode 100644
index 0000000..e5a0235
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go
@@ -0,0 +1,283 @@
+package fasthttputil
+
+import (
+	"errors"
+	"io"
+	"net"
+	"sync"
+	"time"
+)
+
+// NewPipeConns returns a new bi-directional connection pipe.
+func NewPipeConns() *PipeConns {
+	ch1 := make(chan *byteBuffer, 4)
+	ch2 := make(chan *byteBuffer, 4)
+
+	pc := &PipeConns{
+		stopCh: make(chan struct{}),
+	}
+	pc.c1.rCh = ch1
+	pc.c1.wCh = ch2
+	pc.c2.rCh = ch2
+	pc.c2.wCh = ch1
+	pc.c1.pc = pc
+	pc.c2.pc = pc
+	return pc
+}
+
+// PipeConns provides a bi-directional connection pipe,
+// which uses in-process memory as a transport.
+//
+// PipeConns must be created by calling NewPipeConns.
+//
+// PipeConns has the following additional features compared to connections
+// returned from net.Pipe():
+//
+// * It is faster.
+// * It buffers Write calls, so there is no need to have a concurrent goroutine
+//   calling Read in order to unblock each Write call.
+// * It supports read and write deadlines.
+//
+type PipeConns struct {
+	c1         pipeConn
+	c2         pipeConn
+	stopCh     chan struct{}
+	stopChLock sync.Mutex
+}
+
+// Conn1 returns the first end of the bi-directional pipe.
+//
+// Data written to Conn1 may be read from Conn2.
+// Data written to Conn2 may be read from Conn1.
+func (pc *PipeConns) Conn1() net.Conn {
+	return &pc.c1
+}
+
+// Conn2 returns the second end of the bi-directional pipe.
+//
+// Data written to Conn2 may be read from Conn1.
+// Data written to Conn1 may be read from Conn2.
+func (pc *PipeConns) Conn2() net.Conn {
+	return &pc.c2
+}
+
+// Close closes pipe connections.
+func (pc *PipeConns) Close() error {
+	pc.stopChLock.Lock()
+	select {
+	case <-pc.stopCh:
+	default:
+		close(pc.stopCh)
+	}
+	pc.stopChLock.Unlock()
+
+	return nil
+}
+
+type pipeConn struct {
+	b  *byteBuffer
+	bb []byte
+
+	rCh chan *byteBuffer
+	wCh chan *byteBuffer
+	pc  *PipeConns
+
+	readDeadlineTimer  *time.Timer
+	writeDeadlineTimer *time.Timer
+
+	readDeadlineCh  <-chan time.Time
+	writeDeadlineCh <-chan time.Time
+}
+
+func (c *pipeConn) Write(p []byte) (int, error) {
+	b := acquireByteBuffer()
+	b.b = append(b.b[:0], p...)
+ + select { + case <-c.pc.stopCh: + releaseByteBuffer(b) + return 0, errConnectionClosed + default: + } + + select { + case c.wCh <- b: + default: + select { + case c.wCh <- b: + case <-c.writeDeadlineCh: + c.writeDeadlineCh = closedDeadlineCh + return 0, ErrTimeout + case <-c.pc.stopCh: + releaseByteBuffer(b) + return 0, errConnectionClosed + } + } + + return len(p), nil +} + +func (c *pipeConn) Read(p []byte) (int, error) { + mayBlock := true + nn := 0 + for len(p) > 0 { + n, err := c.read(p, mayBlock) + nn += n + if err != nil { + if !mayBlock && err == errWouldBlock { + err = nil + } + return nn, err + } + p = p[n:] + mayBlock = false + } + + return nn, nil +} + +func (c *pipeConn) read(p []byte, mayBlock bool) (int, error) { + if len(c.bb) == 0 { + if err := c.readNextByteBuffer(mayBlock); err != nil { + return 0, err + } + } + n := copy(p, c.bb) + c.bb = c.bb[n:] + + return n, nil +} + +func (c *pipeConn) readNextByteBuffer(mayBlock bool) error { + releaseByteBuffer(c.b) + c.b = nil + + select { + case c.b = <-c.rCh: + default: + if !mayBlock { + return errWouldBlock + } + select { + case c.b = <-c.rCh: + case <-c.readDeadlineCh: + c.readDeadlineCh = closedDeadlineCh + // rCh may contain data when deadline is reached. + // Read the data before returning ErrTimeout. + select { + case c.b = <-c.rCh: + default: + return ErrTimeout + } + case <-c.pc.stopCh: + // rCh may contain data when stopCh is closed. + // Read the data before returning EOF. + select { + case c.b = <-c.rCh: + default: + return io.EOF + } + } + } + + c.bb = c.b.b + return nil +} + +var ( + errWouldBlock = errors.New("would block") + errConnectionClosed = errors.New("connection closed") + + // ErrTimeout is returned from Read() or Write() on timeout. + ErrTimeout = errors.New("timeout") +) + +func (c *pipeConn) Close() error { + return c.pc.Close() +} + +func (c *pipeConn) LocalAddr() net.Addr { + return pipeAddr(0) +} + +func (c *pipeConn) RemoteAddr() net.Addr { + return pipeAddr(0) +} + +func (c *pipeConn) SetDeadline(deadline time.Time) error { + c.SetReadDeadline(deadline) + c.SetWriteDeadline(deadline) + return nil +} + +func (c *pipeConn) SetReadDeadline(deadline time.Time) error { + if c.readDeadlineTimer == nil { + c.readDeadlineTimer = time.NewTimer(time.Hour) + } + c.readDeadlineCh = updateTimer(c.readDeadlineTimer, deadline) + return nil +} + +func (c *pipeConn) SetWriteDeadline(deadline time.Time) error { + if c.writeDeadlineTimer == nil { + c.writeDeadlineTimer = time.NewTimer(time.Hour) + } + c.writeDeadlineCh = updateTimer(c.writeDeadlineTimer, deadline) + return nil +} + +func updateTimer(t *time.Timer, deadline time.Time) <-chan time.Time { + if !t.Stop() { + select { + case <-t.C: + default: + } + } + if deadline.IsZero() { + return nil + } + d := -time.Since(deadline) + if d <= 0 { + return closedDeadlineCh + } + t.Reset(d) + return t.C +} + +var closedDeadlineCh = func() <-chan time.Time { + ch := make(chan time.Time) + close(ch) + return ch +}() + +type pipeAddr int + +func (pipeAddr) Network() string { + return "pipe" +} + +func (pipeAddr) String() string { + return "pipe" +} + +type byteBuffer struct { + b []byte +} + +func acquireByteBuffer() *byteBuffer { + return byteBufferPool.Get().(*byteBuffer) +} + +func releaseByteBuffer(b *byteBuffer) { + if b != nil { + byteBufferPool.Put(b) + } +} + +var byteBufferPool = &sync.Pool{ + New: func() interface{} { + return &byteBuffer{ + b: make([]byte, 1024), + } + }, +} diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/ssl-cert-snakeoil.key 
b/vendor/github.com/valyala/fasthttp/fasthttputil/ssl-cert-snakeoil.key new file mode 100644 index 0000000..00a79a3 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/ssl-cert-snakeoil.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG +3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U +wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0 +FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf +IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg +GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF +sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2 +sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D +uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb +K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3 +YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+ +DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk +B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV +Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x +IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY +wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj +wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D +FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m +tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX +fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU +ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk +K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT +6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt +9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN +Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV +c257YgaWmjK9uB0Y2r2VxS0G +-----END PRIVATE KEY----- diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/ssl-cert-snakeoil.pem b/vendor/github.com/valyala/fasthttp/fasthttputil/ssl-cert-snakeoil.pem new file mode 100644 index 0000000..93e77cd --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/ssl-cert-snakeoil.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV +BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV +MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D +K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te ++z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij +L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1 +xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY +6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG +SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98 +L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2 +45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li +K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6 +X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI +whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd +-----END CERTIFICATE----- diff --git a/vendor/github.com/valyala/fasthttp/fs.go b/vendor/github.com/valyala/fasthttp/fs.go new file mode 100644 index 0000000..c36073d --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fs.go @@ -0,0 +1,1257 @@ +package fasthttp + +import ( + "bytes" + "errors" 
+	"fmt"
+	"html"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/klauspost/compress/gzip"
+)
+
+// ServeFileBytesUncompressed returns HTTP response containing file contents
+// from the given path.
+//
+// Directory contents are returned if path points to a directory.
+//
+// ServeFileBytes may be used for saving network traffic when serving files
+// with good compression ratio.
+//
+// See also RequestCtx.SendFileBytes.
+func ServeFileBytesUncompressed(ctx *RequestCtx, path []byte) {
+	ServeFileUncompressed(ctx, b2s(path))
+}
+
+// ServeFileUncompressed returns HTTP response containing file contents
+// from the given path.
+//
+// Directory contents are returned if path points to a directory.
+//
+// ServeFile may be used for saving network traffic when serving files
+// with good compression ratio.
+//
+// See also RequestCtx.SendFile.
+func ServeFileUncompressed(ctx *RequestCtx, path string) {
+	ctx.Request.Header.DelBytes(strAcceptEncoding)
+	ServeFile(ctx, path)
+}
+
+// ServeFileBytes returns HTTP response containing compressed file contents
+// from the given path.
+//
+// HTTP response may contain uncompressed file contents in the following cases:
+//
+//   * Missing 'Accept-Encoding: gzip' request header.
+//   * No write access to directory containing the file.
+//
+// Directory contents are returned if path points to a directory.
+//
+// Use ServeFileBytesUncompressed if you don't need to serve compressed
+// file contents.
+//
+// See also RequestCtx.SendFileBytes.
+func ServeFileBytes(ctx *RequestCtx, path []byte) {
+	ServeFile(ctx, b2s(path))
+}
+
+// ServeFile returns HTTP response containing compressed file contents
+// from the given path.
+//
+// HTTP response may contain uncompressed file contents in the following cases:
+//
+//   * Missing 'Accept-Encoding: gzip' request header.
+//   * No write access to directory containing the file.
+//
+// Directory contents are returned if path points to a directory.
+//
+// Use ServeFileUncompressed if you don't need to serve compressed file contents.
+//
+// See also RequestCtx.SendFile.
+func ServeFile(ctx *RequestCtx, path string) {
+	rootFSOnce.Do(func() {
+		rootFSHandler = rootFS.NewRequestHandler()
+	})
+	if len(path) == 0 || path[0] != '/' {
+		// extend relative path to absolute path
+		var err error
+		if path, err = filepath.Abs(path); err != nil {
+			ctx.Logger().Printf("cannot resolve path %q to absolute file path: %s", path, err)
+			ctx.Error("Internal Server Error", StatusInternalServerError)
+			return
+		}
+	}
+	ctx.Request.SetRequestURI(path)
+	rootFSHandler(ctx)
+}
+
+var (
+	rootFSOnce sync.Once
+	rootFS     = &FS{
+		Root:               "/",
+		GenerateIndexPages: true,
+		Compress:           true,
+		AcceptByteRange:    true,
+	}
+	rootFSHandler RequestHandler
+)
+
+// PathRewriteFunc must return a new request path based on arbitrary ctx
+// info such as ctx.Path().
+//
+// Path rewriter is used in FS for translating the current request
+// to the local filesystem path relative to FS.Root.
+//
+// The returned path must not contain '/../' substrings due to security reasons,
+// since such paths may refer to files outside FS.Root.
+//
+// The returned path may refer to ctx members. For example, ctx.Path().
+type PathRewriteFunc func(ctx *RequestCtx) []byte
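Since PathRewriteFunc is an ordinary Go function type, custom rewriters are easy to write. A minimal sketch, assuming the package is imported as github.com/valyala/fasthttp; the addStaticPrefix helper, the /static prefix and the paths are illustrative, not part of the vendored code:

    package main

    import (
        "log"

        "github.com/valyala/fasthttp"
    )

    // addStaticPrefix prepends "/static" to every request path before
    // FS resolves it under FS.Root.
    func addStaticPrefix(ctx *fasthttp.RequestCtx) []byte {
        // Keep the result free of '/../' so it cannot escape FS.Root.
        return append([]byte("/static"), ctx.Path()...)
    }

    func main() {
        fs := &fasthttp.FS{Root: "/var/www", PathRewrite: addStaticPrefix}
        log.Fatal(fasthttp.ListenAndServe(":8080", fs.NewRequestHandler()))
    }

The ready-made rewriters below (NewVHostPathRewriter, NewPathSlashesStripper, NewPathPrefixStripper) cover the common cases.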
+
+// NewVHostPathRewriter returns path rewriter, which strips slashesCount
+// leading slashes from the path and prepends the path with request's host,
+// thus simplifying virtual hosting for static files.
+//
+// Examples:
+//
+//   * host=foobar.com, slashesCount=0, original path="/foo/bar".
+//     Resulting path: "/foobar.com/foo/bar"
+//
+//   * host=img.aaa.com, slashesCount=1, original path="/images/123/456.jpg"
+//     Resulting path: "/img.aaa.com/123/456.jpg"
+//
+func NewVHostPathRewriter(slashesCount int) PathRewriteFunc {
+	return func(ctx *RequestCtx) []byte {
+		path := stripLeadingSlashes(ctx.Path(), slashesCount)
+		host := ctx.Host()
+		if n := bytes.IndexByte(host, '/'); n >= 0 {
+			host = nil
+		}
+		if len(host) == 0 {
+			host = strInvalidHost
+		}
+		b := AcquireByteBuffer()
+		b.B = append(b.B, '/')
+		b.B = append(b.B, host...)
+		b.B = append(b.B, path...)
+		ctx.URI().SetPathBytes(b.B)
+		ReleaseByteBuffer(b)
+
+		return ctx.Path()
+	}
+}
+
+var strInvalidHost = []byte("invalid-host")
+
+// NewPathSlashesStripper returns path rewriter, which strips slashesCount
+// leading slashes from the path.
+//
+// Examples:
+//
+//   * slashesCount = 0, original path: "/foo/bar", result: "/foo/bar"
+//   * slashesCount = 1, original path: "/foo/bar", result: "/bar"
+//   * slashesCount = 2, original path: "/foo/bar", result: ""
+//
+// The returned path rewriter may be used as FS.PathRewrite.
+func NewPathSlashesStripper(slashesCount int) PathRewriteFunc {
+	return func(ctx *RequestCtx) []byte {
+		return stripLeadingSlashes(ctx.Path(), slashesCount)
+	}
+}
+
+// NewPathPrefixStripper returns path rewriter, which removes prefixSize bytes
+// from the path prefix.
+//
+// Examples:
+//
+//   * prefixSize = 0, original path: "/foo/bar", result: "/foo/bar"
+//   * prefixSize = 3, original path: "/foo/bar", result: "o/bar"
+//   * prefixSize = 7, original path: "/foo/bar", result: "r"
+//
+// The returned path rewriter may be used as FS.PathRewrite.
+func NewPathPrefixStripper(prefixSize int) PathRewriteFunc {
+	return func(ctx *RequestCtx) []byte {
+		path := ctx.Path()
+		if len(path) >= prefixSize {
+			path = path[prefixSize:]
+		}
+		return path
+	}
+}
+
+// FS represents settings for request handler serving static files
+// from the local filesystem.
+//
+// It is prohibited copying FS values. Create new values instead.
+type FS struct {
+	noCopy noCopy
+
+	// Path to the root directory to serve files from.
+	Root string
+
+	// List of index file names to try opening during directory access.
+	//
+	// For example:
+	//
+	//     * index.html
+	//     * index.htm
+	//     * my-super-index.xml
+	//
+	// By default the list is empty.
+	IndexNames []string
+
+	// Index pages for directories without files matching IndexNames
+	// are automatically generated if set.
+	//
+	// Directory index generation may be quite slow for directories
+	// with many files (more than 1K), so enabling index page generation
+	// for such directories is discouraged.
+	//
+	// By default index pages aren't generated.
+	GenerateIndexPages bool
+
+	// Transparently compresses responses if set to true.
+	//
+	// The server tries minimizing CPU usage by caching compressed files.
+	// It adds CompressedFileSuffix suffix to the original file name and
+	// tries saving the resulting compressed file under the new file name.
+	// So it is advisable to give the server write access to Root
+	// and to all inner folders in order to minimize CPU usage when serving
+	// compressed responses.
+	//
+	// Transparent compression is disabled by default.
+	Compress bool
+
+	// Enables byte range requests if set to true.
+	//
+	// Byte range requests are disabled by default.
+	AcceptByteRange bool
+
+	// Path rewriting function.
+ // + // By default request path is not modified. + PathRewrite PathRewriteFunc + + // Expiration duration for inactive file handlers. + // + // FSHandlerCacheDuration is used by default. + CacheDuration time.Duration + + // Suffix to add to the name of cached compressed file. + // + // This value has sense only if Compress is set. + // + // FSCompressedFileSuffix is used by default. + CompressedFileSuffix string + + once sync.Once + h RequestHandler +} + +// FSCompressedFileSuffix is the suffix FS adds to the original file names +// when trying to store compressed file under the new file name. +// See FS.Compress for details. +const FSCompressedFileSuffix = ".fasthttp.gz" + +// FSHandlerCacheDuration is the default expiration duration for inactive +// file handlers opened by FS. +const FSHandlerCacheDuration = 10 * time.Second + +// FSHandler returns request handler serving static files from +// the given root folder. +// +// stripSlashes indicates how many leading slashes must be stripped +// from requested path before searching requested file in the root folder. +// Examples: +// +// * stripSlashes = 0, original path: "/foo/bar", result: "/foo/bar" +// * stripSlashes = 1, original path: "/foo/bar", result: "/bar" +// * stripSlashes = 2, original path: "/foo/bar", result: "" +// +// The returned request handler automatically generates index pages +// for directories without index.html. +// +// The returned handler caches requested file handles +// for FSHandlerCacheDuration. +// Make sure your program has enough 'max open files' limit aka +// 'ulimit -n' if root folder contains many files. +// +// Do not create multiple request handler instances for the same +// (root, stripSlashes) arguments - just reuse a single instance. +// Otherwise goroutine leak will occur. +func FSHandler(root string, stripSlashes int) RequestHandler { + fs := &FS{ + Root: root, + IndexNames: []string{"index.html"}, + GenerateIndexPages: true, + AcceptByteRange: true, + } + if stripSlashes > 0 { + fs.PathRewrite = NewPathSlashesStripper(stripSlashes) + } + return fs.NewRequestHandler() +} + +// NewRequestHandler returns new request handler with the given FS settings. +// +// The returned handler caches requested file handles +// for FS.CacheDuration. +// Make sure your program has enough 'max open files' limit aka +// 'ulimit -n' if FS.Root folder contains many files. +// +// Do not create multiple request handlers from a single FS instance - +// just reuse a single request handler. +func (fs *FS) NewRequestHandler() RequestHandler { + fs.once.Do(fs.initRequestHandler) + return fs.h +} + +func (fs *FS) initRequestHandler() { + root := fs.Root + + // serve files from the current working directory if root is empty + if len(root) == 0 { + root = "." 
+ } + + // strip trailing slashes from the root path + for len(root) > 0 && root[len(root)-1] == '/' { + root = root[:len(root)-1] + } + + cacheDuration := fs.CacheDuration + if cacheDuration <= 0 { + cacheDuration = FSHandlerCacheDuration + } + compressedFileSuffix := fs.CompressedFileSuffix + if len(compressedFileSuffix) == 0 { + compressedFileSuffix = FSCompressedFileSuffix + } + + h := &fsHandler{ + root: root, + indexNames: fs.IndexNames, + pathRewrite: fs.PathRewrite, + generateIndexPages: fs.GenerateIndexPages, + compress: fs.Compress, + acceptByteRange: fs.AcceptByteRange, + cacheDuration: cacheDuration, + compressedFileSuffix: compressedFileSuffix, + cache: make(map[string]*fsFile), + compressedCache: make(map[string]*fsFile), + } + + go func() { + var pendingFiles []*fsFile + for { + time.Sleep(cacheDuration / 2) + pendingFiles = h.cleanCache(pendingFiles) + } + }() + + fs.h = h.handleRequest +} + +type fsHandler struct { + root string + indexNames []string + pathRewrite PathRewriteFunc + generateIndexPages bool + compress bool + acceptByteRange bool + cacheDuration time.Duration + compressedFileSuffix string + + cache map[string]*fsFile + compressedCache map[string]*fsFile + cacheLock sync.Mutex + + smallFileReaderPool sync.Pool +} + +type fsFile struct { + h *fsHandler + f *os.File + dirIndex []byte + contentType string + contentLength int + compressed bool + + lastModified time.Time + lastModifiedStr []byte + + t time.Time + readersCount int + + bigFiles []*bigFileReader + bigFilesLock sync.Mutex +} + +func (ff *fsFile) NewReader() (io.Reader, error) { + if ff.isBig() { + r, err := ff.bigFileReader() + if err != nil { + ff.decReadersCount() + } + return r, err + } + return ff.smallFileReader(), nil +} + +func (ff *fsFile) smallFileReader() io.Reader { + v := ff.h.smallFileReaderPool.Get() + if v == nil { + v = &fsSmallFileReader{} + } + r := v.(*fsSmallFileReader) + r.ff = ff + r.endPos = ff.contentLength + if r.startPos > 0 { + panic("BUG: fsSmallFileReader with non-nil startPos found in the pool") + } + return r +} + +// files bigger than this size are sent with sendfile +const maxSmallFileSize = 2 * 4096 + +func (ff *fsFile) isBig() bool { + return ff.contentLength > maxSmallFileSize && len(ff.dirIndex) == 0 +} + +func (ff *fsFile) bigFileReader() (io.Reader, error) { + if ff.f == nil { + panic("BUG: ff.f must be non-nil in bigFileReader") + } + + var r io.Reader + + ff.bigFilesLock.Lock() + n := len(ff.bigFiles) + if n > 0 { + r = ff.bigFiles[n-1] + ff.bigFiles = ff.bigFiles[:n-1] + } + ff.bigFilesLock.Unlock() + + if r != nil { + return r, nil + } + + f, err := os.Open(ff.f.Name()) + if err != nil { + return nil, fmt.Errorf("cannot open already opened file: %s", err) + } + return &bigFileReader{ + f: f, + ff: ff, + r: f, + }, nil +} + +func (ff *fsFile) Release() { + if ff.f != nil { + ff.f.Close() + + if ff.isBig() { + ff.bigFilesLock.Lock() + for _, r := range ff.bigFiles { + r.f.Close() + } + ff.bigFilesLock.Unlock() + } + } +} + +func (ff *fsFile) decReadersCount() { + ff.h.cacheLock.Lock() + ff.readersCount-- + if ff.readersCount < 0 { + panic("BUG: negative fsFile.readersCount!") + } + ff.h.cacheLock.Unlock() +} + +// bigFileReader attempts to trigger sendfile +// for sending big files over the wire. 
+type bigFileReader struct {
+	f  *os.File
+	ff *fsFile
+	r  io.Reader
+	lr io.LimitedReader
+}
+
+func (r *bigFileReader) UpdateByteRange(startPos, endPos int) error {
+	if _, err := r.f.Seek(int64(startPos), 0); err != nil {
+		return err
+	}
+	r.r = &r.lr
+	r.lr.R = r.f
+	r.lr.N = int64(endPos - startPos + 1)
+	return nil
+}
+
+func (r *bigFileReader) Read(p []byte) (int, error) {
+	return r.r.Read(p)
+}
+
+func (r *bigFileReader) WriteTo(w io.Writer) (int64, error) {
+	if rf, ok := w.(io.ReaderFrom); ok {
+		// fast path. Sendfile must be triggered
+		return rf.ReadFrom(r.r)
+	}
+
+	// slow path
+	return copyZeroAlloc(w, r.r)
+}
+
+func (r *bigFileReader) Close() error {
+	r.r = r.f
+	n, err := r.f.Seek(0, 0)
+	if err == nil {
+		if n != 0 {
+			panic("BUG: File.Seek(0,0) returned (non-zero, nil)")
+		}
+
+		ff := r.ff
+		ff.bigFilesLock.Lock()
+		ff.bigFiles = append(ff.bigFiles, r)
+		ff.bigFilesLock.Unlock()
+	} else {
+		r.f.Close()
+	}
+	r.ff.decReadersCount()
+	return err
+}
+
+type fsSmallFileReader struct {
+	ff       *fsFile
+	startPos int
+	endPos   int
+}
+
+func (r *fsSmallFileReader) Close() error {
+	ff := r.ff
+	ff.decReadersCount()
+	r.ff = nil
+	r.startPos = 0
+	r.endPos = 0
+	ff.h.smallFileReaderPool.Put(r)
+	return nil
+}
+
+func (r *fsSmallFileReader) UpdateByteRange(startPos, endPos int) error {
+	r.startPos = startPos
+	r.endPos = endPos + 1
+	return nil
+}
+
+func (r *fsSmallFileReader) Read(p []byte) (int, error) {
+	tailLen := r.endPos - r.startPos
+	if tailLen <= 0 {
+		return 0, io.EOF
+	}
+	if len(p) > tailLen {
+		p = p[:tailLen]
+	}
+
+	ff := r.ff
+	if ff.f != nil {
+		n, err := ff.f.ReadAt(p, int64(r.startPos))
+		r.startPos += n
+		return n, err
+	}
+
+	n := copy(p, ff.dirIndex[r.startPos:])
+	r.startPos += n
+	return n, nil
+}
+
+func (r *fsSmallFileReader) WriteTo(w io.Writer) (int64, error) {
+	ff := r.ff
+
+	var n int
+	var err error
+	if ff.f == nil {
+		n, err = w.Write(ff.dirIndex[r.startPos:r.endPos])
+		return int64(n), err
+	}
+
+	if rf, ok := w.(io.ReaderFrom); ok {
+		return rf.ReadFrom(r)
+	}
+
+	curPos := r.startPos
+	bufv := copyBufPool.Get()
+	buf := bufv.([]byte)
+	for err == nil {
+		tailLen := r.endPos - curPos
+		if tailLen <= 0 {
+			break
+		}
+		if len(buf) > tailLen {
+			buf = buf[:tailLen]
+		}
+		n, err = ff.f.ReadAt(buf, int64(curPos))
+		nw, errw := w.Write(buf[:n])
+		curPos += nw
+		if errw == nil && nw != n {
+			panic("BUG: Write(p) returned (n, nil), where n != len(p)")
+		}
+		if err == nil {
+			err = errw
+		}
+	}
+	copyBufPool.Put(bufv)
+
+	if err == io.EOF {
+		err = nil
+	}
+	return int64(curPos - r.startPos), err
+}
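The WriteTo methods above check whether the destination implements io.ReaderFrom before falling back to a buffered copy; when the destination is a *net.TCPConn, that fast path lets the Go runtime copy file data in the kernel via sendfile(2). A standalone sketch of the pattern, under an illustrative package name (the vendored code uses its own copyZeroAlloc as the fallback):

    package fsutil

    import (
        "io"
        "os"
    )

    // copyPreferReaderFrom mirrors the fast path used by bigFileReader.WriteTo:
    // if dst implements io.ReaderFrom (as *net.TCPConn does), ReadFrom may hand
    // the copy to the kernel via sendfile(2); otherwise fall back to io.Copy.
    func copyPreferReaderFrom(dst io.Writer, src *os.File) (int64, error) {
        if rf, ok := dst.(io.ReaderFrom); ok {
            return rf.ReadFrom(src) // fast path
        }
        return io.Copy(dst, src) // slow path
    }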
+
+func (h *fsHandler) cleanCache(pendingFiles []*fsFile) []*fsFile {
+	var filesToRelease []*fsFile
+
+	h.cacheLock.Lock()
+
+	// Close files which couldn't be closed before due to non-zero
+	// readers count on the previous run.
+	var remainingFiles []*fsFile
+	for _, ff := range pendingFiles {
+		if ff.readersCount > 0 {
+			remainingFiles = append(remainingFiles, ff)
+		} else {
+			filesToRelease = append(filesToRelease, ff)
+		}
+	}
+	pendingFiles = remainingFiles
+
+	pendingFiles, filesToRelease = cleanCacheNolock(h.cache, pendingFiles, filesToRelease, h.cacheDuration)
+	pendingFiles, filesToRelease = cleanCacheNolock(h.compressedCache, pendingFiles, filesToRelease, h.cacheDuration)
+
+	h.cacheLock.Unlock()
+
+	for _, ff := range filesToRelease {
+		ff.Release()
+	}
+
+	return pendingFiles
+}
+
+func cleanCacheNolock(cache map[string]*fsFile, pendingFiles, filesToRelease []*fsFile, cacheDuration time.Duration) ([]*fsFile, []*fsFile) {
+	t := time.Now()
+	for k, ff := range cache {
+		if t.Sub(ff.t) > cacheDuration {
+			if ff.readersCount > 0 {
+				// There are pending readers on stale file handle,
+				// so we cannot close it. Put it into pendingFiles
+				// so it will be closed later.
+				pendingFiles = append(pendingFiles, ff)
+			} else {
+				filesToRelease = append(filesToRelease, ff)
+			}
+			delete(cache, k)
+		}
+	}
+	return pendingFiles, filesToRelease
+}
+
+func (h *fsHandler) handleRequest(ctx *RequestCtx) {
+	var path []byte
+	if h.pathRewrite != nil {
+		path = h.pathRewrite(ctx)
+	} else {
+		path = ctx.Path()
+	}
+	path = stripTrailingSlashes(path)
+
+	if n := bytes.IndexByte(path, 0); n >= 0 {
+		ctx.Logger().Printf("cannot serve path with nil byte at position %d: %q", n, path)
+		ctx.Error("Are you a hacker?", StatusBadRequest)
+		return
+	}
+	if h.pathRewrite != nil {
+		// There is no need to check for '/../' if path = ctx.Path(),
+		// since ctx.Path must normalize and sanitize the path.
+
+		if n := bytes.Index(path, strSlashDotDotSlash); n >= 0 {
+			ctx.Logger().Printf("cannot serve path with '/../' at position %d due to security reasons: %q", n, path)
+			ctx.Error("Internal Server Error", StatusInternalServerError)
+			return
+		}
+	}
+
+	mustCompress := false
+	fileCache := h.cache
+	byteRange := ctx.Request.Header.peek(strRange)
+	if len(byteRange) == 0 && h.compress && ctx.Request.Header.HasAcceptEncodingBytes(strGzip) {
+		mustCompress = true
+		fileCache = h.compressedCache
+	}
+
+	h.cacheLock.Lock()
+	ff, ok := fileCache[string(path)]
+	if ok {
+		ff.readersCount++
+	}
+	h.cacheLock.Unlock()
+
+	if !ok {
+		pathStr := string(path)
+		filePath := h.root + pathStr
+		var err error
+		ff, err = h.openFSFile(filePath, mustCompress)
+		if mustCompress && err == errNoCreatePermission {
+			ctx.Logger().Printf("insufficient permissions for saving compressed file for %q. Serving uncompressed file. "+
+				"Allow write access to the directory with this file in order to improve fasthttp performance", filePath)
+			mustCompress = false
+			ff, err = h.openFSFile(filePath, mustCompress)
+		}
+		if err == errDirIndexRequired {
+			ff, err = h.openIndexFile(ctx, filePath, mustCompress)
+			if err != nil {
+				ctx.Logger().Printf("cannot open dir index %q: %s", filePath, err)
+				ctx.Error("Directory index is forbidden", StatusForbidden)
+				return
+			}
+		} else if err != nil {
+			ctx.Logger().Printf("cannot open file %q: %s", filePath, err)
+			ctx.Error("Cannot open requested path", StatusNotFound)
+			return
+		}
+
+		h.cacheLock.Lock()
+		ff1, ok := fileCache[pathStr]
+		if !ok {
+			fileCache[pathStr] = ff
+			ff.readersCount++
+		} else {
+			ff1.readersCount++
+		}
+		h.cacheLock.Unlock()
+
+		if ok {
+			// The file has been already opened by another
+			// goroutine, so close the current file and use
+			// the file opened by another goroutine instead.
+			ff.Release()
+			ff = ff1
+		}
+	}
+
+	if !ctx.IfModifiedSince(ff.lastModified) {
+		ff.decReadersCount()
+		ctx.NotModified()
+		return
+	}
+
+	r, err := ff.NewReader()
+	if err != nil {
+		ctx.Logger().Printf("cannot obtain file reader for path=%q: %s", path, err)
+		ctx.Error("Internal Server Error", StatusInternalServerError)
+		return
+	}
+
+	hdr := &ctx.Response.Header
+	if ff.compressed {
+		hdr.SetCanonical(strContentEncoding, strGzip)
+	}
+
+	statusCode := StatusOK
+	contentLength := ff.contentLength
+	if h.acceptByteRange {
+		hdr.SetCanonical(strAcceptRanges, strBytes)
+		if len(byteRange) > 0 {
+			startPos, endPos, err := ParseByteRange(byteRange, contentLength)
+			if err != nil {
+				r.(io.Closer).Close()
+				ctx.Logger().Printf("cannot parse byte range %q for path=%q: %s", byteRange, path, err)
+				ctx.Error("Range Not Satisfiable", StatusRequestedRangeNotSatisfiable)
+				return
+			}
+
+			if err = r.(byteRangeUpdater).UpdateByteRange(startPos, endPos); err != nil {
+				r.(io.Closer).Close()
+				ctx.Logger().Printf("cannot seek byte range %q for path=%q: %s", byteRange, path, err)
+				ctx.Error("Internal Server Error", StatusInternalServerError)
+				return
+			}
+
+			hdr.SetContentRange(startPos, endPos, contentLength)
+			contentLength = endPos - startPos + 1
+			statusCode = StatusPartialContent
+		}
+	}
+
+	hdr.SetCanonical(strLastModified, ff.lastModifiedStr)
+	if !ctx.IsHead() {
+		ctx.SetBodyStream(r, contentLength)
+	} else {
+		ctx.Response.ResetBody()
+		ctx.Response.SkipBody = true
+		ctx.Response.Header.SetContentLength(contentLength)
+		if rc, ok := r.(io.Closer); ok {
+			if err := rc.Close(); err != nil {
+				ctx.Logger().Printf("cannot close file reader: %s", err)
+				ctx.Error("Internal Server Error", StatusInternalServerError)
+				return
+			}
+		}
+	}
+	ctx.SetContentType(ff.contentType)
+	ctx.SetStatusCode(statusCode)
+}
+
+type byteRangeUpdater interface {
+	UpdateByteRange(startPos, endPos int) error
+}
+
+// ParseByteRange parses 'Range: bytes=...' header value.
+//
+// It follows https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 .
+func ParseByteRange(byteRange []byte, contentLength int) (startPos, endPos int, err error) {
+	b := byteRange
+	if !bytes.HasPrefix(b, strBytes) {
+		return 0, 0, fmt.Errorf("unsupported range units: %q. Expecting %q", byteRange, strBytes)
+	}
+
+	b = b[len(strBytes):]
+	if len(b) == 0 || b[0] != '=' {
+		return 0, 0, fmt.Errorf("missing byte range in %q", byteRange)
+	}
+	b = b[1:]
+
+	n := bytes.IndexByte(b, '-')
+	if n < 0 {
+		return 0, 0, fmt.Errorf("missing the end position of byte range in %q", byteRange)
+	}
+
+	if n == 0 {
+		v, err := ParseUint(b[n+1:])
+		if err != nil {
+			return 0, 0, err
+		}
+		startPos := contentLength - v
+		if startPos < 0 {
+			startPos = 0
+		}
+		return startPos, contentLength - 1, nil
+	}
+
+	if startPos, err = ParseUint(b[:n]); err != nil {
+		return 0, 0, err
+	}
+	if startPos >= contentLength {
+		return 0, 0, fmt.Errorf("the start position of byte range cannot exceed %d. byte range %q", contentLength-1, byteRange)
+	}
+
+	b = b[n+1:]
+	if len(b) == 0 {
+		return startPos, contentLength - 1, nil
+	}
+
+	if endPos, err = ParseUint(b); err != nil {
+		return 0, 0, err
+	}
+	if endPos >= contentLength {
+		endPos = contentLength - 1
+	}
+	if endPos < startPos {
+		return 0, 0, fmt.Errorf("the start position of byte range cannot exceed the end position. byte range %q", byteRange)
+	}
+	return startPos, endPos, nil
+}
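A few worked examples of ParseByteRange against the rules above; the expected values follow directly from the code, and the 1000-byte contentLength and package name are illustrative:

    package fsutil

    import "github.com/valyala/fasthttp"

    // byteRangeExamples shows what ParseByteRange returns for a
    // 1000-byte resource.
    func byteRangeExamples() error {
        // "bytes=0-499" -> startPos=0,   endPos=499 (first 500 bytes)
        // "bytes=500-"  -> startPos=500, endPos=999 (missing end clamps to EOF)
        // "bytes=-200"  -> startPos=800, endPos=999 (suffix of 200 bytes)
        start, end, err := fasthttp.ParseByteRange([]byte("bytes=0-499"), 1000)
        if err != nil {
            return err
        }
        _, _ = start, end // 0, 499
        return nil
    }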
+
+func (h *fsHandler) openIndexFile(ctx *RequestCtx, dirPath string, mustCompress bool) (*fsFile, error) {
+	for _, indexName := range h.indexNames {
+		indexFilePath := dirPath + "/" + indexName
+		ff, err := h.openFSFile(indexFilePath, mustCompress)
+		if err == nil {
+			return ff, nil
+		}
+		if !os.IsNotExist(err) {
+			return nil, fmt.Errorf("cannot open file %q: %s", indexFilePath, err)
+		}
+	}
+
+	if !h.generateIndexPages {
+		return nil, fmt.Errorf("cannot access directory without index page. Directory %q", dirPath)
+	}
+
+	return h.createDirIndex(ctx.URI(), dirPath, mustCompress)
+}
+
+var (
+	errDirIndexRequired   = errors.New("directory index required")
+	errNoCreatePermission = errors.New("no 'create file' permissions")
+)
+
+func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool) (*fsFile, error) {
+	w := &ByteBuffer{}
+
+	basePathEscaped := html.EscapeString(string(base.Path()))
+	fmt.Fprintf(w, "<html><head><title>%s</title><style>.dir { font-weight: bold }</style></head><body>", basePathEscaped)
+	fmt.Fprintf(w, "<h1>%s</h1>", basePathEscaped)
+	fmt.Fprintf(w, "<ul>")
+
+	if len(basePathEscaped) > 1 {
+		var parentURI URI
+		base.CopyTo(&parentURI)
+		parentURI.Update(string(base.Path()) + "/..")
+		parentPathEscaped := html.EscapeString(string(parentURI.Path()))
+		fmt.Fprintf(w, `<li><a href="%s" class="dir">..</a></li>`, parentPathEscaped)
+	}
+
+	f, err := os.Open(dirPath)
+	if err != nil {
+		return nil, err
+	}
+
+	fileinfos, err := f.Readdir(0)
+	f.Close()
+	if err != nil {
+		return nil, err
+	}
+
+	fm := make(map[string]os.FileInfo, len(fileinfos))
+	var filenames []string
+	for _, fi := range fileinfos {
+		name := fi.Name()
+		if strings.HasSuffix(name, h.compressedFileSuffix) {
+			// Do not show compressed files on index page.
+			continue
+		}
+		fm[name] = fi
+		filenames = append(filenames, name)
+	}
+
+	var u URI
+	base.CopyTo(&u)
+	u.Update(string(u.Path()) + "/")
+
+	sort.Sort(sort.StringSlice(filenames))
+	for _, name := range filenames {
+		u.Update(name)
+		pathEscaped := html.EscapeString(string(u.Path()))
+		fi := fm[name]
+		auxStr := "dir"
+		className := "dir"
+		if !fi.IsDir() {
+			auxStr = fmt.Sprintf("file, %d bytes", fi.Size())
+			className = "file"
+		}
+		fmt.Fprintf(w, `<li><a href="%s" class="%s">%s</a>, %s, last modified %s</li>`,
+			pathEscaped, className, html.EscapeString(name), auxStr, fsModTime(fi.ModTime()))
+	}
+
+	fmt.Fprintf(w, "</ul></body></html>")
+
+	if mustCompress {
+		var zbuf ByteBuffer
+		zw := acquireGzipWriter(&zbuf, CompressDefaultCompression)
+		_, err = zw.Write(w.B)
+		releaseGzipWriter(zw)
+		if err != nil {
+			return nil, fmt.Errorf("error when compressing automatically generated index for directory %q: %s", dirPath, err)
+		}
+		w = &zbuf
+	}
+
+	dirIndex := w.B
+	lastModified := time.Now()
+	ff := &fsFile{
+		h:               h,
+		dirIndex:        dirIndex,
+		contentType:     "text/html; charset=utf-8",
+		contentLength:   len(dirIndex),
+		compressed:      mustCompress,
+		lastModified:    lastModified,
+		lastModifiedStr: AppendHTTPDate(nil, lastModified),
+
+		t: lastModified,
+	}
+	return ff, nil
+}
+
+const (
+	fsMinCompressRatio        = 0.8
+	fsMaxCompressibleFileSize = 8 * 1024 * 1024
+)
+
+func (h *fsHandler) compressAndOpenFSFile(filePath string) (*fsFile, error) {
+	f, err := os.Open(filePath)
+	if err != nil {
+		return nil, err
+	}
+
+	fileInfo, err := f.Stat()
+	if err != nil {
+		f.Close()
+		return nil, fmt.Errorf("cannot obtain info for file %q: %s", filePath, err)
+	}
+
+	if fileInfo.IsDir() {
+		f.Close()
+		return nil, errDirIndexRequired
+	}
+
+	if strings.HasSuffix(filePath, h.compressedFileSuffix) ||
+		fileInfo.Size() > fsMaxCompressibleFileSize ||
+		!isFileCompressible(f, fsMinCompressRatio) {
+		return h.newFSFile(f, fileInfo, false)
+	}
+
+	compressedFilePath := filePath + h.compressedFileSuffix
+	absPath, err := filepath.Abs(compressedFilePath)
+	if err != nil {
+		f.Close()
+		return nil, fmt.Errorf("cannot determine absolute path for %q: %s", compressedFilePath, err)
+	}
+
+	flock := getFileLock(absPath)
+	flock.Lock()
+	ff, err := h.compressFileNolock(f, fileInfo, filePath, compressedFilePath)
+	flock.Unlock()
+
+	return ff, err
+}
+
+func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePath, compressedFilePath string) (*fsFile, error) {
+	// Attempt to open compressed file created by another concurrent
+	// goroutine.
+	// It is safe opening such a file, since the file creation
+	// is guarded by file mutex - see getFileLock call.
+	if _, err := os.Stat(compressedFilePath); err == nil {
+		f.Close()
+		return h.newCompressedFSFile(compressedFilePath)
+	}
+
+	// Create temporary file, so concurrent goroutines don't use
+	// it until it is created.
+ tmpFilePath := compressedFilePath + ".tmp" + zf, err := os.Create(tmpFilePath) + if err != nil { + f.Close() + if !os.IsPermission(err) { + return nil, fmt.Errorf("cannot create temporary file %q: %s", tmpFilePath, err) + } + return nil, errNoCreatePermission + } + + zw := acquireGzipWriter(zf, CompressDefaultCompression) + _, err = copyZeroAlloc(zw, f) + if err1 := zw.Flush(); err == nil { + err = err1 + } + releaseGzipWriter(zw) + zf.Close() + f.Close() + if err != nil { + return nil, fmt.Errorf("error when compressing file %q to %q: %s", filePath, tmpFilePath, err) + } + if err = os.Chtimes(tmpFilePath, time.Now(), fileInfo.ModTime()); err != nil { + return nil, fmt.Errorf("cannot change modification time to %s for tmp file %q: %s", + fileInfo.ModTime(), tmpFilePath, err) + } + if err = os.Rename(tmpFilePath, compressedFilePath); err != nil { + return nil, fmt.Errorf("cannot move compressed file from %q to %q: %s", tmpFilePath, compressedFilePath, err) + } + return h.newCompressedFSFile(compressedFilePath) +} + +func (h *fsHandler) newCompressedFSFile(filePath string) (*fsFile, error) { + f, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("cannot open compressed file %q: %s", filePath, err) + } + fileInfo, err := f.Stat() + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot obtain info for compressed file %q: %s", filePath, err) + } + return h.newFSFile(f, fileInfo, true) +} + +func (h *fsHandler) openFSFile(filePath string, mustCompress bool) (*fsFile, error) { + filePathOriginal := filePath + if mustCompress { + filePath += h.compressedFileSuffix + } + + f, err := os.Open(filePath) + if err != nil { + if mustCompress && os.IsNotExist(err) { + return h.compressAndOpenFSFile(filePathOriginal) + } + return nil, err + } + + fileInfo, err := f.Stat() + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot obtain info for file %q: %s", filePath, err) + } + + if fileInfo.IsDir() { + f.Close() + if mustCompress { + return nil, fmt.Errorf("directory with unexpected suffix found: %q. Suffix: %q", + filePath, h.compressedFileSuffix) + } + return nil, errDirIndexRequired + } + + if mustCompress { + fileInfoOriginal, err := os.Stat(filePathOriginal) + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot obtain info for original file %q: %s", filePathOriginal, err) + } + + if fileInfoOriginal.ModTime() != fileInfo.ModTime() { + // The compressed file became stale. Re-create it. 
+ f.Close() + os.Remove(filePath) + return h.compressAndOpenFSFile(filePathOriginal) + } + } + + return h.newFSFile(f, fileInfo, mustCompress) +} + +func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool) (*fsFile, error) { + n := fileInfo.Size() + contentLength := int(n) + if n != int64(contentLength) { + f.Close() + return nil, fmt.Errorf("too big file: %d bytes", n) + } + + // detect content-type + ext := fileExtension(fileInfo.Name(), compressed, h.compressedFileSuffix) + contentType := mime.TypeByExtension(ext) + if len(contentType) == 0 { + data, err := readFileHeader(f, compressed) + if err != nil { + return nil, fmt.Errorf("cannot read header of the file %q: %s", f.Name(), err) + } + contentType = http.DetectContentType(data) + } + + lastModified := fileInfo.ModTime() + ff := &fsFile{ + h: h, + f: f, + contentType: contentType, + contentLength: contentLength, + compressed: compressed, + lastModified: lastModified, + lastModifiedStr: AppendHTTPDate(nil, lastModified), + + t: time.Now(), + } + return ff, nil +} + +func readFileHeader(f *os.File, compressed bool) ([]byte, error) { + r := io.Reader(f) + var zr *gzip.Reader + if compressed { + var err error + if zr, err = acquireGzipReader(f); err != nil { + return nil, err + } + r = zr + } + + lr := &io.LimitedReader{ + R: r, + N: 512, + } + data, err := ioutil.ReadAll(lr) + f.Seek(0, 0) + + if zr != nil { + releaseGzipReader(zr) + } + + return data, err +} + +func stripLeadingSlashes(path []byte, stripSlashes int) []byte { + for stripSlashes > 0 && len(path) > 0 { + if path[0] != '/' { + panic("BUG: path must start with slash") + } + n := bytes.IndexByte(path[1:], '/') + if n < 0 { + path = path[:0] + break + } + path = path[n+1:] + stripSlashes-- + } + return path +} + +func stripTrailingSlashes(path []byte) []byte { + for len(path) > 0 && path[len(path)-1] == '/' { + path = path[:len(path)-1] + } + return path +} + +func fileExtension(path string, compressed bool, compressedFileSuffix string) string { + if compressed && strings.HasSuffix(path, compressedFileSuffix) { + path = path[:len(path)-len(compressedFileSuffix)] + } + n := strings.LastIndexByte(path, '.') + if n < 0 { + return "" + } + return path[n:] +} + +// FileLastModified returns last modified time for the file. +func FileLastModified(path string) (time.Time, error) { + f, err := os.Open(path) + if err != nil { + return zeroTime, err + } + fileInfo, err := f.Stat() + f.Close() + if err != nil { + return zeroTime, err + } + return fsModTime(fileInfo.ModTime()), nil +} + +func fsModTime(t time.Time) time.Time { + return t.In(time.UTC).Truncate(time.Second) +} + +var ( + filesLockMap = make(map[string]*sync.Mutex) + filesLockMapLock sync.Mutex +) + +func getFileLock(absPath string) *sync.Mutex { + filesLockMapLock.Lock() + flock := filesLockMap[absPath] + if flock == nil { + flock = &sync.Mutex{} + filesLockMap[absPath] = flock + } + filesLockMapLock.Unlock() + return flock +} diff --git a/vendor/github.com/valyala/fasthttp/header.go b/vendor/github.com/valyala/fasthttp/header.go new file mode 100644 index 0000000..78c105c --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/header.go @@ -0,0 +1,2083 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "sync/atomic" + "time" +) + +// ResponseHeader represents HTTP response header. +// +// It is forbidden copying ResponseHeader instances. +// Create new instances instead and use CopyTo. 
+// +// ResponseHeader instance MUST NOT be used from concurrently running +// goroutines. +type ResponseHeader struct { + noCopy noCopy + + disableNormalizing bool + noHTTP11 bool + connectionClose bool + + statusCode int + contentLength int + contentLengthBytes []byte + + contentType []byte + server []byte + + h []argsKV + bufKV argsKV + + cookies []argsKV +} + +// RequestHeader represents HTTP request header. +// +// It is forbidden copying RequestHeader instances. +// Create new instances instead and use CopyTo. +// +// RequestHeader instance MUST NOT be used from concurrently running +// goroutines. +type RequestHeader struct { + noCopy noCopy + + disableNormalizing bool + noHTTP11 bool + connectionClose bool + isGet bool + + // These two fields have been moved close to other bool fields + // for reducing RequestHeader object size. + cookiesCollected bool + rawHeadersParsed bool + + contentLength int + contentLengthBytes []byte + + method []byte + requestURI []byte + host []byte + contentType []byte + userAgent []byte + + h []argsKV + bufKV argsKV + + cookies []argsKV + + rawHeaders []byte +} + +// SetContentRange sets 'Content-Range: bytes startPos-endPos/contentLength' +// header. +func (h *ResponseHeader) SetContentRange(startPos, endPos, contentLength int) { + b := h.bufKV.value[:0] + b = append(b, strBytes...) + b = append(b, ' ') + b = AppendUint(b, startPos) + b = append(b, '-') + b = AppendUint(b, endPos) + b = append(b, '/') + b = AppendUint(b, contentLength) + h.bufKV.value = b + + h.SetCanonical(strContentRange, h.bufKV.value) +} + +// SetByteRange sets 'Range: bytes=startPos-endPos' header. +// +// * If startPos is negative, then 'bytes=-startPos' value is set. +// * If endPos is negative, then 'bytes=startPos-' value is set. +func (h *RequestHeader) SetByteRange(startPos, endPos int) { + h.parseRawHeaders() + + b := h.bufKV.value[:0] + b = append(b, strBytes...) + b = append(b, '=') + if startPos >= 0 { + b = AppendUint(b, startPos) + } else { + endPos = -startPos + } + b = append(b, '-') + if endPos >= 0 { + b = AppendUint(b, endPos) + } + h.bufKV.value = b + + h.SetCanonical(strRange, h.bufKV.value) +} + +// StatusCode returns response status code. +func (h *ResponseHeader) StatusCode() int { + if h.statusCode == 0 { + return StatusOK + } + return h.statusCode +} + +// SetStatusCode sets response status code. +func (h *ResponseHeader) SetStatusCode(statusCode int) { + h.statusCode = statusCode +} + +// SetLastModified sets 'Last-Modified' header to the given value. +func (h *ResponseHeader) SetLastModified(t time.Time) { + h.bufKV.value = AppendHTTPDate(h.bufKV.value[:0], t) + h.SetCanonical(strLastModified, h.bufKV.value) +} + +// ConnectionClose returns true if 'Connection: close' header is set. +func (h *ResponseHeader) ConnectionClose() bool { + return h.connectionClose +} + +// SetConnectionClose sets 'Connection: close' header. +func (h *ResponseHeader) SetConnectionClose() { + h.connectionClose = true +} + +// ResetConnectionClose clears 'Connection: close' header if it exists. +func (h *ResponseHeader) ResetConnectionClose() { + if h.connectionClose { + h.connectionClose = false + h.h = delAllArgsBytes(h.h, strConnection) + } +} + +// ConnectionClose returns true if 'Connection: close' header is set. +func (h *RequestHeader) ConnectionClose() bool { + h.parseRawHeaders() + return h.connectionClose +} + +func (h *RequestHeader) connectionCloseFast() bool { + // h.parseRawHeaders() isn't called for performance reasons. 
+ // Use ConnectionClose for triggering raw headers parsing. + return h.connectionClose +} + +// SetConnectionClose sets 'Connection: close' header. +func (h *RequestHeader) SetConnectionClose() { + // h.parseRawHeaders() isn't called for performance reasons. + h.connectionClose = true +} + +// ResetConnectionClose clears 'Connection: close' header if it exists. +func (h *RequestHeader) ResetConnectionClose() { + h.parseRawHeaders() + if h.connectionClose { + h.connectionClose = false + h.h = delAllArgsBytes(h.h, strConnection) + } +} + +// ConnectionUpgrade returns true if 'Connection: Upgrade' header is set. +func (h *ResponseHeader) ConnectionUpgrade() bool { + return hasHeaderValue(h.Peek("Connection"), strUpgrade) +} + +// ConnectionUpgrade returns true if 'Connection: Upgrade' header is set. +func (h *RequestHeader) ConnectionUpgrade() bool { + h.parseRawHeaders() + return hasHeaderValue(h.Peek("Connection"), strUpgrade) +} + +// ContentLength returns Content-Length header value. +// +// It may be negative: +// -1 means Transfer-Encoding: chunked. +// -2 means Transfer-Encoding: identity. +func (h *ResponseHeader) ContentLength() int { + return h.contentLength +} + +// SetContentLength sets Content-Length header value. +// +// Content-Length may be negative: +// -1 means Transfer-Encoding: chunked. +// -2 means Transfer-Encoding: identity. +func (h *ResponseHeader) SetContentLength(contentLength int) { + if h.mustSkipContentLength() { + return + } + h.contentLength = contentLength + if contentLength >= 0 { + h.contentLengthBytes = AppendUint(h.contentLengthBytes[:0], contentLength) + h.h = delAllArgsBytes(h.h, strTransferEncoding) + } else { + h.contentLengthBytes = h.contentLengthBytes[:0] + value := strChunked + if contentLength == -2 { + h.SetConnectionClose() + value = strIdentity + } + h.h = setArgBytes(h.h, strTransferEncoding, value) + } +} + +func (h *ResponseHeader) mustSkipContentLength() bool { + // From http/1.1 specs: + // All 1xx (informational), 204 (no content), and 304 (not modified) responses MUST NOT include a message-body + statusCode := h.StatusCode() + + // Fast path. + if statusCode < 100 || statusCode == StatusOK { + return false + } + + // Slow path. + return statusCode == StatusNotModified || statusCode == StatusNoContent || statusCode < 200 +} + +// ContentLength returns Content-Length header value. +// +// It may be negative: +// -1 means Transfer-Encoding: chunked. +func (h *RequestHeader) ContentLength() int { + if h.noBody() { + return 0 + } + h.parseRawHeaders() + return h.contentLength +} + +// SetContentLength sets Content-Length header value. +// +// Negative content-length sets 'Transfer-Encoding: chunked' header. +func (h *RequestHeader) SetContentLength(contentLength int) { + h.parseRawHeaders() + h.contentLength = contentLength + if contentLength >= 0 { + h.contentLengthBytes = AppendUint(h.contentLengthBytes[:0], contentLength) + h.h = delAllArgsBytes(h.h, strTransferEncoding) + } else { + h.contentLengthBytes = h.contentLengthBytes[:0] + h.h = setArgBytes(h.h, strTransferEncoding, strChunked) + } +} + +// ContentType returns Content-Type header value. +func (h *ResponseHeader) ContentType() []byte { + contentType := h.contentType + if len(h.contentType) == 0 { + contentType = defaultContentType + } + return contentType +} + +// SetContentType sets Content-Type header value. +func (h *ResponseHeader) SetContentType(contentType string) { + h.contentType = append(h.contentType[:0], contentType...) 
+} + +// SetContentTypeBytes sets Content-Type header value. +func (h *ResponseHeader) SetContentTypeBytes(contentType []byte) { + h.contentType = append(h.contentType[:0], contentType...) +} + +// Server returns Server header value. +func (h *ResponseHeader) Server() []byte { + return h.server +} + +// SetServer sets Server header value. +func (h *ResponseHeader) SetServer(server string) { + h.server = append(h.server[:0], server...) +} + +// SetServerBytes sets Server header value. +func (h *ResponseHeader) SetServerBytes(server []byte) { + h.server = append(h.server[:0], server...) +} + +// ContentType returns Content-Type header value. +func (h *RequestHeader) ContentType() []byte { + h.parseRawHeaders() + return h.contentType +} + +// SetContentType sets Content-Type header value. +func (h *RequestHeader) SetContentType(contentType string) { + h.parseRawHeaders() + h.contentType = append(h.contentType[:0], contentType...) +} + +// SetContentTypeBytes sets Content-Type header value. +func (h *RequestHeader) SetContentTypeBytes(contentType []byte) { + h.parseRawHeaders() + h.contentType = append(h.contentType[:0], contentType...) +} + +// SetMultipartFormBoundary sets the following Content-Type: +// 'multipart/form-data; boundary=...' +// where ... is substituted by the given boundary. +func (h *RequestHeader) SetMultipartFormBoundary(boundary string) { + h.parseRawHeaders() + + b := h.bufKV.value[:0] + b = append(b, strMultipartFormData...) + b = append(b, ';', ' ') + b = append(b, strBoundary...) + b = append(b, '=') + b = append(b, boundary...) + h.bufKV.value = b + + h.SetContentTypeBytes(h.bufKV.value) +} + +// SetMultipartFormBoundaryBytes sets the following Content-Type: +// 'multipart/form-data; boundary=...' +// where ... is substituted by the given boundary. +func (h *RequestHeader) SetMultipartFormBoundaryBytes(boundary []byte) { + h.parseRawHeaders() + + b := h.bufKV.value[:0] + b = append(b, strMultipartFormData...) + b = append(b, ';', ' ') + b = append(b, strBoundary...) + b = append(b, '=') + b = append(b, boundary...) + h.bufKV.value = b + + h.SetContentTypeBytes(h.bufKV.value) +} + +// MultipartFormBoundary returns boundary part +// from 'multipart/form-data; boundary=...' Content-Type. +func (h *RequestHeader) MultipartFormBoundary() []byte { + b := h.ContentType() + if !bytes.HasPrefix(b, strMultipartFormData) { + return nil + } + b = b[len(strMultipartFormData):] + if len(b) == 0 || b[0] != ';' { + return nil + } + + var n int + for len(b) > 0 { + n++ + for len(b) > n && b[n] == ' ' { + n++ + } + b = b[n:] + if !bytes.HasPrefix(b, strBoundary) { + if n = bytes.IndexByte(b, ';'); n < 0 { + return nil + } + continue + } + + b = b[len(strBoundary):] + if len(b) == 0 || b[0] != '=' { + return nil + } + b = b[1:] + if n = bytes.IndexByte(b, ';'); n >= 0 { + b = b[:n] + } + if len(b) > 1 && b[0] == '"' && b[len(b)-1] == '"' { + b = b[1 : len(b)-1] + } + return b + } + return nil +} + +// Host returns Host header value. +func (h *RequestHeader) Host() []byte { + if len(h.host) > 0 { + return h.host + } + if !h.rawHeadersParsed { + // fast path without employing full headers parsing. + host := peekRawHeader(h.rawHeaders, strHost) + if len(host) > 0 { + h.host = append(h.host[:0], host...) + return h.host + } + } + + // slow path. + h.parseRawHeaders() + return h.host +} + +// SetHost sets Host header value. +func (h *RequestHeader) SetHost(host string) { + h.parseRawHeaders() + h.host = append(h.host[:0], host...) +} + +// SetHostBytes sets Host header value. 
+func (h *RequestHeader) SetHostBytes(host []byte) {
+	h.parseRawHeaders()
+	h.host = append(h.host[:0], host...)
+}
+
+// UserAgent returns User-Agent header value.
+func (h *RequestHeader) UserAgent() []byte {
+	h.parseRawHeaders()
+	return h.userAgent
+}
+
+// SetUserAgent sets User-Agent header value.
+func (h *RequestHeader) SetUserAgent(userAgent string) {
+	h.parseRawHeaders()
+	h.userAgent = append(h.userAgent[:0], userAgent...)
+}
+
+// SetUserAgentBytes sets User-Agent header value.
+func (h *RequestHeader) SetUserAgentBytes(userAgent []byte) {
+	h.parseRawHeaders()
+	h.userAgent = append(h.userAgent[:0], userAgent...)
+}
+
+// Referer returns Referer header value.
+func (h *RequestHeader) Referer() []byte {
+	return h.PeekBytes(strReferer)
+}
+
+// SetReferer sets Referer header value.
+func (h *RequestHeader) SetReferer(referer string) {
+	h.SetBytesK(strReferer, referer)
+}
+
+// SetRefererBytes sets Referer header value.
+func (h *RequestHeader) SetRefererBytes(referer []byte) {
+	h.SetCanonical(strReferer, referer)
+}
+
+// Method returns HTTP request method.
+func (h *RequestHeader) Method() []byte {
+	if len(h.method) == 0 {
+		return strGet
+	}
+	return h.method
+}
+
+// SetMethod sets HTTP request method.
+func (h *RequestHeader) SetMethod(method string) {
+	h.method = append(h.method[:0], method...)
+}
+
+// SetMethodBytes sets HTTP request method.
+func (h *RequestHeader) SetMethodBytes(method []byte) {
+	h.method = append(h.method[:0], method...)
+}
+
+// RequestURI returns RequestURI from the first HTTP request line.
+func (h *RequestHeader) RequestURI() []byte {
+	requestURI := h.requestURI
+	if len(requestURI) == 0 {
+		requestURI = strSlash
+	}
+	return requestURI
+}
+
+// SetRequestURI sets RequestURI for the first HTTP request line.
+// RequestURI must be properly encoded.
+// Use URI.RequestURI for constructing proper RequestURI if unsure.
+func (h *RequestHeader) SetRequestURI(requestURI string) {
+	h.requestURI = append(h.requestURI[:0], requestURI...)
+}
+
+// SetRequestURIBytes sets RequestURI for the first HTTP request line.
+// RequestURI must be properly encoded.
+// Use URI.RequestURI for constructing proper RequestURI if unsure.
+func (h *RequestHeader) SetRequestURIBytes(requestURI []byte) {
+	h.requestURI = append(h.requestURI[:0], requestURI...)
+}
+
+// IsGet returns true if request method is GET.
+func (h *RequestHeader) IsGet() bool {
+	// Optimize fast path for GET requests.
+	if !h.isGet {
+		h.isGet = bytes.Equal(h.Method(), strGet)
+	}
+	return h.isGet
+}
+
+// IsPost returns true if request method is POST.
+func (h *RequestHeader) IsPost() bool {
+	return bytes.Equal(h.Method(), strPost)
+}
+
+// IsPut returns true if request method is PUT.
+func (h *RequestHeader) IsPut() bool {
+	return bytes.Equal(h.Method(), strPut)
+}
+
+// IsHead returns true if request method is HEAD.
+func (h *RequestHeader) IsHead() bool {
+	// Fast path
+	if h.isGet {
+		return false
+	}
+	return bytes.Equal(h.Method(), strHead)
+}
+
+// IsDelete returns true if request method is DELETE.
+func (h *RequestHeader) IsDelete() bool {
+	return bytes.Equal(h.Method(), strDelete)
+}
+
+// IsHTTP11 returns true if the request is HTTP/1.1.
+func (h *RequestHeader) IsHTTP11() bool {
+	return !h.noHTTP11
+}
+
+// IsHTTP11 returns true if the response is HTTP/1.1.
+func (h *ResponseHeader) IsHTTP11() bool {
+	return !h.noHTTP11
+}
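A short sketch exercising the accessors above; the package and function names are illustrative, and Method() falls back to GET (and RequestURI() to "/") when unset:

    package fsutil

    import "github.com/valyala/fasthttp"

    // requestHeaderExample shows the method and URI accessors in use.
    func requestHeaderExample() {
        var h fasthttp.RequestHeader
        h.SetMethod("POST")
        h.SetRequestURI("/casecmp")

        _ = h.IsPost()     // true
        _ = h.IsGet()      // false
        _ = h.Method()     // []byte("POST")
        _ = h.RequestURI() // []byte("/casecmp")
    }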
+
+// HasAcceptEncoding returns true if the header contains
+// the given Accept-Encoding value.
+func (h *RequestHeader) HasAcceptEncoding(acceptEncoding string) bool {
+	h.bufKV.value = append(h.bufKV.value[:0], acceptEncoding...)
+	return h.HasAcceptEncodingBytes(h.bufKV.value)
+}
+
+// HasAcceptEncodingBytes returns true if the header contains
+// the given Accept-Encoding value.
+func (h *RequestHeader) HasAcceptEncodingBytes(acceptEncoding []byte) bool {
+	ae := h.peek(strAcceptEncoding)
+	n := bytes.Index(ae, acceptEncoding)
+	if n < 0 {
+		return false
+	}
+	b := ae[n+len(acceptEncoding):]
+	if len(b) > 0 && b[0] != ',' {
+		return false
+	}
+	if n == 0 {
+		return true
+	}
+	return ae[n-1] == ' '
+}
+
+// Len returns the number of headers set,
+// i.e. the number of times f is called in VisitAll.
+func (h *ResponseHeader) Len() int {
+	n := 0
+	h.VisitAll(func(k, v []byte) { n++ })
+	return n
+}
+
+// Len returns the number of headers set,
+// i.e. the number of times f is called in VisitAll.
+func (h *RequestHeader) Len() int {
+	n := 0
+	h.VisitAll(func(k, v []byte) { n++ })
+	return n
+}
+
+// DisableNormalizing disables header names' normalization.
+//
+// By default all the header names are normalized by uppercasing
+// the first letter and all the first letters following dashes,
+// while lowercasing all the other letters.
+// Examples:
+//
+//   * CONNECTION -> Connection
+//   * conteNT-tYPE -> Content-Type
+//   * foo-bar-baz -> Foo-Bar-Baz
+//
+// Disable header names' normalization only if you know what you are doing.
+func (h *RequestHeader) DisableNormalizing() {
+	h.disableNormalizing = true
+}
+
+// DisableNormalizing disables header names' normalization.
+//
+// By default all the header names are normalized by uppercasing
+// the first letter and all the first letters following dashes,
+// while lowercasing all the other letters.
+// Examples:
+//
+//   * CONNECTION -> Connection
+//   * conteNT-tYPE -> Content-Type
+//   * foo-bar-baz -> Foo-Bar-Baz
+//
+// Disable header names' normalization only if you know what you are doing.
+func (h *ResponseHeader) DisableNormalizing() {
+	h.disableNormalizing = true
+}
+
+// Reset clears response header.
+func (h *ResponseHeader) Reset() {
+	h.disableNormalizing = false
+	h.resetSkipNormalize()
+}
+
+func (h *ResponseHeader) resetSkipNormalize() {
+	h.noHTTP11 = false
+	h.connectionClose = false
+
+	h.statusCode = 0
+	h.contentLength = 0
+	h.contentLengthBytes = h.contentLengthBytes[:0]
+
+	h.contentType = h.contentType[:0]
+	h.server = h.server[:0]
+
+	h.h = h.h[:0]
+	h.cookies = h.cookies[:0]
+}
+
+// Reset clears request header.
+func (h *RequestHeader) Reset() {
+	h.disableNormalizing = false
+	h.resetSkipNormalize()
+}
+
+func (h *RequestHeader) resetSkipNormalize() {
+	h.noHTTP11 = false
+	h.connectionClose = false
+	h.isGet = false
+
+	h.contentLength = 0
+	h.contentLengthBytes = h.contentLengthBytes[:0]
+
+	h.method = h.method[:0]
+	h.requestURI = h.requestURI[:0]
+	h.host = h.host[:0]
+	h.contentType = h.contentType[:0]
+	h.userAgent = h.userAgent[:0]
+
+	h.h = h.h[:0]
+	h.cookies = h.cookies[:0]
+	h.cookiesCollected = false
+
+	h.rawHeaders = h.rawHeaders[:0]
+	h.rawHeadersParsed = false
+}
+
+// CopyTo copies all the headers to dst.
+func (h *ResponseHeader) CopyTo(dst *ResponseHeader) {
+	dst.Reset()
+
+	dst.disableNormalizing = h.disableNormalizing
+	dst.noHTTP11 = h.noHTTP11
+	dst.connectionClose = h.connectionClose
+
+	dst.statusCode = h.statusCode
+	dst.contentLength = h.contentLength
+	dst.contentLengthBytes = append(dst.contentLengthBytes[:0], h.contentLengthBytes...)
+ dst.contentType = append(dst.contentType[:0], h.contentType...) + dst.server = append(dst.server[:0], h.server...) + dst.h = copyArgs(dst.h, h.h) + dst.cookies = copyArgs(dst.cookies, h.cookies) +} + +// CopyTo copies all the headers to dst. +func (h *RequestHeader) CopyTo(dst *RequestHeader) { + dst.Reset() + + dst.disableNormalizing = h.disableNormalizing + dst.noHTTP11 = h.noHTTP11 + dst.connectionClose = h.connectionClose + dst.isGet = h.isGet + + dst.contentLength = h.contentLength + dst.contentLengthBytes = append(dst.contentLengthBytes[:0], h.contentLengthBytes...) + dst.method = append(dst.method[:0], h.method...) + dst.requestURI = append(dst.requestURI[:0], h.requestURI...) + dst.host = append(dst.host[:0], h.host...) + dst.contentType = append(dst.contentType[:0], h.contentType...) + dst.userAgent = append(dst.userAgent[:0], h.userAgent...) + dst.h = copyArgs(dst.h, h.h) + dst.cookies = copyArgs(dst.cookies, h.cookies) + dst.cookiesCollected = h.cookiesCollected + dst.rawHeaders = append(dst.rawHeaders[:0], h.rawHeaders...) + dst.rawHeadersParsed = h.rawHeadersParsed +} + +// VisitAll calls f for each header. +// +// f must not retain references to key and/or value after returning. +// Copy key and/or value contents before returning if you need retaining them. +func (h *ResponseHeader) VisitAll(f func(key, value []byte)) { + if len(h.contentLengthBytes) > 0 { + f(strContentLength, h.contentLengthBytes) + } + contentType := h.ContentType() + if len(contentType) > 0 { + f(strContentType, contentType) + } + server := h.Server() + if len(server) > 0 { + f(strServer, server) + } + if len(h.cookies) > 0 { + visitArgs(h.cookies, func(k, v []byte) { + f(strSetCookie, v) + }) + } + visitArgs(h.h, f) + if h.ConnectionClose() { + f(strConnection, strClose) + } +} + +// VisitAllCookie calls f for each response cookie. +// +// Cookie name is passed in key and the whole Set-Cookie header value +// is passed in value on each f invocation. Value may be parsed +// with Cookie.ParseBytes(). +// +// f must not retain references to key and/or value after returning. +func (h *ResponseHeader) VisitAllCookie(f func(key, value []byte)) { + visitArgs(h.cookies, f) +} + +// VisitAllCookie calls f for each request cookie. +// +// f must not retain references to key and/or value after returning. +func (h *RequestHeader) VisitAllCookie(f func(key, value []byte)) { + h.parseRawHeaders() + h.collectCookies() + visitArgs(h.cookies, f) +} + +// VisitAll calls f for each header. +// +// f must not retain references to key and/or value after returning. +// Copy key and/or value contents before returning if you need retaining them. +func (h *RequestHeader) VisitAll(f func(key, value []byte)) { + h.parseRawHeaders() + host := h.Host() + if len(host) > 0 { + f(strHost, host) + } + if len(h.contentLengthBytes) > 0 { + f(strContentLength, h.contentLengthBytes) + } + contentType := h.ContentType() + if len(contentType) > 0 { + f(strContentType, contentType) + } + userAgent := h.UserAgent() + if len(userAgent) > 0 { + f(strUserAgent, userAgent) + } + + h.collectCookies() + if len(h.cookies) > 0 { + h.bufKV.value = appendRequestCookieBytes(h.bufKV.value[:0], h.cookies) + f(strCookie, h.bufKV.value) + } + visitArgs(h.h, f) + if h.ConnectionClose() { + f(strConnection, strClose) + } +} + +// Del deletes header with the given key. +func (h *ResponseHeader) Del(key string) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.del(k) +} + +// DelBytes deletes header with the given key. 
+func (h *ResponseHeader) DelBytes(key []byte) { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.del(h.bufKV.key) +} + +func (h *ResponseHeader) del(key []byte) { + switch string(key) { + case "Content-Type": + h.contentType = h.contentType[:0] + case "Server": + h.server = h.server[:0] + case "Set-Cookie": + h.cookies = h.cookies[:0] + case "Content-Length": + h.contentLength = 0 + h.contentLengthBytes = h.contentLengthBytes[:0] + case "Connection": + h.connectionClose = false + } + h.h = delAllArgsBytes(h.h, key) +} + +// Del deletes header with the given key. +func (h *RequestHeader) Del(key string) { + h.parseRawHeaders() + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.del(k) +} + +// DelBytes deletes header with the given key. +func (h *RequestHeader) DelBytes(key []byte) { + h.parseRawHeaders() + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.del(h.bufKV.key) +} + +func (h *RequestHeader) del(key []byte) { + switch string(key) { + case "Host": + h.host = h.host[:0] + case "Content-Type": + h.contentType = h.contentType[:0] + case "User-Agent": + h.userAgent = h.userAgent[:0] + case "Cookie": + h.cookies = h.cookies[:0] + case "Content-Length": + h.contentLength = 0 + h.contentLengthBytes = h.contentLengthBytes[:0] + case "Connection": + h.connectionClose = false + } + h.h = delAllArgsBytes(h.h, key) +} + +// Add adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use Set for setting a single header for the given key. +func (h *ResponseHeader) Add(key, value string) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.h = appendArg(h.h, b2s(k), value) +} + +// AddBytesK adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesK for setting a single header for the given key. +func (h *ResponseHeader) AddBytesK(key []byte, value string) { + h.Add(b2s(key), value) +} + +// AddBytesV adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesV for setting a single header for the given key. +func (h *ResponseHeader) AddBytesV(key string, value []byte) { + h.Add(key, b2s(value)) +} + +// AddBytesKV adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesKV for setting a single header for the given key. +func (h *ResponseHeader) AddBytesKV(key, value []byte) { + h.Add(b2s(key), b2s(value)) +} + +// Set sets the given 'key: value' header. +// +// Use Add for setting multiple header values under the same key. +func (h *ResponseHeader) Set(key, value string) { + initHeaderKV(&h.bufKV, key, value, h.disableNormalizing) + h.SetCanonical(h.bufKV.key, h.bufKV.value) +} + +// SetBytesK sets the given 'key: value' header. +// +// Use AddBytesK for setting multiple header values under the same key. +func (h *ResponseHeader) SetBytesK(key []byte, value string) { + h.bufKV.value = append(h.bufKV.value[:0], value...) + h.SetBytesKV(key, h.bufKV.value) +} + +// SetBytesV sets the given 'key: value' header. +// +// Use AddBytesV for setting multiple header values under the same key. 
+func (h *ResponseHeader) SetBytesV(key string, value []byte) {
+	k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing)
+	h.SetCanonical(k, value)
+}
+
+// SetBytesKV sets the given 'key: value' header.
+//
+// Use AddBytesKV for setting multiple header values under the same key.
+func (h *ResponseHeader) SetBytesKV(key, value []byte) {
+	h.bufKV.key = append(h.bufKV.key[:0], key...)
+	normalizeHeaderKey(h.bufKV.key, h.disableNormalizing)
+	h.SetCanonical(h.bufKV.key, value)
+}
+
+// SetCanonical sets the given 'key: value' header assuming that
+// key is in canonical form.
+func (h *ResponseHeader) SetCanonical(key, value []byte) {
+	switch string(key) {
+	case "Content-Type":
+		h.SetContentTypeBytes(value)
+	case "Server":
+		h.SetServerBytes(value)
+	case "Set-Cookie":
+		var kv *argsKV
+		h.cookies, kv = allocArg(h.cookies)
+		kv.key = getCookieKey(kv.key, value)
+		kv.value = append(kv.value[:0], value...)
+	case "Content-Length":
+		if contentLength, err := parseContentLength(value); err == nil {
+			h.contentLength = contentLength
+			h.contentLengthBytes = append(h.contentLengthBytes[:0], value...)
+		}
+	case "Connection":
+		if bytes.Equal(strClose, value) {
+			h.SetConnectionClose()
+		} else {
+			h.ResetConnectionClose()
+			h.h = setArgBytes(h.h, key, value)
+		}
+	case "Transfer-Encoding":
+		// Transfer-Encoding is managed automatically.
+	case "Date":
+		// Date is managed automatically.
+	default:
+		h.h = setArgBytes(h.h, key, value)
+	}
+}
+
+// SetCookie sets the given response cookie.
+//
+// It is safe re-using the cookie after the function returns.
+func (h *ResponseHeader) SetCookie(cookie *Cookie) {
+	h.cookies = setArgBytes(h.cookies, cookie.Key(), cookie.Cookie())
+}
+
+// SetCookie sets 'key: value' cookies.
+func (h *RequestHeader) SetCookie(key, value string) {
+	h.parseRawHeaders()
+	h.collectCookies()
+	h.cookies = setArg(h.cookies, key, value)
+}
+
+// SetCookieBytesK sets 'key: value' cookies.
+func (h *RequestHeader) SetCookieBytesK(key []byte, value string) {
+	h.SetCookie(b2s(key), value)
+}
+
+// SetCookieBytesKV sets 'key: value' cookies.
+func (h *RequestHeader) SetCookieBytesKV(key, value []byte) {
+	h.SetCookie(b2s(key), b2s(value))
+}
+
+// DelClientCookie instructs the client to remove the given cookie.
+//
+// Use DelCookie if you just want to remove the cookie from the response header.
+func (h *ResponseHeader) DelClientCookie(key string) {
+	h.DelCookie(key)
+
+	c := AcquireCookie()
+	c.SetKey(key)
+	c.SetExpire(CookieExpireDelete)
+	h.SetCookie(c)
+	ReleaseCookie(c)
+}
+
+// DelClientCookieBytes instructs the client to remove the given cookie.
+//
+// Use DelCookieBytes if you just want to remove the cookie from the response header.
+func (h *ResponseHeader) DelClientCookieBytes(key []byte) {
+	h.DelClientCookie(b2s(key))
+}
+
+// DelCookie removes cookie under the given key from response header.
+//
+// Note that DelCookie doesn't remove the cookie from the client.
+// Use DelClientCookie instead.
+func (h *ResponseHeader) DelCookie(key string) {
+	h.cookies = delAllArgs(h.cookies, key)
+}
+
+// DelCookieBytes removes cookie under the given key from response header.
+//
+// Note that DelCookieBytes doesn't remove the cookie from the client.
+// Use DelClientCookieBytes instead.
+func (h *ResponseHeader) DelCookieBytes(key []byte) {
+	h.DelCookie(b2s(key))
+}
+
+// DelCookie removes cookie under the given key.
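+//
+// A hypothetical usage sketch (cookie name and value assumed):
+//
+//	var h RequestHeader
+//	h.SetCookie("session", "abc")
+//	h.DelCookie("session") // h.Cookie("session") now returns nil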
+func (h *RequestHeader) DelCookie(key string) { + h.parseRawHeaders() + h.collectCookies() + h.cookies = delAllArgs(h.cookies, key) +} + +// DelCookieBytes removes cookie under the given key. +func (h *RequestHeader) DelCookieBytes(key []byte) { + h.DelCookie(b2s(key)) +} + +// DelAllCookies removes all the cookies from response headers. +func (h *ResponseHeader) DelAllCookies() { + h.cookies = h.cookies[:0] +} + +// DelAllCookies removes all the cookies from request headers. +func (h *RequestHeader) DelAllCookies() { + h.parseRawHeaders() + h.collectCookies() + h.cookies = h.cookies[:0] +} + +// Add adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use Set for setting a single header for the given key. +func (h *RequestHeader) Add(key, value string) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.h = appendArg(h.h, b2s(k), value) +} + +// AddBytesK adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesK for setting a single header for the given key. +func (h *RequestHeader) AddBytesK(key []byte, value string) { + h.Add(b2s(key), value) +} + +// AddBytesV adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesV for setting a single header for the given key. +func (h *RequestHeader) AddBytesV(key string, value []byte) { + h.Add(key, b2s(value)) +} + +// AddBytesKV adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesKV for setting a single header for the given key. +func (h *RequestHeader) AddBytesKV(key, value []byte) { + h.Add(b2s(key), b2s(value)) +} + +// Set sets the given 'key: value' header. +// +// Use Add for setting multiple header values under the same key. +func (h *RequestHeader) Set(key, value string) { + initHeaderKV(&h.bufKV, key, value, h.disableNormalizing) + h.SetCanonical(h.bufKV.key, h.bufKV.value) +} + +// SetBytesK sets the given 'key: value' header. +// +// Use AddBytesK for setting multiple header values under the same key. +func (h *RequestHeader) SetBytesK(key []byte, value string) { + h.bufKV.value = append(h.bufKV.value[:0], value...) + h.SetBytesKV(key, h.bufKV.value) +} + +// SetBytesV sets the given 'key: value' header. +// +// Use AddBytesV for setting multiple header values under the same key. +func (h *RequestHeader) SetBytesV(key string, value []byte) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.SetCanonical(k, value) +} + +// SetBytesKV sets the given 'key: value' header. +// +// Use AddBytesKV for setting multiple header values under the same key. +func (h *RequestHeader) SetBytesKV(key, value []byte) { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.SetCanonical(h.bufKV.key, value) +} + +// SetCanonical sets the given 'key: value' header assuming that +// key is in canonical form. 
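+//
+// A hypothetical example (values assumed) - the key must already be
+// canonical:
+//
+//	h.SetCanonical([]byte("Content-Type"), []byte("application/json"))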
+func (h *RequestHeader) SetCanonical(key, value []byte) { + h.parseRawHeaders() + switch string(key) { + case "Host": + h.SetHostBytes(value) + case "Content-Type": + h.SetContentTypeBytes(value) + case "User-Agent": + h.SetUserAgentBytes(value) + case "Cookie": + h.collectCookies() + h.cookies = parseRequestCookies(h.cookies, value) + case "Content-Length": + if contentLength, err := parseContentLength(value); err == nil { + h.contentLength = contentLength + h.contentLengthBytes = append(h.contentLengthBytes[:0], value...) + } + case "Connection": + if bytes.Equal(strClose, value) { + h.SetConnectionClose() + } else { + h.ResetConnectionClose() + h.h = setArgBytes(h.h, key, value) + } + case "Transfer-Encoding": + // Transfer-Encoding is managed automatically. + default: + h.h = setArgBytes(h.h, key, value) + } +} + +// Peek returns header value for the given key. +// +// Returned value is valid until the next call to ResponseHeader. +// Do not store references to returned value. Make copies instead. +func (h *ResponseHeader) Peek(key string) []byte { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + return h.peek(k) +} + +// PeekBytes returns header value for the given key. +// +// Returned value is valid until the next call to ResponseHeader. +// Do not store references to returned value. Make copies instead. +func (h *ResponseHeader) PeekBytes(key []byte) []byte { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + return h.peek(h.bufKV.key) +} + +// Peek returns header value for the given key. +// +// Returned value is valid until the next call to RequestHeader. +// Do not store references to returned value. Make copies instead. +func (h *RequestHeader) Peek(key string) []byte { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + return h.peek(k) +} + +// PeekBytes returns header value for the given key. +// +// Returned value is valid until the next call to RequestHeader. +// Do not store references to returned value. Make copies instead. +func (h *RequestHeader) PeekBytes(key []byte) []byte { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + return h.peek(h.bufKV.key) +} + +func (h *ResponseHeader) peek(key []byte) []byte { + switch string(key) { + case "Content-Type": + return h.ContentType() + case "Server": + return h.Server() + case "Connection": + if h.ConnectionClose() { + return strClose + } + return peekArgBytes(h.h, key) + case "Content-Length": + return h.contentLengthBytes + default: + return peekArgBytes(h.h, key) + } +} + +func (h *RequestHeader) peek(key []byte) []byte { + h.parseRawHeaders() + switch string(key) { + case "Host": + return h.Host() + case "Content-Type": + return h.ContentType() + case "User-Agent": + return h.UserAgent() + case "Connection": + if h.ConnectionClose() { + return strClose + } + return peekArgBytes(h.h, key) + case "Content-Length": + return h.contentLengthBytes + default: + return peekArgBytes(h.h, key) + } +} + +// Cookie returns cookie for the given key. +func (h *RequestHeader) Cookie(key string) []byte { + h.parseRawHeaders() + h.collectCookies() + return peekArgStr(h.cookies, key) +} + +// CookieBytes returns cookie for the given key. +func (h *RequestHeader) CookieBytes(key []byte) []byte { + h.parseRawHeaders() + h.collectCookies() + return peekArgBytes(h.cookies, key) +} + +// Cookie fills cookie for the given cookie.Key. +// +// Returns false if cookie with the given cookie.Key is missing. 
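+//
+// A hypothetical usage sketch (cookie name assumed):
+//
+//	c := AcquireCookie()
+//	c.SetKey("session")
+//	if h.Cookie(c) {
+//		// c now holds the parsed Set-Cookie value for "session"
+//	}
+//	ReleaseCookie(c)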
+func (h *ResponseHeader) Cookie(cookie *Cookie) bool { + v := peekArgBytes(h.cookies, cookie.Key()) + if v == nil { + return false + } + cookie.ParseBytes(v) + return true +} + +// Read reads response header from r. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (h *ResponseHeader) Read(r *bufio.Reader) error { + n := 1 + for { + err := h.tryRead(r, n) + if err == nil { + return nil + } + if err != errNeedMore { + h.resetSkipNormalize() + return err + } + n = r.Buffered() + 1 + } +} + +func (h *ResponseHeader) tryRead(r *bufio.Reader, n int) error { + h.resetSkipNormalize() + b, err := r.Peek(n) + if len(b) == 0 { + // treat all errors on the first byte read as EOF + if n == 1 || err == io.EOF { + return io.EOF + } + + // This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 . + if err == bufio.ErrBufferFull { + return &ErrSmallBuffer{ + error: fmt.Errorf("error when reading response headers: %s", errSmallBuffer), + } + } + + return fmt.Errorf("error when reading response headers: %s", err) + } + b = mustPeekBuffered(r) + headersLen, errParse := h.parse(b) + if errParse != nil { + return headerError("response", err, errParse, b) + } + mustDiscard(r, headersLen) + return nil +} + +func headerError(typ string, err, errParse error, b []byte) error { + if errParse != errNeedMore { + return headerErrorMsg(typ, errParse, b) + } + if err == nil { + return errNeedMore + } + + // Buggy servers may leave trailing CRLFs after http body. + // Treat this case as EOF. + if isOnlyCRLF(b) { + return io.EOF + } + + if err != bufio.ErrBufferFull { + return headerErrorMsg(typ, err, b) + } + return &ErrSmallBuffer{ + error: headerErrorMsg(typ, errSmallBuffer, b), + } +} + +func headerErrorMsg(typ string, err error, b []byte) error { + return fmt.Errorf("error when reading %s headers: %s. Buffer size=%d, contents: %s", typ, err, len(b), bufferSnippet(b)) +} + +// Read reads request header from r. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (h *RequestHeader) Read(r *bufio.Reader) error { + n := 1 + for { + err := h.tryRead(r, n) + if err == nil { + return nil + } + if err != errNeedMore { + h.resetSkipNormalize() + return err + } + n = r.Buffered() + 1 + } +} + +func (h *RequestHeader) tryRead(r *bufio.Reader, n int) error { + h.resetSkipNormalize() + b, err := r.Peek(n) + if len(b) == 0 { + // treat all errors on the first byte read as EOF + if n == 1 || err == io.EOF { + return io.EOF + } + + // This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 . 
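+		// Editor's note: on that buggy Go version Peek may report
+		// bufio.ErrBufferFull with zero bytes peeked; surface it as
+		// ErrSmallBuffer so callers know to raise ReadBufferSize.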
+ if err == bufio.ErrBufferFull { + return &ErrSmallBuffer{ + error: fmt.Errorf("error when reading request headers: %s", errSmallBuffer), + } + } + + return fmt.Errorf("error when reading request headers: %s", err) + } + b = mustPeekBuffered(r) + headersLen, errParse := h.parse(b) + if errParse != nil { + return headerError("request", err, errParse, b) + } + mustDiscard(r, headersLen) + return nil +} + +func bufferSnippet(b []byte) string { + n := len(b) + start := 200 + end := n - start + if start >= end { + start = n + end = n + } + bStart, bEnd := b[:start], b[end:] + if len(bEnd) == 0 { + return fmt.Sprintf("%q", b) + } + return fmt.Sprintf("%q...%q", bStart, bEnd) +} + +func isOnlyCRLF(b []byte) bool { + for _, ch := range b { + if ch != '\r' && ch != '\n' { + return false + } + } + return true +} + +func init() { + refreshServerDate() + go func() { + for { + time.Sleep(time.Second) + refreshServerDate() + } + }() +} + +var serverDate atomic.Value + +func refreshServerDate() { + b := AppendHTTPDate(nil, time.Now()) + serverDate.Store(b) +} + +// Write writes response header to w. +func (h *ResponseHeader) Write(w *bufio.Writer) error { + _, err := w.Write(h.Header()) + return err +} + +// WriteTo writes response header to w. +// +// WriteTo implements io.WriterTo interface. +func (h *ResponseHeader) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(h.Header()) + return int64(n), err +} + +// Header returns response header representation. +// +// The returned value is valid until the next call to ResponseHeader methods. +func (h *ResponseHeader) Header() []byte { + h.bufKV.value = h.AppendBytes(h.bufKV.value[:0]) + return h.bufKV.value +} + +// String returns response header representation. +func (h *ResponseHeader) String() string { + return string(h.Header()) +} + +// AppendBytes appends response header representation to dst and returns +// the extended dst. +func (h *ResponseHeader) AppendBytes(dst []byte) []byte { + statusCode := h.StatusCode() + if statusCode < 0 { + statusCode = StatusOK + } + dst = append(dst, statusLine(statusCode)...) + + server := h.Server() + if len(server) == 0 { + server = defaultServerName + } + dst = appendHeaderLine(dst, strServer, server) + dst = appendHeaderLine(dst, strDate, serverDate.Load().([]byte)) + + // Append Content-Type only for non-zero responses + // or if it is explicitly set. + // See https://github.com/valyala/fasthttp/issues/28 . + if h.ContentLength() != 0 || len(h.contentType) > 0 { + dst = appendHeaderLine(dst, strContentType, h.ContentType()) + } + + if len(h.contentLengthBytes) > 0 { + dst = appendHeaderLine(dst, strContentLength, h.contentLengthBytes) + } + + for i, n := 0, len(h.h); i < n; i++ { + kv := &h.h[i] + if !bytes.Equal(kv.key, strDate) { + dst = appendHeaderLine(dst, kv.key, kv.value) + } + } + + n := len(h.cookies) + if n > 0 { + for i := 0; i < n; i++ { + kv := &h.cookies[i] + dst = appendHeaderLine(dst, strSetCookie, kv.value) + } + } + + if h.ConnectionClose() { + dst = appendHeaderLine(dst, strConnection, strClose) + } + + return append(dst, strCRLF...) +} + +// Write writes request header to w. +func (h *RequestHeader) Write(w *bufio.Writer) error { + _, err := w.Write(h.Header()) + return err +} + +// WriteTo writes request header to w. +// +// WriteTo implements io.WriterTo interface. +func (h *RequestHeader) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(h.Header()) + return int64(n), err +} + +// Header returns request header representation. 
+//
+// The returned representation is valid until the next call to RequestHeader methods.
+func (h *RequestHeader) Header() []byte {
+	h.bufKV.value = h.AppendBytes(h.bufKV.value[:0])
+	return h.bufKV.value
+}
+
+// String returns request header representation.
+func (h *RequestHeader) String() string {
+	return string(h.Header())
+}
+
+// AppendBytes appends request header representation to dst and returns
+// the extended dst.
+func (h *RequestHeader) AppendBytes(dst []byte) []byte {
+	// there is no need for h.parseRawHeaders() here - raw headers are specially handled below.
+	dst = append(dst, h.Method()...)
+	dst = append(dst, ' ')
+	dst = append(dst, h.RequestURI()...)
+	dst = append(dst, ' ')
+	dst = append(dst, strHTTP11...)
+	dst = append(dst, strCRLF...)
+
+	if !h.rawHeadersParsed && len(h.rawHeaders) > 0 {
+		return append(dst, h.rawHeaders...)
+	}
+
+	userAgent := h.UserAgent()
+	if len(userAgent) == 0 {
+		userAgent = defaultUserAgent
+	}
+	dst = appendHeaderLine(dst, strUserAgent, userAgent)
+
+	host := h.Host()
+	if len(host) > 0 {
+		dst = appendHeaderLine(dst, strHost, host)
+	}
+
+	contentType := h.ContentType()
+	if !h.noBody() {
+		if len(contentType) == 0 {
+			contentType = strPostArgsContentType
+		}
+		dst = appendHeaderLine(dst, strContentType, contentType)
+
+		if len(h.contentLengthBytes) > 0 {
+			dst = appendHeaderLine(dst, strContentLength, h.contentLengthBytes)
+		}
+	} else if len(contentType) > 0 {
+		dst = appendHeaderLine(dst, strContentType, contentType)
+	}
+
+	for i, n := 0, len(h.h); i < n; i++ {
+		kv := &h.h[i]
+		dst = appendHeaderLine(dst, kv.key, kv.value)
+	}
+
+	// there is no need for h.collectCookies() here, since if cookies aren't collected yet,
+	// they are all located in h.h.
+	n := len(h.cookies)
+	if n > 0 {
+		dst = append(dst, strCookie...)
+		dst = append(dst, strColonSpace...)
+		dst = appendRequestCookieBytes(dst, h.cookies)
+		dst = append(dst, strCRLF...)
+	}
+
+	if h.ConnectionClose() {
+		dst = appendHeaderLine(dst, strConnection, strClose)
+	}
+
+	return append(dst, strCRLF...)
+}
+
+func appendHeaderLine(dst, key, value []byte) []byte {
+	dst = append(dst, key...)
+	dst = append(dst, strColonSpace...)
+	dst = append(dst, value...)
+	return append(dst, strCRLF...)
+} + +func (h *ResponseHeader) parse(buf []byte) (int, error) { + m, err := h.parseFirstLine(buf) + if err != nil { + return 0, err + } + n, err := h.parseHeaders(buf[m:]) + if err != nil { + return 0, err + } + return m + n, nil +} + +func (h *RequestHeader) noBody() bool { + return h.IsGet() || h.IsHead() +} + +func (h *RequestHeader) parse(buf []byte) (int, error) { + m, err := h.parseFirstLine(buf) + if err != nil { + return 0, err + } + + var n int + if !h.noBody() || h.noHTTP11 { + n, err = h.parseHeaders(buf[m:]) + if err != nil { + return 0, err + } + h.rawHeadersParsed = true + } else { + var rawHeaders []byte + rawHeaders, n, err = readRawHeaders(h.rawHeaders[:0], buf[m:]) + if err != nil { + return 0, err + } + h.rawHeaders = rawHeaders + } + return m + n, nil +} + +func (h *ResponseHeader) parseFirstLine(buf []byte) (int, error) { + bNext := buf + var b []byte + var err error + for len(b) == 0 { + if b, bNext, err = nextLine(bNext); err != nil { + return 0, err + } + } + + // parse protocol + n := bytes.IndexByte(b, ' ') + if n < 0 { + return 0, fmt.Errorf("cannot find whitespace in the first line of response %q", buf) + } + h.noHTTP11 = !bytes.Equal(b[:n], strHTTP11) + b = b[n+1:] + + // parse status code + h.statusCode, n, err = parseUintBuf(b) + if err != nil { + return 0, fmt.Errorf("cannot parse response status code: %s. Response %q", err, buf) + } + if len(b) > n && b[n] != ' ' { + return 0, fmt.Errorf("unexpected char at the end of status code. Response %q", buf) + } + + return len(buf) - len(bNext), nil +} + +func (h *RequestHeader) parseFirstLine(buf []byte) (int, error) { + bNext := buf + var b []byte + var err error + for len(b) == 0 { + if b, bNext, err = nextLine(bNext); err != nil { + return 0, err + } + } + + // parse method + n := bytes.IndexByte(b, ' ') + if n <= 0 { + return 0, fmt.Errorf("cannot find http request method in %q", buf) + } + h.method = append(h.method[:0], b[:n]...) + b = b[n+1:] + + // parse requestURI + n = bytes.LastIndexByte(b, ' ') + if n < 0 { + h.noHTTP11 = true + n = len(b) + } else if n == 0 { + return 0, fmt.Errorf("requestURI cannot be empty in %q", buf) + } else if !bytes.Equal(b[n+1:], strHTTP11) { + h.noHTTP11 = true + } + h.requestURI = append(h.requestURI[:0], b[:n]...) + + return len(buf) - len(bNext), nil +} + +func peekRawHeader(buf, key []byte) []byte { + n := bytes.Index(buf, key) + if n < 0 { + return nil + } + if n > 0 && buf[n-1] != '\n' { + return nil + } + n += len(key) + if n >= len(buf) { + return nil + } + if buf[n] != ':' { + return nil + } + n++ + if buf[n] != ' ' { + return nil + } + n++ + buf = buf[n:] + n = bytes.IndexByte(buf, '\n') + if n < 0 { + return nil + } + if n > 0 && buf[n-1] == '\r' { + n-- + } + return buf[:n] +} + +func readRawHeaders(dst, buf []byte) ([]byte, int, error) { + n := bytes.IndexByte(buf, '\n') + if n < 0 { + return nil, 0, errNeedMore + } + if (n == 1 && buf[0] == '\r') || n == 0 { + // empty headers + return dst, n + 1, nil + } + + n++ + b := buf + m := n + for { + b = b[m:] + m = bytes.IndexByte(b, '\n') + if m < 0 { + return nil, 0, errNeedMore + } + m++ + n += m + if (m == 2 && b[0] == '\r') || m == 1 { + dst = append(dst, buf[:n]...) 
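+			// Editor's note: a bare CRLF (or LF) line terminates the
+			// headers; the raw block is returned including that terminator.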
+ return dst, n, nil + } + } +} + +func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) { + // 'identity' content-length by default + h.contentLength = -2 + + var s headerScanner + s.b = buf + s.disableNormalizing = h.disableNormalizing + var err error + var kv *argsKV + for s.next() { + switch string(s.key) { + case "Content-Type": + h.contentType = append(h.contentType[:0], s.value...) + case "Server": + h.server = append(h.server[:0], s.value...) + case "Content-Length": + if h.contentLength != -1 { + if h.contentLength, err = parseContentLength(s.value); err != nil { + h.contentLength = -2 + } else { + h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...) + } + } + case "Transfer-Encoding": + if !bytes.Equal(s.value, strIdentity) { + h.contentLength = -1 + h.h = setArgBytes(h.h, strTransferEncoding, strChunked) + } + case "Set-Cookie": + h.cookies, kv = allocArg(h.cookies) + kv.key = getCookieKey(kv.key, s.value) + kv.value = append(kv.value[:0], s.value...) + case "Connection": + if bytes.Equal(s.value, strClose) { + h.connectionClose = true + } else { + h.connectionClose = false + h.h = appendArgBytes(h.h, s.key, s.value) + } + default: + h.h = appendArgBytes(h.h, s.key, s.value) + } + } + if s.err != nil { + h.connectionClose = true + return 0, s.err + } + + if h.contentLength < 0 { + h.contentLengthBytes = h.contentLengthBytes[:0] + } + if h.contentLength == -2 && !h.ConnectionUpgrade() && !h.mustSkipContentLength() { + h.h = setArgBytes(h.h, strTransferEncoding, strIdentity) + h.connectionClose = true + } + if h.noHTTP11 && !h.connectionClose { + // close connection for non-http/1.1 response unless 'Connection: keep-alive' is set. + v := peekArgBytes(h.h, strConnection) + h.connectionClose = !hasHeaderValue(v, strKeepAlive) && !hasHeaderValue(v, strKeepAliveCamelCase) + } + + return len(buf) - len(s.b), nil +} + +func (h *RequestHeader) parseHeaders(buf []byte) (int, error) { + h.contentLength = -2 + + var s headerScanner + s.b = buf + s.disableNormalizing = h.disableNormalizing + var err error + for s.next() { + switch string(s.key) { + case "Host": + h.host = append(h.host[:0], s.value...) + case "User-Agent": + h.userAgent = append(h.userAgent[:0], s.value...) + case "Content-Type": + h.contentType = append(h.contentType[:0], s.value...) + case "Content-Length": + if h.contentLength != -1 { + if h.contentLength, err = parseContentLength(s.value); err != nil { + h.contentLength = -2 + } else { + h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...) + } + } + case "Transfer-Encoding": + if !bytes.Equal(s.value, strIdentity) { + h.contentLength = -1 + h.h = setArgBytes(h.h, strTransferEncoding, strChunked) + } + case "Connection": + if bytes.Equal(s.value, strClose) { + h.connectionClose = true + } else { + h.connectionClose = false + h.h = appendArgBytes(h.h, s.key, s.value) + } + default: + h.h = appendArgBytes(h.h, s.key, s.value) + } + } + if s.err != nil { + h.connectionClose = true + return 0, s.err + } + + if h.contentLength < 0 { + h.contentLengthBytes = h.contentLengthBytes[:0] + } + if h.noBody() { + h.contentLength = 0 + h.contentLengthBytes = h.contentLengthBytes[:0] + } + if h.noHTTP11 && !h.connectionClose { + // close connection for non-http/1.1 request unless 'Connection: keep-alive' is set. 
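+		// Editor's note: HTTP/1.0 connections are non-persistent by
+		// default, hence the explicit keep-alive check below.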
+ v := peekArgBytes(h.h, strConnection) + h.connectionClose = !hasHeaderValue(v, strKeepAlive) && !hasHeaderValue(v, strKeepAliveCamelCase) + } + + return len(buf) - len(s.b), nil +} + +func (h *RequestHeader) parseRawHeaders() { + if h.rawHeadersParsed { + return + } + h.rawHeadersParsed = true + if len(h.rawHeaders) == 0 { + return + } + h.parseHeaders(h.rawHeaders) +} + +func (h *RequestHeader) collectCookies() { + if h.cookiesCollected { + return + } + + for i, n := 0, len(h.h); i < n; i++ { + kv := &h.h[i] + if bytes.Equal(kv.key, strCookie) { + h.cookies = parseRequestCookies(h.cookies, kv.value) + tmp := *kv + copy(h.h[i:], h.h[i+1:]) + n-- + i-- + h.h[n] = tmp + h.h = h.h[:n] + } + } + h.cookiesCollected = true +} + +func parseContentLength(b []byte) (int, error) { + v, n, err := parseUintBuf(b) + if err != nil { + return -1, err + } + if n != len(b) { + return -1, fmt.Errorf("non-numeric chars at the end of Content-Length") + } + return v, nil +} + +type headerScanner struct { + b []byte + key []byte + value []byte + err error + + disableNormalizing bool +} + +func (s *headerScanner) next() bool { + bLen := len(s.b) + if bLen >= 2 && s.b[0] == '\r' && s.b[1] == '\n' { + s.b = s.b[2:] + return false + } + if bLen >= 1 && s.b[0] == '\n' { + s.b = s.b[1:] + return false + } + n := bytes.IndexByte(s.b, ':') + if n < 0 { + s.err = errNeedMore + return false + } + s.key = s.b[:n] + normalizeHeaderKey(s.key, s.disableNormalizing) + n++ + for len(s.b) > n && s.b[n] == ' ' { + n++ + } + s.b = s.b[n:] + n = bytes.IndexByte(s.b, '\n') + if n < 0 { + s.err = errNeedMore + return false + } + s.value = s.b[:n] + s.b = s.b[n+1:] + + if n > 0 && s.value[n-1] == '\r' { + n-- + } + for n > 0 && s.value[n-1] == ' ' { + n-- + } + s.value = s.value[:n] + return true +} + +type headerValueScanner struct { + b []byte + value []byte +} + +func (s *headerValueScanner) next() bool { + b := s.b + if len(b) == 0 { + return false + } + n := bytes.IndexByte(b, ',') + if n < 0 { + s.value = stripSpace(b) + s.b = b[len(b):] + return true + } + s.value = stripSpace(b[:n]) + s.b = b[n+1:] + return true +} + +func stripSpace(b []byte) []byte { + for len(b) > 0 && b[0] == ' ' { + b = b[1:] + } + for len(b) > 0 && b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } + return b +} + +func hasHeaderValue(s, value []byte) bool { + var vs headerValueScanner + vs.b = s + for vs.next() { + if bytes.Equal(vs.value, value) { + return true + } + } + return false +} + +func nextLine(b []byte) ([]byte, []byte, error) { + nNext := bytes.IndexByte(b, '\n') + if nNext < 0 { + return nil, nil, errNeedMore + } + n := nNext + if n > 0 && b[n-1] == '\r' { + n-- + } + return b[:n], b[nNext+1:], nil +} + +func initHeaderKV(kv *argsKV, key, value string, disableNormalizing bool) { + kv.key = getHeaderKeyBytes(kv, key, disableNormalizing) + kv.value = append(kv.value[:0], value...) +} + +func getHeaderKeyBytes(kv *argsKV, key string, disableNormalizing bool) []byte { + kv.key = append(kv.key[:0], key...) + normalizeHeaderKey(kv.key, disableNormalizing) + return kv.key +} + +func normalizeHeaderKey(b []byte, disableNormalizing bool) { + if disableNormalizing { + return + } + + n := len(b) + up := true + for i := 0; i < n; i++ { + switch b[i] { + case '-': + up = true + default: + if up { + up = false + uppercaseByte(&b[i]) + } else { + lowercaseByte(&b[i]) + } + } + } +} + +// AppendNormalizedHeaderKey appends normalized header key (name) to dst +// and returns the resulting dst. +// +// Normalized header key starts with uppercase letter. 
+// The first letters after dashes are also uppercased.
+// All the other letters are lowercased.
+// Examples:
+//
+// * coNTENT-TYPe -> Content-Type
+// * HOST -> Host
+// * foo-bar-baz -> Foo-Bar-Baz
+func AppendNormalizedHeaderKey(dst []byte, key string) []byte {
+	dst = append(dst, key...)
+	normalizeHeaderKey(dst[len(dst)-len(key):], false)
+	return dst
+}
+
+// AppendNormalizedHeaderKeyBytes appends normalized header key (name) to dst
+// and returns the resulting dst.
+//
+// Normalized header key starts with uppercase letter. The first letters
+// after dashes are also uppercased. All the other letters are lowercased.
+// Examples:
+//
+// * coNTENT-TYPe -> Content-Type
+// * HOST -> Host
+// * foo-bar-baz -> Foo-Bar-Baz
+func AppendNormalizedHeaderKeyBytes(dst, key []byte) []byte {
+	return AppendNormalizedHeaderKey(dst, b2s(key))
+}
+
+var (
+	errNeedMore    = errors.New("need more data: cannot find trailing lf")
+	errSmallBuffer = errors.New("small read buffer. Increase ReadBufferSize")
+)
+
+// ErrSmallBuffer is returned when the provided buffer size is too small
+// for reading request and/or response headers.
+//
+// Increasing the ReadBufferSize value on the Server or client should reduce
+// the number of such errors.
+type ErrSmallBuffer struct {
+	error
+}
+
+func mustPeekBuffered(r *bufio.Reader) []byte {
+	buf, err := r.Peek(r.Buffered())
+	if len(buf) == 0 || err != nil {
+		panic(fmt.Sprintf("bufio.Reader.Peek() returned unexpected data (%q, %v)", buf, err))
+	}
+	return buf
+}
+
+func mustDiscard(r *bufio.Reader, n int) {
+	if _, err := r.Discard(n); err != nil {
+		panic(fmt.Sprintf("bufio.Reader.Discard(%d) failed: %s", n, err))
+	}
+}
diff --git a/vendor/github.com/valyala/fasthttp/http.go b/vendor/github.com/valyala/fasthttp/http.go
new file mode 100644
index 0000000..ba2f880
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/http.go
@@ -0,0 +1,1681 @@
+package fasthttp
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"mime/multipart"
+	"os"
+	"sync"
+
+	"github.com/valyala/bytebufferpool"
+)
+
+// Request represents HTTP request.
+//
+// Copying Request instances is forbidden. Create new instances
+// and use CopyTo instead.
+//
+// Request instance MUST NOT be used from concurrently running goroutines.
+type Request struct {
+	noCopy noCopy
+
+	// Request header
+	//
+	// Copying Header by value is forbidden. Use pointer to Header instead.
+	Header RequestHeader
+
+	uri      URI
+	postArgs Args
+
+	bodyStream io.Reader
+	w          requestBodyWriter
+	body       *bytebufferpool.ByteBuffer
+
+	multipartForm         *multipart.Form
+	multipartFormBoundary string
+
+	// Group bool members in order to reduce Request object size.
+	parsedURI      bool
+	parsedPostArgs bool
+
+	keepBodyBuffer bool
+
+	isTLS bool
+}
+
+// Response represents HTTP response.
+//
+// Copying Response instances is forbidden. Create new instances
+// and use CopyTo instead.
+//
+// Response instance MUST NOT be used from concurrently running goroutines.
+type Response struct {
+	noCopy noCopy
+
+	// Response header
+	//
+	// Copying Header by value is forbidden. Use pointer to Header instead.
+	Header ResponseHeader
+
+	bodyStream io.Reader
+	w          responseBodyWriter
+	body       *bytebufferpool.ByteBuffer
+
+	// Response.Read() skips reading body if set to true.
+	// Use it for reading HEAD responses.
+	//
+	// Response.Write() skips writing body if set to true.
+	// Use it for writing HEAD responses.
+	SkipBody bool
+
+	keepBodyBuffer bool
+}
+
+// SetHost sets host for the request.
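+//
+// A hypothetical example (host value assumed):
+//
+//	var req Request
+//	req.SetHost("example.com") // the Host header is derived from the URI on Write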
+func (req *Request) SetHost(host string) {
+	req.URI().SetHost(host)
+}
+
+// SetHostBytes sets host for the request.
+func (req *Request) SetHostBytes(host []byte) {
+	req.URI().SetHostBytes(host)
+}
+
+// Host returns the host for the given request.
+func (req *Request) Host() []byte {
+	return req.URI().Host()
+}
+
+// SetRequestURI sets RequestURI.
+func (req *Request) SetRequestURI(requestURI string) {
+	req.Header.SetRequestURI(requestURI)
+	req.parsedURI = false
+}
+
+// SetRequestURIBytes sets RequestURI.
+func (req *Request) SetRequestURIBytes(requestURI []byte) {
+	req.Header.SetRequestURIBytes(requestURI)
+	req.parsedURI = false
+}
+
+// RequestURI returns request's URI.
+func (req *Request) RequestURI() []byte {
+	if req.parsedURI {
+		requestURI := req.uri.RequestURI()
+		req.SetRequestURIBytes(requestURI)
+	}
+	return req.Header.RequestURI()
+}
+
+// StatusCode returns response status code.
+func (resp *Response) StatusCode() int {
+	return resp.Header.StatusCode()
+}
+
+// SetStatusCode sets response status code.
+func (resp *Response) SetStatusCode(statusCode int) {
+	resp.Header.SetStatusCode(statusCode)
+}
+
+// ConnectionClose returns true if 'Connection: close' header is set.
+func (resp *Response) ConnectionClose() bool {
+	return resp.Header.ConnectionClose()
+}
+
+// SetConnectionClose sets 'Connection: close' header.
+func (resp *Response) SetConnectionClose() {
+	resp.Header.SetConnectionClose()
+}
+
+// ConnectionClose returns true if 'Connection: close' header is set.
+func (req *Request) ConnectionClose() bool {
+	return req.Header.ConnectionClose()
+}
+
+// SetConnectionClose sets 'Connection: close' header.
+func (req *Request) SetConnectionClose() {
+	req.Header.SetConnectionClose()
+}
+
+// SendFile registers the file at the given path to be used as response body
+// when Write is called.
+//
+// Note that SendFile doesn't set Content-Type, so set it yourself
+// with Header.SetContentType.
+func (resp *Response) SendFile(path string) error {
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	fileInfo, err := f.Stat()
+	if err != nil {
+		f.Close()
+		return err
+	}
+	size64 := fileInfo.Size()
+	size := int(size64)
+	if int64(size) != size64 {
+		size = -1
+	}
+
+	resp.Header.SetLastModified(fileInfo.ModTime())
+	resp.SetBodyStream(f, size)
+	return nil
+}
+
+// SetBodyStream sets request body stream and, optionally, body size.
+//
+// If bodySize is >= 0, then the bodyStream must provide exactly bodySize bytes
+// before returning io.EOF.
+//
+// If bodySize < 0, then bodyStream is read until io.EOF.
+//
+// bodyStream.Close() is called after finishing reading all body data
+// if it implements io.Closer.
+//
+// Note that GET and HEAD requests cannot have a body.
+//
+// See also SetBodyStreamWriter.
+func (req *Request) SetBodyStream(bodyStream io.Reader, bodySize int) {
+	req.ResetBody()
+	req.bodyStream = bodyStream
+	req.Header.SetContentLength(bodySize)
+}
+
+// SetBodyStream sets response body stream and, optionally, body size.
+//
+// If bodySize is >= 0, then the bodyStream must provide exactly bodySize bytes
+// before returning io.EOF.
+//
+// If bodySize < 0, then bodyStream is read until io.EOF.
+//
+// bodyStream.Close() is called after finishing reading all body data
+// if it implements io.Closer.
+//
+// See also SetBodyStreamWriter.
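+//
+// A minimal hypothetical sketch (file path assumed):
+//
+//	f, err := os.Open("/path/to/file")
+//	if err == nil {
+//		resp.SetBodyStream(f, -1) // unknown size: read until io.EOF, then f is closed
+//	}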
+func (resp *Response) SetBodyStream(bodyStream io.Reader, bodySize int) {
+	resp.ResetBody()
+	resp.bodyStream = bodyStream
+	resp.Header.SetContentLength(bodySize)
+}
+
+// IsBodyStream returns true if body is set via SetBodyStream*
+func (req *Request) IsBodyStream() bool {
+	return req.bodyStream != nil
+}
+
+// IsBodyStream returns true if body is set via SetBodyStream*
+func (resp *Response) IsBodyStream() bool {
+	return resp.bodyStream != nil
+}
+
+// SetBodyStreamWriter registers the given sw for populating request body.
+//
+// This function may be used in the following cases:
+//
+// * if request body is too big (more than 10MB).
+// * if request body is streamed from slow external sources.
+// * if request body must be streamed to the server in chunks
+//   (aka `http client push` or `chunked transfer-encoding`).
+//
+// Note that GET and HEAD requests cannot have a body.
+//
+// See also SetBodyStream.
+func (req *Request) SetBodyStreamWriter(sw StreamWriter) {
+	sr := NewStreamReader(sw)
+	req.SetBodyStream(sr, -1)
+}
+
+// SetBodyStreamWriter registers the given sw for populating response body.
+//
+// This function may be used in the following cases:
+//
+// * if response body is too big (more than 10MB).
+// * if response body is streamed from slow external sources.
+// * if response body must be streamed to the client in chunks
+//   (aka `http server push` or `chunked transfer-encoding`).
+//
+// See also SetBodyStream.
+func (resp *Response) SetBodyStreamWriter(sw StreamWriter) {
+	sr := NewStreamReader(sw)
+	resp.SetBodyStream(sr, -1)
+}
+
+// BodyWriter returns writer for populating response body.
+//
+// If used inside RequestHandler, the returned writer must not be used
+// after returning from RequestHandler. Use RequestCtx.Write
+// or SetBodyStreamWriter in this case.
+func (resp *Response) BodyWriter() io.Writer {
+	resp.w.r = resp
+	return &resp.w
+}
+
+// BodyWriter returns writer for populating request body.
+func (req *Request) BodyWriter() io.Writer {
+	req.w.r = req
+	return &req.w
+}
+
+type responseBodyWriter struct {
+	r *Response
+}
+
+func (w *responseBodyWriter) Write(p []byte) (int, error) {
+	w.r.AppendBody(p)
+	return len(p), nil
+}
+
+type requestBodyWriter struct {
+	r *Request
+}
+
+func (w *requestBodyWriter) Write(p []byte) (int, error) {
+	w.r.AppendBody(p)
+	return len(p), nil
+}
+
+// Body returns response body.
+func (resp *Response) Body() []byte {
+	if resp.bodyStream != nil {
+		bodyBuf := resp.bodyBuffer()
+		bodyBuf.Reset()
+		_, err := copyZeroAlloc(bodyBuf, resp.bodyStream)
+		resp.closeBodyStream()
+		if err != nil {
+			bodyBuf.SetString(err.Error())
+		}
+	}
+	return resp.bodyBytes()
+}
+
+func (resp *Response) bodyBytes() []byte {
+	if resp.body == nil {
+		return nil
+	}
+	return resp.body.B
+}
+
+func (req *Request) bodyBytes() []byte {
+	if req.body == nil {
+		return nil
+	}
+	return req.body.B
+}
+
+func (resp *Response) bodyBuffer() *bytebufferpool.ByteBuffer {
+	if resp.body == nil {
+		resp.body = responseBodyPool.Get()
+	}
+	return resp.body
+}
+
+func (req *Request) bodyBuffer() *bytebufferpool.ByteBuffer {
+	if req.body == nil {
+		req.body = requestBodyPool.Get()
+	}
+	return req.body
+}
+
+var (
+	responseBodyPool bytebufferpool.Pool
+	requestBodyPool  bytebufferpool.Pool
+)
+
+// BodyGunzip returns un-gzipped body data.
+//
+// This method may be used if the request header contains
+// 'Content-Encoding: gzip' for reading un-gzipped body.
+// Use Body for reading gzipped request body.
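+//
+// A hypothetical usage sketch:
+//
+//	if string(req.Header.Peek("Content-Encoding")) == "gzip" {
+//		body, err := req.BodyGunzip()
+//		// body holds the decompressed payload; err is non-nil on bad gzip data
+//	}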
+func (req *Request) BodyGunzip() ([]byte, error) {
+	return gunzipData(req.Body())
+}
+
+// BodyGunzip returns un-gzipped body data.
+//
+// This method may be used if the response header contains
+// 'Content-Encoding: gzip' for reading un-gzipped body.
+// Use Body for reading gzipped response body.
+func (resp *Response) BodyGunzip() ([]byte, error) {
+	return gunzipData(resp.Body())
+}
+
+func gunzipData(p []byte) ([]byte, error) {
+	var bb ByteBuffer
+	_, err := WriteGunzip(&bb, p)
+	if err != nil {
+		return nil, err
+	}
+	return bb.B, nil
+}
+
+// BodyInflate returns inflated body data.
+//
+// This method may be used if the request header contains
+// 'Content-Encoding: deflate' for reading inflated request body.
+// Use Body for reading deflated request body.
+func (req *Request) BodyInflate() ([]byte, error) {
+	return inflateData(req.Body())
+}
+
+// BodyInflate returns inflated body data.
+//
+// This method may be used if the response header contains
+// 'Content-Encoding: deflate' for reading inflated response body.
+// Use Body for reading deflated response body.
+func (resp *Response) BodyInflate() ([]byte, error) {
+	return inflateData(resp.Body())
+}
+
+func inflateData(p []byte) ([]byte, error) {
+	var bb ByteBuffer
+	_, err := WriteInflate(&bb, p)
+	if err != nil {
+		return nil, err
+	}
+	return bb.B, nil
+}
+
+// BodyWriteTo writes request body to w.
+func (req *Request) BodyWriteTo(w io.Writer) error {
+	if req.bodyStream != nil {
+		_, err := copyZeroAlloc(w, req.bodyStream)
+		req.closeBodyStream()
+		return err
+	}
+	if req.onlyMultipartForm() {
+		return WriteMultipartForm(w, req.multipartForm, req.multipartFormBoundary)
+	}
+	_, err := w.Write(req.bodyBytes())
+	return err
+}
+
+// BodyWriteTo writes response body to w.
+func (resp *Response) BodyWriteTo(w io.Writer) error {
+	if resp.bodyStream != nil {
+		_, err := copyZeroAlloc(w, resp.bodyStream)
+		resp.closeBodyStream()
+		return err
+	}
+	_, err := w.Write(resp.bodyBytes())
+	return err
+}
+
+// AppendBody appends p to response body.
+//
+// It is safe re-using p after the function returns.
+func (resp *Response) AppendBody(p []byte) {
+	resp.AppendBodyString(b2s(p))
+}
+
+// AppendBodyString appends s to response body.
+func (resp *Response) AppendBodyString(s string) {
+	resp.closeBodyStream()
+	resp.bodyBuffer().WriteString(s)
+}
+
+// SetBody sets response body.
+//
+// It is safe re-using body argument after the function returns.
+func (resp *Response) SetBody(body []byte) {
+	resp.SetBodyString(b2s(body))
+}
+
+// SetBodyString sets response body.
+func (resp *Response) SetBodyString(body string) {
+	resp.closeBodyStream()
+	bodyBuf := resp.bodyBuffer()
+	bodyBuf.Reset()
+	bodyBuf.WriteString(body)
+}
+
+// ResetBody resets response body.
+func (resp *Response) ResetBody() {
+	resp.closeBodyStream()
+	if resp.body != nil {
+		if resp.keepBodyBuffer {
+			resp.body.Reset()
+		} else {
+			responseBodyPool.Put(resp.body)
+			resp.body = nil
+		}
+	}
+}
+
+// ReleaseBody retires the response body if it is greater than "size" bytes.
+//
+// This permits GC to reclaim the large buffer. If used, it must be called
+// before ReleaseResponse.
+//
+// Use this method only if you really understand how it works.
+// The majority of workloads don't need this method.
+func (resp *Response) ReleaseBody(size int) {
+	if cap(resp.body.B) > size {
+		resp.closeBodyStream()
+		resp.body = nil
+	}
+}
+
+// ReleaseBody retires the request body if it is greater than "size" bytes.
+//
+// This permits GC to reclaim the large buffer.
+// If used, it must be called before ReleaseRequest.
+//
+// Use this method only if you really understand how it works.
+// The majority of workloads don't need this method.
+func (req *Request) ReleaseBody(size int) {
+	if cap(req.body.B) > size {
+		req.closeBodyStream()
+		req.body = nil
+	}
+}
+
+// SwapBody swaps response body with the given body and returns
+// the previous response body.
+//
+// It is forbidden to use the body passed to SwapBody after
+// the function returns.
+func (resp *Response) SwapBody(body []byte) []byte {
+	bb := resp.bodyBuffer()
+
+	if resp.bodyStream != nil {
+		bb.Reset()
+		_, err := copyZeroAlloc(bb, resp.bodyStream)
+		resp.closeBodyStream()
+		if err != nil {
+			bb.Reset()
+			bb.SetString(err.Error())
+		}
+	}
+
+	oldBody := bb.B
+	bb.B = body
+	return oldBody
+}
+
+// SwapBody swaps request body with the given body and returns
+// the previous request body.
+//
+// It is forbidden to use the body passed to SwapBody after
+// the function returns.
+func (req *Request) SwapBody(body []byte) []byte {
+	bb := req.bodyBuffer()
+
+	if req.bodyStream != nil {
+		bb.Reset()
+		_, err := copyZeroAlloc(bb, req.bodyStream)
+		req.closeBodyStream()
+		if err != nil {
+			bb.Reset()
+			bb.SetString(err.Error())
+		}
+	}
+
+	oldBody := bb.B
+	bb.B = body
+	return oldBody
+}
+
+// Body returns request body.
+func (req *Request) Body() []byte {
+	if req.bodyStream != nil {
+		bodyBuf := req.bodyBuffer()
+		bodyBuf.Reset()
+		_, err := copyZeroAlloc(bodyBuf, req.bodyStream)
+		req.closeBodyStream()
+		if err != nil {
+			bodyBuf.SetString(err.Error())
+		}
+	} else if req.onlyMultipartForm() {
+		body, err := marshalMultipartForm(req.multipartForm, req.multipartFormBoundary)
+		if err != nil {
+			return []byte(err.Error())
+		}
+		return body
+	}
+	return req.bodyBytes()
+}
+
+// AppendBody appends p to request body.
+//
+// It is safe re-using p after the function returns.
+func (req *Request) AppendBody(p []byte) {
+	req.AppendBodyString(b2s(p))
+}
+
+// AppendBodyString appends s to request body.
+func (req *Request) AppendBodyString(s string) {
+	req.RemoveMultipartFormFiles()
+	req.closeBodyStream()
+	req.bodyBuffer().WriteString(s)
+}
+
+// SetBody sets request body.
+//
+// It is safe re-using body argument after the function returns.
+func (req *Request) SetBody(body []byte) {
+	req.SetBodyString(b2s(body))
+}
+
+// SetBodyString sets request body.
+func (req *Request) SetBodyString(body string) {
+	req.RemoveMultipartFormFiles()
+	req.closeBodyStream()
+	req.bodyBuffer().SetString(body)
+}
+
+// ResetBody resets request body.
+func (req *Request) ResetBody() {
+	req.RemoveMultipartFormFiles()
+	req.closeBodyStream()
+	if req.body != nil {
+		if req.keepBodyBuffer {
+			req.body.Reset()
+		} else {
+			requestBodyPool.Put(req.body)
+			req.body = nil
+		}
+	}
+}
+
+// CopyTo copies req contents to dst except for the body stream.
+func (req *Request) CopyTo(dst *Request) {
+	req.copyToSkipBody(dst)
+	if req.body != nil {
+		dst.bodyBuffer().Set(req.body.B)
+	} else if dst.body != nil {
+		dst.body.Reset()
+	}
+}
+
+func (req *Request) copyToSkipBody(dst *Request) {
+	dst.Reset()
+	req.Header.CopyTo(&dst.Header)
+
+	req.uri.CopyTo(&dst.uri)
+	dst.parsedURI = req.parsedURI
+
+	req.postArgs.CopyTo(&dst.postArgs)
+	dst.parsedPostArgs = req.parsedPostArgs
+	dst.isTLS = req.isTLS
+
+	// do not copy multipartForm - it will be automatically
+	// re-created on the first call to MultipartForm.
+}
+
+// CopyTo copies resp contents to dst except for the body stream.
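+//
+// A hypothetical sketch - dst is Reset first, so a previously used dst
+// may be passed in:
+//
+//	var dst Response
+//	resp.CopyTo(&dst) // dst now holds an independent copy (body stream excluded)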
+func (resp *Response) CopyTo(dst *Response) {
+	resp.copyToSkipBody(dst)
+	if resp.body != nil {
+		dst.bodyBuffer().Set(resp.body.B)
+	} else if dst.body != nil {
+		dst.body.Reset()
+	}
+}
+
+func (resp *Response) copyToSkipBody(dst *Response) {
+	dst.Reset()
+	resp.Header.CopyTo(&dst.Header)
+	dst.SkipBody = resp.SkipBody
+}
+
+func swapRequestBody(a, b *Request) {
+	a.body, b.body = b.body, a.body
+	a.bodyStream, b.bodyStream = b.bodyStream, a.bodyStream
+}
+
+func swapResponseBody(a, b *Response) {
+	a.body, b.body = b.body, a.body
+	a.bodyStream, b.bodyStream = b.bodyStream, a.bodyStream
+}
+
+// URI returns request URI.
+func (req *Request) URI() *URI {
+	req.parseURI()
+	return &req.uri
+}
+
+func (req *Request) parseURI() {
+	if req.parsedURI {
+		return
+	}
+	req.parsedURI = true
+
+	req.uri.parseQuick(req.Header.RequestURI(), &req.Header, req.isTLS)
+}
+
+// PostArgs returns POST arguments.
+func (req *Request) PostArgs() *Args {
+	req.parsePostArgs()
+	return &req.postArgs
+}
+
+func (req *Request) parsePostArgs() {
+	if req.parsedPostArgs {
+		return
+	}
+	req.parsedPostArgs = true
+
+	if !bytes.HasPrefix(req.Header.ContentType(), strPostArgsContentType) {
+		return
+	}
+	req.postArgs.ParseBytes(req.bodyBytes())
+}
+
+// ErrNoMultipartForm means that the request's Content-Type
+// isn't 'multipart/form-data'.
+var ErrNoMultipartForm = errors.New("request has no multipart/form-data Content-Type")
+
+// MultipartForm returns request's multipart form.
+//
+// Returns ErrNoMultipartForm if request's Content-Type
+// isn't 'multipart/form-data'.
+//
+// RemoveMultipartFormFiles must be called after the returned multipart form
+// is processed.
+func (req *Request) MultipartForm() (*multipart.Form, error) {
+	if req.multipartForm != nil {
+		return req.multipartForm, nil
+	}
+
+	req.multipartFormBoundary = string(req.Header.MultipartFormBoundary())
+	if len(req.multipartFormBoundary) == 0 {
+		return nil, ErrNoMultipartForm
+	}
+
+	ce := req.Header.peek(strContentEncoding)
+	body := req.bodyBytes()
+	if bytes.Equal(ce, strGzip) {
+		// Do not care about memory usage here.
+		var err error
+		if body, err = AppendGunzipBytes(nil, body); err != nil {
+			return nil, fmt.Errorf("cannot gunzip request body: %s", err)
+		}
+	} else if len(ce) > 0 {
+		return nil, fmt.Errorf("unsupported Content-Encoding: %q", ce)
+	}
+
+	f, err := readMultipartForm(bytes.NewReader(body), req.multipartFormBoundary, len(body), len(body))
+	if err != nil {
+		return nil, err
+	}
+	req.multipartForm = f
+	return f, nil
+}
+
+func marshalMultipartForm(f *multipart.Form, boundary string) ([]byte, error) {
+	var buf ByteBuffer
+	if err := WriteMultipartForm(&buf, f, boundary); err != nil {
+		return nil, err
+	}
+	return buf.B, nil
+}
+
+// WriteMultipartForm writes the given multipart form f with the given
+// boundary to w.
+func WriteMultipartForm(w io.Writer, f *multipart.Form, boundary string) error {
+	// Do not care about memory allocations here, since multipart
+	// form processing is slooow.
+ if len(boundary) == 0 { + panic("BUG: form boundary cannot be empty") + } + + mw := multipart.NewWriter(w) + if err := mw.SetBoundary(boundary); err != nil { + return fmt.Errorf("cannot use form boundary %q: %s", boundary, err) + } + + // marshal values + for k, vv := range f.Value { + for _, v := range vv { + if err := mw.WriteField(k, v); err != nil { + return fmt.Errorf("cannot write form field %q value %q: %s", k, v, err) + } + } + } + + // marshal files + for k, fvv := range f.File { + for _, fv := range fvv { + vw, err := mw.CreateFormFile(k, fv.Filename) + if err != nil { + return fmt.Errorf("cannot create form file %q (%q): %s", k, fv.Filename, err) + } + fh, err := fv.Open() + if err != nil { + return fmt.Errorf("cannot open form file %q (%q): %s", k, fv.Filename, err) + } + if _, err = copyZeroAlloc(vw, fh); err != nil { + return fmt.Errorf("error when copying form file %q (%q): %s", k, fv.Filename, err) + } + if err = fh.Close(); err != nil { + return fmt.Errorf("cannot close form file %q (%q): %s", k, fv.Filename, err) + } + } + } + + if err := mw.Close(); err != nil { + return fmt.Errorf("error when closing multipart form writer: %s", err) + } + + return nil +} + +func readMultipartForm(r io.Reader, boundary string, size, maxInMemoryFileSize int) (*multipart.Form, error) { + // Do not care about memory allocations here, since they are tiny + // compared to multipart data (aka multi-MB files) usually sent + // in multipart/form-data requests. + + if size <= 0 { + panic(fmt.Sprintf("BUG: form size must be greater than 0. Given %d", size)) + } + lr := io.LimitReader(r, int64(size)) + mr := multipart.NewReader(lr, boundary) + f, err := mr.ReadForm(int64(maxInMemoryFileSize)) + if err != nil { + return nil, fmt.Errorf("cannot read multipart/form-data body: %s", err) + } + return f, nil +} + +// Reset clears request contents. +func (req *Request) Reset() { + req.Header.Reset() + req.resetSkipHeader() +} + +func (req *Request) resetSkipHeader() { + req.ResetBody() + req.uri.Reset() + req.parsedURI = false + req.postArgs.Reset() + req.parsedPostArgs = false + req.isTLS = false +} + +// RemoveMultipartFormFiles removes multipart/form-data temporary files +// associated with the request. +func (req *Request) RemoveMultipartFormFiles() { + if req.multipartForm != nil { + // Do not check for error, since these files may be deleted or moved + // to new places by user code. + req.multipartForm.RemoveAll() + req.multipartForm = nil + } + req.multipartFormBoundary = "" +} + +// Reset clears response contents. +func (resp *Response) Reset() { + resp.Header.Reset() + resp.resetSkipHeader() + resp.SkipBody = false +} + +func (resp *Response) resetSkipHeader() { + resp.ResetBody() +} + +// Read reads request (including body) from the given r. +// +// RemoveMultipartFormFiles or Reset must be called after +// reading multipart/form-data request in order to delete temporarily +// uploaded files. +// +// If MayContinue returns true, the caller must: +// +// - Either send StatusExpectationFailed response if request headers don't +// satisfy the caller. +// - Or send StatusContinue response before reading request body +// with ContinueReadBody. +// - Or close the connection. +// +// io.EOF is returned if r is closed before reading the first header byte. 
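+//
+// A hypothetical server-side sketch (conn is an assumed net.Conn):
+//
+//	br := bufio.NewReader(conn)
+//	var req Request
+//	if err := req.Read(br); err != nil {
+//		// handle a malformed request or a closed connection
+//	}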
+func (req *Request) Read(r *bufio.Reader) error {
+	return req.ReadLimitBody(r, 0)
+}
+
+const defaultMaxInMemoryFileSize = 16 * 1024 * 1024
+
+var errGetOnly = errors.New("non-GET request received")
+
+// ReadLimitBody reads request from the given r, limiting the body size.
+//
+// If maxBodySize > 0 and the body size exceeds maxBodySize,
+// then ErrBodyTooLarge is returned.
+//
+// RemoveMultipartFormFiles or Reset must be called after
+// reading a multipart/form-data request in order to delete temporarily
+// uploaded files.
+//
+// If MayContinue returns true, the caller must:
+//
+// - Either send StatusExpectationFailed response if request headers don't
+//   satisfy the caller.
+// - Or send StatusContinue response before reading request body
+//   with ContinueReadBody.
+// - Or close the connection.
+//
+// io.EOF is returned if r is closed before reading the first header byte.
+func (req *Request) ReadLimitBody(r *bufio.Reader, maxBodySize int) error {
+	req.resetSkipHeader()
+	return req.readLimitBody(r, maxBodySize, false)
+}
+
+func (req *Request) readLimitBody(r *bufio.Reader, maxBodySize int, getOnly bool) error {
+	// Do not reset the request here - the caller must reset it before
+	// calling this method.
+
+	err := req.Header.Read(r)
+	if err != nil {
+		return err
+	}
+	if getOnly && !req.Header.IsGet() {
+		return errGetOnly
+	}
+
+	if req.Header.noBody() {
+		return nil
+	}
+
+	if req.MayContinue() {
+		// 'Expect: 100-continue' header found. Let the caller decide
+		// whether to read the request body or
+		// to return StatusExpectationFailed.
+		return nil
+	}
+
+	return req.ContinueReadBody(r, maxBodySize)
+}
+
+// MayContinue returns true if the request contains
+// 'Expect: 100-continue' header.
+//
+// The caller must do one of the following actions if MayContinue returns true:
+//
+// - Either send StatusExpectationFailed response if request headers don't
+//   satisfy the caller.
+// - Or send StatusContinue response before reading request body
+//   with ContinueReadBody.
+// - Or close the connection.
+func (req *Request) MayContinue() bool {
+	return bytes.Equal(req.Header.peek(strExpect), str100Continue)
+}
+
+// ContinueReadBody reads request body if request header contains
+// 'Expect: 100-continue'.
+//
+// The caller must send StatusContinue response before calling this method.
+//
+// If maxBodySize > 0 and the body size exceeds maxBodySize,
+// then ErrBodyTooLarge is returned.
+func (req *Request) ContinueReadBody(r *bufio.Reader, maxBodySize int) error {
+	var err error
+	contentLength := req.Header.ContentLength()
+	if contentLength > 0 {
+		if maxBodySize > 0 && contentLength > maxBodySize {
+			return ErrBodyTooLarge
+		}
+
+		// Pre-read multipart form data of known length.
+		// This way we limit memory usage for large file uploads, since their content
+		// is streamed into temporary files if the file size exceeds defaultMaxInMemoryFileSize.
+		req.multipartFormBoundary = string(req.Header.MultipartFormBoundary())
+		if len(req.multipartFormBoundary) > 0 && len(req.Header.peek(strContentEncoding)) == 0 {
+			req.multipartForm, err = readMultipartForm(r, req.multipartFormBoundary, contentLength, defaultMaxInMemoryFileSize)
+			if err != nil {
+				req.Reset()
+			}
+			return err
+		}
+	}
+
+	if contentLength == -2 {
+		// An identity body makes no sense for http requests, since
+		// the end of the body is determined by the connection close.
+		// So just ignore the request body for requests without
+		// 'Content-Length' and 'Transfer-Encoding' headers.
+ req.Header.SetContentLength(0) + return nil + } + + bodyBuf := req.bodyBuffer() + bodyBuf.Reset() + bodyBuf.B, err = readBody(r, contentLength, maxBodySize, bodyBuf.B) + if err != nil { + req.Reset() + return err + } + req.Header.SetContentLength(len(bodyBuf.B)) + return nil +} + +// Read reads response (including body) from the given r. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (resp *Response) Read(r *bufio.Reader) error { + return resp.ReadLimitBody(r, 0) +} + +// ReadLimitBody reads response from the given r, limiting the body size. +// +// If maxBodySize > 0 and the body size exceeds maxBodySize, +// then ErrBodyTooLarge is returned. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (resp *Response) ReadLimitBody(r *bufio.Reader, maxBodySize int) error { + resp.resetSkipHeader() + err := resp.Header.Read(r) + if err != nil { + return err + } + if resp.Header.StatusCode() == StatusContinue { + // Read the next response according to http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html . + if err = resp.Header.Read(r); err != nil { + return err + } + } + + if !resp.mustSkipBody() { + bodyBuf := resp.bodyBuffer() + bodyBuf.Reset() + bodyBuf.B, err = readBody(r, resp.Header.ContentLength(), maxBodySize, bodyBuf.B) + if err != nil { + resp.Reset() + return err + } + resp.Header.SetContentLength(len(bodyBuf.B)) + } + return nil +} + +func (resp *Response) mustSkipBody() bool { + return resp.SkipBody || resp.Header.mustSkipContentLength() +} + +var errRequestHostRequired = errors.New("missing required Host header in request") + +// WriteTo writes request to w. It implements io.WriterTo. +func (req *Request) WriteTo(w io.Writer) (int64, error) { + return writeBufio(req, w) +} + +// WriteTo writes response to w. It implements io.WriterTo. +func (resp *Response) WriteTo(w io.Writer) (int64, error) { + return writeBufio(resp, w) +} + +func writeBufio(hw httpWriter, w io.Writer) (int64, error) { + sw := acquireStatsWriter(w) + bw := acquireBufioWriter(sw) + err1 := hw.Write(bw) + err2 := bw.Flush() + releaseBufioWriter(bw) + n := sw.bytesWritten + releaseStatsWriter(sw) + + err := err1 + if err == nil { + err = err2 + } + return n, err +} + +type statsWriter struct { + w io.Writer + bytesWritten int64 +} + +func (w *statsWriter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.bytesWritten += int64(n) + return n, err +} + +func acquireStatsWriter(w io.Writer) *statsWriter { + v := statsWriterPool.Get() + if v == nil { + return &statsWriter{ + w: w, + } + } + sw := v.(*statsWriter) + sw.w = w + return sw +} + +func releaseStatsWriter(sw *statsWriter) { + sw.w = nil + sw.bytesWritten = 0 + statsWriterPool.Put(sw) +} + +var statsWriterPool sync.Pool + +func acquireBufioWriter(w io.Writer) *bufio.Writer { + v := bufioWriterPool.Get() + if v == nil { + return bufio.NewWriter(w) + } + bw := v.(*bufio.Writer) + bw.Reset(w) + return bw +} + +func releaseBufioWriter(bw *bufio.Writer) { + bufioWriterPool.Put(bw) +} + +var bufioWriterPool sync.Pool + +func (req *Request) onlyMultipartForm() bool { + return req.multipartForm != nil && (req.body == nil || len(req.body.B) == 0) +} + +// Write writes request to w. +// +// Write doesn't flush request to w for performance reasons. +// +// See also WriteTo. 
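+//
+// A minimal sketch (conn is a hypothetical net.Conn; since Write doesn't
+// flush, Flush must be called separately):
+//
+//	var req Request
+//	req.SetRequestURI("http://example.com/")
+//	bw := bufio.NewWriter(conn)
+//	if err := req.Write(bw); err == nil {
+//		err = bw.Flush()
+//	}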
+func (req *Request) Write(w *bufio.Writer) error { + if len(req.Header.Host()) == 0 || req.parsedURI { + uri := req.URI() + host := uri.Host() + if len(host) == 0 { + return errRequestHostRequired + } + req.Header.SetHostBytes(host) + req.Header.SetRequestURIBytes(uri.RequestURI()) + } + + if req.bodyStream != nil { + return req.writeBodyStream(w) + } + + body := req.bodyBytes() + var err error + if req.onlyMultipartForm() { + body, err = marshalMultipartForm(req.multipartForm, req.multipartFormBoundary) + if err != nil { + return fmt.Errorf("error when marshaling multipart form: %s", err) + } + req.Header.SetMultipartFormBoundary(req.multipartFormBoundary) + } + + hasBody := !req.Header.noBody() + if hasBody { + req.Header.SetContentLength(len(body)) + } + if err = req.Header.Write(w); err != nil { + return err + } + if hasBody { + _, err = w.Write(body) + } else if len(body) > 0 { + return fmt.Errorf("non-zero body for non-POST request. body=%q", body) + } + return err +} + +// WriteGzip writes response with gzipped body to w. +// +// The method gzips response body and sets 'Content-Encoding: gzip' +// header before writing response to w. +// +// WriteGzip doesn't flush response to w for performance reasons. +func (resp *Response) WriteGzip(w *bufio.Writer) error { + return resp.WriteGzipLevel(w, CompressDefaultCompression) +} + +// WriteGzipLevel writes response with gzipped body to w. +// +// Level is the desired compression level: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// +// The method gzips response body and sets 'Content-Encoding: gzip' +// header before writing response to w. +// +// WriteGzipLevel doesn't flush response to w for performance reasons. +func (resp *Response) WriteGzipLevel(w *bufio.Writer, level int) error { + if err := resp.gzipBody(level); err != nil { + return err + } + return resp.Write(w) +} + +// WriteDeflate writes response with deflated body to w. +// +// The method deflates response body and sets 'Content-Encoding: deflate' +// header before writing response to w. +// +// WriteDeflate doesn't flush response to w for performance reasons. +func (resp *Response) WriteDeflate(w *bufio.Writer) error { + return resp.WriteDeflateLevel(w, CompressDefaultCompression) +} + +// WriteDeflateLevel writes response with deflated body to w. +// +// Level is the desired compression level: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// +// The method deflates response body and sets 'Content-Encoding: deflate' +// header before writing response to w. +// +// WriteDeflateLevel doesn't flush response to w for performance reasons. +func (resp *Response) WriteDeflateLevel(w *bufio.Writer, level int) error { + if err := resp.deflateBody(level); err != nil { + return err + } + return resp.Write(w) +} + +func (resp *Response) gzipBody(level int) error { + if len(resp.Header.peek(strContentEncoding)) > 0 { + // It looks like the body is already compressed. + // Do not compress it again. + return nil + } + + // Do not care about memory allocations here, since gzip is slow + // and allocates a lot of memory by itself. 
+ if resp.bodyStream != nil { + bs := resp.bodyStream + resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) { + zw := acquireGzipWriter(sw, level) + fw := &flushWriter{ + wf: zw, + bw: sw, + } + copyZeroAlloc(fw, bs) + releaseGzipWriter(zw) + if bsc, ok := bs.(io.Closer); ok { + bsc.Close() + } + }) + } else { + w := responseBodyPool.Get() + zw := acquireGzipWriter(w, level) + _, err := zw.Write(resp.bodyBytes()) + releaseGzipWriter(zw) + if err != nil { + return err + } + + // Hack: swap resp.body with w. + if resp.body != nil { + responseBodyPool.Put(resp.body) + } + resp.body = w + } + resp.Header.SetCanonical(strContentEncoding, strGzip) + return nil +} + +func (resp *Response) deflateBody(level int) error { + if len(resp.Header.peek(strContentEncoding)) > 0 { + // It looks like the body is already compressed. + // Do not compress it again. + return nil + } + + // Do not care about memory allocations here, since flate is slow + // and allocates a lot of memory by itself. + if resp.bodyStream != nil { + bs := resp.bodyStream + resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) { + zw := acquireFlateWriter(sw, level) + fw := &flushWriter{ + wf: zw, + bw: sw, + } + copyZeroAlloc(fw, bs) + releaseFlateWriter(zw) + if bsc, ok := bs.(io.Closer); ok { + bsc.Close() + } + }) + } else { + w := responseBodyPool.Get() + zw := acquireFlateWriter(w, level) + _, err := zw.Write(resp.bodyBytes()) + releaseFlateWriter(zw) + if err != nil { + return err + } + + // Hack: swap resp.body with w. + if resp.body != nil { + responseBodyPool.Put(resp.body) + } + resp.body = w + } + resp.Header.SetCanonical(strContentEncoding, strDeflate) + return nil +} + +type writeFlusher interface { + io.Writer + Flush() error +} + +type flushWriter struct { + wf writeFlusher + bw *bufio.Writer +} + +func (w *flushWriter) Write(p []byte) (int, error) { + n, err := w.wf.Write(p) + if err != nil { + return 0, err + } + if err = w.wf.Flush(); err != nil { + return 0, err + } + if err = w.bw.Flush(); err != nil { + return 0, err + } + return n, nil +} + +// Write writes response to w. +// +// Write doesn't flush response to w for performance reasons. +// +// See also WriteTo. 
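+//
+// A minimal sketch, mirroring the Request.Write example above (conn is
+// a hypothetical net.Conn):
+//
+//	var resp Response
+//	resp.SetBodyString("hello")
+//	bw := bufio.NewWriter(conn)
+//	if err := resp.Write(bw); err == nil {
+//		err = bw.Flush()
+//	}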
+func (resp *Response) Write(w *bufio.Writer) error { + sendBody := !resp.mustSkipBody() + + if resp.bodyStream != nil { + return resp.writeBodyStream(w, sendBody) + } + + body := resp.bodyBytes() + bodyLen := len(body) + if sendBody || bodyLen > 0 { + resp.Header.SetContentLength(bodyLen) + } + if err := resp.Header.Write(w); err != nil { + return err + } + if sendBody { + if _, err := w.Write(body); err != nil { + return err + } + } + return nil +} + +func (req *Request) writeBodyStream(w *bufio.Writer) error { + var err error + + contentLength := req.Header.ContentLength() + if contentLength < 0 { + lrSize := limitedReaderSize(req.bodyStream) + if lrSize >= 0 { + contentLength = int(lrSize) + if int64(contentLength) != lrSize { + contentLength = -1 + } + if contentLength >= 0 { + req.Header.SetContentLength(contentLength) + } + } + } + if contentLength >= 0 { + if err = req.Header.Write(w); err == nil { + err = writeBodyFixedSize(w, req.bodyStream, int64(contentLength)) + } + } else { + req.Header.SetContentLength(-1) + if err = req.Header.Write(w); err == nil { + err = writeBodyChunked(w, req.bodyStream) + } + } + err1 := req.closeBodyStream() + if err == nil { + err = err1 + } + return err +} + +func (resp *Response) writeBodyStream(w *bufio.Writer, sendBody bool) error { + var err error + + contentLength := resp.Header.ContentLength() + if contentLength < 0 { + lrSize := limitedReaderSize(resp.bodyStream) + if lrSize >= 0 { + contentLength = int(lrSize) + if int64(contentLength) != lrSize { + contentLength = -1 + } + if contentLength >= 0 { + resp.Header.SetContentLength(contentLength) + } + } + } + if contentLength >= 0 { + if err = resp.Header.Write(w); err == nil && sendBody { + err = writeBodyFixedSize(w, resp.bodyStream, int64(contentLength)) + } + } else { + resp.Header.SetContentLength(-1) + if err = resp.Header.Write(w); err == nil && sendBody { + err = writeBodyChunked(w, resp.bodyStream) + } + } + err1 := resp.closeBodyStream() + if err == nil { + err = err1 + } + return err +} + +func (req *Request) closeBodyStream() error { + if req.bodyStream == nil { + return nil + } + var err error + if bsc, ok := req.bodyStream.(io.Closer); ok { + err = bsc.Close() + } + req.bodyStream = nil + return err +} + +func (resp *Response) closeBodyStream() error { + if resp.bodyStream == nil { + return nil + } + var err error + if bsc, ok := resp.bodyStream.(io.Closer); ok { + err = bsc.Close() + } + resp.bodyStream = nil + return err +} + +// String returns request representation. +// +// Returns error message instead of request representation on error. +// +// Use Write instead of String for performance-critical code. +func (req *Request) String() string { + return getHTTPString(req) +} + +// String returns response representation. +// +// Returns error message instead of response representation on error. +// +// Use Write instead of String for performance-critical code. 
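+//
+// Intended mainly for debugging; for example (log import assumed):
+//
+//	log.Printf("response dump:\n%s", resp.String())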
+func (resp *Response) String() string { + return getHTTPString(resp) +} + +func getHTTPString(hw httpWriter) string { + w := AcquireByteBuffer() + bw := bufio.NewWriter(w) + if err := hw.Write(bw); err != nil { + return err.Error() + } + if err := bw.Flush(); err != nil { + return err.Error() + } + s := string(w.B) + ReleaseByteBuffer(w) + return s +} + +type httpWriter interface { + Write(w *bufio.Writer) error +} + +func writeBodyChunked(w *bufio.Writer, r io.Reader) error { + vbuf := copyBufPool.Get() + buf := vbuf.([]byte) + + var err error + var n int + for { + n, err = r.Read(buf) + if n == 0 { + if err == nil { + panic("BUG: io.Reader returned 0, nil") + } + if err == io.EOF { + if err = writeChunk(w, buf[:0]); err != nil { + break + } + err = nil + } + break + } + if err = writeChunk(w, buf[:n]); err != nil { + break + } + } + + copyBufPool.Put(vbuf) + return err +} + +func limitedReaderSize(r io.Reader) int64 { + lr, ok := r.(*io.LimitedReader) + if !ok { + return -1 + } + return lr.N +} + +func writeBodyFixedSize(w *bufio.Writer, r io.Reader, size int64) error { + if size > maxSmallFileSize { + // w buffer must be empty for triggering + // sendfile path in bufio.Writer.ReadFrom. + if err := w.Flush(); err != nil { + return err + } + } + + // Unwrap a single limited reader for triggering sendfile path + // in net.TCPConn.ReadFrom. + lr, ok := r.(*io.LimitedReader) + if ok { + r = lr.R + } + + n, err := copyZeroAlloc(w, r) + + if ok { + lr.N -= n + } + + if n != size && err == nil { + err = fmt.Errorf("copied %d bytes from body stream instead of %d bytes", n, size) + } + return err +} + +func copyZeroAlloc(w io.Writer, r io.Reader) (int64, error) { + vbuf := copyBufPool.Get() + buf := vbuf.([]byte) + n, err := io.CopyBuffer(w, r, buf) + copyBufPool.Put(vbuf) + return n, err +} + +var copyBufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, 4096) + }, +} + +func writeChunk(w *bufio.Writer, b []byte) error { + n := len(b) + writeHexInt(w, n) + w.Write(strCRLF) + w.Write(b) + _, err := w.Write(strCRLF) + err1 := w.Flush() + if err == nil { + err = err1 + } + return err +} + +// ErrBodyTooLarge is returned if either request or response body exceeds +// the given limit. 
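+//
+// Callers may detect it with a direct comparison; a sketch (br and
+// maxBodySize are assumed to be in scope):
+//
+//	if err := req.ReadLimitBody(br, maxBodySize); err == ErrBodyTooLarge {
+//		// reject the oversized request
+//	}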
+var ErrBodyTooLarge = errors.New("body size exceeds the given limit") + +func readBody(r *bufio.Reader, contentLength int, maxBodySize int, dst []byte) ([]byte, error) { + dst = dst[:0] + if contentLength >= 0 { + if maxBodySize > 0 && contentLength > maxBodySize { + return dst, ErrBodyTooLarge + } + return appendBodyFixedSize(r, dst, contentLength) + } + if contentLength == -1 { + return readBodyChunked(r, maxBodySize, dst) + } + return readBodyIdentity(r, maxBodySize, dst) +} + +func readBodyIdentity(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, error) { + dst = dst[:cap(dst)] + if len(dst) == 0 { + dst = make([]byte, 1024) + } + offset := 0 + for { + nn, err := r.Read(dst[offset:]) + if nn <= 0 { + if err != nil { + if err == io.EOF { + return dst[:offset], nil + } + return dst[:offset], err + } + panic(fmt.Sprintf("BUG: bufio.Read() returned (%d, nil)", nn)) + } + offset += nn + if maxBodySize > 0 && offset > maxBodySize { + return dst[:offset], ErrBodyTooLarge + } + if len(dst) == offset { + n := round2(2 * offset) + if maxBodySize > 0 && n > maxBodySize { + n = maxBodySize + 1 + } + b := make([]byte, n) + copy(b, dst) + dst = b + } + } +} + +func appendBodyFixedSize(r *bufio.Reader, dst []byte, n int) ([]byte, error) { + if n == 0 { + return dst, nil + } + + offset := len(dst) + dstLen := offset + n + if cap(dst) < dstLen { + b := make([]byte, round2(dstLen)) + copy(b, dst) + dst = b + } + dst = dst[:dstLen] + + for { + nn, err := r.Read(dst[offset:]) + if nn <= 0 { + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return dst[:offset], err + } + panic(fmt.Sprintf("BUG: bufio.Read() returned (%d, nil)", nn)) + } + offset += nn + if offset == dstLen { + return dst, nil + } + } +} + +func readBodyChunked(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, error) { + if len(dst) > 0 { + panic("BUG: expected zero-length buffer") + } + + strCRLFLen := len(strCRLF) + for { + chunkSize, err := parseChunkSize(r) + if err != nil { + return dst, err + } + if maxBodySize > 0 && len(dst)+chunkSize > maxBodySize { + return dst, ErrBodyTooLarge + } + dst, err = appendBodyFixedSize(r, dst, chunkSize+strCRLFLen) + if err != nil { + return dst, err + } + if !bytes.Equal(dst[len(dst)-strCRLFLen:], strCRLF) { + return dst, fmt.Errorf("cannot find crlf at the end of chunk") + } + dst = dst[:len(dst)-strCRLFLen] + if chunkSize == 0 { + return dst, nil + } + } +} + +func parseChunkSize(r *bufio.Reader) (int, error) { + n, err := readHexInt(r) + if err != nil { + return -1, err + } + c, err := r.ReadByte() + if err != nil { + return -1, fmt.Errorf("cannot read '\r' char at the end of chunk size: %s", err) + } + if c != '\r' { + return -1, fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\r') + } + c, err = r.ReadByte() + if err != nil { + return -1, fmt.Errorf("cannot read '\n' char at the end of chunk size: %s", err) + } + if c != '\n' { + return -1, fmt.Errorf("unexpected char %q at the end of chunk size. 
Expected %q", c, '\n')
+	}
+	return n, nil
+}
+
+func round2(n int) int {
+	if n <= 0 {
+		return 0
+	}
+	n--
+	x := uint(0)
+	for n > 0 {
+		n >>= 1
+		x++
+	}
+	return 1 << x
+}
diff --git a/vendor/github.com/valyala/fasthttp/lbclient.go b/vendor/github.com/valyala/fasthttp/lbclient.go
new file mode 100644
index 0000000..41fe727
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/lbclient.go
@@ -0,0 +1,183 @@
+package fasthttp
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// BalancingClient is the interface for clients, which may be passed
+// to LBClient.Clients.
+type BalancingClient interface {
+	DoDeadline(req *Request, resp *Response, deadline time.Time) error
+	PendingRequests() int
+}
+
+// LBClient balances requests among available LBClient.Clients.
+//
+// It has the following features:
+//
+//   - Balances load among available clients using 'least loaded' + 'round robin'
+//     hybrid technique.
+//   - Dynamically decreases load on unhealthy clients.
+//
+// It is forbidden copying LBClient instances. Create new instances instead.
+//
+// It is safe calling LBClient methods from concurrently running goroutines.
+type LBClient struct {
+	noCopy noCopy
+
+	// Clients must contain a non-empty clients list.
+	// Incoming requests are balanced among these clients.
+	Clients []BalancingClient
+
+	// HealthCheck is a callback called after each request.
+	//
+	// The request, response and the error returned by the client
+	// are passed to HealthCheck, so the callback may determine whether
+	// the client is healthy.
+	//
+	// Load on the current client is decreased if HealthCheck returns false.
+	//
+	// By default HealthCheck returns false if err != nil.
+	HealthCheck func(req *Request, resp *Response, err error) bool
+
+	// Timeout is the request timeout used when calling LBClient.Do.
+	//
+	// DefaultLBClientTimeout is used by default.
+	Timeout time.Duration
+
+	cs []*lbClient
+
+	// nextIdx is for spreading requests among equally loaded clients
+	// in a round-robin fashion.
+	nextIdx uint32
+
+	once sync.Once
+}
+
+// DefaultLBClientTimeout is the default request timeout used by LBClient
+// when calling LBClient.Do.
+//
+// The timeout may be overridden via LBClient.Timeout.
+const DefaultLBClientTimeout = time.Second
+
+// DoDeadline calls DoDeadline on the least loaded client.
+func (cc *LBClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error {
+	return cc.get().DoDeadline(req, resp, deadline)
+}
+
+// DoTimeout calculates deadline and calls DoDeadline on the least loaded client.
+func (cc *LBClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error {
+	deadline := time.Now().Add(timeout)
+	return cc.get().DoDeadline(req, resp, deadline)
+}
+
+// Do calculates deadline using LBClient.Timeout and calls DoDeadline
+// on the least loaded client.
+func (cc *LBClient) Do(req *Request, resp *Response) error {
+	timeout := cc.Timeout
+	if timeout <= 0 {
+		timeout = DefaultLBClientTimeout
+	}
+	return cc.DoTimeout(req, resp, timeout)
+}
+
+func (cc *LBClient) init() {
+	if len(cc.Clients) == 0 {
+		panic("BUG: LBClient.Clients cannot be empty")
+	}
+	for _, c := range cc.Clients {
+		cc.cs = append(cc.cs, &lbClient{
+			c:           c,
+			healthCheck: cc.HealthCheck,
+		})
+	}
+
+	// Randomize nextIdx in order to prevent initial servers'
+	// hammering from a cluster of identical LBClients.
+	cc.nextIdx = uint32(time.Now().UnixNano())
+}
+
+func (cc *LBClient) get() *lbClient {
+	cc.once.Do(cc.init)
+
+	cs := cc.cs
+	idx := atomic.AddUint32(&cc.nextIdx, 1)
+	idx %= uint32(len(cs))
+
+	minC := cs[idx]
+	minN := minC.PendingRequests()
+	if minN == 0 {
+		return minC
+	}
+	for _, c := range cs[idx+1:] {
+		n := c.PendingRequests()
+		if n == 0 {
+			return c
+		}
+		if n < minN {
+			minC = c
+			minN = n
+		}
+	}
+	for _, c := range cs[:idx] {
+		n := c.PendingRequests()
+		if n == 0 {
+			return c
+		}
+		if n < minN {
+			minC = c
+			minN = n
+		}
+	}
+	return minC
+}
+
+type lbClient struct {
+	c           BalancingClient
+	healthCheck func(req *Request, resp *Response, err error) bool
+	penalty     uint32
+}
+
+func (c *lbClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error {
+	err := c.c.DoDeadline(req, resp, deadline)
+	if !c.isHealthy(req, resp, err) && c.incPenalty() {
+		// Penalize the client returning an error, so the next requests
+		// are routed to other clients.
+		time.AfterFunc(penaltyDuration, c.decPenalty)
+	}
+	return err
+}
+
+func (c *lbClient) PendingRequests() int {
+	n := c.c.PendingRequests()
+	m := atomic.LoadUint32(&c.penalty)
+	return n + int(m)
+}
+
+func (c *lbClient) isHealthy(req *Request, resp *Response, err error) bool {
+	if c.healthCheck == nil {
+		return err == nil
+	}
+	return c.healthCheck(req, resp, err)
+}
+
+func (c *lbClient) incPenalty() bool {
+	m := atomic.AddUint32(&c.penalty, 1)
+	if m > maxPenalty {
+		c.decPenalty()
+		return false
+	}
+	return true
+}
+
+func (c *lbClient) decPenalty() {
+	atomic.AddUint32(&c.penalty, ^uint32(0))
+}
+
+const (
+	maxPenalty = 300
+
+	penaltyDuration = 3 * time.Second
+)
diff --git a/vendor/github.com/valyala/fasthttp/nocopy.go b/vendor/github.com/valyala/fasthttp/nocopy.go
new file mode 100644
index 0000000..32af52e
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/nocopy.go
@@ -0,0 +1,9 @@
+package fasthttp
+
+// Embed this type into a struct, which mustn't be copied,
+// so `go vet` gives a warning if this struct is copied.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527 for details.
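+//
+// Embedding sketch (T is a hypothetical type):
+//
+//	type T struct {
+//		noCopy noCopy
+//		// other fields
+//	}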
+type noCopy struct{} + +func (*noCopy) Lock() {} diff --git a/vendor/github.com/valyala/fasthttp/peripconn.go b/vendor/github.com/valyala/fasthttp/peripconn.go new file mode 100644 index 0000000..afd2a92 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/peripconn.go @@ -0,0 +1,100 @@ +package fasthttp + +import ( + "fmt" + "net" + "sync" +) + +type perIPConnCounter struct { + pool sync.Pool + lock sync.Mutex + m map[uint32]int +} + +func (cc *perIPConnCounter) Register(ip uint32) int { + cc.lock.Lock() + if cc.m == nil { + cc.m = make(map[uint32]int) + } + n := cc.m[ip] + 1 + cc.m[ip] = n + cc.lock.Unlock() + return n +} + +func (cc *perIPConnCounter) Unregister(ip uint32) { + cc.lock.Lock() + if cc.m == nil { + cc.lock.Unlock() + panic("BUG: perIPConnCounter.Register() wasn't called") + } + n := cc.m[ip] - 1 + if n < 0 { + cc.lock.Unlock() + panic(fmt.Sprintf("BUG: negative per-ip counter=%d for ip=%d", n, ip)) + } + cc.m[ip] = n + cc.lock.Unlock() +} + +type perIPConn struct { + net.Conn + + ip uint32 + perIPConnCounter *perIPConnCounter +} + +func acquirePerIPConn(conn net.Conn, ip uint32, counter *perIPConnCounter) *perIPConn { + v := counter.pool.Get() + if v == nil { + v = &perIPConn{ + perIPConnCounter: counter, + } + } + c := v.(*perIPConn) + c.Conn = conn + c.ip = ip + return c +} + +func releasePerIPConn(c *perIPConn) { + c.Conn = nil + c.perIPConnCounter.pool.Put(c) +} + +func (c *perIPConn) Close() error { + err := c.Conn.Close() + c.perIPConnCounter.Unregister(c.ip) + releasePerIPConn(c) + return err +} + +func getUint32IP(c net.Conn) uint32 { + return ip2uint32(getConnIP4(c)) +} + +func getConnIP4(c net.Conn) net.IP { + addr := c.RemoteAddr() + ipAddr, ok := addr.(*net.TCPAddr) + if !ok { + return net.IPv4zero + } + return ipAddr.IP.To4() +} + +func ip2uint32(ip net.IP) uint32 { + if len(ip) != 4 { + return 0 + } + return uint32(ip[0])<<24 | uint32(ip[1])<<16 | uint32(ip[2])<<8 | uint32(ip[3]) +} + +func uint322ip(ip uint32) net.IP { + b := make([]byte, 4) + b[0] = byte(ip >> 24) + b[1] = byte(ip >> 16) + b[2] = byte(ip >> 8) + b[3] = byte(ip) + return b +} diff --git a/vendor/github.com/valyala/fasthttp/server.go b/vendor/github.com/valyala/fasthttp/server.go new file mode 100644 index 0000000..3aaefe4 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/server.go @@ -0,0 +1,1992 @@ +package fasthttp + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "mime/multipart" + "net" + "os" + "strings" + "sync" + "sync/atomic" + "time" +) + +// ServeConn serves HTTP requests from the given connection +// using the given handler. +// +// ServeConn returns nil if all requests from the c are successfully served. +// It returns non-nil error otherwise. +// +// Connection c must immediately propagate all the data passed to Write() +// to the client. Otherwise requests' processing may hang. +// +// ServeConn closes c before returning. +func ServeConn(c net.Conn, handler RequestHandler) error { + v := serverPool.Get() + if v == nil { + v = &Server{} + } + s := v.(*Server) + s.Handler = handler + err := s.ServeConn(c) + s.Handler = nil + serverPool.Put(v) + return err +} + +var serverPool sync.Pool + +// Serve serves incoming connections from the given listener +// using the given handler. +// +// Serve blocks until the given listener returns permanent error. 
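+//
+// A minimal sketch (the address and handler are illustrative only):
+//
+//	ln, err := net.Listen("tcp4", "127.0.0.1:8080")
+//	if err == nil {
+//		err = Serve(ln, func(ctx *RequestCtx) {
+//			ctx.WriteString("hello")
+//		})
+//	}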
+func Serve(ln net.Listener, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.Serve(ln) +} + +// ServeTLS serves HTTPS requests from the given net.Listener +// using the given handler. +// +// certFile and keyFile are paths to TLS certificate and key files. +func ServeTLS(ln net.Listener, certFile, keyFile string, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ServeTLS(ln, certFile, keyFile) +} + +// ServeTLSEmbed serves HTTPS requests from the given net.Listener +// using the given handler. +// +// certData and keyData must contain valid TLS certificate and key data. +func ServeTLSEmbed(ln net.Listener, certData, keyData []byte, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ServeTLSEmbed(ln, certData, keyData) +} + +// ListenAndServe serves HTTP requests from the given TCP addr +// using the given handler. +func ListenAndServe(addr string, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServe(addr) +} + +// ListenAndServeUNIX serves HTTP requests from the given UNIX addr +// using the given handler. +// +// The function deletes existing file at addr before starting serving. +// +// The server sets the given file mode for the UNIX addr. +func ListenAndServeUNIX(addr string, mode os.FileMode, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServeUNIX(addr, mode) +} + +// ListenAndServeTLS serves HTTPS requests from the given TCP addr +// using the given handler. +// +// certFile and keyFile are paths to TLS certificate and key files. +func ListenAndServeTLS(addr, certFile, keyFile string, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServeTLS(addr, certFile, keyFile) +} + +// ListenAndServeTLSEmbed serves HTTPS requests from the given TCP addr +// using the given handler. +// +// certData and keyData must contain valid TLS certificate and key data. +func ListenAndServeTLSEmbed(addr string, certData, keyData []byte, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServeTLSEmbed(addr, certData, keyData) +} + +// RequestHandler must process incoming requests. +// +// RequestHandler must call ctx.TimeoutError() before returning +// if it keeps references to ctx and/or its' members after the return. +// Consider wrapping RequestHandler into TimeoutHandler if response time +// must be limited. +type RequestHandler func(ctx *RequestCtx) + +// Server implements HTTP server. +// +// Default Server settings should satisfy the majority of Server users. +// Adjust Server settings only if you really understand the consequences. +// +// It is forbidden copying Server instances. Create new Server instances +// instead. +// +// It is safe to call Server methods from concurrently running goroutines. +type Server struct { + noCopy noCopy + + // Handler for processing incoming requests. + Handler RequestHandler + + // Server name for sending in response headers. + // + // Default server name is used if left blank. + Name string + + // The maximum number of concurrent connections the server may serve. + // + // DefaultConcurrency is used if not set. + Concurrency int + + // Whether to disable keep-alive connections. + // + // The server will close all the incoming connections after sending + // the first response to client if this option is set to true. + // + // By default keep-alive connections are enabled. 
+ DisableKeepalive bool + + // Per-connection buffer size for requests' reading. + // This also limits the maximum header size. + // + // Increase this buffer if your clients send multi-KB RequestURIs + // and/or multi-KB headers (for example, BIG cookies). + // + // Default buffer size is used if not set. + ReadBufferSize int + + // Per-connection buffer size for responses' writing. + // + // Default buffer size is used if not set. + WriteBufferSize int + + // Maximum duration for reading the full request (including body). + // + // This also limits the maximum duration for idle keep-alive + // connections. + // + // By default request read timeout is unlimited. + ReadTimeout time.Duration + + // Maximum duration for writing the full response (including body). + // + // By default response write timeout is unlimited. + WriteTimeout time.Duration + + // Maximum number of concurrent client connections allowed per IP. + // + // By default unlimited number of concurrent connections + // may be established to the server from a single IP address. + MaxConnsPerIP int + + // Maximum number of requests served per connection. + // + // The server closes connection after the last request. + // 'Connection: close' header is added to the last response. + // + // By default unlimited number of requests may be served per connection. + MaxRequestsPerConn int + + // Maximum keep-alive connection lifetime. + // + // The server closes keep-alive connection after its' lifetime + // expiration. + // + // See also ReadTimeout for limiting the duration of idle keep-alive + // connections. + // + // By default keep-alive connection lifetime is unlimited. + MaxKeepaliveDuration time.Duration + + // Maximum request body size. + // + // The server rejects requests with bodies exceeding this limit. + // + // Request body size is limited by DefaultMaxRequestBodySize by default. + MaxRequestBodySize int + + // Aggressively reduces memory usage at the cost of higher CPU usage + // if set to true. + // + // Try enabling this option only if the server consumes too much memory + // serving mostly idle keep-alive connections. This may reduce memory + // usage by more than 50%. + // + // Aggressive memory usage reduction is disabled by default. + ReduceMemoryUsage bool + + // Rejects all non-GET requests if set to true. + // + // This option is useful as anti-DoS protection for servers + // accepting only GET requests. The request size is limited + // by ReadBufferSize if GetOnly is set. + // + // Server accepts all the requests by default. + GetOnly bool + + // Logs all errors, including the most frequent + // 'connection reset by peer', 'broken pipe' and 'connection timeout' + // errors. Such errors are common in production serving real-world + // clients. + // + // By default the most frequent errors such as + // 'connection reset by peer', 'broken pipe' and 'connection timeout' + // are suppressed in order to limit output log traffic. + LogAllErrors bool + + // Header names are passed as-is without normalization + // if this option is set. + // + // Disabled header names' normalization may be useful only for proxying + // incoming requests to other servers expecting case-sensitive + // header names. See https://github.com/valyala/fasthttp/issues/57 + // for details. + // + // By default request and response header names are normalized, i.e. + // The first letter and the first letters following dashes + // are uppercased, while all the other letters are lowercased. 
+ // Examples: + // + // * HOST -> Host + // * content-type -> Content-Type + // * cONTENT-lenGTH -> Content-Length + DisableHeaderNamesNormalizing bool + + // Logger, which is used by RequestCtx.Logger(). + // + // By default standard logger from log package is used. + Logger Logger + + concurrency uint32 + concurrencyCh chan struct{} + perIPConnCounter perIPConnCounter + serverName atomic.Value + + ctxPool sync.Pool + readerPool sync.Pool + writerPool sync.Pool + hijackConnPool sync.Pool + bytePool sync.Pool +} + +// TimeoutHandler creates RequestHandler, which returns StatusRequestTimeout +// error with the given msg to the client if h didn't return during +// the given duration. +// +// The returned handler may return StatusTooManyRequests error with the given +// msg to the client if there are more than Server.Concurrency concurrent +// handlers h are running at the moment. +func TimeoutHandler(h RequestHandler, timeout time.Duration, msg string) RequestHandler { + if timeout <= 0 { + return h + } + + return func(ctx *RequestCtx) { + concurrencyCh := ctx.s.concurrencyCh + select { + case concurrencyCh <- struct{}{}: + default: + ctx.Error(msg, StatusTooManyRequests) + return + } + + ch := ctx.timeoutCh + if ch == nil { + ch = make(chan struct{}, 1) + ctx.timeoutCh = ch + } + go func() { + h(ctx) + ch <- struct{}{} + <-concurrencyCh + }() + ctx.timeoutTimer = initTimer(ctx.timeoutTimer, timeout) + select { + case <-ch: + case <-ctx.timeoutTimer.C: + ctx.TimeoutError(msg) + } + stopTimer(ctx.timeoutTimer) + } +} + +// CompressHandler returns RequestHandler that transparently compresses +// response body generated by h if the request contains 'gzip' or 'deflate' +// 'Accept-Encoding' header. +func CompressHandler(h RequestHandler) RequestHandler { + return CompressHandlerLevel(h, CompressDefaultCompression) +} + +// CompressHandlerLevel returns RequestHandler that transparently compresses +// response body generated by h if the request contains 'gzip' or 'deflate' +// 'Accept-Encoding' header. +// +// Level is the desired compression level: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +func CompressHandlerLevel(h RequestHandler, level int) RequestHandler { + return func(ctx *RequestCtx) { + h(ctx) + ce := ctx.Response.Header.PeekBytes(strContentEncoding) + if len(ce) > 0 { + // Do not compress responses with non-empty + // Content-Encoding. + return + } + if ctx.Request.Header.HasAcceptEncodingBytes(strGzip) { + ctx.Response.gzipBody(level) + } else if ctx.Request.Header.HasAcceptEncodingBytes(strDeflate) { + ctx.Response.deflateBody(level) + } + } +} + +// RequestCtx contains incoming request and manages outgoing response. +// +// It is forbidden copying RequestCtx instances. +// +// RequestHandler should avoid holding references to incoming RequestCtx and/or +// its' members after the return. +// If holding RequestCtx references after the return is unavoidable +// (for instance, ctx is passed to a separate goroutine and ctx lifetime cannot +// be controlled), then the RequestHandler MUST call ctx.TimeoutError() +// before return. +// +// It is unsafe modifying/reading RequestCtx instance from concurrently +// running goroutines. The only exception is TimeoutError*, which may be called +// while other goroutines accessing RequestCtx. +type RequestCtx struct { + noCopy noCopy + + // Incoming request. + // + // Copying Request by value is forbidden. Use pointer to Request instead. 
+ Request Request + + // Outgoing response. + // + // Copying Response by value is forbidden. Use pointer to Response instead. + Response Response + + userValues userData + + lastReadDuration time.Duration + + connID uint64 + connRequestNum uint64 + connTime time.Time + + time time.Time + + logger ctxLogger + s *Server + c net.Conn + fbr firstByteReader + + timeoutResponse *Response + timeoutCh chan struct{} + timeoutTimer *time.Timer + + hijackHandler HijackHandler +} + +// HijackHandler must process the hijacked connection c. +// +// The connection c is automatically closed after returning from HijackHandler. +// +// The connection c must not be used after returning from the handler. +type HijackHandler func(c net.Conn) + +// Hijack registers the given handler for connection hijacking. +// +// The handler is called after returning from RequestHandler +// and sending http response. The current connection is passed +// to the handler. The connection is automatically closed after +// returning from the handler. +// +// The server skips calling the handler in the following cases: +// +// * 'Connection: close' header exists in either request or response. +// * Unexpected error during response writing to the connection. +// +// The server stops processing requests from hijacked connections. +// Server limits such as Concurrency, ReadTimeout, WriteTimeout, etc. +// aren't applied to hijacked connections. +// +// The handler must not retain references to ctx members. +// +// Arbitrary 'Connection: Upgrade' protocols may be implemented +// with HijackHandler. For instance, +// +// * WebSocket ( https://en.wikipedia.org/wiki/WebSocket ) +// * HTTP/2.0 ( https://en.wikipedia.org/wiki/HTTP/2 ) +// +func (ctx *RequestCtx) Hijack(handler HijackHandler) { + ctx.hijackHandler = handler +} + +// Hijacked returns true after Hijack is called. +func (ctx *RequestCtx) Hijacked() bool { + return ctx.hijackHandler != nil +} + +// SetUserValue stores the given value (arbitrary object) +// under the given key in ctx. +// +// The value stored in ctx may be obtained by UserValue*. +// +// This functionality may be useful for passing arbitrary values between +// functions involved in request processing. +// +// All the values are removed from ctx after returning from the top +// RequestHandler. Additionally, Close method is called on each value +// implementing io.Closer before removing the value from ctx. +func (ctx *RequestCtx) SetUserValue(key string, value interface{}) { + ctx.userValues.Set(key, value) +} + +// SetUserValueBytes stores the given value (arbitrary object) +// under the given key in ctx. +// +// The value stored in ctx may be obtained by UserValue*. +// +// This functionality may be useful for passing arbitrary values between +// functions involved in request processing. +// +// All the values stored in ctx are deleted after returning from RequestHandler. +func (ctx *RequestCtx) SetUserValueBytes(key []byte, value interface{}) { + ctx.userValues.SetBytes(key, value) +} + +// UserValue returns the value stored via SetUserValue* under the given key. +func (ctx *RequestCtx) UserValue(key string) interface{} { + return ctx.userValues.Get(key) +} + +// UserValueBytes returns the value stored via SetUserValue* +// under the given key. +func (ctx *RequestCtx) UserValueBytes(key []byte) interface{} { + return ctx.userValues.GetBytes(key) +} + +// VisitUserValues calls visitor for each existing userValue. +// +// visitor must not retain references to key and value after returning. 
+// Make key and/or value copies if you need to store them after returning.
+func (ctx *RequestCtx) VisitUserValues(visitor func([]byte, interface{})) {
+	for i, n := 0, len(ctx.userValues); i < n; i++ {
+		kv := &ctx.userValues[i]
+		visitor(kv.key, kv.value)
+	}
+}
+
+type connTLSer interface {
+	ConnectionState() tls.ConnectionState
+}
+
+// IsTLS returns true if the underlying connection is tls.Conn.
+//
+// tls.Conn is an encrypted connection (aka SSL, HTTPS).
+func (ctx *RequestCtx) IsTLS() bool {
+	// cast to (connTLSer) instead of (*tls.Conn), since it catches
+	// cases with overridden tls.Conn such as:
+	//
+	// type customConn struct {
+	//     *tls.Conn
+	//
+	//     // other custom fields here
+	// }
+	_, ok := ctx.c.(connTLSer)
+	return ok
+}
+
+// TLSConnectionState returns TLS connection state.
+//
+// The function returns nil if the underlying connection isn't tls.Conn.
+//
+// The returned state may be used for verifying TLS version, client certificates,
+// etc.
+func (ctx *RequestCtx) TLSConnectionState() *tls.ConnectionState {
+	tlsConn, ok := ctx.c.(connTLSer)
+	if !ok {
+		return nil
+	}
+	state := tlsConn.ConnectionState()
+	return &state
+}
+
+type firstByteReader struct {
+	c        net.Conn
+	ch       byte
+	byteRead bool
+}
+
+func (r *firstByteReader) Read(b []byte) (int, error) {
+	if len(b) == 0 {
+		return 0, nil
+	}
+	nn := 0
+	if !r.byteRead {
+		b[0] = r.ch
+		b = b[1:]
+		r.byteRead = true
+		nn = 1
+	}
+	n, err := r.c.Read(b)
+	return n + nn, err
+}
+
+// Logger is used for logging formatted messages.
+type Logger interface {
+	// Printf must have the same semantics as log.Printf.
+	Printf(format string, args ...interface{})
+}
+
+var ctxLoggerLock sync.Mutex
+
+type ctxLogger struct {
+	ctx    *RequestCtx
+	logger Logger
+}
+
+func (cl *ctxLogger) Printf(format string, args ...interface{}) {
+	ctxLoggerLock.Lock()
+	msg := fmt.Sprintf(format, args...)
+	ctx := cl.ctx
+	req := &ctx.Request
+	cl.logger.Printf("%.3f #%016X - %s<->%s - %s %s - %s",
+		time.Since(ctx.Time()).Seconds(), ctx.ID(), ctx.LocalAddr(), ctx.RemoteAddr(), req.Header.Method(), ctx.URI().FullURI(), msg)
+	ctxLoggerLock.Unlock()
+}
+
+var zeroTCPAddr = &net.TCPAddr{
+	IP: net.IPv4zero,
+}
+
+// ID returns unique ID of the request.
+func (ctx *RequestCtx) ID() uint64 {
+	return (ctx.connID << 32) | ctx.connRequestNum
+}
+
+// ConnID returns unique connection ID.
+//
+// This ID may be used to match distinct requests to the same incoming
+// connection.
+func (ctx *RequestCtx) ConnID() uint64 {
+	return ctx.connID
+}
+
+// Time returns RequestHandler call time truncated to the nearest second.
+//
+// Call time.Now() at the beginning of RequestHandler in order to obtain
+// precise RequestHandler call time.
+func (ctx *RequestCtx) Time() time.Time {
+	return ctx.time
+}
+
+// ConnTime returns the time the server started serving the connection
+// the current request came from.
+//
+// The returned time is truncated to the nearest second.
+func (ctx *RequestCtx) ConnTime() time.Time {
+	return ctx.connTime
+}
+
+// ConnRequestNum returns request sequence number
+// for the current connection.
+//
+// Sequence starts with 1.
+func (ctx *RequestCtx) ConnRequestNum() uint64 {
+	return ctx.connRequestNum
+}
+
+// SetConnectionClose sets 'Connection: close' response header and closes
+// connection after the RequestHandler returns.
+func (ctx *RequestCtx) SetConnectionClose() {
+	ctx.Response.SetConnectionClose()
+}
+
+// SetStatusCode sets response status code.
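+//
+// Typically combined with other response helpers; a sketch:
+//
+//	ctx.SetStatusCode(StatusOK)
+//	ctx.SetContentType("text/plain; charset=utf-8")
+//	ctx.WriteString("ok")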
+func (ctx *RequestCtx) SetStatusCode(statusCode int) {
+	ctx.Response.SetStatusCode(statusCode)
+}
+
+// SetContentType sets response Content-Type.
+func (ctx *RequestCtx) SetContentType(contentType string) {
+	ctx.Response.Header.SetContentType(contentType)
+}
+
+// SetContentTypeBytes sets response Content-Type.
+//
+// It is safe modifying contentType buffer after function return.
+func (ctx *RequestCtx) SetContentTypeBytes(contentType []byte) {
+	ctx.Response.Header.SetContentTypeBytes(contentType)
+}
+
+// RequestURI returns RequestURI.
+//
+// This uri is valid until returning from RequestHandler.
+func (ctx *RequestCtx) RequestURI() []byte {
+	return ctx.Request.Header.RequestURI()
+}
+
+// URI returns requested uri.
+//
+// The uri is valid until returning from RequestHandler.
+func (ctx *RequestCtx) URI() *URI {
+	return ctx.Request.URI()
+}
+
+// Referer returns request referer.
+//
+// The referer is valid until returning from RequestHandler.
+func (ctx *RequestCtx) Referer() []byte {
+	return ctx.Request.Header.Referer()
+}
+
+// UserAgent returns User-Agent header value from the request.
+func (ctx *RequestCtx) UserAgent() []byte {
+	return ctx.Request.Header.UserAgent()
+}
+
+// Path returns requested path.
+//
+// The path is valid until returning from RequestHandler.
+func (ctx *RequestCtx) Path() []byte {
+	return ctx.URI().Path()
+}
+
+// Host returns requested host.
+//
+// The host is valid until returning from RequestHandler.
+func (ctx *RequestCtx) Host() []byte {
+	return ctx.URI().Host()
+}
+
+// QueryArgs returns query arguments from RequestURI.
+//
+// It doesn't return POST'ed arguments - use PostArgs() for this.
+//
+// Returned arguments are valid until returning from RequestHandler.
+//
+// See also PostArgs, FormValue and FormFile.
+func (ctx *RequestCtx) QueryArgs() *Args {
+	return ctx.URI().QueryArgs()
+}
+
+// PostArgs returns POST arguments.
+//
+// It doesn't return query arguments from RequestURI - use QueryArgs for this.
+//
+// Returned arguments are valid until returning from RequestHandler.
+//
+// See also QueryArgs, FormValue and FormFile.
+func (ctx *RequestCtx) PostArgs() *Args {
+	return ctx.Request.PostArgs()
+}
+
+// MultipartForm returns the request's multipart form.
+//
+// Returns ErrNoMultipartForm if request's content-type
+// isn't 'multipart/form-data'.
+//
+// All uploaded temporary files are automatically deleted after
+// returning from RequestHandler. Either move or copy uploaded files
+// into a new place if you want to retain them.
+//
+// Use SaveMultipartFile function for permanently saving uploaded file.
+//
+// The returned form is valid until returning from RequestHandler.
+//
+// See also FormFile and FormValue.
+func (ctx *RequestCtx) MultipartForm() (*multipart.Form, error) {
+	return ctx.Request.MultipartForm()
+}
+
+// FormFile returns uploaded file associated with the given multipart form key.
+//
+// The file is automatically deleted after returning from RequestHandler,
+// so either move or copy the uploaded file into a new place if you want
+// to retain it.
+//
+// Use SaveMultipartFile function for permanently saving uploaded file.
+//
+// The returned file header is valid until returning from RequestHandler.
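+//
+// Usage sketch (the "file" form key and the target path are assumptions):
+//
+//	fh, err := ctx.FormFile("file")
+//	if err == nil {
+//		err = SaveMultipartFile(fh, "/tmp/uploaded.dat")
+//	}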
+func (ctx *RequestCtx) FormFile(key string) (*multipart.FileHeader, error) {
+	mf, err := ctx.MultipartForm()
+	if err != nil {
+		return nil, err
+	}
+	if mf.File == nil {
+		return nil, err
+	}
+	fhh := mf.File[key]
+	if fhh == nil {
+		return nil, ErrMissingFile
+	}
+	return fhh[0], nil
+}
+
+// ErrMissingFile may be returned from FormFile when there is no uploaded file
+// associated with the given multipart form key.
+var ErrMissingFile = errors.New("there is no uploaded file associated with the given key")
+
+// SaveMultipartFile saves multipart file fh under the given filename path.
+func SaveMultipartFile(fh *multipart.FileHeader, path string) error {
+	f, err := fh.Open()
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	if ff, ok := f.(*os.File); ok {
+		return os.Rename(ff.Name(), path)
+	}
+
+	ff, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer ff.Close()
+	_, err = copyZeroAlloc(ff, f)
+	return err
+}
+
+// FormValue returns form value associated with the given key.
+//
+// The value is searched in the following places:
+//
+//   * Query string.
+//   * POST or PUT body.
+//
+// There are more fine-grained methods for obtaining form values:
+//
+//   * QueryArgs for obtaining values from query string.
+//   * PostArgs for obtaining values from POST or PUT body.
+//   * MultipartForm for obtaining values from multipart form.
+//   * FormFile for obtaining uploaded files.
+//
+// The returned value is valid until returning from RequestHandler.
+func (ctx *RequestCtx) FormValue(key string) []byte {
+	v := ctx.QueryArgs().Peek(key)
+	if len(v) > 0 {
+		return v
+	}
+	v = ctx.PostArgs().Peek(key)
+	if len(v) > 0 {
+		return v
+	}
+	mf, err := ctx.MultipartForm()
+	if err == nil && mf.Value != nil {
+		vv := mf.Value[key]
+		if len(vv) > 0 {
+			return []byte(vv[0])
+		}
+	}
+	return nil
+}
+
+// IsGet returns true if request method is GET.
+func (ctx *RequestCtx) IsGet() bool {
+	return ctx.Request.Header.IsGet()
+}
+
+// IsPost returns true if request method is POST.
+func (ctx *RequestCtx) IsPost() bool {
+	return ctx.Request.Header.IsPost()
+}
+
+// IsPut returns true if request method is PUT.
+func (ctx *RequestCtx) IsPut() bool {
+	return ctx.Request.Header.IsPut()
+}
+
+// IsDelete returns true if request method is DELETE.
+func (ctx *RequestCtx) IsDelete() bool {
+	return ctx.Request.Header.IsDelete()
+}
+
+// Method returns request method.
+//
+// Returned value is valid until returning from RequestHandler.
+func (ctx *RequestCtx) Method() []byte {
+	return ctx.Request.Header.Method()
+}
+
+// IsHead returns true if request method is HEAD.
+func (ctx *RequestCtx) IsHead() bool {
+	return ctx.Request.Header.IsHead()
+}
+
+// RemoteAddr returns client address for the given request.
+//
+// Always returns non-nil result.
+func (ctx *RequestCtx) RemoteAddr() net.Addr {
+	addr := ctx.c.RemoteAddr()
+	if addr == nil {
+		return zeroTCPAddr
+	}
+	return addr
+}
+
+// LocalAddr returns server address for the given request.
+//
+// Always returns non-nil result.
+func (ctx *RequestCtx) LocalAddr() net.Addr {
+	addr := ctx.c.LocalAddr()
+	if addr == nil {
+		return zeroTCPAddr
+	}
+	return addr
+}
+
+// RemoteIP returns the client ip the request came from.
+//
+// Always returns non-nil result.
+func (ctx *RequestCtx) RemoteIP() net.IP {
+	return addrToIP(ctx.RemoteAddr())
+}
+
+// LocalIP returns the server ip the request came to.
+//
+// Always returns non-nil result.
+func (ctx *RequestCtx) LocalIP() net.IP { + return addrToIP(ctx.LocalAddr()) +} + +func addrToIP(addr net.Addr) net.IP { + x, ok := addr.(*net.TCPAddr) + if !ok { + return net.IPv4zero + } + return x.IP +} + +// Error sets response status code to the given value and sets response body +// to the given message. +func (ctx *RequestCtx) Error(msg string, statusCode int) { + ctx.Response.Reset() + ctx.SetStatusCode(statusCode) + ctx.SetContentTypeBytes(defaultContentType) + ctx.SetBodyString(msg) +} + +// Success sets response Content-Type and body to the given values. +func (ctx *RequestCtx) Success(contentType string, body []byte) { + ctx.SetContentType(contentType) + ctx.SetBody(body) +} + +// SuccessString sets response Content-Type and body to the given values. +func (ctx *RequestCtx) SuccessString(contentType, body string) { + ctx.SetContentType(contentType) + ctx.SetBodyString(body) +} + +// Redirect sets 'Location: uri' response header and sets the given statusCode. +// +// statusCode must have one of the following values: +// +// * StatusMovedPermanently (301) +// * StatusFound (302) +// * StatusSeeOther (303) +// * StatusTemporaryRedirect (307) +// +// All other statusCode values are replaced by StatusFound (302). +// +// The redirect uri may be either absolute or relative to the current +// request uri. +func (ctx *RequestCtx) Redirect(uri string, statusCode int) { + u := AcquireURI() + ctx.URI().CopyTo(u) + u.Update(uri) + ctx.redirect(u.FullURI(), statusCode) + ReleaseURI(u) +} + +// RedirectBytes sets 'Location: uri' response header and sets +// the given statusCode. +// +// statusCode must have one of the following values: +// +// * StatusMovedPermanently (301) +// * StatusFound (302) +// * StatusSeeOther (303) +// * StatusTemporaryRedirect (307) +// +// All other statusCode values are replaced by StatusFound (302). +// +// The redirect uri may be either absolute or relative to the current +// request uri. +func (ctx *RequestCtx) RedirectBytes(uri []byte, statusCode int) { + s := b2s(uri) + ctx.Redirect(s, statusCode) +} + +func (ctx *RequestCtx) redirect(uri []byte, statusCode int) { + ctx.Response.Header.SetCanonical(strLocation, uri) + statusCode = getRedirectStatusCode(statusCode) + ctx.Response.SetStatusCode(statusCode) +} + +func getRedirectStatusCode(statusCode int) int { + if statusCode == StatusMovedPermanently || statusCode == StatusFound || + statusCode == StatusSeeOther || statusCode == StatusTemporaryRedirect { + return statusCode + } + return StatusFound +} + +// SetBody sets response body to the given value. +// +// It is safe re-using body argument after the function returns. +func (ctx *RequestCtx) SetBody(body []byte) { + ctx.Response.SetBody(body) +} + +// SetBodyString sets response body to the given value. +func (ctx *RequestCtx) SetBodyString(body string) { + ctx.Response.SetBodyString(body) +} + +// ResetBody resets response body contents. +func (ctx *RequestCtx) ResetBody() { + ctx.Response.ResetBody() +} + +// SendFile sends local file contents from the given path as response body. +// +// This is a shortcut to ServeFile(ctx, path). +// +// SendFile logs all the errors via ctx.Logger. +// +// See also ServeFile, FSHandler and FS. +func (ctx *RequestCtx) SendFile(path string) { + ServeFile(ctx, path) +} + +// SendFileBytes sends local file contents from the given path as response body. +// +// This is a shortcut to ServeFileBytes(ctx, path). +// +// SendFileBytes logs all the errors via ctx.Logger. +// +// See also ServeFileBytes, FSHandler and FS. 
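+//
+// Usage sketch (the path is illustrative):
+//
+//	ctx.SendFileBytes([]byte("/var/www/static/index.html"))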
+func (ctx *RequestCtx) SendFileBytes(path []byte) {
+	ServeFileBytes(ctx, path)
+}
+
+// IfModifiedSince returns true if lastModified exceeds 'If-Modified-Since'
+// value from the request header.
+//
+// The function also returns true if the 'If-Modified-Since' request header
+// is missing.
+func (ctx *RequestCtx) IfModifiedSince(lastModified time.Time) bool {
+	ifModStr := ctx.Request.Header.peek(strIfModifiedSince)
+	if len(ifModStr) == 0 {
+		return true
+	}
+	ifMod, err := ParseHTTPDate(ifModStr)
+	if err != nil {
+		return true
+	}
+	lastModified = lastModified.Truncate(time.Second)
+	return ifMod.Before(lastModified)
+}
+
+// NotModified resets response and sets '304 Not Modified' response status code.
+func (ctx *RequestCtx) NotModified() {
+	ctx.Response.Reset()
+	ctx.SetStatusCode(StatusNotModified)
+}
+
+// NotFound resets response and sets '404 Not Found' response status code.
+func (ctx *RequestCtx) NotFound() {
+	ctx.Response.Reset()
+	ctx.SetStatusCode(StatusNotFound)
+	ctx.SetBodyString("404 Page not found")
+}
+
+// Write writes p into response body.
+func (ctx *RequestCtx) Write(p []byte) (int, error) {
+	ctx.Response.AppendBody(p)
+	return len(p), nil
+}
+
+// WriteString appends s to response body.
+func (ctx *RequestCtx) WriteString(s string) (int, error) {
+	ctx.Response.AppendBodyString(s)
+	return len(s), nil
+}
+
+// PostBody returns POST request body.
+//
+// The returned value is valid until returning from RequestHandler.
+func (ctx *RequestCtx) PostBody() []byte {
+	return ctx.Request.Body()
+}
+
+// SetBodyStream sets response body stream and, optionally, body size.
+//
+// bodyStream.Close() is called after finishing reading all body data
+// if it implements io.Closer.
+//
+// If bodySize is >= 0, then bodySize bytes must be provided by bodyStream
+// before returning io.EOF.
+//
+// If bodySize < 0, then bodyStream is read until io.EOF.
+//
+// See also SetBodyStreamWriter.
+func (ctx *RequestCtx) SetBodyStream(bodyStream io.Reader, bodySize int) {
+	ctx.Response.SetBodyStream(bodyStream, bodySize)
+}
+
+// SetBodyStreamWriter registers the given stream writer for populating
+// response body.
+//
+// Access to RequestCtx and/or its members is forbidden from sw.
+//
+// This function may be used in the following cases:
+//
+//   * if response body is too big (more than 10MB).
+//   * if response body is streamed from slow external sources.
+//   * if response body must be streamed to the client in chunks
+//     (aka `http server push`).
+func (ctx *RequestCtx) SetBodyStreamWriter(sw StreamWriter) {
+	ctx.Response.SetBodyStreamWriter(sw)
+}
+
+// IsBodyStream returns true if response body is set via SetBodyStream*.
+func (ctx *RequestCtx) IsBodyStream() bool {
+	return ctx.Response.IsBodyStream()
+}
+
+// Logger returns logger, which may be used for logging arbitrary
+// request-specific messages inside RequestHandler.
+//
+// Each message logged via returned logger contains request-specific information
+// such as request id, request duration, local address, remote address,
+// request method and request url.
+//
+// It is safe re-using returned logger for logging multiple messages
+// for the current request.
+//
+// The returned logger is valid until returning from RequestHandler.
+func (ctx *RequestCtx) Logger() Logger {
+	if ctx.logger.ctx == nil {
+		ctx.logger.ctx = ctx
+	}
+	if ctx.logger.logger == nil {
+		ctx.logger.logger = ctx.s.logger()
+	}
+	return &ctx.logger
+}
+
+// TimeoutError sets response status code to StatusRequestTimeout and sets
+// body to the given msg.
+
+// TimeoutError sets response status code to StatusRequestTimeout and sets
+// body to the given msg.
+//
+// All response modifications after TimeoutError call are ignored.
+//
+// TimeoutError MUST be called before returning from RequestHandler
+// if references to ctx and/or its members remain in other goroutines.
+//
+// Usage of this function is discouraged. Prefer eliminating ctx references
+// from pending goroutines instead of using this function.
+func (ctx *RequestCtx) TimeoutError(msg string) {
+	ctx.TimeoutErrorWithCode(msg, StatusRequestTimeout)
+}
+
+// TimeoutErrorWithCode sets response body to msg and response status
+// code to statusCode.
+//
+// All response modifications after TimeoutErrorWithCode call are ignored.
+//
+// TimeoutErrorWithCode MUST be called before returning from RequestHandler
+// if references to ctx and/or its members remain in other goroutines.
+//
+// Usage of this function is discouraged. Prefer eliminating ctx references
+// from pending goroutines instead of using this function.
+func (ctx *RequestCtx) TimeoutErrorWithCode(msg string, statusCode int) {
+	var resp Response
+	resp.SetStatusCode(statusCode)
+	resp.SetBodyString(msg)
+	ctx.TimeoutErrorWithResponse(&resp)
+}
+
+// TimeoutErrorWithResponse marks the ctx as timed out and sends the given
+// response to the client.
+//
+// All ctx modifications after TimeoutErrorWithResponse call are ignored.
+//
+// TimeoutErrorWithResponse MUST be called before returning from RequestHandler
+// if references to ctx and/or its members remain in other goroutines.
+//
+// Usage of this function is discouraged. Prefer eliminating ctx references
+// from pending goroutines instead of using this function.
+func (ctx *RequestCtx) TimeoutErrorWithResponse(resp *Response) {
+	respCopy := &Response{}
+	resp.CopyTo(respCopy)
+	ctx.timeoutResponse = respCopy
+}
+
+// ListenAndServe serves HTTP requests from the given TCP4 addr.
+//
+// Pass a custom listener to Serve if you need to listen on non-TCP4 media
+// such as IPv6.
+func (s *Server) ListenAndServe(addr string) error {
+	ln, err := net.Listen("tcp4", addr)
+	if err != nil {
+		return err
+	}
+	return s.Serve(ln)
+}
+
+// ListenAndServeUNIX serves HTTP requests from the given UNIX addr.
+//
+// The function deletes the existing file at addr before it starts serving.
+//
+// The server sets the given file mode for the UNIX addr.
+func (s *Server) ListenAndServeUNIX(addr string, mode os.FileMode) error {
+	if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
+		return fmt.Errorf("unexpected error when trying to remove unix socket file %q: %s", addr, err)
+	}
+	ln, err := net.Listen("unix", addr)
+	if err != nil {
+		return err
+	}
+	if err = os.Chmod(addr, mode); err != nil {
+		return fmt.Errorf("cannot chmod %#o for %q: %s", mode, addr, err)
+	}
+	return s.Serve(ln)
+}
+
+// ListenAndServeTLS serves HTTPS requests from the given TCP4 addr.
+//
+// certFile and keyFile are paths to TLS certificate and key files.
+//
+// Pass a custom listener to Serve if you need to listen on non-TCP4 media
+// such as IPv6.
+func (s *Server) ListenAndServeTLS(addr, certFile, keyFile string) error {
+	ln, err := net.Listen("tcp4", addr)
+	if err != nil {
+		return err
+	}
+	return s.ServeTLS(ln, certFile, keyFile)
+}
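+
+// Editor's note: a hedged sketch (not part of this commit) of the
+// TimeoutError pattern documented above, for a handler that hands ctx to
+// another goroutine; doWork is a hypothetical function:
+//
+//	func timeoutHandler(ctx *fasthttp.RequestCtx) {
+//		done := make(chan struct{})
+//		go func() {
+//			doWork(ctx) // the goroutine keeps a reference to ctx
+//			close(done)
+//		}()
+//		select {
+//		case <-done:
+//		case <-time.After(time.Second):
+//			// Must be called before returning, since ctx is still
+//			// referenced by the worker goroutine.
+//			ctx.TimeoutError("request timed out")
+//		}
+//	}
+
+// ListenAndServeTLSEmbed serves HTTPS requests from the given TCP4 addr.
+//
+// certData and keyData must contain valid TLS certificate and key data.
+//
+// Pass a custom listener to Serve if you need to listen on arbitrary media
+// such as IPv6.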
+func (s *Server) ListenAndServeTLSEmbed(addr string, certData, keyData []byte) error { + ln, err := net.Listen("tcp4", addr) + if err != nil { + return err + } + return s.ServeTLSEmbed(ln, certData, keyData) +} + +// ServeTLS serves HTTPS requests from the given listener. +// +// certFile and keyFile are paths to TLS certificate and key files. +func (s *Server) ServeTLS(ln net.Listener, certFile, keyFile string) error { + lnTLS, err := newTLSListener(ln, certFile, keyFile) + if err != nil { + return err + } + return s.Serve(lnTLS) +} + +// ServeTLSEmbed serves HTTPS requests from the given listener. +// +// certData and keyData must contain valid TLS certificate and key data. +func (s *Server) ServeTLSEmbed(ln net.Listener, certData, keyData []byte) error { + lnTLS, err := newTLSListenerEmbed(ln, certData, keyData) + if err != nil { + return err + } + return s.Serve(lnTLS) +} + +func newTLSListener(ln net.Listener, certFile, keyFile string) (net.Listener, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, fmt.Errorf("cannot load TLS key pair from certFile=%q and keyFile=%q: %s", certFile, keyFile, err) + } + return newCertListener(ln, &cert), nil +} + +func newTLSListenerEmbed(ln net.Listener, certData, keyData []byte) (net.Listener, error) { + cert, err := tls.X509KeyPair(certData, keyData) + if err != nil { + return nil, fmt.Errorf("cannot load TLS key pair from the provided certData(%d) and keyData(%d): %s", + len(certData), len(keyData), err) + } + return newCertListener(ln, &cert), nil +} + +func newCertListener(ln net.Listener, cert *tls.Certificate) net.Listener { + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{*cert}, + PreferServerCipherSuites: true, + } + return tls.NewListener(ln, tlsConfig) +} + +// DefaultConcurrency is the maximum number of concurrent connections +// the Server may serve by default (i.e. if Server.Concurrency isn't set). +const DefaultConcurrency = 256 * 1024 + +// Serve serves incoming connections from the given listener. +// +// Serve blocks until the given listener returns permanent error. +func (s *Server) Serve(ln net.Listener) error { + var lastOverflowErrorTime time.Time + var lastPerIPErrorTime time.Time + var c net.Conn + var err error + + maxWorkersCount := s.getConcurrency() + s.concurrencyCh = make(chan struct{}, maxWorkersCount) + wp := &workerPool{ + WorkerFunc: s.serveConn, + MaxWorkersCount: maxWorkersCount, + LogAllErrors: s.LogAllErrors, + Logger: s.logger(), + } + wp.Start() + + for { + if c, err = acceptConn(s, ln, &lastPerIPErrorTime); err != nil { + wp.Stop() + if err == io.EOF { + return nil + } + return err + } + if !wp.Serve(c) { + s.writeFastError(c, StatusServiceUnavailable, + "The connection cannot be served because Server.Concurrency limit exceeded") + c.Close() + if time.Since(lastOverflowErrorTime) > time.Minute { + s.logger().Printf("The incoming connection cannot be served, because %d concurrent connections are served. "+ + "Try increasing Server.Concurrency", maxWorkersCount) + lastOverflowErrorTime = CoarseTimeNow() + } + + // The current server reached concurrency limit, + // so give other concurrently running servers a chance + // accepting incoming connections on the same address. 
+			//
+			// There is a hope other servers didn't reach their
+			// concurrency limits yet :)
+			time.Sleep(100 * time.Millisecond)
+		}
+		c = nil
+	}
+}
+
+func acceptConn(s *Server, ln net.Listener, lastPerIPErrorTime *time.Time) (net.Conn, error) {
+	for {
+		c, err := ln.Accept()
+		if err != nil {
+			if c != nil {
+				panic("BUG: net.Listener returned non-nil conn and non-nil error")
+			}
+			if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
+				s.logger().Printf("Temporary error when accepting new connections: %s", netErr)
+				time.Sleep(time.Second)
+				continue
+			}
+			if err != io.EOF && !strings.Contains(err.Error(), "use of closed network connection") {
+				s.logger().Printf("Permanent error when accepting new connections: %s", err)
+				return nil, err
+			}
+			return nil, io.EOF
+		}
+		if c == nil {
+			panic("BUG: net.Listener returned (nil, nil)")
+		}
+		if s.MaxConnsPerIP > 0 {
+			pic := wrapPerIPConn(s, c)
+			if pic == nil {
+				if time.Since(*lastPerIPErrorTime) > time.Minute {
+					s.logger().Printf("The number of connections from %s exceeds MaxConnsPerIP=%d",
+						getConnIP4(c), s.MaxConnsPerIP)
+					*lastPerIPErrorTime = CoarseTimeNow()
+				}
+				continue
+			}
+			c = pic
+		}
+		return c, nil
+	}
+}
+
+func wrapPerIPConn(s *Server, c net.Conn) net.Conn {
+	ip := getUint32IP(c)
+	if ip == 0 {
+		return c
+	}
+	n := s.perIPConnCounter.Register(ip)
+	if n > s.MaxConnsPerIP {
+		s.perIPConnCounter.Unregister(ip)
+		s.writeFastError(c, StatusTooManyRequests, "The number of connections from your ip exceeds MaxConnsPerIP")
+		c.Close()
+		return nil
+	}
+	return acquirePerIPConn(c, ip, &s.perIPConnCounter)
+}
+
+var defaultLogger = Logger(log.New(os.Stderr, "", log.LstdFlags))
+
+func (s *Server) logger() Logger {
+	if s.Logger != nil {
+		return s.Logger
+	}
+	return defaultLogger
+}
+
+var (
+	// ErrPerIPConnLimit may be returned from ServeConn if the number of connections
+	// per ip exceeds Server.MaxConnsPerIP.
+	ErrPerIPConnLimit = errors.New("too many connections per ip")
+
+	// ErrConcurrencyLimit may be returned from ServeConn if the number
+	// of concurrently served connections exceeds Server.Concurrency.
+	ErrConcurrencyLimit = errors.New("cannot serve the connection because Server.Concurrency concurrent connections are served")
+
+	// ErrKeepaliveTimeout is returned from ServeConn
+	// if the connection lifetime exceeds MaxKeepaliveDuration.
+	ErrKeepaliveTimeout = errors.New("exceeded MaxKeepaliveDuration")
+)
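+
+// Editor's note: a minimal sketch (not part of this commit) of driving
+// ServeConn below from a custom accept loop instead of Serve; srv is a
+// hypothetical, previously configured *fasthttp.Server, and error handling
+// is elided:
+//
+//	ln, _ := net.Listen("tcp4", ":8080")
+//	for {
+//		conn, err := ln.Accept()
+//		if err != nil {
+//			break
+//		}
+//		go srv.ServeConn(conn) // ServeConn closes conn before returning
+//	}
+
+// ServeConn serves HTTP requests from the given connection.
+//
+// ServeConn returns nil if all requests from c are successfully served.
+// It returns non-nil error otherwise.
+//
+// Connection c must immediately propagate all the data passed to Write()
+// to the client. Otherwise request processing may hang.
+//
+// ServeConn closes c before returning.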
+func (s *Server) ServeConn(c net.Conn) error { + if s.MaxConnsPerIP > 0 { + pic := wrapPerIPConn(s, c) + if pic == nil { + return ErrPerIPConnLimit + } + c = pic + } + + n := atomic.AddUint32(&s.concurrency, 1) + if n > uint32(s.getConcurrency()) { + atomic.AddUint32(&s.concurrency, ^uint32(0)) + s.writeFastError(c, StatusServiceUnavailable, "The connection cannot be served because Server.Concurrency limit exceeded") + c.Close() + return ErrConcurrencyLimit + } + + err := s.serveConn(c) + + atomic.AddUint32(&s.concurrency, ^uint32(0)) + + if err != errHijacked { + err1 := c.Close() + if err == nil { + err = err1 + } + } else { + err = nil + } + return err +} + +var errHijacked = errors.New("connection has been hijacked") + +func (s *Server) getConcurrency() int { + n := s.Concurrency + if n <= 0 { + n = DefaultConcurrency + } + return n +} + +var globalConnID uint64 + +func nextConnID() uint64 { + return atomic.AddUint64(&globalConnID, 1) +} + +// DefaultMaxRequestBodySize is the maximum request body size the server +// reads by default. +// +// See Server.MaxRequestBodySize for details. +const DefaultMaxRequestBodySize = 4 * 1024 * 1024 + +func (s *Server) serveConn(c net.Conn) error { + serverName := s.getServerName() + connRequestNum := uint64(0) + connID := nextConnID() + currentTime := CoarseTimeNow() + connTime := currentTime + maxRequestBodySize := s.MaxRequestBodySize + if maxRequestBodySize <= 0 { + maxRequestBodySize = DefaultMaxRequestBodySize + } + + ctx := s.acquireCtx(c) + ctx.connTime = connTime + isTLS := ctx.IsTLS() + var ( + br *bufio.Reader + bw *bufio.Writer + + err error + timeoutResponse *Response + hijackHandler HijackHandler + + lastReadDeadlineTime time.Time + lastWriteDeadlineTime time.Time + + connectionClose bool + isHTTP11 bool + ) + for { + connRequestNum++ + ctx.time = currentTime + + if s.ReadTimeout > 0 || s.MaxKeepaliveDuration > 0 { + lastReadDeadlineTime = s.updateReadDeadline(c, ctx, lastReadDeadlineTime) + if lastReadDeadlineTime.IsZero() { + err = ErrKeepaliveTimeout + break + } + } + + if !(s.ReduceMemoryUsage || ctx.lastReadDuration > time.Second) || br != nil { + if br == nil { + br = acquireReader(ctx) + } + } else { + br, err = acquireByteReader(&ctx) + } + ctx.Request.isTLS = isTLS + + if err == nil { + if s.DisableHeaderNamesNormalizing { + ctx.Request.Header.DisableNormalizing() + ctx.Response.Header.DisableNormalizing() + } + err = ctx.Request.readLimitBody(br, maxRequestBodySize, s.GetOnly) + if br.Buffered() == 0 || err != nil { + releaseReader(s, br) + br = nil + } + } + + currentTime = CoarseTimeNow() + ctx.lastReadDuration = currentTime.Sub(ctx.time) + + if err != nil { + if err == io.EOF { + err = nil + } else { + bw = writeErrorResponse(bw, ctx, err) + } + break + } + + // 'Expect: 100-continue' request handling. + // See http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html for details. + if !ctx.Request.Header.noBody() && ctx.Request.MayContinue() { + // Send 'HTTP/1.1 100 Continue' response. + if bw == nil { + bw = acquireWriter(ctx) + } + bw.Write(strResponseContinue) + err = bw.Flush() + releaseWriter(s, bw) + bw = nil + if err != nil { + break + } + + // Read request body. 
+ if br == nil { + br = acquireReader(ctx) + } + err = ctx.Request.ContinueReadBody(br, maxRequestBodySize) + if br.Buffered() == 0 || err != nil { + releaseReader(s, br) + br = nil + } + if err != nil { + bw = writeErrorResponse(bw, ctx, err) + break + } + } + + connectionClose = s.DisableKeepalive || ctx.Request.Header.connectionCloseFast() + isHTTP11 = ctx.Request.Header.IsHTTP11() + + ctx.Response.Header.SetServerBytes(serverName) + ctx.connID = connID + ctx.connRequestNum = connRequestNum + ctx.connTime = connTime + ctx.time = currentTime + s.Handler(ctx) + + timeoutResponse = ctx.timeoutResponse + if timeoutResponse != nil { + ctx = s.acquireCtx(c) + timeoutResponse.CopyTo(&ctx.Response) + if br != nil { + // Close connection, since br may be attached to the old ctx via ctx.fbr. + ctx.SetConnectionClose() + } + } + + if !ctx.IsGet() && ctx.IsHead() { + ctx.Response.SkipBody = true + } + ctx.Request.Reset() + + hijackHandler = ctx.hijackHandler + ctx.hijackHandler = nil + + ctx.userValues.Reset() + + if s.MaxRequestsPerConn > 0 && connRequestNum >= uint64(s.MaxRequestsPerConn) { + ctx.SetConnectionClose() + } + + if s.WriteTimeout > 0 || s.MaxKeepaliveDuration > 0 { + lastWriteDeadlineTime = s.updateWriteDeadline(c, ctx, lastWriteDeadlineTime) + } + + // Verify Request.Header.connectionCloseFast() again, + // since request handler might trigger full headers' parsing. + connectionClose = connectionClose || ctx.Request.Header.connectionCloseFast() || ctx.Response.ConnectionClose() + if connectionClose { + ctx.Response.Header.SetCanonical(strConnection, strClose) + } else if !isHTTP11 { + // Set 'Connection: keep-alive' response header for non-HTTP/1.1 request. + // There is no need in setting this header for http/1.1, since in http/1.1 + // connections are keep-alive by default. + ctx.Response.Header.SetCanonical(strConnection, strKeepAlive) + } + + if len(ctx.Response.Header.Server()) == 0 { + ctx.Response.Header.SetServerBytes(serverName) + } + + if bw == nil { + bw = acquireWriter(ctx) + } + if err = writeResponse(ctx, bw); err != nil { + break + } + + if br == nil || connectionClose { + err = bw.Flush() + releaseWriter(s, bw) + bw = nil + if err != nil { + break + } + if connectionClose { + break + } + } + + if hijackHandler != nil { + var hjr io.Reader + hjr = c + if br != nil { + hjr = br + br = nil + + // br may point to ctx.fbr, so do not return ctx into pool. + ctx = s.acquireCtx(c) + } + if bw != nil { + err = bw.Flush() + releaseWriter(s, bw) + bw = nil + if err != nil { + break + } + } + c.SetReadDeadline(zeroTime) + c.SetWriteDeadline(zeroTime) + go hijackConnHandler(hjr, c, s, hijackHandler) + hijackHandler = nil + err = errHijacked + break + } + + currentTime = CoarseTimeNow() + } + + if br != nil { + releaseReader(s, br) + } + if bw != nil { + releaseWriter(s, bw) + } + s.releaseCtx(ctx) + return err +} + +func (s *Server) updateReadDeadline(c net.Conn, ctx *RequestCtx, lastDeadlineTime time.Time) time.Time { + readTimeout := s.ReadTimeout + currentTime := ctx.time + if s.MaxKeepaliveDuration > 0 { + connTimeout := s.MaxKeepaliveDuration - currentTime.Sub(ctx.connTime) + if connTimeout <= 0 { + return zeroTime + } + if connTimeout < readTimeout { + readTimeout = connTimeout + } + } + + // Optimization: update read deadline only if more than 25% + // of the last read deadline exceeded. + // See https://github.com/golang/go/issues/15133 for details. 
+	if currentTime.Sub(lastDeadlineTime) > (readTimeout >> 2) {
+		if err := c.SetReadDeadline(currentTime.Add(readTimeout)); err != nil {
+			panic(fmt.Sprintf("BUG: error in SetReadDeadline(%s): %s", readTimeout, err))
+		}
+		lastDeadlineTime = currentTime
+	}
+	return lastDeadlineTime
+}
+
+func (s *Server) updateWriteDeadline(c net.Conn, ctx *RequestCtx, lastDeadlineTime time.Time) time.Time {
+	writeTimeout := s.WriteTimeout
+	if s.MaxKeepaliveDuration > 0 {
+		connTimeout := s.MaxKeepaliveDuration - time.Since(ctx.connTime)
+		if connTimeout <= 0 {
+			// MaxKeepAliveDuration exceeded, but let's try sending response anyway
+			// in 100ms with 'Connection: close' header.
+			ctx.SetConnectionClose()
+			connTimeout = 100 * time.Millisecond
+		}
+		if connTimeout < writeTimeout {
+			writeTimeout = connTimeout
+		}
+	}
+
+	// Optimization: update write deadline only if more than 25%
+	// of the last write deadline exceeded.
+	// See https://github.com/golang/go/issues/15133 for details.
+	currentTime := CoarseTimeNow()
+	if currentTime.Sub(lastDeadlineTime) > (writeTimeout >> 2) {
+		if err := c.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil {
+			panic(fmt.Sprintf("BUG: error in SetWriteDeadline(%s): %s", writeTimeout, err))
+		}
+		lastDeadlineTime = currentTime
+	}
+	return lastDeadlineTime
+}
+
+func hijackConnHandler(r io.Reader, c net.Conn, s *Server, h HijackHandler) {
+	hjc := s.acquireHijackConn(r, c)
+	h(hjc)
+
+	if br, ok := r.(*bufio.Reader); ok {
+		releaseReader(s, br)
+	}
+	c.Close()
+	s.releaseHijackConn(hjc)
+}
+
+func (s *Server) acquireHijackConn(r io.Reader, c net.Conn) *hijackConn {
+	v := s.hijackConnPool.Get()
+	if v == nil {
+		hjc := &hijackConn{
+			Conn: c,
+			r:    r,
+		}
+		return hjc
+	}
+	hjc := v.(*hijackConn)
+	hjc.Conn = c
+	hjc.r = r
+	return hjc
+}
+
+func (s *Server) releaseHijackConn(hjc *hijackConn) {
+	hjc.Conn = nil
+	hjc.r = nil
+	s.hijackConnPool.Put(hjc)
+}
+
+type hijackConn struct {
+	net.Conn
+	r io.Reader
+}
+
+func (c hijackConn) Read(p []byte) (int, error) {
+	return c.r.Read(p)
+}
+
+func (c hijackConn) Close() error {
+	// hijacked conn is closed in hijackConnHandler.
+	return nil
+}
+
+// LastTimeoutErrorResponse returns the last timeout response set
+// via TimeoutError* call.
+//
+// This function is intended for custom server implementations.
+func (ctx *RequestCtx) LastTimeoutErrorResponse() *Response {
+	return ctx.timeoutResponse
+}
+
+func writeResponse(ctx *RequestCtx, w *bufio.Writer) error {
+	if ctx.timeoutResponse != nil {
+		panic("BUG: cannot write timed out response")
+	}
+	err := ctx.Response.Write(w)
+	ctx.Response.Reset()
+	return err
+}
+
+const (
+	defaultReadBufferSize  = 4096
+	defaultWriteBufferSize = 4096
+)
+
+func acquireByteReader(ctxP **RequestCtx) (*bufio.Reader, error) {
+	ctx := *ctxP
+	s := ctx.s
+	c := ctx.c
+	t := ctx.time
+	s.releaseCtx(ctx)
+
+	// Make GC happy, so it could garbage collect ctx
+	// while we are waiting for the next request.
+	ctx = nil
+	*ctxP = nil
+
+	v := s.bytePool.Get()
+	if v == nil {
+		v = make([]byte, 1)
+	}
+	b := v.([]byte)
+	n, err := c.Read(b)
+	ch := b[0]
+	s.bytePool.Put(v)
+	ctx = s.acquireCtx(c)
+	ctx.time = t
+	*ctxP = ctx
+	if err != nil {
+		// Treat all errors as EOF on unsuccessful read
+		// of the first request byte.
+ return nil, io.EOF + } + if n != 1 { + panic("BUG: Reader must return at least one byte") + } + + ctx.fbr.c = c + ctx.fbr.ch = ch + ctx.fbr.byteRead = false + r := acquireReader(ctx) + r.Reset(&ctx.fbr) + return r, nil +} + +func acquireReader(ctx *RequestCtx) *bufio.Reader { + v := ctx.s.readerPool.Get() + if v == nil { + n := ctx.s.ReadBufferSize + if n <= 0 { + n = defaultReadBufferSize + } + return bufio.NewReaderSize(ctx.c, n) + } + r := v.(*bufio.Reader) + r.Reset(ctx.c) + return r +} + +func releaseReader(s *Server, r *bufio.Reader) { + s.readerPool.Put(r) +} + +func acquireWriter(ctx *RequestCtx) *bufio.Writer { + v := ctx.s.writerPool.Get() + if v == nil { + n := ctx.s.WriteBufferSize + if n <= 0 { + n = defaultWriteBufferSize + } + return bufio.NewWriterSize(ctx.c, n) + } + w := v.(*bufio.Writer) + w.Reset(ctx.c) + return w +} + +func releaseWriter(s *Server, w *bufio.Writer) { + s.writerPool.Put(w) +} + +func (s *Server) acquireCtx(c net.Conn) *RequestCtx { + v := s.ctxPool.Get() + var ctx *RequestCtx + if v == nil { + ctx = &RequestCtx{ + s: s, + } + keepBodyBuffer := !s.ReduceMemoryUsage + ctx.Request.keepBodyBuffer = keepBodyBuffer + ctx.Response.keepBodyBuffer = keepBodyBuffer + } else { + ctx = v.(*RequestCtx) + } + ctx.c = c + return ctx +} + +// Init2 prepares ctx for passing to RequestHandler. +// +// conn is used only for determining local and remote addresses. +// +// This function is intended for custom Server implementations. +// See https://github.com/valyala/httpteleport for details. +func (ctx *RequestCtx) Init2(conn net.Conn, logger Logger, reduceMemoryUsage bool) { + ctx.c = conn + ctx.logger.logger = logger + ctx.connID = nextConnID() + ctx.s = fakeServer + ctx.connRequestNum = 0 + ctx.connTime = CoarseTimeNow() + ctx.time = ctx.connTime + + keepBodyBuffer := !reduceMemoryUsage + ctx.Request.keepBodyBuffer = keepBodyBuffer + ctx.Response.keepBodyBuffer = keepBodyBuffer +} + +// Init prepares ctx for passing to RequestHandler. +// +// remoteAddr and logger are optional. They are used by RequestCtx.Logger(). +// +// This function is intended for custom Server implementations. 
+func (ctx *RequestCtx) Init(req *Request, remoteAddr net.Addr, logger Logger) { + if remoteAddr == nil { + remoteAddr = zeroTCPAddr + } + c := &fakeAddrer{ + laddr: zeroTCPAddr, + raddr: remoteAddr, + } + if logger == nil { + logger = defaultLogger + } + ctx.Init2(c, logger, true) + req.CopyTo(&ctx.Request) +} + +var fakeServer = &Server{ + // Initialize concurrencyCh for TimeoutHandler + concurrencyCh: make(chan struct{}, DefaultConcurrency), +} + +type fakeAddrer struct { + net.Conn + laddr net.Addr + raddr net.Addr +} + +func (fa *fakeAddrer) RemoteAddr() net.Addr { + return fa.raddr +} + +func (fa *fakeAddrer) LocalAddr() net.Addr { + return fa.laddr +} + +func (fa *fakeAddrer) Read(p []byte) (int, error) { + panic("BUG: unexpected Read call") +} + +func (fa *fakeAddrer) Write(p []byte) (int, error) { + panic("BUG: unexpected Write call") +} + +func (fa *fakeAddrer) Close() error { + panic("BUG: unexpected Close call") +} + +func (s *Server) releaseCtx(ctx *RequestCtx) { + if ctx.timeoutResponse != nil { + panic("BUG: cannot release timed out RequestCtx") + } + ctx.c = nil + ctx.fbr.c = nil + s.ctxPool.Put(ctx) +} + +func (s *Server) getServerName() []byte { + v := s.serverName.Load() + var serverName []byte + if v == nil { + serverName = []byte(s.Name) + if len(serverName) == 0 { + serverName = defaultServerName + } + s.serverName.Store(serverName) + } else { + serverName = v.([]byte) + } + return serverName +} + +func (s *Server) writeFastError(w io.Writer, statusCode int, msg string) { + w.Write(statusLine(statusCode)) + fmt.Fprintf(w, "Connection: close\r\n"+ + "Server: %s\r\n"+ + "Date: %s\r\n"+ + "Content-Type: text/plain\r\n"+ + "Content-Length: %d\r\n"+ + "\r\n"+ + "%s", + s.getServerName(), serverDate.Load(), len(msg), msg) +} + +func writeErrorResponse(bw *bufio.Writer, ctx *RequestCtx, err error) *bufio.Writer { + if _, ok := err.(*ErrSmallBuffer); ok { + ctx.Error("Too big request header", StatusRequestHeaderFieldsTooLarge) + } else { + ctx.Error("Error when parsing request", StatusBadRequest) + } + ctx.SetConnectionClose() + if bw == nil { + bw = acquireWriter(ctx) + } + writeResponse(ctx, bw) + bw.Flush() + return bw +} diff --git a/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.key b/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.key new file mode 100644 index 0000000..00a79a3 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG +3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U +wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0 +FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf +IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg +GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF +sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2 +sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D +uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb +K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3 +YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+ +DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk +B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV +Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x +IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY +wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj 
+wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D +FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m +tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX +fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU +ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk +K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT +6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt +9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN +Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV +c257YgaWmjK9uB0Y2r2VxS0G +-----END PRIVATE KEY----- diff --git a/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.pem b/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.pem new file mode 100644 index 0000000..93e77cd --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV +BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV +MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D +K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te ++z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij +L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1 +xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY +6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG +SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98 +L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2 +45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li +K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6 +X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI +whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd +-----END CERTIFICATE----- diff --git a/vendor/github.com/valyala/fasthttp/stackless/doc.go b/vendor/github.com/valyala/fasthttp/stackless/doc.go new file mode 100644 index 0000000..8c0cc49 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/stackless/doc.go @@ -0,0 +1,3 @@ +// Package stackless provides functionality that may save stack space +// for high number of concurrently running goroutines. +package stackless diff --git a/vendor/github.com/valyala/fasthttp/stackless/func.go b/vendor/github.com/valyala/fasthttp/stackless/func.go new file mode 100644 index 0000000..9a49bcc --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/stackless/func.go @@ -0,0 +1,79 @@ +package stackless + +import ( + "runtime" + "sync" +) + +// NewFunc returns stackless wrapper for the function f. +// +// Unlike f, the returned stackless wrapper doesn't use stack space +// on the goroutine that calls it. +// The wrapper may save a lot of stack space if the following conditions +// are met: +// +// - f doesn't contain blocking calls on network, I/O or channels; +// - f uses a lot of stack space; +// - the wrapper is called from high number of concurrent goroutines. +// +// The stackless wrapper returns false if the call cannot be processed +// at the moment due to high load. 
+func NewFunc(f func(ctx interface{})) func(ctx interface{}) bool { + if f == nil { + panic("BUG: f cannot be nil") + } + + funcWorkCh := make(chan *funcWork, runtime.GOMAXPROCS(-1)*2048) + onceInit := func() { + n := runtime.GOMAXPROCS(-1) + for i := 0; i < n; i++ { + go funcWorker(funcWorkCh, f) + } + } + var once sync.Once + + return func(ctx interface{}) bool { + once.Do(onceInit) + fw := getFuncWork() + fw.ctx = ctx + + select { + case funcWorkCh <- fw: + default: + putFuncWork(fw) + return false + } + <-fw.done + putFuncWork(fw) + return true + } +} + +func funcWorker(funcWorkCh <-chan *funcWork, f func(ctx interface{})) { + for fw := range funcWorkCh { + f(fw.ctx) + fw.done <- struct{}{} + } +} + +func getFuncWork() *funcWork { + v := funcWorkPool.Get() + if v == nil { + v = &funcWork{ + done: make(chan struct{}, 1), + } + } + return v.(*funcWork) +} + +func putFuncWork(fw *funcWork) { + fw.ctx = nil + funcWorkPool.Put(fw) +} + +var funcWorkPool sync.Pool + +type funcWork struct { + ctx interface{} + done chan struct{} +} diff --git a/vendor/github.com/valyala/fasthttp/stackless/writer.go b/vendor/github.com/valyala/fasthttp/stackless/writer.go new file mode 100644 index 0000000..9b9ff09 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/stackless/writer.go @@ -0,0 +1,138 @@ +package stackless + +import ( + "errors" + "fmt" + "github.com/valyala/bytebufferpool" + "io" +) + +// Writer is an interface stackless writer must conform to. +// +// The interface contains common subset for Writers from compress/* packages. +type Writer interface { + Write(p []byte) (int, error) + Flush() error + Close() error + Reset(w io.Writer) +} + +// NewWriterFunc must return new writer that will be wrapped into +// stackless writer. +type NewWriterFunc func(w io.Writer) Writer + +// NewWriter creates a stackless writer around a writer returned +// from newWriter. +// +// The returned writer writes data to dstW. +// +// Writers that use a lot of stack space may be wrapped into stackless writer, +// thus saving stack space for high number of concurrently running goroutines. 
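+
+// Editor's note: a hedged usage sketch (not part of this commit) for
+// NewWriter, whose implementation follows; *gzip.Writer from compress/gzip
+// satisfies the Writer interface above (Write, Flush, Close, Reset):
+//
+//	sw := stackless.NewWriter(dst, func(w io.Writer) stackless.Writer {
+//		return gzip.NewWriter(w)
+//	})
+//	sw.Write(data) // under high load Write may fail instead of blocking
+//	sw.Close()     // flushes the remaining compressed bytes to dst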
+func NewWriter(dstW io.Writer, newWriter NewWriterFunc) Writer { + w := &writer{ + dstW: dstW, + } + w.zw = newWriter(&w.xw) + return w +} + +type writer struct { + dstW io.Writer + zw Writer + xw xWriter + + err error + n int + + p []byte + op op +} + +type op int + +const ( + opWrite op = iota + opFlush + opClose + opReset +) + +func (w *writer) Write(p []byte) (int, error) { + w.p = p + err := w.do(opWrite) + w.p = nil + return w.n, err +} + +func (w *writer) Flush() error { + return w.do(opFlush) +} + +func (w *writer) Close() error { + return w.do(opClose) +} + +func (w *writer) Reset(dstW io.Writer) { + w.xw.Reset() + w.do(opReset) + w.dstW = dstW +} + +func (w *writer) do(op op) error { + w.op = op + if !stacklessWriterFunc(w) { + return errHighLoad + } + err := w.err + if err != nil { + return err + } + if w.xw.bb != nil && len(w.xw.bb.B) > 0 { + _, err = w.dstW.Write(w.xw.bb.B) + } + w.xw.Reset() + + return err +} + +var errHighLoad = errors.New("cannot compress data due to high load") + +var stacklessWriterFunc = NewFunc(writerFunc) + +func writerFunc(ctx interface{}) { + w := ctx.(*writer) + switch w.op { + case opWrite: + w.n, w.err = w.zw.Write(w.p) + case opFlush: + w.err = w.zw.Flush() + case opClose: + w.err = w.zw.Close() + case opReset: + w.zw.Reset(&w.xw) + w.err = nil + default: + panic(fmt.Sprintf("BUG: unexpected op: %d", w.op)) + } +} + +type xWriter struct { + bb *bytebufferpool.ByteBuffer +} + +func (w *xWriter) Write(p []byte) (int, error) { + if w.bb == nil { + w.bb = bufferPool.Get() + } + w.bb.Write(p) + return len(p), nil +} + +func (w *xWriter) Reset() { + if w.bb != nil { + bufferPool.Put(w.bb) + w.bb = nil + } +} + +var bufferPool bytebufferpool.Pool diff --git a/vendor/github.com/valyala/fasthttp/status.go b/vendor/github.com/valyala/fasthttp/status.go new file mode 100644 index 0000000..6687efb --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/status.go @@ -0,0 +1,176 @@ +package fasthttp + +import ( + "fmt" + "sync/atomic" +) + +// HTTP status codes were stolen from net/http. 
+const ( + StatusContinue = 100 // RFC 7231, 6.2.1 + StatusSwitchingProtocols = 101 // RFC 7231, 6.2.2 + StatusProcessing = 102 // RFC 2518, 10.1 + + StatusOK = 200 // RFC 7231, 6.3.1 + StatusCreated = 201 // RFC 7231, 6.3.2 + StatusAccepted = 202 // RFC 7231, 6.3.3 + StatusNonAuthoritativeInfo = 203 // RFC 7231, 6.3.4 + StatusNoContent = 204 // RFC 7231, 6.3.5 + StatusResetContent = 205 // RFC 7231, 6.3.6 + StatusPartialContent = 206 // RFC 7233, 4.1 + StatusMultiStatus = 207 // RFC 4918, 11.1 + StatusAlreadyReported = 208 // RFC 5842, 7.1 + StatusIMUsed = 226 // RFC 3229, 10.4.1 + + StatusMultipleChoices = 300 // RFC 7231, 6.4.1 + StatusMovedPermanently = 301 // RFC 7231, 6.4.2 + StatusFound = 302 // RFC 7231, 6.4.3 + StatusSeeOther = 303 // RFC 7231, 6.4.4 + StatusNotModified = 304 // RFC 7232, 4.1 + StatusUseProxy = 305 // RFC 7231, 6.4.5 + _ = 306 // RFC 7231, 6.4.6 (Unused) + StatusTemporaryRedirect = 307 // RFC 7231, 6.4.7 + StatusPermanentRedirect = 308 // RFC 7538, 3 + + StatusBadRequest = 400 // RFC 7231, 6.5.1 + StatusUnauthorized = 401 // RFC 7235, 3.1 + StatusPaymentRequired = 402 // RFC 7231, 6.5.2 + StatusForbidden = 403 // RFC 7231, 6.5.3 + StatusNotFound = 404 // RFC 7231, 6.5.4 + StatusMethodNotAllowed = 405 // RFC 7231, 6.5.5 + StatusNotAcceptable = 406 // RFC 7231, 6.5.6 + StatusProxyAuthRequired = 407 // RFC 7235, 3.2 + StatusRequestTimeout = 408 // RFC 7231, 6.5.7 + StatusConflict = 409 // RFC 7231, 6.5.8 + StatusGone = 410 // RFC 7231, 6.5.9 + StatusLengthRequired = 411 // RFC 7231, 6.5.10 + StatusPreconditionFailed = 412 // RFC 7232, 4.2 + StatusRequestEntityTooLarge = 413 // RFC 7231, 6.5.11 + StatusRequestURITooLong = 414 // RFC 7231, 6.5.12 + StatusUnsupportedMediaType = 415 // RFC 7231, 6.5.13 + StatusRequestedRangeNotSatisfiable = 416 // RFC 7233, 4.4 + StatusExpectationFailed = 417 // RFC 7231, 6.5.14 + StatusTeapot = 418 // RFC 7168, 2.3.3 + StatusUnprocessableEntity = 422 // RFC 4918, 11.2 + StatusLocked = 423 // RFC 4918, 11.3 + StatusFailedDependency = 424 // RFC 4918, 11.4 + StatusUpgradeRequired = 426 // RFC 7231, 6.5.15 + StatusPreconditionRequired = 428 // RFC 6585, 3 + StatusTooManyRequests = 429 // RFC 6585, 4 + StatusRequestHeaderFieldsTooLarge = 431 // RFC 6585, 5 + StatusUnavailableForLegalReasons = 451 // RFC 7725, 3 + + StatusInternalServerError = 500 // RFC 7231, 6.6.1 + StatusNotImplemented = 501 // RFC 7231, 6.6.2 + StatusBadGateway = 502 // RFC 7231, 6.6.3 + StatusServiceUnavailable = 503 // RFC 7231, 6.6.4 + StatusGatewayTimeout = 504 // RFC 7231, 6.6.5 + StatusHTTPVersionNotSupported = 505 // RFC 7231, 6.6.6 + StatusVariantAlsoNegotiates = 506 // RFC 2295, 8.1 + StatusInsufficientStorage = 507 // RFC 4918, 11.5 + StatusLoopDetected = 508 // RFC 5842, 7.2 + StatusNotExtended = 510 // RFC 2774, 7 + StatusNetworkAuthenticationRequired = 511 // RFC 6585, 6 +) + +var ( + statusLines atomic.Value + + statusMessages = map[int]string{ + StatusContinue: "Continue", + StatusSwitchingProtocols: "Switching Protocols", + StatusProcessing: "Processing", + + StatusOK: "OK", + StatusCreated: "Created", + StatusAccepted: "Accepted", + StatusNonAuthoritativeInfo: "Non-Authoritative Information", + StatusNoContent: "No Content", + StatusResetContent: "Reset Content", + StatusPartialContent: "Partial Content", + StatusMultiStatus: "Multi-Status", + StatusAlreadyReported: "Already Reported", + StatusIMUsed: "IM Used", + + StatusMultipleChoices: "Multiple Choices", + StatusMovedPermanently: "Moved Permanently", + StatusFound: "Found", + StatusSeeOther: "See 
Other", + StatusNotModified: "Not Modified", + StatusUseProxy: "Use Proxy", + StatusTemporaryRedirect: "Temporary Redirect", + StatusPermanentRedirect: "Permanent Redirect", + + StatusBadRequest: "Bad Request", + StatusUnauthorized: "Unauthorized", + StatusPaymentRequired: "Payment Required", + StatusForbidden: "Forbidden", + StatusNotFound: "Not Found", + StatusMethodNotAllowed: "Method Not Allowed", + StatusNotAcceptable: "Not Acceptable", + StatusProxyAuthRequired: "Proxy Authentication Required", + StatusRequestTimeout: "Request Timeout", + StatusConflict: "Conflict", + StatusGone: "Gone", + StatusLengthRequired: "Length Required", + StatusPreconditionFailed: "Precondition Failed", + StatusRequestEntityTooLarge: "Request Entity Too Large", + StatusRequestURITooLong: "Request URI Too Long", + StatusUnsupportedMediaType: "Unsupported Media Type", + StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable", + StatusExpectationFailed: "Expectation Failed", + StatusTeapot: "I'm a teapot", + StatusUnprocessableEntity: "Unprocessable Entity", + StatusLocked: "Locked", + StatusFailedDependency: "Failed Dependency", + StatusUpgradeRequired: "Upgrade Required", + StatusPreconditionRequired: "Precondition Required", + StatusTooManyRequests: "Too Many Requests", + StatusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large", + StatusUnavailableForLegalReasons: "Unavailable For Legal Reasons", + + StatusInternalServerError: "Internal Server Error", + StatusNotImplemented: "Not Implemented", + StatusBadGateway: "Bad Gateway", + StatusServiceUnavailable: "Service Unavailable", + StatusGatewayTimeout: "Gateway Timeout", + StatusHTTPVersionNotSupported: "HTTP Version Not Supported", + StatusVariantAlsoNegotiates: "Variant Also Negotiates", + StatusInsufficientStorage: "Insufficient Storage", + StatusLoopDetected: "Loop Detected", + StatusNotExtended: "Not Extended", + StatusNetworkAuthenticationRequired: "Network Authentication Required", + } +) + +// StatusMessage returns HTTP status message for the given status code. +func StatusMessage(statusCode int) string { + s := statusMessages[statusCode] + if s == "" { + s = "Unknown Status Code" + } + return s +} + +func init() { + statusLines.Store(make(map[int][]byte)) +} + +func statusLine(statusCode int) []byte { + m := statusLines.Load().(map[int][]byte) + h := m[statusCode] + if h != nil { + return h + } + + statusText := StatusMessage(statusCode) + + h = []byte(fmt.Sprintf("HTTP/1.1 %d %s\r\n", statusCode, statusText)) + newM := make(map[int][]byte, len(m)+1) + for k, v := range m { + newM[k] = v + } + newM[statusCode] = h + statusLines.Store(newM) + return h +} diff --git a/vendor/github.com/valyala/fasthttp/stream.go b/vendor/github.com/valyala/fasthttp/stream.go new file mode 100644 index 0000000..aa23b1a --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/stream.go @@ -0,0 +1,54 @@ +package fasthttp + +import ( + "bufio" + "io" + "sync" + + "github.com/valyala/fasthttp/fasthttputil" +) + +// StreamWriter must write data to w. +// +// Usually StreamWriter writes data to w in a loop (aka 'data streaming'). +// +// StreamWriter must return immediately if w returns error. +// +// Since the written data is buffered, do not forget calling w.Flush +// when the data must be propagated to reader. +type StreamWriter func(w *bufio.Writer) + +// NewStreamReader returns a reader, which replays all the data generated by sw. +// +// The returned reader may be passed to Response.SetBodyStream. 
+// +// Close must be called on the returned reader after all the required data +// has been read. Otherwise goroutine leak may occur. +// +// See also Response.SetBodyStreamWriter. +func NewStreamReader(sw StreamWriter) io.ReadCloser { + pc := fasthttputil.NewPipeConns() + pw := pc.Conn1() + pr := pc.Conn2() + + var bw *bufio.Writer + v := streamWriterBufPool.Get() + if v == nil { + bw = bufio.NewWriter(pw) + } else { + bw = v.(*bufio.Writer) + bw.Reset(pw) + } + + go func() { + sw(bw) + bw.Flush() + pw.Close() + + streamWriterBufPool.Put(bw) + }() + + return pr +} + +var streamWriterBufPool sync.Pool diff --git a/vendor/github.com/valyala/fasthttp/strings.go b/vendor/github.com/valyala/fasthttp/strings.go new file mode 100644 index 0000000..656df1c --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/strings.go @@ -0,0 +1,71 @@ +package fasthttp + +var ( + defaultServerName = []byte("fasthttp") + defaultUserAgent = []byte("fasthttp") + defaultContentType = []byte("text/plain; charset=utf-8") +) + +var ( + strSlash = []byte("/") + strSlashSlash = []byte("//") + strSlashDotDot = []byte("/..") + strSlashDotSlash = []byte("/./") + strSlashDotDotSlash = []byte("/../") + strCRLF = []byte("\r\n") + strHTTP = []byte("http") + strHTTPS = []byte("https") + strHTTP11 = []byte("HTTP/1.1") + strColonSlashSlash = []byte("://") + strColonSpace = []byte(": ") + strGMT = []byte("GMT") + + strResponseContinue = []byte("HTTP/1.1 100 Continue\r\n\r\n") + + strGet = []byte("GET") + strHead = []byte("HEAD") + strPost = []byte("POST") + strPut = []byte("PUT") + strDelete = []byte("DELETE") + + strExpect = []byte("Expect") + strConnection = []byte("Connection") + strContentLength = []byte("Content-Length") + strContentType = []byte("Content-Type") + strDate = []byte("Date") + strHost = []byte("Host") + strReferer = []byte("Referer") + strServer = []byte("Server") + strTransferEncoding = []byte("Transfer-Encoding") + strContentEncoding = []byte("Content-Encoding") + strAcceptEncoding = []byte("Accept-Encoding") + strUserAgent = []byte("User-Agent") + strCookie = []byte("Cookie") + strSetCookie = []byte("Set-Cookie") + strLocation = []byte("Location") + strIfModifiedSince = []byte("If-Modified-Since") + strLastModified = []byte("Last-Modified") + strAcceptRanges = []byte("Accept-Ranges") + strRange = []byte("Range") + strContentRange = []byte("Content-Range") + + strCookieExpires = []byte("expires") + strCookieDomain = []byte("domain") + strCookiePath = []byte("path") + strCookieHTTPOnly = []byte("HttpOnly") + strCookieSecure = []byte("secure") + + strClose = []byte("close") + strGzip = []byte("gzip") + strDeflate = []byte("deflate") + strKeepAlive = []byte("keep-alive") + strKeepAliveCamelCase = []byte("Keep-Alive") + strUpgrade = []byte("Upgrade") + strChunked = []byte("chunked") + strIdentity = []byte("identity") + str100Continue = []byte("100-continue") + strPostArgsContentType = []byte("application/x-www-form-urlencoded") + strMultipartFormData = []byte("multipart/form-data") + strBoundary = []byte("boundary") + strBytes = []byte("bytes") +) diff --git a/vendor/github.com/valyala/fasthttp/tcpdialer.go b/vendor/github.com/valyala/fasthttp/tcpdialer.go new file mode 100644 index 0000000..e31fd75 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/tcpdialer.go @@ -0,0 +1,369 @@ +package fasthttp + +import ( + "errors" + "net" + "strconv" + "sync" + "sync/atomic" + "time" +) + +// Dial dials the given TCP addr using tcp4. 
+//
+// This function has the following additional features compared to net.Dial:
+//
+//   * It reduces load on DNS resolver by caching resolved TCP addresses
+//     for DefaultDNSCacheDuration.
+//   * It dials all the resolved TCP addresses in a round-robin manner until
+//     connection is established. This may be useful if certain addresses
+//     are temporarily unreachable.
+//   * It returns ErrDialTimeout if connection cannot be established during
+//     DefaultDialTimeout seconds. Use DialTimeout for customizing dial timeout.
+//
+// This dialer is intended for custom code wrapping before passing
+// to Client.Dial or HostClient.Dial.
+//
+// For instance, per-host counters and/or limits may be implemented
+// by such wrappers.
+//
+// The addr passed to the function must contain port. Example addr values:
+//
+//   * foobar.baz:443
+//   * foo.bar:80
+//   * aaa.com:8080
+func Dial(addr string) (net.Conn, error) {
+	return getDialer(DefaultDialTimeout, false)(addr)
+}
+
+// DialTimeout dials the given TCP addr using tcp4 using the given timeout.
+//
+// This function has the following additional features compared to net.Dial:
+//
+//   * It reduces load on DNS resolver by caching resolved TCP addresses
+//     for DefaultDNSCacheDuration.
+//   * It dials all the resolved TCP addresses in a round-robin manner until
+//     connection is established. This may be useful if certain addresses
+//     are temporarily unreachable.
+//
+// This dialer is intended for custom code wrapping before passing
+// to Client.Dial or HostClient.Dial.
+//
+// For instance, per-host counters and/or limits may be implemented
+// by such wrappers.
+//
+// The addr passed to the function must contain port. Example addr values:
+//
+//   * foobar.baz:443
+//   * foo.bar:80
+//   * aaa.com:8080
+func DialTimeout(addr string, timeout time.Duration) (net.Conn, error) {
+	return getDialer(timeout, false)(addr)
+}
+
+// DialDualStack dials the given TCP addr using both tcp4 and tcp6.
+//
+// This function has the following additional features compared to net.Dial:
+//
+//   * It reduces load on DNS resolver by caching resolved TCP addresses
+//     for DefaultDNSCacheDuration.
+//   * It dials all the resolved TCP addresses in a round-robin manner until
+//     connection is established. This may be useful if certain addresses
+//     are temporarily unreachable.
+//   * It returns ErrDialTimeout if connection cannot be established during
+//     DefaultDialTimeout seconds. Use DialDualStackTimeout for custom dial
+//     timeout.
+//
+// This dialer is intended for custom code wrapping before passing
+// to Client.Dial or HostClient.Dial.
+//
+// For instance, per-host counters and/or limits may be implemented
+// by such wrappers.
+//
+// The addr passed to the function must contain port. Example addr values:
+//
+//   * foobar.baz:443
+//   * foo.bar:80
+//   * aaa.com:8080
+func DialDualStack(addr string) (net.Conn, error) {
+	return getDialer(DefaultDialTimeout, true)(addr)
+}
+
+// DialDualStackTimeout dials the given TCP addr using both tcp4 and tcp6
+// using the given timeout.
+//
+// This function has the following additional features compared to net.Dial:
+//
+//   * It reduces load on DNS resolver by caching resolved TCP addresses
+//     for DefaultDNSCacheDuration.
+//   * It dials all the resolved TCP addresses in a round-robin manner until
+//     connection is established. This may be useful if certain addresses
+//     are temporarily unreachable.
+//
+// This dialer is intended for custom code wrapping before passing
+// to Client.Dial or HostClient.Dial.
+// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func DialDualStackTimeout(addr string, timeout time.Duration) (net.Conn, error) { + return getDialer(timeout, true)(addr) +} + +func getDialer(timeout time.Duration, dualStack bool) DialFunc { + if timeout <= 0 { + timeout = DefaultDialTimeout + } + timeoutRounded := int(timeout.Seconds()*10 + 9) + + m := dialMap + if dualStack { + m = dialDualStackMap + } + + dialMapLock.Lock() + d := m[timeoutRounded] + if d == nil { + dialer := dialerStd + if dualStack { + dialer = dialerDualStack + } + d = dialer.NewDial(timeout) + m[timeoutRounded] = d + } + dialMapLock.Unlock() + return d +} + +var ( + dialerStd = &tcpDialer{} + dialerDualStack = &tcpDialer{DualStack: true} + + dialMap = make(map[int]DialFunc) + dialDualStackMap = make(map[int]DialFunc) + dialMapLock sync.Mutex +) + +type tcpDialer struct { + DualStack bool + + tcpAddrsLock sync.Mutex + tcpAddrsMap map[string]*tcpAddrEntry + + concurrencyCh chan struct{} + + once sync.Once +} + +const maxDialConcurrency = 1000 + +func (d *tcpDialer) NewDial(timeout time.Duration) DialFunc { + d.once.Do(func() { + d.concurrencyCh = make(chan struct{}, maxDialConcurrency) + d.tcpAddrsMap = make(map[string]*tcpAddrEntry) + go d.tcpAddrsClean() + }) + + return func(addr string) (net.Conn, error) { + addrs, idx, err := d.getTCPAddrs(addr) + if err != nil { + return nil, err + } + network := "tcp4" + if d.DualStack { + network = "tcp" + } + + var conn net.Conn + n := uint32(len(addrs)) + deadline := time.Now().Add(timeout) + for n > 0 { + conn, err = tryDial(network, &addrs[idx%n], deadline, d.concurrencyCh) + if err == nil { + return conn, nil + } + if err == ErrDialTimeout { + return nil, err + } + idx++ + n-- + } + return nil, err + } +} + +func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyCh chan struct{}) (net.Conn, error) { + timeout := -time.Since(deadline) + if timeout <= 0 { + return nil, ErrDialTimeout + } + + select { + case concurrencyCh <- struct{}{}: + default: + tc := acquireTimer(timeout) + isTimeout := false + select { + case concurrencyCh <- struct{}{}: + case <-tc.C: + isTimeout = true + } + releaseTimer(tc) + if isTimeout { + return nil, ErrDialTimeout + } + } + + timeout = -time.Since(deadline) + if timeout <= 0 { + <-concurrencyCh + return nil, ErrDialTimeout + } + + chv := dialResultChanPool.Get() + if chv == nil { + chv = make(chan dialResult, 1) + } + ch := chv.(chan dialResult) + go func() { + var dr dialResult + dr.conn, dr.err = net.DialTCP(network, nil, addr) + ch <- dr + <-concurrencyCh + }() + + var ( + conn net.Conn + err error + ) + + tc := acquireTimer(timeout) + select { + case dr := <-ch: + conn = dr.conn + err = dr.err + dialResultChanPool.Put(ch) + case <-tc.C: + err = ErrDialTimeout + } + releaseTimer(tc) + + return conn, err +} + +var dialResultChanPool sync.Pool + +type dialResult struct { + conn net.Conn + err error +} + +// ErrDialTimeout is returned when TCP dialing is timed out. +var ErrDialTimeout = errors.New("dialing to the given TCP address timed out") + +// DefaultDialTimeout is timeout used by Dial and DialDualStack +// for establishing TCP connections. 
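+
+// Editor's note: a hedged sketch (not part of this commit) of the wrapping
+// pattern the Dial* docs above suggest for Client.Dial; countingDial and
+// dialCount are hypothetical names:
+//
+//	var dialCount uint64
+//
+//	func countingDial(addr string) (net.Conn, error) {
+//		atomic.AddUint64(&dialCount, 1)
+//		return fasthttp.DialTimeout(addr, 3*time.Second)
+//	}
+//
+//	c := &fasthttp.Client{Dial: countingDial}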
+const DefaultDialTimeout = 3 * time.Second
+
+type tcpAddrEntry struct {
+	addrs    []net.TCPAddr
+	addrsIdx uint32
+
+	resolveTime time.Time
+	pending     bool
+}
+
+// DefaultDNSCacheDuration is the duration for caching resolved TCP addresses
+// by Dial* functions.
+const DefaultDNSCacheDuration = time.Minute
+
+func (d *tcpDialer) tcpAddrsClean() {
+	expireDuration := 2 * DefaultDNSCacheDuration
+	for {
+		time.Sleep(time.Second)
+		t := time.Now()
+
+		d.tcpAddrsLock.Lock()
+		for k, e := range d.tcpAddrsMap {
+			if t.Sub(e.resolveTime) > expireDuration {
+				delete(d.tcpAddrsMap, k)
+			}
+		}
+		d.tcpAddrsLock.Unlock()
+	}
+}
+
+func (d *tcpDialer) getTCPAddrs(addr string) ([]net.TCPAddr, uint32, error) {
+	d.tcpAddrsLock.Lock()
+	e := d.tcpAddrsMap[addr]
+	if e != nil && !e.pending && time.Since(e.resolveTime) > DefaultDNSCacheDuration {
+		e.pending = true
+		e = nil
+	}
+	d.tcpAddrsLock.Unlock()
+
+	if e == nil {
+		addrs, err := resolveTCPAddrs(addr, d.DualStack)
+		if err != nil {
+			d.tcpAddrsLock.Lock()
+			e = d.tcpAddrsMap[addr]
+			if e != nil && e.pending {
+				e.pending = false
+			}
+			d.tcpAddrsLock.Unlock()
+			return nil, 0, err
+		}
+
+		e = &tcpAddrEntry{
+			addrs:       addrs,
+			resolveTime: time.Now(),
+		}
+
+		d.tcpAddrsLock.Lock()
+		d.tcpAddrsMap[addr] = e
+		d.tcpAddrsLock.Unlock()
+	}
+
+	idx := atomic.AddUint32(&e.addrsIdx, 1)
+	return e.addrs, idx, nil
+}
+
+func resolveTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, error) {
+	host, portS, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, err
+	}
+	port, err := strconv.Atoi(portS)
+	if err != nil {
+		return nil, err
+	}
+
+	ips, err := net.LookupIP(host)
+	if err != nil {
+		return nil, err
+	}
+
+	n := len(ips)
+	addrs := make([]net.TCPAddr, 0, n)
+	for i := 0; i < n; i++ {
+		ip := ips[i]
+		if !dualStack && ip.To4() == nil {
+			continue
+		}
+		addrs = append(addrs, net.TCPAddr{
+			IP:   ip,
+			Port: port,
+		})
+	}
+	if len(addrs) == 0 {
+		return nil, errNoDNSEntries
+	}
+	return addrs, nil
+}
+
+var errNoDNSEntries = errors.New("couldn't find DNS entries for the given domain. Try using DialDualStack")
diff --git a/vendor/github.com/valyala/fasthttp/timer.go b/vendor/github.com/valyala/fasthttp/timer.go
new file mode 100644
index 0000000..bb12acb
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/timer.go
@@ -0,0 +1,44 @@
+package fasthttp
+
+import (
+	"sync"
+	"time"
+)
+
+func initTimer(t *time.Timer, timeout time.Duration) *time.Timer {
+	if t == nil {
+		return time.NewTimer(timeout)
+	}
+	if t.Reset(timeout) {
+		panic("BUG: active timer trapped into initTimer()")
+	}
+	return t
+}
+
+func stopTimer(t *time.Timer) {
+	if !t.Stop() {
+		// Collect possibly added time from the channel
+		// if timer has been stopped and nobody collected its value.
+		select {
+		case <-t.C:
+		default:
+		}
+	}
+}
+
+func acquireTimer(timeout time.Duration) *time.Timer {
+	v := timerPool.Get()
+	if v == nil {
+		return time.NewTimer(timeout)
+	}
+	t := v.(*time.Timer)
+	initTimer(t, timeout)
+	return t
+}
+
+func releaseTimer(t *time.Timer) {
+	stopTimer(t)
+	timerPool.Put(t)
+}
+
+var timerPool sync.Pool
diff --git a/vendor/github.com/valyala/fasthttp/uri.go b/vendor/github.com/valyala/fasthttp/uri.go
new file mode 100644
index 0000000..0f6bc04
--- /dev/null
+++ b/vendor/github.com/valyala/fasthttp/uri.go
@@ -0,0 +1,520 @@
+package fasthttp
+
+import (
+	"bytes"
+	"io"
+	"sync"
+)
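+
+// Editor's note: a minimal sketch (not part of this commit) of the
+// acquire/update/release pattern used by this package itself, e.g. in
+// RequestCtx.Redirect:
+//
+//	u := fasthttp.AcquireURI()
+//	u.Parse(nil, []byte("http://example.com/foo/bar?baz=123"))
+//	u.Update("/aaa/bb?cc") // missing host: only the RequestURI part changes
+//	// use u.FullURI() here; the returned bytes are owned by u
+//	fasthttp.ReleaseURI(u)
+
+// AcquireURI returns an empty URI instance from the pool.
+//
+// Release the URI with ReleaseURI after the URI is no longer needed.
+// This allows reducing GC load.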
+func AcquireURI() *URI { + return uriPool.Get().(*URI) +} + +// ReleaseURI releases the URI acquired via AcquireURI. +// +// The released URI mustn't be used after releasing it, otherwise data races +// may occur. +func ReleaseURI(u *URI) { + u.Reset() + uriPool.Put(u) +} + +var uriPool = &sync.Pool{ + New: func() interface{} { + return &URI{} + }, +} + +// URI represents URI :) . +// +// It is forbidden copying URI instances. Create new instance and use CopyTo +// instead. +// +// URI instance MUST NOT be used from concurrently running goroutines. +type URI struct { + noCopy noCopy + + pathOriginal []byte + scheme []byte + path []byte + queryString []byte + hash []byte + host []byte + + queryArgs Args + parsedQueryArgs bool + + fullURI []byte + requestURI []byte + + h *RequestHeader +} + +// CopyTo copies uri contents to dst. +func (u *URI) CopyTo(dst *URI) { + dst.Reset() + dst.pathOriginal = append(dst.pathOriginal[:0], u.pathOriginal...) + dst.scheme = append(dst.scheme[:0], u.scheme...) + dst.path = append(dst.path[:0], u.path...) + dst.queryString = append(dst.queryString[:0], u.queryString...) + dst.hash = append(dst.hash[:0], u.hash...) + dst.host = append(dst.host[:0], u.host...) + + u.queryArgs.CopyTo(&dst.queryArgs) + dst.parsedQueryArgs = u.parsedQueryArgs + + // fullURI and requestURI shouldn't be copied, since they are created + // from scratch on each FullURI() and RequestURI() call. + dst.h = u.h +} + +// Hash returns URI hash, i.e. qwe of http://aaa.com/foo/bar?baz=123#qwe . +// +// The returned value is valid until the next URI method call. +func (u *URI) Hash() []byte { + return u.hash +} + +// SetHash sets URI hash. +func (u *URI) SetHash(hash string) { + u.hash = append(u.hash[:0], hash...) +} + +// SetHashBytes sets URI hash. +func (u *URI) SetHashBytes(hash []byte) { + u.hash = append(u.hash[:0], hash...) +} + +// QueryString returns URI query string, +// i.e. baz=123 of http://aaa.com/foo/bar?baz=123#qwe . +// +// The returned value is valid until the next URI method call. +func (u *URI) QueryString() []byte { + return u.queryString +} + +// SetQueryString sets URI query string. +func (u *URI) SetQueryString(queryString string) { + u.queryString = append(u.queryString[:0], queryString...) + u.parsedQueryArgs = false +} + +// SetQueryStringBytes sets URI query string. +func (u *URI) SetQueryStringBytes(queryString []byte) { + u.queryString = append(u.queryString[:0], queryString...) + u.parsedQueryArgs = false +} + +// Path returns URI path, i.e. /foo/bar of http://aaa.com/foo/bar?baz=123#qwe . +// +// The returned path is always urldecoded and normalized, +// i.e. '//f%20obar/baz/../zzz' becomes '/f obar/zzz'. +// +// The returned value is valid until the next URI method call. +func (u *URI) Path() []byte { + path := u.path + if len(path) == 0 { + path = strSlash + } + return path +} + +// SetPath sets URI path. +func (u *URI) SetPath(path string) { + u.pathOriginal = append(u.pathOriginal[:0], path...) + u.path = normalizePath(u.path, u.pathOriginal) +} + +// SetPathBytes sets URI path. +func (u *URI) SetPathBytes(path []byte) { + u.pathOriginal = append(u.pathOriginal[:0], path...) + u.path = normalizePath(u.path, u.pathOriginal) +} + +// PathOriginal returns the original path from requestURI passed to URI.Parse(). +// +// The returned value is valid until the next URI method call. +func (u *URI) PathOriginal() []byte { + return u.pathOriginal +} + +// Scheme returns URI scheme, i.e. http of http://aaa.com/foo/bar?baz=123#qwe . 
+// +// Returned scheme is always lowercased. +// +// The returned value is valid until the next URI method call. +func (u *URI) Scheme() []byte { + scheme := u.scheme + if len(scheme) == 0 { + scheme = strHTTP + } + return scheme +} + +// SetScheme sets URI scheme, i.e. http, https, ftp, etc. +func (u *URI) SetScheme(scheme string) { + u.scheme = append(u.scheme[:0], scheme...) + lowercaseBytes(u.scheme) +} + +// SetSchemeBytes sets URI scheme, i.e. http, https, ftp, etc. +func (u *URI) SetSchemeBytes(scheme []byte) { + u.scheme = append(u.scheme[:0], scheme...) + lowercaseBytes(u.scheme) +} + +// Reset clears uri. +func (u *URI) Reset() { + u.pathOriginal = u.pathOriginal[:0] + u.scheme = u.scheme[:0] + u.path = u.path[:0] + u.queryString = u.queryString[:0] + u.hash = u.hash[:0] + + u.host = u.host[:0] + u.queryArgs.Reset() + u.parsedQueryArgs = false + + // There is no need in u.fullURI = u.fullURI[:0], since full uri + // is calucalted on each call to FullURI(). + + // There is no need in u.requestURI = u.requestURI[:0], since requestURI + // is calculated on each call to RequestURI(). + + u.h = nil +} + +// Host returns host part, i.e. aaa.com of http://aaa.com/foo/bar?baz=123#qwe . +// +// Host is always lowercased. +func (u *URI) Host() []byte { + if len(u.host) == 0 && u.h != nil { + u.host = append(u.host[:0], u.h.Host()...) + lowercaseBytes(u.host) + u.h = nil + } + return u.host +} + +// SetHost sets host for the uri. +func (u *URI) SetHost(host string) { + u.host = append(u.host[:0], host...) + lowercaseBytes(u.host) +} + +// SetHostBytes sets host for the uri. +func (u *URI) SetHostBytes(host []byte) { + u.host = append(u.host[:0], host...) + lowercaseBytes(u.host) +} + +// Parse initializes URI from the given host and uri. +// +// host may be nil. In this case uri must contain fully qualified uri, +// i.e. with scheme and host. http is assumed if scheme is omitted. +// +// uri may contain e.g. RequestURI without scheme and host if host is non-empty. +func (u *URI) Parse(host, uri []byte) { + u.parse(host, uri, nil) +} + +func (u *URI) parseQuick(uri []byte, h *RequestHeader, isTLS bool) { + u.parse(nil, uri, h) + if isTLS { + u.scheme = append(u.scheme[:0], strHTTPS...) + } +} + +func (u *URI) parse(host, uri []byte, h *RequestHeader) { + u.Reset() + u.h = h + + scheme, host, uri := splitHostURI(host, uri) + u.scheme = append(u.scheme, scheme...) + lowercaseBytes(u.scheme) + u.host = append(u.host, host...) + lowercaseBytes(u.host) + + b := uri + queryIndex := bytes.IndexByte(b, '?') + fragmentIndex := bytes.IndexByte(b, '#') + // Ignore query in fragment part + if fragmentIndex >= 0 && queryIndex > fragmentIndex { + queryIndex = -1 + } + + if queryIndex < 0 && fragmentIndex < 0 { + u.pathOriginal = append(u.pathOriginal, b...) + u.path = normalizePath(u.path, u.pathOriginal) + return + } + + if queryIndex >= 0 { + // Path is everything up to the start of the query + u.pathOriginal = append(u.pathOriginal, b[:queryIndex]...) + u.path = normalizePath(u.path, u.pathOriginal) + + if fragmentIndex < 0 { + u.queryString = append(u.queryString, b[queryIndex+1:]...) + } else { + u.queryString = append(u.queryString, b[queryIndex+1:fragmentIndex]...) + u.hash = append(u.hash, b[fragmentIndex+1:]...) + } + return + } + + // fragmentIndex >= 0 && queryIndex < 0 + // Path is up to the start of fragment + u.pathOriginal = append(u.pathOriginal, b[:fragmentIndex]...) + u.path = normalizePath(u.path, u.pathOriginal) + u.hash = append(u.hash, b[fragmentIndex+1:]...) 
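+	// (no '?' outside the fragment, so queryString stays empty in this branch)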
+} + +func normalizePath(dst, src []byte) []byte { + dst = dst[:0] + dst = addLeadingSlash(dst, src) + dst = decodeArgAppend(dst, src, false) + + // remove duplicate slashes + b := dst + bSize := len(b) + for { + n := bytes.Index(b, strSlashSlash) + if n < 0 { + break + } + b = b[n:] + copy(b, b[1:]) + b = b[:len(b)-1] + bSize-- + } + dst = dst[:bSize] + + // remove /./ parts + b = dst + for { + n := bytes.Index(b, strSlashDotSlash) + if n < 0 { + break + } + nn := n + len(strSlashDotSlash) - 1 + copy(b[n:], b[nn:]) + b = b[:len(b)-nn+n] + } + + // remove /foo/../ parts + for { + n := bytes.Index(b, strSlashDotDotSlash) + if n < 0 { + break + } + nn := bytes.LastIndexByte(b[:n], '/') + if nn < 0 { + nn = 0 + } + n += len(strSlashDotDotSlash) - 1 + copy(b[nn:], b[n:]) + b = b[:len(b)-n+nn] + } + + // remove trailing /foo/.. + n := bytes.LastIndex(b, strSlashDotDot) + if n >= 0 && n+len(strSlashDotDot) == len(b) { + nn := bytes.LastIndexByte(b[:n], '/') + if nn < 0 { + return strSlash + } + b = b[:nn+1] + } + + return b +} + +// RequestURI returns RequestURI - i.e. URI without Scheme and Host. +func (u *URI) RequestURI() []byte { + dst := appendQuotedPath(u.requestURI[:0], u.Path()) + if u.queryArgs.Len() > 0 { + dst = append(dst, '?') + dst = u.queryArgs.AppendBytes(dst) + } else if len(u.queryString) > 0 { + dst = append(dst, '?') + dst = append(dst, u.queryString...) + } + if len(u.hash) > 0 { + dst = append(dst, '#') + dst = append(dst, u.hash...) + } + u.requestURI = dst + return u.requestURI +} + +// LastPathSegment returns the last part of uri path after '/'. +// +// Examples: +// +// * For /foo/bar/baz.html path returns baz.html. +// * For /foo/bar/ returns empty byte slice. +// * For /foobar.js returns foobar.js. +func (u *URI) LastPathSegment() []byte { + path := u.Path() + n := bytes.LastIndexByte(path, '/') + if n < 0 { + return path + } + return path[n+1:] +} + +// Update updates uri. +// +// The following newURI types are accepted: +// +// * Absolute, i.e. http://foobar.com/aaa/bb?cc . In this case the original +// uri is replaced by newURI. +// * Absolute without scheme, i.e. //foobar.com/aaa/bb?cc. In this case +// the original scheme is preserved. +// * Missing host, i.e. /aaa/bb?cc . In this case only RequestURI part +// of the original uri is replaced. +// * Relative path, i.e. xx?yy=abc . In this case the original RequestURI +// is updated according to the new relative path. +func (u *URI) Update(newURI string) { + u.UpdateBytes(s2b(newURI)) +} + +// UpdateBytes updates uri. +// +// The following newURI types are accepted: +// +// * Absolute, i.e. http://foobar.com/aaa/bb?cc . In this case the original +// uri is replaced by newURI. +// * Absolute without scheme, i.e. //foobar.com/aaa/bb?cc. In this case +// the original scheme is preserved. +// * Missing host, i.e. /aaa/bb?cc . In this case only RequestURI part +// of the original uri is replaced. +// * Relative path, i.e. xx?yy=abc . In this case the original RequestURI +// is updated according to the new relative path. +func (u *URI) UpdateBytes(newURI []byte) { + u.requestURI = u.updateBytes(newURI, u.requestURI) +} + +func (u *URI) updateBytes(newURI, buf []byte) []byte { + if len(newURI) == 0 { + return buf + } + + n := bytes.Index(newURI, strSlashSlash) + if n >= 0 { + // absolute uri + var b [32]byte + schemeOriginal := b[:0] + if len(u.scheme) > 0 { + schemeOriginal = append([]byte(nil), u.scheme...) 
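+			// (used below to restore the scheme if newURI carries none)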
+ } + u.Parse(nil, newURI) + if len(schemeOriginal) > 0 && len(u.scheme) == 0 { + u.scheme = append(u.scheme[:0], schemeOriginal...) + } + return buf + } + + if newURI[0] == '/' { + // uri without host + buf = u.appendSchemeHost(buf[:0]) + buf = append(buf, newURI...) + u.Parse(nil, buf) + return buf + } + + // relative path + switch newURI[0] { + case '?': + // query string only update + u.SetQueryStringBytes(newURI[1:]) + return append(buf[:0], u.FullURI()...) + case '#': + // update only hash + u.SetHashBytes(newURI[1:]) + return append(buf[:0], u.FullURI()...) + default: + // update the last path part after the slash + path := u.Path() + n = bytes.LastIndexByte(path, '/') + if n < 0 { + panic("BUG: path must contain at least one slash") + } + buf = u.appendSchemeHost(buf[:0]) + buf = appendQuotedPath(buf, path[:n+1]) + buf = append(buf, newURI...) + u.Parse(nil, buf) + return buf + } +} + +// FullURI returns full uri in the form {Scheme}://{Host}{RequestURI}#{Hash}. +func (u *URI) FullURI() []byte { + u.fullURI = u.AppendBytes(u.fullURI[:0]) + return u.fullURI +} + +// AppendBytes appends full uri to dst and returns the extended dst. +func (u *URI) AppendBytes(dst []byte) []byte { + dst = u.appendSchemeHost(dst) + return append(dst, u.RequestURI()...) +} + +func (u *URI) appendSchemeHost(dst []byte) []byte { + dst = append(dst, u.Scheme()...) + dst = append(dst, strColonSlashSlash...) + return append(dst, u.Host()...) +} + +// WriteTo writes full uri to w. +// +// WriteTo implements io.WriterTo interface. +func (u *URI) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(u.FullURI()) + return int64(n), err +} + +// String returns full uri. +func (u *URI) String() string { + return string(u.FullURI()) +} + +func splitHostURI(host, uri []byte) ([]byte, []byte, []byte) { + n := bytes.Index(uri, strSlashSlash) + if n < 0 { + return strHTTP, host, uri + } + scheme := uri[:n] + if bytes.IndexByte(scheme, '/') >= 0 { + return strHTTP, host, uri + } + if len(scheme) > 0 && scheme[len(scheme)-1] == ':' { + scheme = scheme[:len(scheme)-1] + } + n += len(strSlashSlash) + uri = uri[n:] + n = bytes.IndexByte(uri, '/') + if n < 0 { + return scheme, uri, strSlash + } + return scheme, uri[:n], uri[n:] +} + +// QueryArgs returns query args. 
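+//
+// The args are parsed lazily from the query string on the first call.
+// Illustrative example (hypothetical URI): for http://aaa.com/foo?bar=baz,
+// u.QueryArgs().Peek("bar") returns []byte("baz").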
+func (u *URI) QueryArgs() *Args { + u.parseQueryArgs() + return &u.queryArgs +} + +func (u *URI) parseQueryArgs() { + if u.parsedQueryArgs { + return + } + u.queryArgs.ParseBytes(u.queryString) + u.parsedQueryArgs = true +} diff --git a/vendor/github.com/valyala/fasthttp/uri_unix.go b/vendor/github.com/valyala/fasthttp/uri_unix.go new file mode 100644 index 0000000..1e30733 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/uri_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package fasthttp + +func addLeadingSlash(dst, src []byte) []byte { + // add leading slash for unix paths + if len(src) == 0 || src[0] != '/' { + dst = append(dst, '/') + } + + return dst +} diff --git a/vendor/github.com/valyala/fasthttp/uri_windows.go b/vendor/github.com/valyala/fasthttp/uri_windows.go new file mode 100644 index 0000000..95917a6 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/uri_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package fasthttp + +func addLeadingSlash(dst, src []byte) []byte { + // zero length and "C:/" case + if len(src) == 0 || (len(src) > 2 && src[1] != ':') { + dst = append(dst, '/') + } + + return dst +} diff --git a/vendor/github.com/valyala/fasthttp/userdata.go b/vendor/github.com/valyala/fasthttp/userdata.go new file mode 100644 index 0000000..bd3e28a --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/userdata.go @@ -0,0 +1,71 @@ +package fasthttp + +import ( + "io" +) + +type userDataKV struct { + key []byte + value interface{} +} + +type userData []userDataKV + +func (d *userData) Set(key string, value interface{}) { + args := *d + n := len(args) + for i := 0; i < n; i++ { + kv := &args[i] + if string(kv.key) == key { + kv.value = value + return + } + } + + c := cap(args) + if c > n { + args = args[:n+1] + kv := &args[n] + kv.key = append(kv.key[:0], key...) + kv.value = value + *d = args + return + } + + kv := userDataKV{} + kv.key = append(kv.key[:0], key...) + kv.value = value + *d = append(args, kv) +} + +func (d *userData) SetBytes(key []byte, value interface{}) { + d.Set(b2s(key), value) +} + +func (d *userData) Get(key string) interface{} { + args := *d + n := len(args) + for i := 0; i < n; i++ { + kv := &args[i] + if string(kv.key) == key { + return kv.value + } + } + return nil +} + +func (d *userData) GetBytes(key []byte) interface{} { + return d.Get(b2s(key)) +} + +func (d *userData) Reset() { + args := *d + n := len(args) + for i := 0; i < n; i++ { + v := args[i].value + if vc, ok := v.(io.Closer); ok { + vc.Close() + } + } + *d = (*d)[:0] +} diff --git a/vendor/github.com/valyala/fasthttp/workerpool.go b/vendor/github.com/valyala/fasthttp/workerpool.go new file mode 100644 index 0000000..cf602e0 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/workerpool.go @@ -0,0 +1,231 @@ +package fasthttp + +import ( + "net" + "runtime" + "strings" + "sync" + "time" +) + +// workerPool serves incoming connections via a pool of workers +// in FILO order, i.e. the most recently stopped worker will serve the next +// incoming connection. +// +// Such a scheme keeps CPU caches hot (in theory). +type workerPool struct { + // Function for serving server connections. + // It must leave c unclosed. 
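+	// (The pool closes the connection itself after WorkerFunc returns,
+	// unless the handler returned errHijacked; see workerFunc below.)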
+ WorkerFunc func(c net.Conn) error + + MaxWorkersCount int + + LogAllErrors bool + + MaxIdleWorkerDuration time.Duration + + Logger Logger + + lock sync.Mutex + workersCount int + mustStop bool + + ready []*workerChan + + stopCh chan struct{} + + workerChanPool sync.Pool +} + +type workerChan struct { + lastUseTime time.Time + ch chan net.Conn +} + +func (wp *workerPool) Start() { + if wp.stopCh != nil { + panic("BUG: workerPool already started") + } + wp.stopCh = make(chan struct{}) + stopCh := wp.stopCh + go func() { + var scratch []*workerChan + for { + wp.clean(&scratch) + select { + case <-stopCh: + return + default: + time.Sleep(wp.getMaxIdleWorkerDuration()) + } + } + }() +} + +func (wp *workerPool) Stop() { + if wp.stopCh == nil { + panic("BUG: workerPool wasn't started") + } + close(wp.stopCh) + wp.stopCh = nil + + // Stop all the workers waiting for incoming connections. + // Do not wait for busy workers - they will stop after + // serving the connection and noticing wp.mustStop = true. + wp.lock.Lock() + ready := wp.ready + for i, ch := range ready { + ch.ch <- nil + ready[i] = nil + } + wp.ready = ready[:0] + wp.mustStop = true + wp.lock.Unlock() +} + +func (wp *workerPool) getMaxIdleWorkerDuration() time.Duration { + if wp.MaxIdleWorkerDuration <= 0 { + return 10 * time.Second + } + return wp.MaxIdleWorkerDuration +} + +func (wp *workerPool) clean(scratch *[]*workerChan) { + maxIdleWorkerDuration := wp.getMaxIdleWorkerDuration() + + // Clean least recently used workers if they didn't serve connections + // for more than maxIdleWorkerDuration. + currentTime := time.Now() + + wp.lock.Lock() + ready := wp.ready + n := len(ready) + i := 0 + for i < n && currentTime.Sub(ready[i].lastUseTime) > maxIdleWorkerDuration { + i++ + } + *scratch = append((*scratch)[:0], ready[:i]...) + if i > 0 { + m := copy(ready, ready[i:]) + for i = m; i < n; i++ { + ready[i] = nil + } + wp.ready = ready[:m] + } + wp.lock.Unlock() + + // Notify obsolete workers to stop. + // This notification must be outside the wp.lock, since ch.ch + // may be blocking and may consume a lot of time if many workers + // are located on non-local CPUs. + tmp := *scratch + for i, ch := range tmp { + ch.ch <- nil + tmp[i] = nil + } +} + +func (wp *workerPool) Serve(c net.Conn) bool { + ch := wp.getCh() + if ch == nil { + return false + } + ch.ch <- c + return true +} + +var workerChanCap = func() int { + // Use blocking workerChan if GOMAXPROCS=1. + // This immediately switches Serve to WorkerFunc, which results + // in higher performance (under go1.5 at least). + if runtime.GOMAXPROCS(0) == 1 { + return 0 + } + + // Use non-blocking workerChan if GOMAXPROCS>1, + // since otherwise the Serve caller (Acceptor) may lag accepting + // new connections if WorkerFunc is CPU-bound. 
+ return 1 +}() + +func (wp *workerPool) getCh() *workerChan { + var ch *workerChan + createWorker := false + + wp.lock.Lock() + ready := wp.ready + n := len(ready) - 1 + if n < 0 { + if wp.workersCount < wp.MaxWorkersCount { + createWorker = true + wp.workersCount++ + } + } else { + ch = ready[n] + ready[n] = nil + wp.ready = ready[:n] + } + wp.lock.Unlock() + + if ch == nil { + if !createWorker { + return nil + } + vch := wp.workerChanPool.Get() + if vch == nil { + vch = &workerChan{ + ch: make(chan net.Conn, workerChanCap), + } + } + ch = vch.(*workerChan) + go func() { + wp.workerFunc(ch) + wp.workerChanPool.Put(vch) + }() + } + return ch +} + +func (wp *workerPool) release(ch *workerChan) bool { + ch.lastUseTime = CoarseTimeNow() + wp.lock.Lock() + if wp.mustStop { + wp.lock.Unlock() + return false + } + wp.ready = append(wp.ready, ch) + wp.lock.Unlock() + return true +} + +func (wp *workerPool) workerFunc(ch *workerChan) { + var c net.Conn + + var err error + for c = range ch.ch { + if c == nil { + break + } + + if err = wp.WorkerFunc(c); err != nil && err != errHijacked { + errStr := err.Error() + if wp.LogAllErrors || !(strings.Contains(errStr, "broken pipe") || + strings.Contains(errStr, "reset by peer") || + strings.Contains(errStr, "i/o timeout")) { + wp.Logger.Printf("error when serving connection %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err) + } + } + if err != errHijacked { + c.Close() + } + c = nil + + if !wp.release(ch) { + break + } + } + + wp.lock.Lock() + wp.workersCount-- + wp.lock.Unlock() +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/COPYING b/vendor/gopkg.in/alecthomas/kingpin.v2/COPYING new file mode 100644 index 0000000..2993ec0 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/COPYING @@ -0,0 +1,19 @@ +Copyright (C) 2014 Alec Thomas + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/README.md b/vendor/gopkg.in/alecthomas/kingpin.v2/README.md new file mode 100644 index 0000000..c0100b3 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/README.md @@ -0,0 +1,671 @@ +# Kingpin - A Go (golang) command line and flag parser [![](https://godoc.org/github.com/alecthomas/kingpin?status.svg)](http://godoc.org/github.com/alecthomas/kingpin) [![Build Status](https://travis-ci.org/alecthomas/kingpin.svg?branch=master)](https://travis-ci.org/alecthomas/kingpin) + + + +- [Overview](#overview) +- [Features](#features) +- [User-visible changes between v1 and v2](#user-visible-changes-between-v1-and-v2) + - [Flags can be used at any point after their definition.](#flags-can-be-used-at-any-point-after-their-definition) + - [Short flags can be combined with their parameters](#short-flags-can-be-combined-with-their-parameters) +- [API changes between v1 and v2](#api-changes-between-v1-and-v2) +- [Versions](#versions) + - [V2 is the current stable version](#v2-is-the-current-stable-version) + - [V1 is the OLD stable version](#v1-is-the-old-stable-version) +- [Change History](#change-history) +- [Examples](#examples) + - [Simple Example](#simple-example) + - [Complex Example](#complex-example) +- [Reference Documentation](#reference-documentation) + - [Displaying errors and usage information](#displaying-errors-and-usage-information) + - [Sub-commands](#sub-commands) + - [Custom Parsers](#custom-parsers) + - [Repeatable flags](#repeatable-flags) + - [Boolean Values](#boolean-values) + - [Default Values](#default-values) + - [Place-holders in Help](#place-holders-in-help) + - [Consuming all remaining arguments](#consuming-all-remaining-arguments) + - [Bash/ZSH Shell Completion](#bashzsh-shell-completion) + - [Supporting -h for help](#supporting--h-for-help) + - [Custom help](#custom-help) + + + +## Overview + +Kingpin is a [fluent-style](http://en.wikipedia.org/wiki/Fluent_interface), +type-safe command-line parser. It supports flags, nested commands, and +positional arguments. + +Install it with: + + $ go get gopkg.in/alecthomas/kingpin.v2 + +It looks like this: + +```go +var ( + verbose = kingpin.Flag("verbose", "Verbose mode.").Short('v').Bool() + name = kingpin.Arg("name", "Name of user.").Required().String() +) + +func main() { + kingpin.Parse() + fmt.Printf("%v, %s\n", *verbose, *name) +} +``` + +More [examples](https://github.com/alecthomas/kingpin/tree/master/_examples) are available. + +Second to parsing, providing the user with useful help is probably the most +important thing a command-line parser does. Kingpin tries to provide detailed +contextual help if `--help` is encountered at any point in the command line +(excluding after `--`). + +## Features + +- Help output that isn't as ugly as sin. +- Fully [customisable help](#custom-help), via Go templates. +- Parsed, type-safe flags (`kingpin.Flag("f", "help").Int()`) +- Parsed, type-safe positional arguments (`kingpin.Arg("a", "help").Int()`). +- Parsed, type-safe, arbitrarily deep commands (`kingpin.Command("c", "help")`). +- Support for required flags and required positional arguments (`kingpin.Flag("f", "").Required().Int()`). +- Support for arbitrarily nested default commands (`command.Default()`). +- Callbacks per command, flag and argument (`kingpin.Command("c", "").Action(myAction)`). +- POSIX-style short flag combining (`-a -b` -> `-ab`). +- Short-flag+parameter combining (`-a parm` -> `-aparm`). +- Read command-line from files (`@`). 
+- Automatically generate man pages (`--help-man`).
+
+## User-visible changes between v1 and v2
+
+### Flags can be used at any point after their definition.
+
+Flags can be specified at any point after their definition, not just
+*immediately after their associated command*. From the chat example below, the
+following used to be required:
+
+```
+$ chat --server=chat.server.com:8080 post --image=~/Downloads/owls.jpg pics
+```
+
+But the following will now work:
+
+```
+$ chat post --server=chat.server.com:8080 --image=~/Downloads/owls.jpg pics
+```
+
+### Short flags can be combined with their parameters
+
+Previously, if a short flag was used, any argument to that flag would have to
+be separated by a space. That is no longer the case.
+
+## API changes between v1 and v2
+
+- `ParseWithFileExpansion()` is gone. The new parser directly supports expanding `@<file>`.
+- Added `FatalUsage()` and `FatalUsageContext()` for displaying an error +
+  usage and terminating.
+- `Dispatch()` renamed to `Action()`.
+- Added `ParseContext()` for parsing a command line into its intermediate context form without executing.
+- Added `Terminate()` function to override the termination function.
+- Added `UsageForContextWithTemplate()` for printing usage via a custom template.
+- Added `UsageTemplate()` for overriding the default template to use. Two templates are included:
+    1. `DefaultUsageTemplate` - default template.
+    2. `CompactUsageTemplate` - compact command template for larger applications.
+
+## Versions
+
+Kingpin uses [gopkg.in](https://gopkg.in/alecthomas/kingpin) for versioning.
+
+The current stable version is [gopkg.in/alecthomas/kingpin.v2](https://gopkg.in/alecthomas/kingpin.v2). The previous version, [gopkg.in/alecthomas/kingpin.v1](https://gopkg.in/alecthomas/kingpin.v1), is deprecated and in maintenance mode.
+
+### [V2](https://gopkg.in/alecthomas/kingpin.v2) is the current stable version
+
+Installation:
+
+```sh
+$ go get gopkg.in/alecthomas/kingpin.v2
+```
+
+### [V1](https://gopkg.in/alecthomas/kingpin.v1) is the OLD stable version
+
+Installation:
+
+```sh
+$ go get gopkg.in/alecthomas/kingpin.v1
+```
+
+## Change History
+
+- *2015-09-19* -- Stable v2.1.0 release.
+  - Added `command.Default()` to specify a default command to use if no other
+    command matches. This allows for convenient user shortcuts.
+  - Exposed `HelpFlag` and `VersionFlag` for further customisation.
+  - `Action()` and `PreAction()` added and both now support an arbitrary
+    number of callbacks.
+  - `kingpin.SeparateOptionalFlagsUsageTemplate`.
+  - `--help-long` and `--help-man` (hidden by default) flags.
+  - Flags are "interspersed" by default, but can be disabled with `app.Interspersed(false)`.
+  - Added flags for all simple builtin types (int8, uint16, etc.) and slice variants.
+  - Use `app.Writer(os.Writer)` to specify the default writer for all output functions.
+  - Dropped `os.Writer` prefix from all printf-like functions.
+
+- *2015-05-22* -- Stable v2.0.0 release.
+  - Initial stable release of v2.0.0.
+  - Fully supports interspersed flags, commands and arguments.
+  - Flags can be present at any point after their logical definition.
+  - Application.Parse() terminates if commands are present and a command is not parsed.
+  - Dispatch() -> Action().
+  - Actions are dispatched after all values are populated.
+  - Override termination function (defaults to os.Exit).
+  - Override output stream (defaults to os.Stderr).
+  - Templatised usage help, with default and compact templates.
+  - Make error/usage functions more consistent.
+  - Support argument expansion from files by default (with @<file>).
+  - Fully public data model is available via .Model().
+  - Parser has been completely refactored.
+  - Parsing and execution have been split into distinct stages.
+  - Use `go generate` to generate repeated flags.
+  - Support combined short-flag+argument: -fARG.
+
+- *2015-01-23* -- Stable v1.3.4 release.
+  - Support "--" for separating flags from positional arguments.
+  - Support loading flags from files (ParseWithFileExpansion()). Use @FILE as an argument.
+  - Add post-app and post-cmd validation hooks. This allows arbitrary validation to be added.
+  - A bunch of improvements to help usage and formatting.
+  - Support arbitrarily nested sub-commands.
+
+- *2014-07-08* -- Stable v1.2.0 release.
+  - Pass any value through to `Strings()` when final argument.
+    Allows for values that look like flags to be processed.
+  - Allow `--help` to be used with commands.
+  - Support `Hidden()` flags.
+  - Parser for [units.Base2Bytes](https://github.com/alecthomas/units)
+    type. Allows for flags like `--ram=512MB` or `--ram=1GB`.
+  - Add an `Enum()` value, allowing only one of a set of values
+    to be selected. eg. `Flag(...).Enum("debug", "info", "warning")`.
+
+- *2014-06-27* -- Stable v1.1.0 release.
+  - Bug fixes.
+  - Always return an error (rather than panicking) when misconfigured.
+  - `OpenFile(flag, perm)` value type added, for finer control over opening files.
+  - Significantly improved usage formatting.
+
+- *2014-06-19* -- Stable v1.0.0 release.
+  - Support [cumulative positional](#consuming-all-remaining-arguments) arguments.
+  - Return error rather than panic when there are fatal errors not caught by
+    the type system. eg. when a default value is invalid.
+  - Use gopkg.in.
+
+- *2014-06-10* -- Place-holder streamlining.
+  - Renamed `MetaVar` to `PlaceHolder`.
+  - Removed `MetaVarFromDefault`. Kingpin now uses [heuristics](#place-holders-in-help)
+    to determine what to display.
+
+## Examples
+
+### Simple Example
+
+Kingpin can be used for simple flag+arg applications like so:
+
+```
+$ ping --help
+usage: ping [<flags>] <ip> [<count>]
+
+Flags:
+  --debug            Enable debug mode.
+  --help             Show help.
+  -t, --timeout=5s   Timeout waiting for ping.
+
+Args:
+  <ip>        IP address to ping.
+  [<count>]   Number of packets to send
+$ ping 1.2.3.4 5
+Would ping: 1.2.3.4 with timeout 5s and count 5
+```
+
+From the following source:
+
+```go
+package main
+
+import (
+  "fmt"
+
+  "gopkg.in/alecthomas/kingpin.v2"
+)
+
+var (
+  debug   = kingpin.Flag("debug", "Enable debug mode.").Bool()
+  timeout = kingpin.Flag("timeout", "Timeout waiting for ping.").Default("5s").OverrideDefaultFromEnvar("PING_TIMEOUT").Short('t').Duration()
+  ip      = kingpin.Arg("ip", "IP address to ping.").Required().IP()
+  count   = kingpin.Arg("count", "Number of packets to send").Int()
+)
+
+func main() {
+  kingpin.Version("0.0.1")
+  kingpin.Parse()
+  fmt.Printf("Would ping: %s with timeout %s and count %d", *ip, *timeout, *count)
+}
+```
+
+### Complex Example
+
+Kingpin can also produce complex command-line applications with global flags,
+subcommands, and per-subcommand flags, like this:
+
+```
+$ chat --help
+usage: chat [<flags>] <command> [<args> ...]
+
+A command-line chat application.
+
+Flags:
+  --help              Show help.
+  --debug             Enable debug mode.
+  --server=127.0.0.1  Server address.
+
+Commands:
+  help [<command>]
+    Show help for a command.
+
+  register <nick> <name>
+    Register a new user.
+
+  post [<flags>] <channel> [<text>]
+    Post a message to a channel.
+
+$ chat help post
+usage: chat [<flags>] post [<flags>] <channel> [<text>]
+
+Post a message to a channel.
+
+Flags:
+  --image=IMAGE  Image to post.
+
+Args:
+  <channel>  Channel to post to.
+  [<text>]   Text to post.
+
+$ chat post --image=~/Downloads/owls.jpg pics
+...
+```
+
+From this code:
+
+```go
+package main
+
+import (
+  "os"
+  "strings"
+
+  "gopkg.in/alecthomas/kingpin.v2"
+)
+
+var (
+  app      = kingpin.New("chat", "A command-line chat application.")
+  debug    = app.Flag("debug", "Enable debug mode.").Bool()
+  serverIP = app.Flag("server", "Server address.").Default("127.0.0.1").IP()
+
+  register     = app.Command("register", "Register a new user.")
+  registerNick = register.Arg("nick", "Nickname for user.").Required().String()
+  registerName = register.Arg("name", "Name of user.").Required().String()
+
+  post        = app.Command("post", "Post a message to a channel.")
+  postImage   = post.Flag("image", "Image to post.").File()
+  postChannel = post.Arg("channel", "Channel to post to.").Required().String()
+  postText    = post.Arg("text", "Text to post.").Strings()
+)
+
+func main() {
+  switch kingpin.MustParse(app.Parse(os.Args[1:])) {
+  // Register user
+  case register.FullCommand():
+    println(*registerNick)
+
+  // Post message
+  case post.FullCommand():
+    if *postImage != nil {
+    }
+    text := strings.Join(*postText, " ")
+    println("Post:", text)
+  }
+}
+```
+
+## Reference Documentation
+
+### Displaying errors and usage information
+
+Kingpin exports a set of functions to provide consistent errors and usage
+information to the user.
+
+Error messages look something like this:
+
+    <app>: error: <message>
+
+The functions on `Application` are:
+
+Function | Purpose
+---------|--------------
+`Errorf(format, args)` | Display a printf formatted error to the user.
+`Fatalf(format, args)` | As with Errorf, but also call the termination handler.
+`FatalUsage(format, args)` | As with Fatalf, but also print contextual usage information.
+`FatalUsageContext(context, format, args)` | As with Fatalf, but also print contextual usage information from a `ParseContext`.
+`FatalIfError(err, format, args)` | Conditionally print an error prefixed with format+args, then call the termination handler.
+
+There are equivalent global functions in the kingpin namespace for the default
+`kingpin.CommandLine` instance.
+
+### Sub-commands
+
+Kingpin supports nested sub-commands, with separate flag and positional
+arguments per sub-command. Note that positional arguments may only occur after
+sub-commands.
+
+For example:
+
+```go
+var (
+  deleteCommand      = kingpin.Command("delete", "Delete an object.")
+  deleteUserCommand  = deleteCommand.Command("user", "Delete a user.")
+  deleteUserUIDFlag  = deleteUserCommand.Flag("uid", "Delete user by UID rather than username.")
+  deleteUserUsername = deleteUserCommand.Arg("username", "Username to delete.")
+  deletePostCommand  = deleteCommand.Command("post", "Delete a post.")
+)
+
+func main() {
+  switch kingpin.Parse() {
+  case "delete user":
+  case "delete post":
+  }
+}
+```
+
+### Custom Parsers
+
+Kingpin supports both flag and positional argument parsers for converting to
+Go types. For example, some included parsers are `Int()`, `Float()`,
+`Duration()` and `ExistingFile()`.
+
+Parsers conform to Go's [`flag.Value`](http://godoc.org/flag#Value)
+interface, so any existing implementations will work.
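+
+For reference, that interface (defined in the standard library's `flag`
+package) is just a pair of methods, so a custom parser only needs the
+following shape:
+
+```go
+type Value interface {
+  String() string
+  Set(string) error
+}
+```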
+
+For example, a parser for accumulating HTTP header values might look like this:
+
+```go
+type HTTPHeaderValue http.Header
+
+func (h *HTTPHeaderValue) Set(value string) error {
+  parts := strings.SplitN(value, ":", 2)
+  if len(parts) != 2 {
+    return fmt.Errorf("expected HEADER:VALUE got '%s'", value)
+  }
+  (*http.Header)(h).Add(parts[0], parts[1])
+  return nil
+}
+
+func (h *HTTPHeaderValue) String() string {
+  return ""
+}
+```
+
+As a convenience, I would recommend something like this:
+
+```go
+func HTTPHeader(s Settings) (target *http.Header) {
+  target = new(http.Header)
+  s.SetValue((*HTTPHeaderValue)(target))
+  return
+}
+```
+
+You would use it like so:
+
+```go
+headers = HTTPHeader(kingpin.Flag("header", "Add a HTTP header to the request.").Short('H'))
+```
+
+### Repeatable flags
+
+Depending on the `Value` they hold, some flags may be repeated. The
+`IsCumulative() bool` function on `Value` tells if it's safe to call `Set()`
+multiple times or if an error should be raised if several values are passed.
+
+The built-in `Value`s returning slices and maps, as well as `Counter`, are
+examples of `Value`s that make a flag repeatable.
+
+### Boolean values
+
+Boolean values are uniquely managed by Kingpin. Each boolean flag will have a negative complement:
+`--<name>` and `--no-<name>`.
+
+### Default Values
+
+The default value is the zero value for a type. This can be overridden with
+the `Default(value...)` function on flags and arguments. This function accepts
+one or several strings, which are parsed by the value itself, so they *must*
+be compliant with the format expected.
+
+### Place-holders in Help
+
+The place-holder value for a flag is the value used in the help to describe
+the value of a non-boolean flag.
+
+The value provided to PlaceHolder() is used if provided, then the value
+provided by Default() if provided, then finally the capitalised flag name is
+used.
+
+Here are some examples of flags with various permutations:
+
+    --name=NAME           // Flag(...).String()
+    --name="Harry"        // Flag(...).Default("Harry").String()
+    --name=FULL-NAME      // Flag(...).PlaceHolder("FULL-NAME").Default("Harry").String()
+
+### Consuming all remaining arguments
+
+A common command-line idiom is to use all remaining arguments for some
+purpose. eg. The following command accepts an arbitrary number of
+IP addresses as positional arguments:
+
+    ./cmd ping 10.1.1.1 192.168.1.1
+
+Such arguments are similar to [repeatable flags](#repeatable-flags), but for
+arguments. Therefore they use the same `IsCumulative() bool` function on the
+underlying `Value`, so the built-in `Value`s for which the `Set()` function
+can be called several times will consume multiple arguments.
+
+To implement the above example with a custom `Value`, we might do something
+like this:
+
+```go
+type ipList []net.IP
+
+func (i *ipList) Set(value string) error {
+  if ip := net.ParseIP(value); ip == nil {
+    return fmt.Errorf("'%s' is not an IP address", value)
+  } else {
+    *i = append(*i, ip)
+    return nil
+  }
+}
+
+func (i *ipList) String() string {
+  return ""
+}
+
+func (i *ipList) IsCumulative() bool {
+  return true
+}
+
+func IPList(s Settings) (target *[]net.IP) {
+  target = new([]net.IP)
+  s.SetValue((*ipList)(target))
+  return
+}
+```
+
+And use it like so:
+
+```go
+ips := IPList(kingpin.Arg("ips", "IP addresses to ping."))
+```
+
+### Bash/ZSH Shell Completion
+
+By default, all flags and commands/subcommands generate completions
+internally.
+
+Out of the box, CLI tools using kingpin should be able to take advantage
+of completion hinting for flags and commands. By specifying
+`--completion-bash` as the first argument, your CLI tool will show
+possible subcommands. By ending your argv with `--`, hints for flags
+will be shown.
+
+To allow your end users to take advantage you must package a
+`/etc/bash_completion.d` script with your distribution (or the equivalent
+for your target platform/shell). An alternative is to instruct your end
+user to source a script from their `bash_profile` (or equivalent).
+
+Fortunately Kingpin makes it easy to generate or source a script for use
+with end users' shells. `./yourtool --completion-script-bash` and
+`./yourtool --completion-script-zsh` will generate these scripts for you.
+
+**Installation by Package**
+
+For the best user experience, you should bundle your pre-created
+completion script with your CLI tool and install it inside
+`/etc/bash_completion.d` (or equivalent). A good suggestion is to add
+this as an automated step to your build pipeline, so the script stays
+current as the implementation is improved and bugs are fixed.
+
+**Installation by `bash_profile`**
+
+Alternatively, instruct your users to add an additional statement to
+their `bash_profile` (or equivalent):
+
+```
+eval "$(your-cli-tool --completion-script-bash)"
+```
+
+Or for ZSH
+
+```
+eval "$(your-cli-tool --completion-script-zsh)"
+```
+
+#### Additional API
+To provide more flexibility, a completion option API has been
+exposed for flags to allow user-defined completion options, extending
+completions beyond just EnumVar/Enum.
+
+
+**Provide Static Options**
+
+When using an `Enum` or `EnumVar`, users are limited to only the options
+given. Maybe we wish to hint possible options to the user, but also
+allow them to provide their own custom option. `HintOptions` gives
+this functionality to flags.
+
+```
+app := kingpin.New("completion", "My application with bash completion.")
+app.Flag("port", "Provide a port to connect to").
+    Required().
+    HintOptions("80", "443", "8080").
+    IntVar(&c.port)
+```
+
+**Provide Dynamic Options**
+Consider the case that you needed to read a local database or a file to
+provide suggestions. You can dynamically generate the options:
+
+```
+func listHosts(args []string) []string {
+  // Provide a dynamic list of hosts from a hosts file or otherwise
+  // for bash completion. In this example we simply return a static slice.
+
+  // You could use this functionality to reach into a hosts file to provide
+  // completion for a list of known hosts.
+  return []string{"sshhost.example", "webhost.example", "ftphost.example"}
+}
+
+app := kingpin.New("completion", "My application with bash completion.")
+app.Flag("flag-1", "").HintAction(listHosts).String()
+```
+
+**EnumVar/Enum**
+When using `Enum` or `EnumVar`, any provided options will be automatically
+used for bash autocompletion. However, if you wish to provide a subset or
+different options, you can use `HintOptions` or `HintAction`, which will
+override the default completion options for `Enum`/`EnumVar`.
+
+
+**Examples**
+You can see an in-depth example of the completion API within
+`examples/completion/main.go`.
+
+
+### Supporting -h for help
+
+`kingpin.CommandLine.HelpFlag.Short('h')`
+
+### Custom help
+
+Kingpin v2 supports templatised help using the text/template library (actually, [a fork](https://github.com/alecthomas/template)).
+
+You can specify the template to use with the [Application.UsageTemplate()](http://godoc.org/gopkg.in/alecthomas/kingpin.v2#Application.UsageTemplate) function.
+
+There are four included templates: `kingpin.DefaultUsageTemplate` is the default,
+`kingpin.CompactUsageTemplate` provides a more compact representation for more complex command-line structures,
+`kingpin.SeparateOptionalFlagsUsageTemplate` looks like the default template, but splits required
+and optional command flags into separate lists, and `kingpin.ManPageTemplate` is used to generate man pages.
+
+See the above templates for examples of usage, and the [UsageForContextWithTemplate()](https://github.com/alecthomas/kingpin/blob/master/usage.go#L198) function for details on the context.
+
+#### Default help template
+
+```
+$ go run ./examples/curl/curl.go --help
+usage: curl [<flags>] <command> [<args> ...]
+
+An example implementation of curl.
+
+Flags:
+  --help            Show help.
+  -t, --timeout=5s  Set connection timeout.
+  -H, --headers=HEADER=VALUE
+                    Add HTTP headers to the request.
+
+Commands:
+  help [<command>...]
+    Show help.
+
+  get url <url>
+    Retrieve a URL.
+
+  get file <file>
+    Retrieve a file.
+
+  post [<flags>] <url>
+    POST a resource.
+```
+
+#### Compact help template
+
+```
+$ go run ./examples/curl/curl.go --help
+usage: curl [<flags>] <command> [<args> ...]
+
+An example implementation of curl.
+
+Flags:
+  --help            Show help.
+  -t, --timeout=5s  Set connection timeout.
+  -H, --headers=HEADER=VALUE
+                    Add HTTP headers to the request.
+
+Commands:
+  help [<command>...]
+  get [<flags>]
+    url <url>
+    file <file>
+  post [<flags>] <url>
+```
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/actions.go b/vendor/gopkg.in/alecthomas/kingpin.v2/actions.go
new file mode 100644
index 0000000..72d6cbd
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/actions.go
@@ -0,0 +1,42 @@
+package kingpin
+
+// Action callback executed at various stages after all values are populated.
+// The application, commands, arguments and flags all have corresponding
+// actions.
+type Action func(*ParseContext) error
+
+type actionMixin struct {
+	actions    []Action
+	preActions []Action
+}
+
+type actionApplier interface {
+	applyActions(*ParseContext) error
+	applyPreActions(*ParseContext) error
+}
+
+func (a *actionMixin) addAction(action Action) {
+	a.actions = append(a.actions, action)
+}
+
+func (a *actionMixin) addPreAction(action Action) {
+	a.preActions = append(a.preActions, action)
+}
+
+func (a *actionMixin) applyActions(context *ParseContext) error {
+	for _, action := range a.actions {
+		if err := action(context); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (a *actionMixin) applyPreActions(context *ParseContext) error {
+	for _, preAction := range a.preActions {
+		if err := preAction(context); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/app.go b/vendor/gopkg.in/alecthomas/kingpin.v2/app.go
new file mode 100644
index 0000000..91083aa
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/app.go
@@ -0,0 +1,685 @@
+package kingpin
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"strings"
+)
+
+var (
+	ErrCommandNotSpecified = fmt.Errorf("command not specified")
+)
+
+var (
+	envarTransformRegexp = regexp.MustCompile(`[^a-zA-Z0-9_]+`)
+)
+
+type ApplicationValidator func(*Application) error
+
+// An Application contains the definitions of flags, arguments and commands
+// for an application.
+type Application struct {
+	cmdMixin
+	initialized bool
+
+	Name string
+	Help string
+
+	author      string
+	version     string
+	errorWriter io.Writer // Destination for errors.
+	usageWriter    io.Writer // Destination for usage
+	usageTemplate  string
+	validator      ApplicationValidator
+	terminate      func(status int) // See Terminate()
+	noInterspersed bool             // can flags be interspersed with args (or must they come first)
+	defaultEnvars  bool
+	completion     bool
+
+	// Help flag. Exposed for user customisation.
+	HelpFlag *FlagClause
+	// Help command. Exposed for user customisation. May be nil.
+	HelpCommand *CmdClause
+	// Version flag. Exposed for user customisation. May be nil.
+	VersionFlag *FlagClause
+}
+
+// New creates a new Kingpin application instance.
+func New(name, help string) *Application {
+	a := &Application{
+		Name:          name,
+		Help:          help,
+		errorWriter:   os.Stderr, // Left for backwards compatibility purposes.
+		usageWriter:   os.Stderr,
+		usageTemplate: DefaultUsageTemplate,
+		terminate:     os.Exit,
+	}
+	a.flagGroup = newFlagGroup()
+	a.argGroup = newArgGroup()
+	a.cmdGroup = newCmdGroup(a)
+	a.HelpFlag = a.Flag("help", "Show context-sensitive help (also try --help-long and --help-man).")
+	a.HelpFlag.Bool()
+	a.Flag("help-long", "Generate long help.").Hidden().PreAction(a.generateLongHelp).Bool()
+	a.Flag("help-man", "Generate a man page.").Hidden().PreAction(a.generateManPage).Bool()
+	a.Flag("completion-bash", "Output possible completions for the given args.").Hidden().BoolVar(&a.completion)
+	a.Flag("completion-script-bash", "Generate completion script for bash.").Hidden().PreAction(a.generateBashCompletionScript).Bool()
+	a.Flag("completion-script-zsh", "Generate completion script for ZSH.").Hidden().PreAction(a.generateZSHCompletionScript).Bool()
+
+	return a
+}
+
+func (a *Application) generateLongHelp(c *ParseContext) error {
+	a.Writer(os.Stdout)
+	if err := a.UsageForContextWithTemplate(c, 2, LongHelpTemplate); err != nil {
+		return err
+	}
+	a.terminate(0)
+	return nil
+}
+
+func (a *Application) generateManPage(c *ParseContext) error {
+	a.Writer(os.Stdout)
+	if err := a.UsageForContextWithTemplate(c, 2, ManPageTemplate); err != nil {
+		return err
+	}
+	a.terminate(0)
+	return nil
+}
+
+func (a *Application) generateBashCompletionScript(c *ParseContext) error {
+	a.Writer(os.Stdout)
+	if err := a.UsageForContextWithTemplate(c, 2, BashCompletionTemplate); err != nil {
+		return err
+	}
+	a.terminate(0)
+	return nil
+}
+
+func (a *Application) generateZSHCompletionScript(c *ParseContext) error {
+	a.Writer(os.Stdout)
+	if err := a.UsageForContextWithTemplate(c, 2, ZshCompletionTemplate); err != nil {
+		return err
+	}
+	a.terminate(0)
+	return nil
+}
+
+// DefaultEnvars configures all flags (that do not already have an associated
+// envar) to use a default environment variable in the form "<app>_<flag>".
+//
+// For example, if the application is named "foo" and a flag is named
+// "bar-waz", the environment variable will be "FOO_BAR_WAZ".
+func (a *Application) DefaultEnvars() *Application {
+	a.defaultEnvars = true
+	return a
+}
+
+// Terminate specifies the termination handler. Defaults to os.Exit(status).
+// If nil is passed, a no-op function will be used.
+func (a *Application) Terminate(terminate func(int)) *Application {
+	if terminate == nil {
+		terminate = func(int) {}
+	}
+	a.terminate = terminate
+	return a
+}
+
+// Writer specifies the writer to use for usage and errors. Defaults to os.Stderr.
+// DEPRECATED: See ErrorWriter and UsageWriter.
+func (a *Application) Writer(w io.Writer) *Application {
+	a.errorWriter = w
+	a.usageWriter = w
+	return a
+}
+
+// ErrorWriter sets the io.Writer to use for errors.
+func (a *Application) ErrorWriter(w io.Writer) *Application {
+	a.errorWriter = w
+	return a
+}
+
+// UsageWriter sets the io.Writer to use for usage information.
+func (a *Application) UsageWriter(w io.Writer) *Application {
+	a.usageWriter = w
+	return a
+}
+
+// UsageTemplate specifies the text template to use when displaying usage
+// information. The default is DefaultUsageTemplate.
+func (a *Application) UsageTemplate(template string) *Application {
+	a.usageTemplate = template
+	return a
+}
+
+// Validate sets a validation function to run when parsing.
+func (a *Application) Validate(validator ApplicationValidator) *Application {
+	a.validator = validator
+	return a
+}
+
+// ParseContext parses the given command line and returns the fully populated
+// ParseContext.
+func (a *Application) ParseContext(args []string) (*ParseContext, error) {
+	return a.parseContext(false, args)
+}
+
+func (a *Application) parseContext(ignoreDefault bool, args []string) (*ParseContext, error) {
+	if err := a.init(); err != nil {
+		return nil, err
+	}
+	context := tokenize(args, ignoreDefault)
+	err := parse(context, a)
+	return context, err
+}
+
+// Parse parses command-line arguments. It returns the selected command and an
+// error. The selected command will be a space-separated subcommand, if
+// subcommands have been configured.
+//
+// This will populate all flag and argument values, call all callbacks, and so
+// on.
+func (a *Application) Parse(args []string) (command string, err error) {
+
+	context, parseErr := a.ParseContext(args)
+	selected := []string{}
+	var setValuesErr error
+
+	if context == nil {
+		// Since we do not throw an error immediately, there could be a case
+		// where a context returns nil. Protect against that.
+		return "", parseErr
+	}
+
+	if err = a.setDefaults(context); err != nil {
+		return "", err
+	}
+
+	selected, setValuesErr = a.setValues(context)
+
+	if err = a.applyPreActions(context, !a.completion); err != nil {
+		return "", err
+	}
+
+	if a.completion {
+		a.generateBashCompletion(context)
+		a.terminate(0)
+	} else {
+		if parseErr != nil {
+			return "", parseErr
+		}
+
+		a.maybeHelp(context)
+		if !context.EOL() {
+			return "", fmt.Errorf("unexpected argument '%s'", context.Peek())
+		}
+
+		if setValuesErr != nil {
+			return "", setValuesErr
+		}
+
+		command, err = a.execute(context, selected)
+		if err == ErrCommandNotSpecified {
+			a.writeUsage(context, nil)
+		}
+	}
+	return command, err
+}
+
+func (a *Application) writeUsage(context *ParseContext, err error) {
+	if err != nil {
+		a.Errorf("%s", err)
+	}
+	if err := a.UsageForContext(context); err != nil {
+		panic(err)
+	}
+	if err != nil {
+		a.terminate(1)
+	} else {
+		a.terminate(0)
+	}
+}
+
+func (a *Application) maybeHelp(context *ParseContext) {
+	for _, element := range context.Elements {
+		if flag, ok := element.Clause.(*FlagClause); ok && flag == a.HelpFlag {
+			// Re-parse the command-line ignoring defaults, so that help works correctly.
+			context, _ = a.parseContext(true, context.rawArgs)
+			a.writeUsage(context, nil)
+		}
+	}
+}
+
+// Version adds a --version flag for displaying the application version.
+func (a *Application) Version(version string) *Application {
+	a.version = version
+	a.VersionFlag = a.Flag("version", "Show application version.").PreAction(func(*ParseContext) error {
+		fmt.Fprintln(a.usageWriter, version)
+		a.terminate(0)
+		return nil
+	})
+	a.VersionFlag.Bool()
+	return a
+}
+
+// Author sets the author output by some help templates.
+func (a *Application) Author(author string) *Application { + a.author = author + return a +} + +// Action callback to call when all values are populated and parsing is +// complete, but before any command, flag or argument actions. +// +// All Action() callbacks are called in the order they are encountered on the +// command line. +func (a *Application) Action(action Action) *Application { + a.addAction(action) + return a +} + +// Action called after parsing completes but before validation and execution. +func (a *Application) PreAction(action Action) *Application { + a.addPreAction(action) + return a +} + +// Command adds a new top-level command. +func (a *Application) Command(name, help string) *CmdClause { + return a.addCommand(name, help) +} + +// Interspersed control if flags can be interspersed with positional arguments +// +// true (the default) means that they can, false means that all the flags must appear before the first positional arguments. +func (a *Application) Interspersed(interspersed bool) *Application { + a.noInterspersed = !interspersed + return a +} + +func (a *Application) defaultEnvarPrefix() string { + if a.defaultEnvars { + return a.Name + } + return "" +} + +func (a *Application) init() error { + if a.initialized { + return nil + } + if a.cmdGroup.have() && a.argGroup.have() { + return fmt.Errorf("can't mix top-level Arg()s with Command()s") + } + + // If we have subcommands, add a help command at the top-level. + if a.cmdGroup.have() { + var command []string + a.HelpCommand = a.Command("help", "Show help.").PreAction(func(context *ParseContext) error { + a.Usage(command) + a.terminate(0) + return nil + }) + a.HelpCommand.Arg("command", "Show help on command.").StringsVar(&command) + // Make help first command. + l := len(a.commandOrder) + a.commandOrder = append(a.commandOrder[l-1:l], a.commandOrder[:l-1]...) + } + + if err := a.flagGroup.init(a.defaultEnvarPrefix()); err != nil { + return err + } + if err := a.cmdGroup.init(); err != nil { + return err + } + if err := a.argGroup.init(); err != nil { + return err + } + for _, cmd := range a.commands { + if err := cmd.init(); err != nil { + return err + } + } + flagGroups := []*flagGroup{a.flagGroup} + for _, cmd := range a.commandOrder { + if err := checkDuplicateFlags(cmd, flagGroups); err != nil { + return err + } + } + a.initialized = true + return nil +} + +// Recursively check commands for duplicate flags. +func checkDuplicateFlags(current *CmdClause, flagGroups []*flagGroup) error { + // Check for duplicates. + for _, flags := range flagGroups { + for _, flag := range current.flagOrder { + if flag.shorthand != 0 { + if _, ok := flags.short[string(flag.shorthand)]; ok { + return fmt.Errorf("duplicate short flag -%c", flag.shorthand) + } + } + if _, ok := flags.long[flag.name]; ok { + return fmt.Errorf("duplicate long flag --%s", flag.name) + } + } + } + flagGroups = append(flagGroups, current.flagGroup) + // Check subcommands. 
+ for _, subcmd := range current.commandOrder { + if err := checkDuplicateFlags(subcmd, flagGroups); err != nil { + return err + } + } + return nil +} + +func (a *Application) execute(context *ParseContext, selected []string) (string, error) { + var err error + + if err = a.validateRequired(context); err != nil { + return "", err + } + + if err = a.applyValidators(context); err != nil { + return "", err + } + + if err = a.applyActions(context); err != nil { + return "", err + } + + command := strings.Join(selected, " ") + if command == "" && a.cmdGroup.have() { + return "", ErrCommandNotSpecified + } + return command, err +} + +func (a *Application) setDefaults(context *ParseContext) error { + flagElements := map[string]*ParseElement{} + for _, element := range context.Elements { + if flag, ok := element.Clause.(*FlagClause); ok { + flagElements[flag.name] = element + } + } + + argElements := map[string]*ParseElement{} + for _, element := range context.Elements { + if arg, ok := element.Clause.(*ArgClause); ok { + argElements[arg.name] = element + } + } + + // Check required flags and set defaults. + for _, flag := range context.flags.long { + if flagElements[flag.name] == nil { + if err := flag.setDefault(); err != nil { + return err + } + } + } + + for _, arg := range context.arguments.args { + if argElements[arg.name] == nil { + if err := arg.setDefault(); err != nil { + return err + } + } + } + + return nil +} + +func (a *Application) validateRequired(context *ParseContext) error { + flagElements := map[string]*ParseElement{} + for _, element := range context.Elements { + if flag, ok := element.Clause.(*FlagClause); ok { + flagElements[flag.name] = element + } + } + + argElements := map[string]*ParseElement{} + for _, element := range context.Elements { + if arg, ok := element.Clause.(*ArgClause); ok { + argElements[arg.name] = element + } + } + + // Check required flags and set defaults. + for _, flag := range context.flags.long { + if flagElements[flag.name] == nil { + // Check required flags were provided. + if flag.needsValue() { + return fmt.Errorf("required flag --%s not provided", flag.name) + } + } + } + + for _, arg := range context.arguments.args { + if argElements[arg.name] == nil { + if arg.needsValue() { + return fmt.Errorf("required argument '%s' not provided", arg.name) + } + } + } + return nil +} + +func (a *Application) setValues(context *ParseContext) (selected []string, err error) { + // Set all arg and flag values. + var ( + lastCmd *CmdClause + flagSet = map[string]struct{}{} + ) + for _, element := range context.Elements { + switch clause := element.Clause.(type) { + case *FlagClause: + if _, ok := flagSet[clause.name]; ok { + if v, ok := clause.value.(repeatableFlag); !ok || !v.IsCumulative() { + return nil, fmt.Errorf("flag '%s' cannot be repeated", clause.name) + } + } + if err = clause.value.Set(*element.Value); err != nil { + return + } + flagSet[clause.name] = struct{}{} + + case *ArgClause: + if err = clause.value.Set(*element.Value); err != nil { + return + } + + case *CmdClause: + if clause.validator != nil { + if err = clause.validator(clause); err != nil { + return + } + } + selected = append(selected, clause.name) + lastCmd = clause + } + } + + if lastCmd != nil && len(lastCmd.commands) > 0 { + return nil, fmt.Errorf("must select a subcommand of '%s'", lastCmd.FullCommand()) + } + + return +} + +func (a *Application) applyValidators(context *ParseContext) (err error) { + // Call command validation functions. 
+ for _, element := range context.Elements { + if cmd, ok := element.Clause.(*CmdClause); ok && cmd.validator != nil { + if err = cmd.validator(cmd); err != nil { + return err + } + } + } + + if a.validator != nil { + err = a.validator(a) + } + return err +} + +func (a *Application) applyPreActions(context *ParseContext, dispatch bool) error { + if err := a.actionMixin.applyPreActions(context); err != nil { + return err + } + // Dispatch to actions. + if dispatch { + for _, element := range context.Elements { + if applier, ok := element.Clause.(actionApplier); ok { + if err := applier.applyPreActions(context); err != nil { + return err + } + } + } + } + + return nil +} + +func (a *Application) applyActions(context *ParseContext) error { + if err := a.actionMixin.applyActions(context); err != nil { + return err + } + // Dispatch to actions. + for _, element := range context.Elements { + if applier, ok := element.Clause.(actionApplier); ok { + if err := applier.applyActions(context); err != nil { + return err + } + } + } + return nil +} + +// Errorf prints an error message to w in the format ": error: ". +func (a *Application) Errorf(format string, args ...interface{}) { + fmt.Fprintf(a.errorWriter, a.Name+": error: "+format+"\n", args...) +} + +// Fatalf writes a formatted error to w then terminates with exit status 1. +func (a *Application) Fatalf(format string, args ...interface{}) { + a.Errorf(format, args...) + a.terminate(1) +} + +// FatalUsage prints an error message followed by usage information, then +// exits with a non-zero status. +func (a *Application) FatalUsage(format string, args ...interface{}) { + a.Errorf(format, args...) + // Force usage to go to error output. + a.usageWriter = a.errorWriter + a.Usage([]string{}) + a.terminate(1) +} + +// FatalUsageContext writes a printf formatted error message to w, then usage +// information for the given ParseContext, before exiting. +func (a *Application) FatalUsageContext(context *ParseContext, format string, args ...interface{}) { + a.Errorf(format, args...) + if err := a.UsageForContext(context); err != nil { + panic(err) + } + a.terminate(1) +} + +// FatalIfError prints an error and exits if err is not nil. The error is printed +// with the given formatted string, if any. +func (a *Application) FatalIfError(err error, format string, args ...interface{}) { + if err != nil { + prefix := "" + if format != "" { + prefix = fmt.Sprintf(format, args...) + ": " + } + a.Errorf(prefix+"%s", err) + a.terminate(1) + } +} + +func (a *Application) completionOptions(context *ParseContext) []string { + args := context.rawArgs + + var ( + currArg string + prevArg string + target cmdMixin + ) + + numArgs := len(args) + if numArgs > 1 { + args = args[1:] + currArg = args[len(args)-1] + } + if numArgs > 2 { + prevArg = args[len(args)-2] + } + + target = a.cmdMixin + if context.SelectedCommand != nil { + // A subcommand was in use. We will use it as the target + target = context.SelectedCommand.cmdMixin + } + + if (currArg != "" && strings.HasPrefix(currArg, "--")) || strings.HasPrefix(prevArg, "--") { + // Perform completion for A flag. 
The last or current argument starts with "--".
+		var (
+			flagName  string // The name of a flag, if given (may be incomplete)
+			flagValue string // The value assigned to a flag, if given (may be incomplete)
+		)
+
+		if strings.HasPrefix(prevArg, "--") && !strings.HasPrefix(currArg, "--") {
+			// Matches:     ./myApp --flag value
+			// Won't match: ./myApp --flag --
+			flagName = prevArg[2:] // Strip the "--"
+			flagValue = currArg
+		} else if strings.HasPrefix(currArg, "--") {
+			// Matches: ./myApp --flag --
+			// Matches: ./myApp --flag somevalue --
+			// Matches: ./myApp --
+			flagName = currArg[2:] // Strip the "--"
+		}
+
+		options, flagMatched, valueMatched := target.FlagCompletion(flagName, flagValue)
+		if valueMatched {
+			// Value matched; go back to completing commands.
+			return target.CmdCompletion(context)
+		}
+
+		// Add top-level flags if we're not at the top level and no match was found.
+		if context.SelectedCommand != nil && !flagMatched {
+			topOptions, topFlagMatched, topValueMatched := a.FlagCompletion(flagName, flagValue)
+			if topValueMatched {
+				// Value matched; go back to completing commands.
+				return target.CmdCompletion(context)
+			}
+
+			if topFlagMatched {
+				// The top level had a flag which matched the input. Return its options.
+				options = topOptions
+			} else {
+				// Add the top-level flags.
+				options = append(options, topOptions...)
+			}
+		}
+		return options
+	}
+
+	// Perform completion for subcommands and arguments.
+	return target.CmdCompletion(context)
+}
+
+func (a *Application) generateBashCompletion(context *ParseContext) {
+	options := a.completionOptions(context)
+	fmt.Printf("%s", strings.Join(options, "\n"))
+}
+
+func envarTransform(name string) string {
+	return strings.ToUpper(envarTransformRegexp.ReplaceAllString(name, "_"))
+}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/args.go b/vendor/gopkg.in/alecthomas/kingpin.v2/args.go
new file mode 100644
index 0000000..3edb448
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/args.go
@@ -0,0 +1,185 @@
+package kingpin
+
+import (
+	"fmt"
+)
+
+type argGroup struct {
+	args []*ArgClause
+}
+
+func newArgGroup() *argGroup {
+	return &argGroup{}
+}
+
+func (a *argGroup) have() bool {
+	return len(a.args) > 0
+}
+
+// GetArg gets an argument definition.
+//
+// This allows existing arguments to be modified after definition but before parsing. Useful for
+// modular applications.
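+//
+// A hypothetical use, tweaking an existing argument before Parse:
+//
+//	if arg := app.GetArg("input"); arg != nil {
+//		arg.Default("-")
+//	}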
+func (a *argGroup) GetArg(name string) *ArgClause { + for _, arg := range a.args { + if arg.name == name { + return arg + } + } + return nil +} + +func (a *argGroup) Arg(name, help string) *ArgClause { + arg := newArg(name, help) + a.args = append(a.args, arg) + return arg +} + +func (a *argGroup) init() error { + required := 0 + seen := map[string]struct{}{} + previousArgMustBeLast := false + for i, arg := range a.args { + if previousArgMustBeLast { + return fmt.Errorf("Args() can't be followed by another argument '%s'", arg.name) + } + if arg.consumesRemainder() { + previousArgMustBeLast = true + } + if _, ok := seen[arg.name]; ok { + return fmt.Errorf("duplicate argument '%s'", arg.name) + } + seen[arg.name] = struct{}{} + if arg.required && required != i { + return fmt.Errorf("required arguments found after non-required") + } + if arg.required { + required++ + } + if err := arg.init(); err != nil { + return err + } + } + return nil +} + +type ArgClause struct { + actionMixin + parserMixin + completionsMixin + envarMixin + name string + help string + defaultValues []string + required bool +} + +func newArg(name, help string) *ArgClause { + a := &ArgClause{ + name: name, + help: help, + } + return a +} + +func (a *ArgClause) setDefault() error { + if a.HasEnvarValue() { + if v, ok := a.value.(remainderArg); !ok || !v.IsCumulative() { + // Use the value as-is + return a.value.Set(a.GetEnvarValue()) + } else { + for _, value := range a.GetSplitEnvarValue() { + if err := a.value.Set(value); err != nil { + return err + } + } + return nil + } + } + + if len(a.defaultValues) > 0 { + for _, defaultValue := range a.defaultValues { + if err := a.value.Set(defaultValue); err != nil { + return err + } + } + return nil + } + + return nil +} + +func (a *ArgClause) needsValue() bool { + haveDefault := len(a.defaultValues) > 0 + return a.required && !(haveDefault || a.HasEnvarValue()) +} + +func (a *ArgClause) consumesRemainder() bool { + if r, ok := a.value.(remainderArg); ok { + return r.IsCumulative() + } + return false +} + +// Required arguments must be input by the user. They can not have a Default() value provided. +func (a *ArgClause) Required() *ArgClause { + a.required = true + return a +} + +// Default values for this argument. They *must* be parseable by the value of the argument. +func (a *ArgClause) Default(values ...string) *ArgClause { + a.defaultValues = values + return a +} + +// Envar overrides the default value(s) for a flag from an environment variable, +// if it is set. Several default values can be provided by using new lines to +// separate them. +func (a *ArgClause) Envar(name string) *ArgClause { + a.envar = name + a.noEnvar = false + return a +} + +// NoEnvar forces environment variable defaults to be disabled for this flag. +// Most useful in conjunction with app.DefaultEnvars(). 
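+//
+// An illustrative sketch (the names are made up):
+//
+//	token := app.Arg("token", "Auth token.").NoEnvar().String()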
+func (a *ArgClause) NoEnvar() *ArgClause { + a.envar = "" + a.noEnvar = true + return a +} + +func (a *ArgClause) Action(action Action) *ArgClause { + a.addAction(action) + return a +} + +func (a *ArgClause) PreAction(action Action) *ArgClause { + a.addPreAction(action) + return a +} + +// HintAction registers a HintAction (function) for the arg to provide completions +func (a *ArgClause) HintAction(action HintAction) *ArgClause { + a.addHintAction(action) + return a +} + +// HintOptions registers any number of options for the flag to provide completions +func (a *ArgClause) HintOptions(options ...string) *ArgClause { + a.addHintAction(func() []string { + return options + }) + return a +} + +func (a *ArgClause) init() error { + if a.required && len(a.defaultValues) > 0 { + return fmt.Errorf("required argument '%s' with unusable default value", a.name) + } + if a.value == nil { + return fmt.Errorf("no parser defined for arg '%s'", a.name) + } + return nil +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go b/vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go new file mode 100644 index 0000000..0473b87 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go @@ -0,0 +1,274 @@ +package kingpin + +import ( + "fmt" + "strings" +) + +type cmdMixin struct { + *flagGroup + *argGroup + *cmdGroup + actionMixin +} + +// CmdCompletion returns completion options for arguments, if that's where +// parsing left off, or commands if there aren't any unsatisfied args. +func (c *cmdMixin) CmdCompletion(context *ParseContext) []string { + var options []string + + // Count args already satisfied - we won't complete those, and add any + // default commands' alternatives, since they weren't listed explicitly + // and the user may want to explicitly list something else. + argsSatisfied := 0 + for _, el := range context.Elements { + switch clause := el.Clause.(type) { + case *ArgClause: + if el.Value != nil && *el.Value != "" { + argsSatisfied++ + } + case *CmdClause: + options = append(options, clause.completionAlts...) + default: + } + } + + if argsSatisfied < len(c.argGroup.args) { + // Since not all args have been satisfied, show options for the current one + options = append(options, c.argGroup.args[argsSatisfied].resolveCompletions()...) + } else { + // If all args are satisfied, then go back to completing commands + for _, cmd := range c.cmdGroup.commandOrder { + if !cmd.hidden { + options = append(options, cmd.name) + } + } + } + + return options +} + +func (c *cmdMixin) FlagCompletion(flagName string, flagValue string) (choices []string, flagMatch bool, optionMatch bool) { + // Check if flagName matches a known flag. + // If it does, show the options for the flag + // Otherwise, show all flags + + options := []string{} + + for _, flag := range c.flagGroup.flagOrder { + // Loop through each flag and determine if a match exists + if flag.name == flagName { + // User typed entire flag. Need to look for flag options. + options = flag.resolveCompletions() + if len(options) == 0 { + // No Options to Choose From, Assume Match. 
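+				// (Return signature is: choices, flagMatch, optionMatch.)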
+ return options, true, true + } + + // Loop options to find if the user specified value matches + isPrefix := false + matched := false + + for _, opt := range options { + if flagValue == opt { + matched = true + } else if strings.HasPrefix(opt, flagValue) { + isPrefix = true + } + } + + // Matched Flag Directly + // Flag Value Not Prefixed, and Matched Directly + return options, true, !isPrefix && matched + } + + if !flag.hidden { + options = append(options, "--"+flag.name) + } + } + // No Flag directly matched. + return options, false, false + +} + +type cmdGroup struct { + app *Application + parent *CmdClause + commands map[string]*CmdClause + commandOrder []*CmdClause +} + +func (c *cmdGroup) defaultSubcommand() *CmdClause { + for _, cmd := range c.commandOrder { + if cmd.isDefault { + return cmd + } + } + return nil +} + +func (c *cmdGroup) cmdNames() []string { + names := make([]string, 0, len(c.commandOrder)) + for _, cmd := range c.commandOrder { + names = append(names, cmd.name) + } + return names +} + +// GetArg gets a command definition. +// +// This allows existing commands to be modified after definition but before parsing. Useful for +// modular applications. +func (c *cmdGroup) GetCommand(name string) *CmdClause { + return c.commands[name] +} + +func newCmdGroup(app *Application) *cmdGroup { + return &cmdGroup{ + app: app, + commands: make(map[string]*CmdClause), + } +} + +func (c *cmdGroup) flattenedCommands() (out []*CmdClause) { + for _, cmd := range c.commandOrder { + if len(cmd.commands) == 0 { + out = append(out, cmd) + } + out = append(out, cmd.flattenedCommands()...) + } + return +} + +func (c *cmdGroup) addCommand(name, help string) *CmdClause { + cmd := newCommand(c.app, name, help) + c.commands[name] = cmd + c.commandOrder = append(c.commandOrder, cmd) + return cmd +} + +func (c *cmdGroup) init() error { + seen := map[string]bool{} + if c.defaultSubcommand() != nil && !c.have() { + return fmt.Errorf("default subcommand %q provided but no subcommands defined", c.defaultSubcommand().name) + } + defaults := []string{} + for _, cmd := range c.commandOrder { + if cmd.isDefault { + defaults = append(defaults, cmd.name) + } + if seen[cmd.name] { + return fmt.Errorf("duplicate command %q", cmd.name) + } + seen[cmd.name] = true + for _, alias := range cmd.aliases { + if seen[alias] { + return fmt.Errorf("alias duplicates existing command %q", alias) + } + c.commands[alias] = cmd + } + if err := cmd.init(); err != nil { + return err + } + } + if len(defaults) > 1 { + return fmt.Errorf("more than one default subcommand exists: %s", strings.Join(defaults, ", ")) + } + return nil +} + +func (c *cmdGroup) have() bool { + return len(c.commands) > 0 +} + +type CmdClauseValidator func(*CmdClause) error + +// A CmdClause is a single top-level command. It encapsulates a set of flags +// and either subcommands or positional arguments. +type CmdClause struct { + cmdMixin + app *Application + name string + aliases []string + help string + isDefault bool + validator CmdClauseValidator + hidden bool + completionAlts []string +} + +func newCommand(app *Application, name, help string) *CmdClause { + c := &CmdClause{ + app: app, + name: name, + help: help, + } + c.flagGroup = newFlagGroup() + c.argGroup = newArgGroup() + c.cmdGroup = newCmdGroup(app) + return c +} + +// Add an Alias for this command. +func (c *CmdClause) Alias(name string) *CmdClause { + c.aliases = append(c.aliases, name) + return c +} + +// Validate sets a validation function to run when parsing. 
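+//
+// For example (illustrative):
+//
+//	cmd.Validate(func(c *CmdClause) error {
+//		// e.g. reject conflicting flag combinations here
+//		return nil
+//	})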
+func (c *CmdClause) Validate(validator CmdClauseValidator) *CmdClause {
+	c.validator = validator
+	return c
+}
+
+func (c *CmdClause) FullCommand() string {
+	out := []string{c.name}
+	for p := c.parent; p != nil; p = p.parent {
+		out = append([]string{p.name}, out...)
+	}
+	return strings.Join(out, " ")
+}
+
+// Command adds a new sub-command.
+func (c *CmdClause) Command(name, help string) *CmdClause {
+	cmd := c.addCommand(name, help)
+	cmd.parent = c
+	return cmd
+}
+
+// Default makes this command the default if commands don't match.
+func (c *CmdClause) Default() *CmdClause {
+	c.isDefault = true
+	return c
+}
+
+func (c *CmdClause) Action(action Action) *CmdClause {
+	c.addAction(action)
+	return c
+}
+
+func (c *CmdClause) PreAction(action Action) *CmdClause {
+	c.addPreAction(action)
+	return c
+}
+
+func (c *CmdClause) init() error {
+	if err := c.flagGroup.init(c.app.defaultEnvarPrefix()); err != nil {
+		return err
+	}
+	if c.argGroup.have() && c.cmdGroup.have() {
+		return fmt.Errorf("can't mix Arg()s with Command()s")
+	}
+	if err := c.argGroup.init(); err != nil {
+		return err
+	}
+	if err := c.cmdGroup.init(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *CmdClause) Hidden() *CmdClause {
+	c.hidden = true
+	return c
+}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/completions.go b/vendor/gopkg.in/alecthomas/kingpin.v2/completions.go
new file mode 100644
index 0000000..6e7b409
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/completions.go
@@ -0,0 +1,33 @@
+package kingpin
+
+// HintAction is a function type that is expected to return a slice of possible
+// command line arguments.
+type HintAction func() []string
+
+type completionsMixin struct {
+	hintActions        []HintAction
+	builtinHintActions []HintAction
+}
+
+func (a *completionsMixin) addHintAction(action HintAction) {
+	a.hintActions = append(a.hintActions, action)
+}
+
+// Allow adding of HintActions which are added internally, e.g. by EnumVar.
+func (a *completionsMixin) addHintActionBuiltin(action HintAction) {
+	a.builtinHintActions = append(a.builtinHintActions, action)
+}
+
+func (a *completionsMixin) resolveCompletions() []string {
+	var hints []string
+
+	options := a.builtinHintActions
+	if len(a.hintActions) > 0 {
+		// The user specified their own hintActions. Use those instead.
+		options = a.hintActions
+	}
+
+	for _, hintAction := range options {
+		hints = append(hints, hintAction()...)
+	}
+	return hints
+}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/doc.go b/vendor/gopkg.in/alecthomas/kingpin.v2/doc.go
new file mode 100644
index 0000000..c14762c
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/doc.go
@@ -0,0 +1,68 @@
+// Package kingpin provides command line interfaces like this:
+//
+//     $ chat
+//     usage: chat [<flags>] <command> [<flags>] [<args> ...]
+//
+//     Flags:
+//       --debug              enable debug mode
+//       --help               Show help.
+//       --server=127.0.0.1   server address
+//
+//     Commands:
+//       help <command>
+//         Show help for a command.
+//
+//       post [<flags>] <channel>
+//         Post a message to a channel.
+//
+//       register <nick> <name>
+//         Register a new user.
+//
+//     $ chat help post
+//     usage: chat [<flags>] post [<flags>] <channel> [<text>]
+//
+//     Post a message to a channel.
+//
+//     Flags:
+//       --image=IMAGE  image to post
+//
+//     Args:
+//       <channel>  channel to post to
+//       [<text>]   text to post
+//
+//     $ chat post --image=~/Downloads/owls.jpg pics
+//
+// From code like this:
+//
+//     package main
+//
+//     import "gopkg.in/alecthomas/kingpin.v2"
+//
+//     var (
+//       debug    = kingpin.Flag("debug", "enable debug mode").Default("false").Bool()
+//       serverIP = kingpin.Flag("server", "server address").Default("127.0.0.1").IP()
+//
+//       register     = kingpin.Command("register", "Register a new user.")
+//       registerNick = register.Arg("nick", "nickname for user").Required().String()
+//       registerName = register.Arg("name", "name of user").Required().String()
+//
+//       post        = kingpin.Command("post", "Post a message to a channel.")
+//       postImage   = post.Flag("image", "image to post").ExistingFile()
+//       postChannel = post.Arg("channel", "channel to post to").Required().String()
+//       postText    = post.Arg("text", "text to post").String()
+//     )
+//
+//     func main() {
+//       switch kingpin.Parse() {
+//       // Register user
+//       case "register":
+//         println(*registerNick)
+//
+//       // Post message
+//       case "post":
+//         if *postImage != "" {
+//         }
+//         if *postText != "" {
+//         }
+//       }
+//     }
package kingpin
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/envar.go b/vendor/gopkg.in/alecthomas/kingpin.v2/envar.go
new file mode 100644
index 0000000..c01a27d
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/envar.go
@@ -0,0 +1,45 @@
+package kingpin
+
+import (
+	"os"
+	"regexp"
+)
+
+var (
+	envVarValuesSeparator = "\r?\n"
+	envVarValuesTrimmer   = regexp.MustCompile(envVarValuesSeparator + "$")
+	envVarValuesSplitter  = regexp.MustCompile(envVarValuesSeparator)
+)
+
+type envarMixin struct {
+	envar   string
+	noEnvar bool
+}
+
+func (e *envarMixin) HasEnvarValue() bool {
+	return e.GetEnvarValue() != ""
+}
+
+func (e *envarMixin) GetEnvarValue() string {
+	if e.noEnvar || e.envar == "" {
+		return ""
+	}
+	return os.Getenv(e.envar)
+}
+
+func (e *envarMixin) GetSplitEnvarValue() []string {
+	values := make([]string, 0)
+
+	envarValue := e.GetEnvarValue()
+	if envarValue == "" {
+		return values
+	}
+
+	// Split by newline to extract multiple values, if any.
+	trimmed := envVarValuesTrimmer.ReplaceAllString(envarValue, "")
+	for _, value := range envVarValuesSplitter.Split(trimmed, -1) {
+		values = append(values, value)
+	}
+
+	return values
+}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/flags.go b/vendor/gopkg.in/alecthomas/kingpin.v2/flags.go
new file mode 100644
index 0000000..c9d677a
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/flags.go
@@ -0,0 +1,308 @@
+package kingpin
+
+import (
+	"fmt"
+	"strings"
+)
+
+type flagGroup struct {
+	short     map[string]*FlagClause
+	long      map[string]*FlagClause
+	flagOrder []*FlagClause
+}
+
+func newFlagGroup() *flagGroup {
+	return &flagGroup{
+		short: map[string]*FlagClause{},
+		long:  map[string]*FlagClause{},
+	}
+}
+
+// GetFlag gets a flag definition.
+//
+// This allows existing flags to be modified after definition but before parsing. Useful for
+// modular applications.
+func (f *flagGroup) GetFlag(name string) *FlagClause {
+	return f.long[name]
+}
+
+// Flag defines a new flag with the given long name and help.
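+//
+// For example (illustrative):
+//
+//	verbose := app.Flag("verbose", "Verbose mode.").Short('v').Bool()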
+func (f *flagGroup) Flag(name, help string) *FlagClause { + flag := newFlag(name, help) + f.long[name] = flag + f.flagOrder = append(f.flagOrder, flag) + return flag +} + +func (f *flagGroup) init(defaultEnvarPrefix string) error { + if err := f.checkDuplicates(); err != nil { + return err + } + for _, flag := range f.long { + if defaultEnvarPrefix != "" && !flag.noEnvar && flag.envar == "" { + flag.envar = envarTransform(defaultEnvarPrefix + "_" + flag.name) + } + if err := flag.init(); err != nil { + return err + } + if flag.shorthand != 0 { + f.short[string(flag.shorthand)] = flag + } + } + return nil +} + +func (f *flagGroup) checkDuplicates() error { + seenShort := map[byte]bool{} + seenLong := map[string]bool{} + for _, flag := range f.flagOrder { + if flag.shorthand != 0 { + if _, ok := seenShort[flag.shorthand]; ok { + return fmt.Errorf("duplicate short flag -%c", flag.shorthand) + } + seenShort[flag.shorthand] = true + } + if _, ok := seenLong[flag.name]; ok { + return fmt.Errorf("duplicate long flag --%s", flag.name) + } + seenLong[flag.name] = true + } + return nil +} + +func (f *flagGroup) parse(context *ParseContext) (*FlagClause, error) { + var token *Token + +loop: + for { + token = context.Peek() + switch token.Type { + case TokenEOL: + break loop + + case TokenLong, TokenShort: + flagToken := token + defaultValue := "" + var flag *FlagClause + var ok bool + invert := false + + name := token.Value + if token.Type == TokenLong { + flag, ok = f.long[name] + if !ok { + if strings.HasPrefix(name, "no-") { + name = name[3:] + invert = true + } + flag, ok = f.long[name] + } + if !ok { + return nil, fmt.Errorf("unknown long flag '%s'", flagToken) + } + } else { + flag, ok = f.short[name] + if !ok { + return nil, fmt.Errorf("unknown short flag '%s'", flagToken) + } + } + + context.Next() + + fb, ok := flag.value.(boolFlag) + if ok && fb.IsBoolFlag() { + if invert { + defaultValue = "false" + } else { + defaultValue = "true" + } + } else { + if invert { + context.Push(token) + return nil, fmt.Errorf("unknown long flag '%s'", flagToken) + } + token = context.Peek() + if token.Type != TokenArg { + context.Push(token) + return nil, fmt.Errorf("expected argument for flag '%s'", flagToken) + } + context.Next() + defaultValue = token.Value + } + + context.matchedFlag(flag, defaultValue) + return flag, nil + + default: + break loop + } + } + return nil, nil +} + +// FlagClause is a fluid interface used to build flags. 
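+//
+// A typical chain (illustrative):
+//
+//	timeout := app.Flag("timeout", "Request timeout.").Default("5s").Duration()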
+type FlagClause struct { + parserMixin + actionMixin + completionsMixin + envarMixin + name string + shorthand byte + help string + defaultValues []string + placeholder string + hidden bool +} + +func newFlag(name, help string) *FlagClause { + f := &FlagClause{ + name: name, + help: help, + } + return f +} + +func (f *FlagClause) setDefault() error { + if f.HasEnvarValue() { + if v, ok := f.value.(repeatableFlag); !ok || !v.IsCumulative() { + // Use the value as-is + return f.value.Set(f.GetEnvarValue()) + } else { + for _, value := range f.GetSplitEnvarValue() { + if err := f.value.Set(value); err != nil { + return err + } + } + return nil + } + } + + if len(f.defaultValues) > 0 { + for _, defaultValue := range f.defaultValues { + if err := f.value.Set(defaultValue); err != nil { + return err + } + } + return nil + } + + return nil +} + +func (f *FlagClause) needsValue() bool { + haveDefault := len(f.defaultValues) > 0 + return f.required && !(haveDefault || f.HasEnvarValue()) +} + +func (f *FlagClause) init() error { + if f.required && len(f.defaultValues) > 0 { + return fmt.Errorf("required flag '--%s' with default value that will never be used", f.name) + } + if f.value == nil { + return fmt.Errorf("no type defined for --%s (eg. .String())", f.name) + } + if v, ok := f.value.(repeatableFlag); (!ok || !v.IsCumulative()) && len(f.defaultValues) > 1 { + return fmt.Errorf("invalid default for '--%s', expecting single value", f.name) + } + return nil +} + +// Dispatch to the given function after the flag is parsed and validated. +func (f *FlagClause) Action(action Action) *FlagClause { + f.addAction(action) + return f +} + +func (f *FlagClause) PreAction(action Action) *FlagClause { + f.addPreAction(action) + return f +} + +// HintAction registers a HintAction (function) for the flag to provide completions +func (a *FlagClause) HintAction(action HintAction) *FlagClause { + a.addHintAction(action) + return a +} + +// HintOptions registers any number of options for the flag to provide completions +func (a *FlagClause) HintOptions(options ...string) *FlagClause { + a.addHintAction(func() []string { + return options + }) + return a +} + +func (a *FlagClause) EnumVar(target *string, options ...string) { + a.parserMixin.EnumVar(target, options...) + a.addHintActionBuiltin(func() []string { + return options + }) +} + +func (a *FlagClause) Enum(options ...string) (target *string) { + a.addHintActionBuiltin(func() []string { + return options + }) + return a.parserMixin.Enum(options...) +} + +// Default values for this flag. They *must* be parseable by the value of the flag. +func (f *FlagClause) Default(values ...string) *FlagClause { + f.defaultValues = values + return f +} + +// DEPRECATED: Use Envar(name) instead. +func (f *FlagClause) OverrideDefaultFromEnvar(envar string) *FlagClause { + return f.Envar(envar) +} + +// Envar overrides the default value(s) for a flag from an environment variable, +// if it is set. Several default values can be provided by using new lines to +// separate them. +func (f *FlagClause) Envar(name string) *FlagClause { + f.envar = name + f.noEnvar = false + return f +} + +// NoEnvar forces environment variable defaults to be disabled for this flag. +// Most useful in conjunction with app.DefaultEnvars(). +func (f *FlagClause) NoEnvar() *FlagClause { + f.envar = "" + f.noEnvar = true + return f +} + +// PlaceHolder sets the place-holder string used for flag values in the help. 
The +// default behaviour is to use the value provided by Default() if provided, +// then fall back on the capitalized flag name. +func (f *FlagClause) PlaceHolder(placeholder string) *FlagClause { + f.placeholder = placeholder + return f +} + +// Hidden hides a flag from usage but still allows it to be used. +func (f *FlagClause) Hidden() *FlagClause { + f.hidden = true + return f +} + +// Required makes the flag required. You can not provide a Default() value to a Required() flag. +func (f *FlagClause) Required() *FlagClause { + f.required = true + return f +} + +// Short sets the short flag name. +func (f *FlagClause) Short(name byte) *FlagClause { + f.shorthand = name + return f +} + +// Bool makes this flag a boolean flag. +func (f *FlagClause) Bool() (target *bool) { + target = new(bool) + f.SetValue(newBoolValue(target)) + return +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/global.go b/vendor/gopkg.in/alecthomas/kingpin.v2/global.go new file mode 100644 index 0000000..10a2913 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/global.go @@ -0,0 +1,94 @@ +package kingpin + +import ( + "os" + "path/filepath" +) + +var ( + // CommandLine is the default Kingpin parser. + CommandLine = New(filepath.Base(os.Args[0]), "") + // Global help flag. Exposed for user customisation. + HelpFlag = CommandLine.HelpFlag + // Top-level help command. Exposed for user customisation. May be nil. + HelpCommand = CommandLine.HelpCommand + // Global version flag. Exposed for user customisation. May be nil. + VersionFlag = CommandLine.VersionFlag +) + +// Command adds a new command to the default parser. +func Command(name, help string) *CmdClause { + return CommandLine.Command(name, help) +} + +// Flag adds a new flag to the default parser. +func Flag(name, help string) *FlagClause { + return CommandLine.Flag(name, help) +} + +// Arg adds a new argument to the top-level of the default parser. +func Arg(name, help string) *ArgClause { + return CommandLine.Arg(name, help) +} + +// Parse and return the selected command. Will call the termination handler if +// an error is encountered. +func Parse() string { + selected := MustParse(CommandLine.Parse(os.Args[1:])) + if selected == "" && CommandLine.cmdGroup.have() { + Usage() + CommandLine.terminate(0) + } + return selected +} + +// Errorf prints an error message to stderr. +func Errorf(format string, args ...interface{}) { + CommandLine.Errorf(format, args...) +} + +// Fatalf prints an error message to stderr and exits. +func Fatalf(format string, args ...interface{}) { + CommandLine.Fatalf(format, args...) +} + +// FatalIfError prints an error and exits if err is not nil. The error is printed +// with the given prefix. +func FatalIfError(err error, format string, args ...interface{}) { + CommandLine.FatalIfError(err, format, args...) +} + +// FatalUsage prints an error message followed by usage information, then +// exits with a non-zero status. +func FatalUsage(format string, args ...interface{}) { + CommandLine.FatalUsage(format, args...) +} + +// FatalUsageContext writes a printf formatted error message to stderr, then +// usage information for the given ParseContext, before exiting. +func FatalUsageContext(context *ParseContext, format string, args ...interface{}) { + CommandLine.FatalUsageContext(context, format, args...) +} + +// Usage prints usage to stderr. +func Usage() { + CommandLine.Usage(os.Args[1:]) +} + +// Set global usage template to use (defaults to DefaultUsageTemplate). 
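+//
+// For example:
+//
+//	kingpin.UsageTemplate(kingpin.CompactUsageTemplate)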
+func UsageTemplate(template string) *Application { + return CommandLine.UsageTemplate(template) +} + +// MustParse can be used with app.Parse(args) to exit with an error if parsing fails. +func MustParse(command string, err error) string { + if err != nil { + Fatalf("%s, try --help", err) + } + return command +} + +// Version adds a flag for displaying the application version number. +func Version(version string) *Application { + return CommandLine.Version(version) +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth.go b/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth.go new file mode 100644 index 0000000..a269531 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth.go @@ -0,0 +1,9 @@ +// +build appengine !linux,!freebsd,!darwin,!dragonfly,!netbsd,!openbsd + +package kingpin + +import "io" + +func guessWidth(w io.Writer) int { + return 80 +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go b/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go new file mode 100644 index 0000000..ad8163f --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go @@ -0,0 +1,38 @@ +// +build !appengine,linux freebsd darwin dragonfly netbsd openbsd + +package kingpin + +import ( + "io" + "os" + "strconv" + "syscall" + "unsafe" +) + +func guessWidth(w io.Writer) int { + // check if COLUMNS env is set to comply with + // http://pubs.opengroup.org/onlinepubs/009604499/basedefs/xbd_chap08.html + colsStr := os.Getenv("COLUMNS") + if colsStr != "" { + if cols, err := strconv.Atoi(colsStr); err == nil { + return cols + } + } + + if t, ok := w.(*os.File); ok { + fd := t.Fd() + var dimensions [4]uint16 + + if _, _, err := syscall.Syscall6( + syscall.SYS_IOCTL, + uintptr(fd), + uintptr(syscall.TIOCGWINSZ), + uintptr(unsafe.Pointer(&dimensions)), + 0, 0, 0, + ); err == 0 { + return int(dimensions[1]) + } + } + return 80 +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/model.go b/vendor/gopkg.in/alecthomas/kingpin.v2/model.go new file mode 100644 index 0000000..a4ee83b --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/model.go @@ -0,0 +1,227 @@ +package kingpin + +import ( + "fmt" + "strconv" + "strings" +) + +// Data model for Kingpin command-line structure. + +type FlagGroupModel struct { + Flags []*FlagModel +} + +func (f *FlagGroupModel) FlagSummary() string { + out := []string{} + count := 0 + for _, flag := range f.Flags { + if flag.Name != "help" { + count++ + } + if flag.Required { + if flag.IsBoolFlag() { + out = append(out, fmt.Sprintf("--[no-]%s", flag.Name)) + } else { + out = append(out, fmt.Sprintf("--%s=%s", flag.Name, flag.FormatPlaceHolder())) + } + } + } + if count != len(out) { + out = append(out, "[]") + } + return strings.Join(out, " ") +} + +type FlagModel struct { + Name string + Help string + Short rune + Default []string + Envar string + PlaceHolder string + Required bool + Hidden bool + Value Value +} + +func (f *FlagModel) String() string { + return f.Value.String() +} + +func (f *FlagModel) IsBoolFlag() bool { + if fl, ok := f.Value.(boolFlag); ok { + return fl.IsBoolFlag() + } + return false +} + +func (f *FlagModel) FormatPlaceHolder() string { + if f.PlaceHolder != "" { + return f.PlaceHolder + } + if len(f.Default) > 0 { + ellipsis := "" + if len(f.Default) > 1 { + ellipsis = "..." 
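+			// (Multiple defaults are summarised as the first value plus "...".)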
+ } + if _, ok := f.Value.(*stringValue); ok { + return strconv.Quote(f.Default[0]) + ellipsis + } + return f.Default[0] + ellipsis + } + return strings.ToUpper(f.Name) +} + +type ArgGroupModel struct { + Args []*ArgModel +} + +func (a *ArgGroupModel) ArgSummary() string { + depth := 0 + out := []string{} + for _, arg := range a.Args { + h := "<" + arg.Name + ">" + if !arg.Required { + h = "[" + h + depth++ + } + out = append(out, h) + } + out[len(out)-1] = out[len(out)-1] + strings.Repeat("]", depth) + return strings.Join(out, " ") +} + +type ArgModel struct { + Name string + Help string + Default []string + Envar string + Required bool + Value Value +} + +func (a *ArgModel) String() string { + return a.Value.String() +} + +type CmdGroupModel struct { + Commands []*CmdModel +} + +func (c *CmdGroupModel) FlattenedCommands() (out []*CmdModel) { + for _, cmd := range c.Commands { + if len(cmd.Commands) == 0 { + out = append(out, cmd) + } + out = append(out, cmd.FlattenedCommands()...) + } + return +} + +type CmdModel struct { + Name string + Aliases []string + Help string + FullCommand string + Depth int + Hidden bool + Default bool + *FlagGroupModel + *ArgGroupModel + *CmdGroupModel +} + +func (c *CmdModel) String() string { + return c.FullCommand +} + +type ApplicationModel struct { + Name string + Help string + Version string + Author string + *ArgGroupModel + *CmdGroupModel + *FlagGroupModel +} + +func (a *Application) Model() *ApplicationModel { + return &ApplicationModel{ + Name: a.Name, + Help: a.Help, + Version: a.version, + Author: a.author, + FlagGroupModel: a.flagGroup.Model(), + ArgGroupModel: a.argGroup.Model(), + CmdGroupModel: a.cmdGroup.Model(), + } +} + +func (a *argGroup) Model() *ArgGroupModel { + m := &ArgGroupModel{} + for _, arg := range a.args { + m.Args = append(m.Args, arg.Model()) + } + return m +} + +func (a *ArgClause) Model() *ArgModel { + return &ArgModel{ + Name: a.name, + Help: a.help, + Default: a.defaultValues, + Envar: a.envar, + Required: a.required, + Value: a.value, + } +} + +func (f *flagGroup) Model() *FlagGroupModel { + m := &FlagGroupModel{} + for _, fl := range f.flagOrder { + m.Flags = append(m.Flags, fl.Model()) + } + return m +} + +func (f *FlagClause) Model() *FlagModel { + return &FlagModel{ + Name: f.name, + Help: f.help, + Short: rune(f.shorthand), + Default: f.defaultValues, + Envar: f.envar, + PlaceHolder: f.placeholder, + Required: f.required, + Hidden: f.hidden, + Value: f.value, + } +} + +func (c *cmdGroup) Model() *CmdGroupModel { + m := &CmdGroupModel{} + for _, cm := range c.commandOrder { + m.Commands = append(m.Commands, cm.Model()) + } + return m +} + +func (c *CmdClause) Model() *CmdModel { + depth := 0 + for i := c; i != nil; i = i.parent { + depth++ + } + return &CmdModel{ + Name: c.name, + Aliases: c.aliases, + Help: c.help, + Depth: depth, + Hidden: c.hidden, + Default: c.isDefault, + FullCommand: c.FullCommand(), + FlagGroupModel: c.flagGroup.Model(), + ArgGroupModel: c.argGroup.Model(), + CmdGroupModel: c.cmdGroup.Model(), + } +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go b/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go new file mode 100644 index 0000000..f9af98f --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go @@ -0,0 +1,380 @@ +package kingpin + +import ( + "bufio" + "fmt" + "os" + "strings" +) + +type TokenType int + +// Token types. 
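+// For example, "--name=foo" is lexed as a TokenLong "name" followed by a
+// TokenArg "foo"; see ParseContext.Next below.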
+const ( + TokenShort TokenType = iota + TokenLong + TokenArg + TokenError + TokenEOL +) + +func (t TokenType) String() string { + switch t { + case TokenShort: + return "short flag" + case TokenLong: + return "long flag" + case TokenArg: + return "argument" + case TokenError: + return "error" + case TokenEOL: + return "" + } + return "?" +} + +var ( + TokenEOLMarker = Token{-1, TokenEOL, ""} +) + +type Token struct { + Index int + Type TokenType + Value string +} + +func (t *Token) Equal(o *Token) bool { + return t.Index == o.Index +} + +func (t *Token) IsFlag() bool { + return t.Type == TokenShort || t.Type == TokenLong +} + +func (t *Token) IsEOF() bool { + return t.Type == TokenEOL +} + +func (t *Token) String() string { + switch t.Type { + case TokenShort: + return "-" + t.Value + case TokenLong: + return "--" + t.Value + case TokenArg: + return t.Value + case TokenError: + return "error: " + t.Value + case TokenEOL: + return "" + default: + panic("unhandled type") + } +} + +// A union of possible elements in a parse stack. +type ParseElement struct { + // Clause is either *CmdClause, *ArgClause or *FlagClause. + Clause interface{} + // Value is corresponding value for an ArgClause or FlagClause (if any). + Value *string +} + +// ParseContext holds the current context of the parser. When passed to +// Action() callbacks Elements will be fully populated with *FlagClause, +// *ArgClause and *CmdClause values and their corresponding arguments (if +// any). +type ParseContext struct { + SelectedCommand *CmdClause + ignoreDefault bool + argsOnly bool + peek []*Token + argi int // Index of current command-line arg we're processing. + args []string + rawArgs []string + flags *flagGroup + arguments *argGroup + argumenti int // Cursor into arguments + // Flags, arguments and commands encountered and collected during parse. + Elements []*ParseElement +} + +func (p *ParseContext) nextArg() *ArgClause { + if p.argumenti >= len(p.arguments.args) { + return nil + } + arg := p.arguments.args[p.argumenti] + if !arg.consumesRemainder() { + p.argumenti++ + } + return arg +} + +func (p *ParseContext) next() { + p.argi++ + p.args = p.args[1:] +} + +// HasTrailingArgs returns true if there are unparsed command-line arguments. +// This can occur if the parser can not match remaining arguments. +func (p *ParseContext) HasTrailingArgs() bool { + return len(p.args) > 0 +} + +func tokenize(args []string, ignoreDefault bool) *ParseContext { + return &ParseContext{ + ignoreDefault: ignoreDefault, + args: args, + rawArgs: args, + flags: newFlagGroup(), + arguments: newArgGroup(), + } +} + +func (p *ParseContext) mergeFlags(flags *flagGroup) { + for _, flag := range flags.flagOrder { + if flag.shorthand != 0 { + p.flags.short[string(flag.shorthand)] = flag + } + p.flags.long[flag.name] = flag + p.flags.flagOrder = append(p.flags.flagOrder, flag) + } +} + +func (p *ParseContext) mergeArgs(args *argGroup) { + for _, arg := range args.args { + p.arguments.args = append(p.arguments.args, arg) + } +} + +func (p *ParseContext) EOL() bool { + return p.Peek().Type == TokenEOL +} + +// Next token in the parse context. +func (p *ParseContext) Next() *Token { + if len(p.peek) > 0 { + return p.pop() + } + + // End of tokens. + if len(p.args) == 0 { + return &Token{Index: p.argi, Type: TokenEOL} + } + + arg := p.args[0] + p.next() + + if p.argsOnly { + return &Token{p.argi, TokenArg, arg} + } + + // All remaining args are passed directly. 
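+	// e.g. "app cmd -- --not-a-flag" treats "--not-a-flag" as a positional argument.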
+ if arg == "--" { + p.argsOnly = true + return p.Next() + } + + if strings.HasPrefix(arg, "--") { + parts := strings.SplitN(arg[2:], "=", 2) + token := &Token{p.argi, TokenLong, parts[0]} + if len(parts) == 2 { + p.Push(&Token{p.argi, TokenArg, parts[1]}) + } + return token + } + + if strings.HasPrefix(arg, "-") { + if len(arg) == 1 { + return &Token{Index: p.argi, Type: TokenShort} + } + short := arg[1:2] + flag, ok := p.flags.short[short] + // Not a known short flag, we'll just return it anyway. + if !ok { + } else if fb, ok := flag.value.(boolFlag); ok && fb.IsBoolFlag() { + // Bool short flag. + } else { + // Short flag with combined argument: -fARG + token := &Token{p.argi, TokenShort, short} + if len(arg) > 2 { + p.Push(&Token{p.argi, TokenArg, arg[2:]}) + } + return token + } + + if len(arg) > 2 { + p.args = append([]string{"-" + arg[2:]}, p.args...) + } + return &Token{p.argi, TokenShort, short} + } else if strings.HasPrefix(arg, "@") { + expanded, err := ExpandArgsFromFile(arg[1:]) + if err != nil { + return &Token{p.argi, TokenError, err.Error()} + } + if p.argi >= len(p.args) { + p.args = append(p.args[:p.argi-1], expanded...) + } else { + p.args = append(p.args[:p.argi-1], append(expanded, p.args[p.argi+1:]...)...) + } + return p.Next() + } + + return &Token{p.argi, TokenArg, arg} +} + +func (p *ParseContext) Peek() *Token { + if len(p.peek) == 0 { + return p.Push(p.Next()) + } + return p.peek[len(p.peek)-1] +} + +func (p *ParseContext) Push(token *Token) *Token { + p.peek = append(p.peek, token) + return token +} + +func (p *ParseContext) pop() *Token { + end := len(p.peek) - 1 + token := p.peek[end] + p.peek = p.peek[0:end] + return token +} + +func (p *ParseContext) String() string { + return p.SelectedCommand.FullCommand() +} + +func (p *ParseContext) matchedFlag(flag *FlagClause, value string) { + p.Elements = append(p.Elements, &ParseElement{Clause: flag, Value: &value}) +} + +func (p *ParseContext) matchedArg(arg *ArgClause, value string) { + p.Elements = append(p.Elements, &ParseElement{Clause: arg, Value: &value}) +} + +func (p *ParseContext) matchedCmd(cmd *CmdClause) { + p.Elements = append(p.Elements, &ParseElement{Clause: cmd}) + p.mergeFlags(cmd.flagGroup) + p.mergeArgs(cmd.argGroup) + p.SelectedCommand = cmd +} + +// Expand arguments from a file. Lines starting with # will be treated as comments. 
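+//
+// Combined with the "@" handling in ParseContext.Next above, "app @args.txt"
+// reads additional arguments from args.txt, one per line.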
+func ExpandArgsFromFile(filename string) (out []string, err error) { + r, err := os.Open(filename) + if err != nil { + return nil, err + } + defer r.Close() + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "#") { + continue + } + out = append(out, line) + } + err = scanner.Err() + return +} + +func parse(context *ParseContext, app *Application) (err error) { + context.mergeFlags(app.flagGroup) + context.mergeArgs(app.argGroup) + + cmds := app.cmdGroup + ignoreDefault := context.ignoreDefault + +loop: + for !context.EOL() { + token := context.Peek() + + switch token.Type { + case TokenLong, TokenShort: + if flag, err := context.flags.parse(context); err != nil { + if !ignoreDefault { + if cmd := cmds.defaultSubcommand(); cmd != nil { + cmd.completionAlts = cmds.cmdNames() + context.matchedCmd(cmd) + cmds = cmd.cmdGroup + break + } + } + return err + } else if flag == HelpFlag { + ignoreDefault = true + } + + case TokenArg: + if cmds.have() { + selectedDefault := false + cmd, ok := cmds.commands[token.String()] + if !ok { + if !ignoreDefault { + if cmd = cmds.defaultSubcommand(); cmd != nil { + cmd.completionAlts = cmds.cmdNames() + selectedDefault = true + } + } + if cmd == nil { + return fmt.Errorf("expected command but got %q", token) + } + } + if cmd == HelpCommand { + ignoreDefault = true + } + cmd.completionAlts = nil + context.matchedCmd(cmd) + cmds = cmd.cmdGroup + if !selectedDefault { + context.Next() + } + } else if context.arguments.have() { + if app.noInterspersed { + // no more flags + context.argsOnly = true + } + arg := context.nextArg() + if arg == nil { + break loop + } + context.matchedArg(arg, token.String()) + context.Next() + } else { + break loop + } + + case TokenEOL: + break loop + } + } + + // Move to innermost default command. + for !ignoreDefault { + if cmd := cmds.defaultSubcommand(); cmd != nil { + cmd.completionAlts = cmds.cmdNames() + context.matchedCmd(cmd) + cmds = cmd.cmdGroup + } else { + break + } + } + + if !context.EOL() { + return fmt.Errorf("unexpected %s", context.Peek()) + } + + // Set defaults for all remaining args. + for arg := context.nextArg(); arg != nil && !arg.consumesRemainder(); arg = context.nextArg() { + for _, defaultValue := range arg.defaultValues { + if err := arg.value.Set(defaultValue); err != nil { + return fmt.Errorf("invalid default value '%s' for argument '%s'", defaultValue, arg.name) + } + } + } + + return +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/parsers.go b/vendor/gopkg.in/alecthomas/kingpin.v2/parsers.go new file mode 100644 index 0000000..d9ad57e --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/parsers.go @@ -0,0 +1,212 @@ +package kingpin + +import ( + "net" + "net/url" + "os" + "time" + + "github.com/alecthomas/units" +) + +type Settings interface { + SetValue(value Value) +} + +type parserMixin struct { + value Value + required bool +} + +func (p *parserMixin) SetValue(value Value) { + p.value = value +} + +// StringMap provides key=value parsing into a map. +func (p *parserMixin) StringMap() (target *map[string]string) { + target = &(map[string]string{}) + p.StringMapVar(target) + return +} + +// Duration sets the parser to a time.Duration parser. +func (p *parserMixin) Duration() (target *time.Duration) { + target = new(time.Duration) + p.DurationVar(target) + return +} + +// Bytes parses numeric byte units. eg. 
1.5KB +func (p *parserMixin) Bytes() (target *units.Base2Bytes) { + target = new(units.Base2Bytes) + p.BytesVar(target) + return +} + +// IP sets the parser to a net.IP parser. +func (p *parserMixin) IP() (target *net.IP) { + target = new(net.IP) + p.IPVar(target) + return +} + +// TCP (host:port) address. +func (p *parserMixin) TCP() (target **net.TCPAddr) { + target = new(*net.TCPAddr) + p.TCPVar(target) + return +} + +// TCPVar (host:port) address. +func (p *parserMixin) TCPVar(target **net.TCPAddr) { + p.SetValue(newTCPAddrValue(target)) +} + +// ExistingFile sets the parser to one that requires and returns an existing file. +func (p *parserMixin) ExistingFile() (target *string) { + target = new(string) + p.ExistingFileVar(target) + return +} + +// ExistingDir sets the parser to one that requires and returns an existing directory. +func (p *parserMixin) ExistingDir() (target *string) { + target = new(string) + p.ExistingDirVar(target) + return +} + +// ExistingFileOrDir sets the parser to one that requires and returns an existing file OR directory. +func (p *parserMixin) ExistingFileOrDir() (target *string) { + target = new(string) + p.ExistingFileOrDirVar(target) + return +} + +// File returns an os.File against an existing file. +func (p *parserMixin) File() (target **os.File) { + target = new(*os.File) + p.FileVar(target) + return +} + +// File attempts to open a File with os.OpenFile(flag, perm). +func (p *parserMixin) OpenFile(flag int, perm os.FileMode) (target **os.File) { + target = new(*os.File) + p.OpenFileVar(target, flag, perm) + return +} + +// URL provides a valid, parsed url.URL. +func (p *parserMixin) URL() (target **url.URL) { + target = new(*url.URL) + p.URLVar(target) + return +} + +// StringMap provides key=value parsing into a map. +func (p *parserMixin) StringMapVar(target *map[string]string) { + p.SetValue(newStringMapValue(target)) +} + +// Float sets the parser to a float64 parser. +func (p *parserMixin) Float() (target *float64) { + return p.Float64() +} + +// Float sets the parser to a float64 parser. +func (p *parserMixin) FloatVar(target *float64) { + p.Float64Var(target) +} + +// Duration sets the parser to a time.Duration parser. +func (p *parserMixin) DurationVar(target *time.Duration) { + p.SetValue(newDurationValue(target)) +} + +// BytesVar parses numeric byte units. eg. 1.5KB +func (p *parserMixin) BytesVar(target *units.Base2Bytes) { + p.SetValue(newBytesValue(target)) +} + +// IP sets the parser to a net.IP parser. +func (p *parserMixin) IPVar(target *net.IP) { + p.SetValue(newIPValue(target)) +} + +// ExistingFile sets the parser to one that requires and returns an existing file. +func (p *parserMixin) ExistingFileVar(target *string) { + p.SetValue(newExistingFileValue(target)) +} + +// ExistingDir sets the parser to one that requires and returns an existing directory. +func (p *parserMixin) ExistingDirVar(target *string) { + p.SetValue(newExistingDirValue(target)) +} + +// ExistingDir sets the parser to one that requires and returns an existing directory. +func (p *parserMixin) ExistingFileOrDirVar(target *string) { + p.SetValue(newExistingFileOrDirValue(target)) +} + +// FileVar opens an existing file. +func (p *parserMixin) FileVar(target **os.File) { + p.SetValue(newFileValue(target, os.O_RDONLY, 0)) +} + +// OpenFileVar calls os.OpenFile(flag, perm) +func (p *parserMixin) OpenFileVar(target **os.File, flag int, perm os.FileMode) { + p.SetValue(newFileValue(target, flag, perm)) +} + +// URL provides a valid, parsed url.URL. 
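+//
+// For example (illustrative):
+//
+//	var endpoint *url.URL
+//	app.Flag("endpoint", "API endpoint.").URLVar(&endpoint)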
+func (p *parserMixin) URLVar(target **url.URL) { + p.SetValue(newURLValue(target)) +} + +// URLList provides a parsed list of url.URL values. +func (p *parserMixin) URLList() (target *[]*url.URL) { + target = new([]*url.URL) + p.URLListVar(target) + return +} + +// URLListVar provides a parsed list of url.URL values. +func (p *parserMixin) URLListVar(target *[]*url.URL) { + p.SetValue(newURLListValue(target)) +} + +// Enum allows a value from a set of options. +func (p *parserMixin) Enum(options ...string) (target *string) { + target = new(string) + p.EnumVar(target, options...) + return +} + +// EnumVar allows a value from a set of options. +func (p *parserMixin) EnumVar(target *string, options ...string) { + p.SetValue(newEnumFlag(target, options...)) +} + +// Enums allows a set of values from a set of options. +func (p *parserMixin) Enums(options ...string) (target *[]string) { + target = new([]string) + p.EnumsVar(target, options...) + return +} + +// EnumVar allows a value from a set of options. +func (p *parserMixin) EnumsVar(target *[]string, options ...string) { + p.SetValue(newEnumsFlag(target, options...)) +} + +// A Counter increments a number each time it is encountered. +func (p *parserMixin) Counter() (target *int) { + target = new(int) + p.CounterVar(target) + return +} + +func (p *parserMixin) CounterVar(target *int) { + p.SetValue(newCounterValue(target)) +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/templates.go b/vendor/gopkg.in/alecthomas/kingpin.v2/templates.go new file mode 100644 index 0000000..97b5c9f --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/templates.go @@ -0,0 +1,262 @@ +package kingpin + +// Default usage template. +var DefaultUsageTemplate = `{{define "FormatCommand"}}\ +{{if .FlagSummary}} {{.FlagSummary}}{{end}}\ +{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\ +{{end}}\ + +{{define "FormatCommands"}}\ +{{range .FlattenedCommands}}\ +{{if not .Hidden}}\ + {{.FullCommand}}{{if .Default}}*{{end}}{{template "FormatCommand" .}} +{{.Help|Wrap 4}} +{{end}}\ +{{end}}\ +{{end}}\ + +{{define "FormatUsage"}}\ +{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}} +{{if .Help}} +{{.Help|Wrap 0}}\ +{{end}}\ + +{{end}}\ + +{{if .Context.SelectedCommand}}\ +usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}} +{{else}}\ +usage: {{.App.Name}}{{template "FormatUsage" .App}} +{{end}}\ +{{if .Context.Flags}}\ +Flags: +{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.Args}}\ +Args: +{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.SelectedCommand}}\ +{{if len .Context.SelectedCommand.Commands}}\ +Subcommands: +{{template "FormatCommands" .Context.SelectedCommand}} +{{end}}\ +{{else if .App.Commands}}\ +Commands: +{{template "FormatCommands" .App}} +{{end}}\ +` + +// Usage template where command's optional flags are listed separately +var SeparateOptionalFlagsUsageTemplate = `{{define "FormatCommand"}}\ +{{if .FlagSummary}} {{.FlagSummary}}{{end}}\ +{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\ +{{end}}\ + +{{define "FormatCommands"}}\ +{{range .FlattenedCommands}}\ +{{if not .Hidden}}\ + {{.FullCommand}}{{if .Default}}*{{end}}{{template "FormatCommand" .}} +{{.Help|Wrap 4}} +{{end}}\ +{{end}}\ +{{end}}\ + +{{define "FormatUsage"}}\ +{{template "FormatCommand" .}}{{if .Commands}} [ 
...]{{end}} +{{if .Help}} +{{.Help|Wrap 0}}\ +{{end}}\ + +{{end}}\ +{{if .Context.SelectedCommand}}\ +usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}} +{{else}}\ +usage: {{.App.Name}}{{template "FormatUsage" .App}} +{{end}}\ + +{{if .Context.Flags|RequiredFlags}}\ +Required flags: +{{.Context.Flags|RequiredFlags|FlagsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.Flags|OptionalFlags}}\ +Optional flags: +{{.Context.Flags|OptionalFlags|FlagsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.Args}}\ +Args: +{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.SelectedCommand}}\ +Subcommands: +{{if .Context.SelectedCommand.Commands}}\ +{{template "FormatCommands" .Context.SelectedCommand}} +{{end}}\ +{{else if .App.Commands}}\ +Commands: +{{template "FormatCommands" .App}} +{{end}}\ +` + +// Usage template with compactly formatted commands. +var CompactUsageTemplate = `{{define "FormatCommand"}}\ +{{if .FlagSummary}} {{.FlagSummary}}{{end}}\ +{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\ +{{end}}\ + +{{define "FormatCommandList"}}\ +{{range .}}\ +{{if not .Hidden}}\ +{{.Depth|Indent}}{{.Name}}{{if .Default}}*{{end}}{{template "FormatCommand" .}} +{{end}}\ +{{template "FormatCommandList" .Commands}}\ +{{end}}\ +{{end}}\ + +{{define "FormatUsage"}}\ +{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}} +{{if .Help}} +{{.Help|Wrap 0}}\ +{{end}}\ + +{{end}}\ + +{{if .Context.SelectedCommand}}\ +usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}} +{{else}}\ +usage: {{.App.Name}}{{template "FormatUsage" .App}} +{{end}}\ +{{if .Context.Flags}}\ +Flags: +{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.Args}}\ +Args: +{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.SelectedCommand}}\ +{{if .Context.SelectedCommand.Commands}}\ +Commands: + {{.Context.SelectedCommand}} +{{template "FormatCommandList" .Context.SelectedCommand.Commands}} +{{end}}\ +{{else if .App.Commands}}\ +Commands: +{{template "FormatCommandList" .App.Commands}} +{{end}}\ +` + +var ManPageTemplate = `{{define "FormatFlags"}}\ +{{range .Flags}}\ +{{if not .Hidden}}\ +.TP +\fB{{if .Short}}-{{.Short|Char}}, {{end}}--{{.Name}}{{if not .IsBoolFlag}}={{.FormatPlaceHolder}}{{end}}\\fR +{{.Help}} +{{end}}\ +{{end}}\ +{{end}}\ + +{{define "FormatCommand"}}\ +{{if .FlagSummary}} {{.FlagSummary}}{{end}}\ +{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}{{if .Default}}*{{end}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\ +{{end}}\ + +{{define "FormatCommands"}}\ +{{range .FlattenedCommands}}\ +{{if not .Hidden}}\ +.SS +\fB{{.FullCommand}}{{template "FormatCommand" .}}\\fR +.PP +{{.Help}} +{{template "FormatFlags" .}}\ +{{end}}\ +{{end}}\ +{{end}}\ + +{{define "FormatUsage"}}\ +{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}\\fR +{{end}}\ + +.TH {{.App.Name}} 1 {{.App.Version}} "{{.App.Author}}" +.SH "NAME" +{{.App.Name}} +.SH "SYNOPSIS" +.TP +\fB{{.App.Name}}{{template "FormatUsage" .App}} +.SH "DESCRIPTION" +{{.App.Help}} +.SH "OPTIONS" +{{template "FormatFlags" .App}}\ +{{if .App.Commands}}\ +.SH "COMMANDS" +{{template "FormatCommands" .App}}\ +{{end}}\ +` + +// Default usage template. 
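+// (This long-form variant additionally lists each command's flags inline.)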
+var LongHelpTemplate = `{{define "FormatCommand"}}\ +{{if .FlagSummary}} {{.FlagSummary}}{{end}}\ +{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\ +{{end}}\ + +{{define "FormatCommands"}}\ +{{range .FlattenedCommands}}\ +{{if not .Hidden}}\ + {{.FullCommand}}{{template "FormatCommand" .}} +{{.Help|Wrap 4}} +{{with .Flags|FlagsToTwoColumns}}{{FormatTwoColumnsWithIndent . 4 2}}{{end}} +{{end}}\ +{{end}}\ +{{end}}\ + +{{define "FormatUsage"}}\ +{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}} +{{if .Help}} +{{.Help|Wrap 0}}\ +{{end}}\ + +{{end}}\ + +usage: {{.App.Name}}{{template "FormatUsage" .App}} +{{if .Context.Flags}}\ +Flags: +{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.Args}}\ +Args: +{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .App.Commands}}\ +Commands: +{{template "FormatCommands" .App}} +{{end}}\ +` + +var BashCompletionTemplate = ` +_{{.App.Name}}_bash_autocomplete() { + local cur prev opts base + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + opts=$( ${COMP_WORDS[0]} --completion-bash ${COMP_WORDS[@]:1:$COMP_CWORD} ) + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 +} +complete -F _{{.App.Name}}_bash_autocomplete {{.App.Name}} + +` + +var ZshCompletionTemplate = ` +#compdef {{.App.Name}} +autoload -U compinit && compinit +autoload -U bashcompinit && bashcompinit + +_{{.App.Name}}_bash_autocomplete() { + local cur prev opts base + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + opts=$( ${COMP_WORDS[0]} --completion-bash ${COMP_WORDS[@]:1:$COMP_CWORD} ) + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 +} +complete -F _{{.App.Name}}_bash_autocomplete {{.App.Name}} +` diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/usage.go b/vendor/gopkg.in/alecthomas/kingpin.v2/usage.go new file mode 100644 index 0000000..44af6f6 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/usage.go @@ -0,0 +1,211 @@ +package kingpin + +import ( + "bytes" + "fmt" + "go/doc" + "io" + "strings" + + "github.com/alecthomas/template" +) + +var ( + preIndent = " " +) + +func formatTwoColumns(w io.Writer, indent, padding, width int, rows [][2]string) { + // Find size of first column. + s := 0 + for _, row := range rows { + if c := len(row[0]); c > s && c < 30 { + s = c + } + } + + indentStr := strings.Repeat(" ", indent) + offsetStr := strings.Repeat(" ", s+padding) + + for _, row := range rows { + buf := bytes.NewBuffer(nil) + doc.ToText(buf, row[1], "", preIndent, width-s-padding-indent) + lines := strings.Split(strings.TrimRight(buf.String(), "\n"), "\n") + fmt.Fprintf(w, "%s%-*s%*s", indentStr, s, row[0], padding, "") + if len(row[0]) >= 30 { + fmt.Fprintf(w, "\n%s%s", indentStr, offsetStr) + } + fmt.Fprintf(w, "%s\n", lines[0]) + for _, line := range lines[1:] { + fmt.Fprintf(w, "%s%s%s\n", indentStr, offsetStr, line) + } + } +} + +// Usage writes application usage to w. It parses args to determine +// appropriate help context, such as which command to show help for. 
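+//
+// For example:
+//
+//	app.Usage(os.Args[1:])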
+func (a *Application) Usage(args []string) { + context, err := a.parseContext(true, args) + a.FatalIfError(err, "") + if err := a.UsageForContextWithTemplate(context, 2, a.usageTemplate); err != nil { + panic(err) + } +} + +func formatAppUsage(app *ApplicationModel) string { + s := []string{app.Name} + if len(app.Flags) > 0 { + s = append(s, app.FlagSummary()) + } + if len(app.Args) > 0 { + s = append(s, app.ArgSummary()) + } + return strings.Join(s, " ") +} + +func formatCmdUsage(app *ApplicationModel, cmd *CmdModel) string { + s := []string{app.Name, cmd.String()} + if len(app.Flags) > 0 { + s = append(s, app.FlagSummary()) + } + if len(app.Args) > 0 { + s = append(s, app.ArgSummary()) + } + return strings.Join(s, " ") +} + +func formatFlag(haveShort bool, flag *FlagModel) string { + flagString := "" + if flag.Short != 0 { + flagString += fmt.Sprintf("-%c, --%s", flag.Short, flag.Name) + } else { + if haveShort { + flagString += fmt.Sprintf(" --%s", flag.Name) + } else { + flagString += fmt.Sprintf("--%s", flag.Name) + } + } + if !flag.IsBoolFlag() { + flagString += fmt.Sprintf("=%s", flag.FormatPlaceHolder()) + } + if v, ok := flag.Value.(repeatableFlag); ok && v.IsCumulative() { + flagString += " ..." + } + return flagString +} + +type templateParseContext struct { + SelectedCommand *CmdModel + *FlagGroupModel + *ArgGroupModel +} + +type templateContext struct { + App *ApplicationModel + Width int + Context *templateParseContext +} + +// UsageForContext displays usage information from a ParseContext (obtained from +// Application.ParseContext() or Action(f) callbacks). +func (a *Application) UsageForContext(context *ParseContext) error { + return a.UsageForContextWithTemplate(context, 2, a.usageTemplate) +} + +// UsageForContextWithTemplate is the base usage function. You generally don't need to use this. 
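+// A sketch of calling it with a custom template (illustrative, not part of
+// the upstream file; ParseContext is the public way to obtain a context):
+//
+//     ctx, err := app.ParseContext(os.Args[1:])
+//     if err == nil {
+//         err = app.UsageForContextWithTemplate(ctx, 2, kingpin.CompactUsageTemplate)
+//     }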
+func (a *Application) UsageForContextWithTemplate(context *ParseContext, indent int, tmpl string) error { + width := guessWidth(a.usageWriter) + funcs := template.FuncMap{ + "Indent": func(level int) string { + return strings.Repeat(" ", level*indent) + }, + "Wrap": func(indent int, s string) string { + buf := bytes.NewBuffer(nil) + indentText := strings.Repeat(" ", indent) + doc.ToText(buf, s, indentText, " "+indentText, width-indent) + return buf.String() + }, + "FormatFlag": formatFlag, + "FlagsToTwoColumns": func(f []*FlagModel) [][2]string { + rows := [][2]string{} + haveShort := false + for _, flag := range f { + if flag.Short != 0 { + haveShort = true + break + } + } + for _, flag := range f { + if !flag.Hidden { + rows = append(rows, [2]string{formatFlag(haveShort, flag), flag.Help}) + } + } + return rows + }, + "RequiredFlags": func(f []*FlagModel) []*FlagModel { + requiredFlags := []*FlagModel{} + for _, flag := range f { + if flag.Required { + requiredFlags = append(requiredFlags, flag) + } + } + return requiredFlags + }, + "OptionalFlags": func(f []*FlagModel) []*FlagModel { + optionalFlags := []*FlagModel{} + for _, flag := range f { + if !flag.Required { + optionalFlags = append(optionalFlags, flag) + } + } + return optionalFlags + }, + "ArgsToTwoColumns": func(a []*ArgModel) [][2]string { + rows := [][2]string{} + for _, arg := range a { + s := "<" + arg.Name + ">" + if !arg.Required { + s = "[" + s + "]" + } + rows = append(rows, [2]string{s, arg.Help}) + } + return rows + }, + "FormatTwoColumns": func(rows [][2]string) string { + buf := bytes.NewBuffer(nil) + formatTwoColumns(buf, indent, indent, width, rows) + return buf.String() + }, + "FormatTwoColumnsWithIndent": func(rows [][2]string, indent, padding int) string { + buf := bytes.NewBuffer(nil) + formatTwoColumns(buf, indent, padding, width, rows) + return buf.String() + }, + "FormatAppUsage": formatAppUsage, + "FormatCommandUsage": formatCmdUsage, + "IsCumulative": func(value Value) bool { + r, ok := value.(remainderArg) + return ok && r.IsCumulative() + }, + "Char": func(c rune) string { + return string(c) + }, + } + t, err := template.New("usage").Funcs(funcs).Parse(tmpl) + if err != nil { + return err + } + var selectedCommand *CmdModel + if context.SelectedCommand != nil { + selectedCommand = context.SelectedCommand.Model() + } + ctx := templateContext{ + App: a.Model(), + Width: width, + Context: &templateParseContext{ + SelectedCommand: selectedCommand, + FlagGroupModel: context.flags.Model(), + ArgGroupModel: context.arguments.Model(), + }, + } + return t.Execute(a.usageWriter, ctx) +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/values.go b/vendor/gopkg.in/alecthomas/kingpin.v2/values.go new file mode 100644 index 0000000..b986f12 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/values.go @@ -0,0 +1,466 @@ +package kingpin + +//go:generate go run ./cmd/genvalues/main.go + +import ( + "fmt" + "net" + "net/url" + "os" + "reflect" + "regexp" + "strings" + "time" + + "github.com/alecthomas/units" +) + +// NOTE: Most of the base type values were lifted from: +// http://golang.org/src/pkg/flag/flag.go?s=20146:20222 + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +// +// If a Value has an IsBoolFlag() bool method returning true, the command-line +// parser makes --name equivalent to -name=true rather than using the next +// command-line argument, and adds a --no-name counterpart for negating the +// flag. 
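+//
+// Any type implementing String and Set can back a flag or argument via
+// SetValue; a minimal sketch (illustrative, not part of the upstream file):
+//
+//     type csvValue []string
+//
+//     func (c *csvValue) String() string { return strings.Join(*c, ",") }
+//
+//     func (c *csvValue) Set(s string) error {
+//         *c = append(*c, strings.Split(s, ",")...)
+//         return nil
+//     }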
+type Value interface { + String() string + Set(string) error +} + +// Getter is an interface that allows the contents of a Value to be retrieved. +// It wraps the Value interface, rather than being part of it, because it +// appeared after Go 1 and its compatibility rules. All Value types provided +// by this package satisfy the Getter interface. +type Getter interface { + Value + Get() interface{} +} + +// Optional interface to indicate boolean flags that don't accept a value, and +// implicitly have a --no- negation counterpart. +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// Optional interface for arguments that cumulatively consume all remaining +// input. +type remainderArg interface { + Value + IsCumulative() bool +} + +// Optional interface for flags that can be repeated. +type repeatableFlag interface { + Value + IsCumulative() bool +} + +type accumulator struct { + element func(value interface{}) Value + typ reflect.Type + slice reflect.Value +} + +// Use reflection to accumulate values into a slice. +// +// target := []string{} +// newAccumulator(&target, func (value interface{}) Value { +// return newStringValue(value.(*string)) +// }) +func newAccumulator(slice interface{}, element func(value interface{}) Value) *accumulator { + typ := reflect.TypeOf(slice) + if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Slice { + panic("expected a pointer to a slice") + } + return &accumulator{ + element: element, + typ: typ.Elem().Elem(), + slice: reflect.ValueOf(slice), + } +} + +func (a *accumulator) String() string { + out := []string{} + s := a.slice.Elem() + for i := 0; i < s.Len(); i++ { + out = append(out, a.element(s.Index(i).Addr().Interface()).String()) + } + return strings.Join(out, ",") +} + +func (a *accumulator) Set(value string) error { + e := reflect.New(a.typ) + if err := a.element(e.Interface()).Set(value); err != nil { + return err + } + slice := reflect.Append(a.slice.Elem(), e.Elem()) + a.slice.Elem().Set(slice) + return nil +} + +func (a *accumulator) Get() interface{} { + return a.slice.Interface() +} + +func (a *accumulator) IsCumulative() bool { + return true +} + +func (b *boolValue) IsBoolFlag() bool { return true } + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(p *time.Duration) *durationValue { + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d) } + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// -- map[string]string Value +type stringMapValue map[string]string + +func newStringMapValue(p *map[string]string) *stringMapValue { + return (*stringMapValue)(p) +} + +var stringMapRegex = regexp.MustCompile("[:=]") + +func (s *stringMapValue) Set(value string) error { + parts := stringMapRegex.Split(value, 2) + if len(parts) != 2 { + return fmt.Errorf("expected KEY=VALUE got '%s'", value) + } + (*s)[parts[0]] = parts[1] + return nil +} + +func (s *stringMapValue) Get() interface{} { + return (map[string]string)(*s) +} + +func (s *stringMapValue) String() string { + return fmt.Sprintf("%s", map[string]string(*s)) +} + +func (s *stringMapValue) IsCumulative() bool { + return true +} + +// -- net.IP Value +type ipValue net.IP + +func newIPValue(p *net.IP) *ipValue { + return (*ipValue)(p) +} + +func (i *ipValue) Set(value string) error { + if ip := net.ParseIP(value); ip == nil { + return 
fmt.Errorf("'%s' is not an IP address", value) + } else { + *i = *(*ipValue)(&ip) + return nil + } +} + +func (i *ipValue) Get() interface{} { + return (net.IP)(*i) +} + +func (i *ipValue) String() string { + return (*net.IP)(i).String() +} + +// -- *net.TCPAddr Value +type tcpAddrValue struct { + addr **net.TCPAddr +} + +func newTCPAddrValue(p **net.TCPAddr) *tcpAddrValue { + return &tcpAddrValue{p} +} + +func (i *tcpAddrValue) Set(value string) error { + if addr, err := net.ResolveTCPAddr("tcp", value); err != nil { + return fmt.Errorf("'%s' is not a valid TCP address: %s", value, err) + } else { + *i.addr = addr + return nil + } +} + +func (t *tcpAddrValue) Get() interface{} { + return (*net.TCPAddr)(*t.addr) +} + +func (i *tcpAddrValue) String() string { + return (*i.addr).String() +} + +// -- existingFile Value + +type fileStatValue struct { + path *string + predicate func(os.FileInfo) error +} + +func newFileStatValue(p *string, predicate func(os.FileInfo) error) *fileStatValue { + return &fileStatValue{ + path: p, + predicate: predicate, + } +} + +func (e *fileStatValue) Set(value string) error { + if s, err := os.Stat(value); os.IsNotExist(err) { + return fmt.Errorf("path '%s' does not exist", value) + } else if err != nil { + return err + } else if err := e.predicate(s); err != nil { + return err + } + *e.path = value + return nil +} + +func (f *fileStatValue) Get() interface{} { + return (string)(*f.path) +} + +func (e *fileStatValue) String() string { + return *e.path +} + +// -- os.File value + +type fileValue struct { + f **os.File + flag int + perm os.FileMode +} + +func newFileValue(p **os.File, flag int, perm os.FileMode) *fileValue { + return &fileValue{p, flag, perm} +} + +func (f *fileValue) Set(value string) error { + if fd, err := os.OpenFile(value, f.flag, f.perm); err != nil { + return err + } else { + *f.f = fd + return nil + } +} + +func (f *fileValue) Get() interface{} { + return (*os.File)(*f.f) +} + +func (f *fileValue) String() string { + if *f.f == nil { + return "" + } + return (*f.f).Name() +} + +// -- url.URL Value +type urlValue struct { + u **url.URL +} + +func newURLValue(p **url.URL) *urlValue { + return &urlValue{p} +} + +func (u *urlValue) Set(value string) error { + if url, err := url.Parse(value); err != nil { + return fmt.Errorf("invalid URL: %s", err) + } else { + *u.u = url + return nil + } +} + +func (u *urlValue) Get() interface{} { + return (*url.URL)(*u.u) +} + +func (u *urlValue) String() string { + if *u.u == nil { + return "" + } + return (*u.u).String() +} + +// -- []*url.URL Value +type urlListValue []*url.URL + +func newURLListValue(p *[]*url.URL) *urlListValue { + return (*urlListValue)(p) +} + +func (u *urlListValue) Set(value string) error { + if url, err := url.Parse(value); err != nil { + return fmt.Errorf("invalid URL: %s", err) + } else { + *u = append(*u, url) + return nil + } +} + +func (u *urlListValue) Get() interface{} { + return ([]*url.URL)(*u) +} + +func (u *urlListValue) String() string { + out := []string{} + for _, url := range *u { + out = append(out, url.String()) + } + return strings.Join(out, ",") +} + +// A flag whose value must be in a set of options. 
+type enumValue struct { + value *string + options []string +} + +func newEnumFlag(target *string, options ...string) *enumValue { + return &enumValue{ + value: target, + options: options, + } +} + +func (a *enumValue) String() string { + return *a.value +} + +func (a *enumValue) Set(value string) error { + for _, v := range a.options { + if v == value { + *a.value = value + return nil + } + } + return fmt.Errorf("enum value must be one of %s, got '%s'", strings.Join(a.options, ","), value) +} + +func (e *enumValue) Get() interface{} { + return (string)(*e.value) +} + +// -- []string Enum Value +type enumsValue struct { + value *[]string + options []string +} + +func newEnumsFlag(target *[]string, options ...string) *enumsValue { + return &enumsValue{ + value: target, + options: options, + } +} + +func (s *enumsValue) Set(value string) error { + for _, v := range s.options { + if v == value { + *s.value = append(*s.value, value) + return nil + } + } + return fmt.Errorf("enum value must be one of %s, got '%s'", strings.Join(s.options, ","), value) +} + +func (e *enumsValue) Get() interface{} { + return ([]string)(*e.value) +} + +func (s *enumsValue) String() string { + return strings.Join(*s.value, ",") +} + +func (s *enumsValue) IsCumulative() bool { + return true +} + +// -- units.Base2Bytes Value +type bytesValue units.Base2Bytes + +func newBytesValue(p *units.Base2Bytes) *bytesValue { + return (*bytesValue)(p) +} + +func (d *bytesValue) Set(s string) error { + v, err := units.ParseBase2Bytes(s) + *d = bytesValue(v) + return err +} + +func (d *bytesValue) Get() interface{} { return units.Base2Bytes(*d) } + +func (d *bytesValue) String() string { return (*units.Base2Bytes)(d).String() } + +func newExistingFileValue(target *string) *fileStatValue { + return newFileStatValue(target, func(s os.FileInfo) error { + if s.IsDir() { + return fmt.Errorf("'%s' is a directory", s.Name()) + } + return nil + }) +} + +func newExistingDirValue(target *string) *fileStatValue { + return newFileStatValue(target, func(s os.FileInfo) error { + if !s.IsDir() { + return fmt.Errorf("'%s' is a file", s.Name()) + } + return nil + }) +} + +func newExistingFileOrDirValue(target *string) *fileStatValue { + return newFileStatValue(target, func(s os.FileInfo) error { return nil }) +} + +type counterValue int + +func newCounterValue(n *int) *counterValue { + return (*counterValue)(n) +} + +func (c *counterValue) Set(s string) error { + *c++ + return nil +} + +func (c *counterValue) Get() interface{} { return (int)(*c) } +func (c *counterValue) IsBoolFlag() bool { return true } +func (c *counterValue) String() string { return fmt.Sprintf("%d", *c) } +func (c *counterValue) IsCumulative() bool { return true } + +func resolveHost(value string) (net.IP, error) { + if ip := net.ParseIP(value); ip != nil { + return ip, nil + } else { + if addr, err := net.ResolveIPAddr("ip", value); err != nil { + return nil, err + } else { + return addr.IP, nil + } + } +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/values.json b/vendor/gopkg.in/alecthomas/kingpin.v2/values.json new file mode 100644 index 0000000..23c6744 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/values.json @@ -0,0 +1,25 @@ +[ + {"type": "bool", "parser": "strconv.ParseBool(s)"}, + {"type": "string", "parser": "s, error(nil)", "format": "string(*f.v)", "plural": "Strings"}, + {"type": "uint", "parser": "strconv.ParseUint(s, 0, 64)", "plural": "Uints"}, + {"type": "uint8", "parser": "strconv.ParseUint(s, 0, 8)"}, + {"type": "uint16", "parser": 
"strconv.ParseUint(s, 0, 16)"}, + {"type": "uint32", "parser": "strconv.ParseUint(s, 0, 32)"}, + {"type": "uint64", "parser": "strconv.ParseUint(s, 0, 64)"}, + {"type": "int", "parser": "strconv.ParseFloat(s, 64)", "plural": "Ints"}, + {"type": "int8", "parser": "strconv.ParseInt(s, 0, 8)"}, + {"type": "int16", "parser": "strconv.ParseInt(s, 0, 16)"}, + {"type": "int32", "parser": "strconv.ParseInt(s, 0, 32)"}, + {"type": "int64", "parser": "strconv.ParseInt(s, 0, 64)"}, + {"type": "float64", "parser": "strconv.ParseFloat(s, 64)"}, + {"type": "float32", "parser": "strconv.ParseFloat(s, 32)"}, + {"name": "Duration", "type": "time.Duration", "no_value_parser": true}, + {"name": "IP", "type": "net.IP", "no_value_parser": true}, + {"name": "TCPAddr", "Type": "*net.TCPAddr", "plural": "TCPList", "no_value_parser": true}, + {"name": "ExistingFile", "Type": "string", "plural": "ExistingFiles", "no_value_parser": true}, + {"name": "ExistingDir", "Type": "string", "plural": "ExistingDirs", "no_value_parser": true}, + {"name": "ExistingFileOrDir", "Type": "string", "plural": "ExistingFilesOrDirs", "no_value_parser": true}, + {"name": "Regexp", "Type": "*regexp.Regexp", "parser": "regexp.Compile(s)"}, + {"name": "ResolvedIP", "Type": "net.IP", "parser": "resolveHost(s)", "help": "Resolve a hostname or IP to an IP."}, + {"name": "HexBytes", "Type": "[]byte", "parser": "hex.DecodeString(s)", "help": "Bytes as a hex string."} +] diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/values_generated.go b/vendor/gopkg.in/alecthomas/kingpin.v2/values_generated.go new file mode 100644 index 0000000..602cfc9 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/values_generated.go @@ -0,0 +1,821 @@ +package kingpin + +import ( + "encoding/hex" + "fmt" + "net" + "regexp" + "strconv" + "time" +) + +// This file is autogenerated by "go generate .". Do not modify. + +// -- bool Value +type boolValue struct{ v *bool } + +func newBoolValue(p *bool) *boolValue { + return &boolValue{p} +} + +func (f *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + if err == nil { + *f.v = (bool)(v) + } + return err +} + +func (f *boolValue) Get() interface{} { return (bool)(*f.v) } + +func (f *boolValue) String() string { return fmt.Sprintf("%v", *f) } + +// Bool parses the next command-line value as bool. +func (p *parserMixin) Bool() (target *bool) { + target = new(bool) + p.BoolVar(target) + return +} + +func (p *parserMixin) BoolVar(target *bool) { + p.SetValue(newBoolValue(target)) +} + +// BoolList accumulates bool values into a slice. +func (p *parserMixin) BoolList() (target *[]bool) { + target = new([]bool) + p.BoolListVar(target) + return +} + +func (p *parserMixin) BoolListVar(target *[]bool) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newBoolValue(v.(*bool)) + })) +} + +// -- string Value +type stringValue struct{ v *string } + +func newStringValue(p *string) *stringValue { + return &stringValue{p} +} + +func (f *stringValue) Set(s string) error { + v, err := s, error(nil) + if err == nil { + *f.v = (string)(v) + } + return err +} + +func (f *stringValue) Get() interface{} { return (string)(*f.v) } + +func (f *stringValue) String() string { return string(*f.v) } + +// String parses the next command-line value as string. +func (p *parserMixin) String() (target *string) { + target = new(string) + p.StringVar(target) + return +} + +func (p *parserMixin) StringVar(target *string) { + p.SetValue(newStringValue(target)) +} + +// Strings accumulates string values into a slice. 
+func (p *parserMixin) Strings() (target *[]string) { + target = new([]string) + p.StringsVar(target) + return +} + +func (p *parserMixin) StringsVar(target *[]string) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newStringValue(v.(*string)) + })) +} + +// -- uint Value +type uintValue struct{ v *uint } + +func newUintValue(p *uint) *uintValue { + return &uintValue{p} +} + +func (f *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + if err == nil { + *f.v = (uint)(v) + } + return err +} + +func (f *uintValue) Get() interface{} { return (uint)(*f.v) } + +func (f *uintValue) String() string { return fmt.Sprintf("%v", *f) } + +// Uint parses the next command-line value as uint. +func (p *parserMixin) Uint() (target *uint) { + target = new(uint) + p.UintVar(target) + return +} + +func (p *parserMixin) UintVar(target *uint) { + p.SetValue(newUintValue(target)) +} + +// Uints accumulates uint values into a slice. +func (p *parserMixin) Uints() (target *[]uint) { + target = new([]uint) + p.UintsVar(target) + return +} + +func (p *parserMixin) UintsVar(target *[]uint) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newUintValue(v.(*uint)) + })) +} + +// -- uint8 Value +type uint8Value struct{ v *uint8 } + +func newUint8Value(p *uint8) *uint8Value { + return &uint8Value{p} +} + +func (f *uint8Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 8) + if err == nil { + *f.v = (uint8)(v) + } + return err +} + +func (f *uint8Value) Get() interface{} { return (uint8)(*f.v) } + +func (f *uint8Value) String() string { return fmt.Sprintf("%v", *f) } + +// Uint8 parses the next command-line value as uint8. +func (p *parserMixin) Uint8() (target *uint8) { + target = new(uint8) + p.Uint8Var(target) + return +} + +func (p *parserMixin) Uint8Var(target *uint8) { + p.SetValue(newUint8Value(target)) +} + +// Uint8List accumulates uint8 values into a slice. +func (p *parserMixin) Uint8List() (target *[]uint8) { + target = new([]uint8) + p.Uint8ListVar(target) + return +} + +func (p *parserMixin) Uint8ListVar(target *[]uint8) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newUint8Value(v.(*uint8)) + })) +} + +// -- uint16 Value +type uint16Value struct{ v *uint16 } + +func newUint16Value(p *uint16) *uint16Value { + return &uint16Value{p} +} + +func (f *uint16Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 16) + if err == nil { + *f.v = (uint16)(v) + } + return err +} + +func (f *uint16Value) Get() interface{} { return (uint16)(*f.v) } + +func (f *uint16Value) String() string { return fmt.Sprintf("%v", *f) } + +// Uint16 parses the next command-line value as uint16. +func (p *parserMixin) Uint16() (target *uint16) { + target = new(uint16) + p.Uint16Var(target) + return +} + +func (p *parserMixin) Uint16Var(target *uint16) { + p.SetValue(newUint16Value(target)) +} + +// Uint16List accumulates uint16 values into a slice. 
+func (p *parserMixin) Uint16List() (target *[]uint16) { + target = new([]uint16) + p.Uint16ListVar(target) + return +} + +func (p *parserMixin) Uint16ListVar(target *[]uint16) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newUint16Value(v.(*uint16)) + })) +} + +// -- uint32 Value +type uint32Value struct{ v *uint32 } + +func newUint32Value(p *uint32) *uint32Value { + return &uint32Value{p} +} + +func (f *uint32Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 32) + if err == nil { + *f.v = (uint32)(v) + } + return err +} + +func (f *uint32Value) Get() interface{} { return (uint32)(*f.v) } + +func (f *uint32Value) String() string { return fmt.Sprintf("%v", *f) } + +// Uint32 parses the next command-line value as uint32. +func (p *parserMixin) Uint32() (target *uint32) { + target = new(uint32) + p.Uint32Var(target) + return +} + +func (p *parserMixin) Uint32Var(target *uint32) { + p.SetValue(newUint32Value(target)) +} + +// Uint32List accumulates uint32 values into a slice. +func (p *parserMixin) Uint32List() (target *[]uint32) { + target = new([]uint32) + p.Uint32ListVar(target) + return +} + +func (p *parserMixin) Uint32ListVar(target *[]uint32) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newUint32Value(v.(*uint32)) + })) +} + +// -- uint64 Value +type uint64Value struct{ v *uint64 } + +func newUint64Value(p *uint64) *uint64Value { + return &uint64Value{p} +} + +func (f *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + if err == nil { + *f.v = (uint64)(v) + } + return err +} + +func (f *uint64Value) Get() interface{} { return (uint64)(*f.v) } + +func (f *uint64Value) String() string { return fmt.Sprintf("%v", *f) } + +// Uint64 parses the next command-line value as uint64. +func (p *parserMixin) Uint64() (target *uint64) { + target = new(uint64) + p.Uint64Var(target) + return +} + +func (p *parserMixin) Uint64Var(target *uint64) { + p.SetValue(newUint64Value(target)) +} + +// Uint64List accumulates uint64 values into a slice. +func (p *parserMixin) Uint64List() (target *[]uint64) { + target = new([]uint64) + p.Uint64ListVar(target) + return +} + +func (p *parserMixin) Uint64ListVar(target *[]uint64) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newUint64Value(v.(*uint64)) + })) +} + +// -- int Value +type intValue struct{ v *int } + +func newIntValue(p *int) *intValue { + return &intValue{p} +} + +func (f *intValue) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + if err == nil { + *f.v = (int)(v) + } + return err +} + +func (f *intValue) Get() interface{} { return (int)(*f.v) } + +func (f *intValue) String() string { return fmt.Sprintf("%v", *f) } + +// Int parses the next command-line value as int. +func (p *parserMixin) Int() (target *int) { + target = new(int) + p.IntVar(target) + return +} + +func (p *parserMixin) IntVar(target *int) { + p.SetValue(newIntValue(target)) +} + +// Ints accumulates int values into a slice. 
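+// Note: intValue.Set above parses with strconv.ParseFloat before converting
+// to int, so integer flags also accept scientific notation, e.g. --count=1e3
+// stores 1000 (a property of the generated parser above; the conversion
+// truncates any fractional part).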
+func (p *parserMixin) Ints() (target *[]int) { + target = new([]int) + p.IntsVar(target) + return +} + +func (p *parserMixin) IntsVar(target *[]int) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newIntValue(v.(*int)) + })) +} + +// -- int8 Value +type int8Value struct{ v *int8 } + +func newInt8Value(p *int8) *int8Value { + return &int8Value{p} +} + +func (f *int8Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 8) + if err == nil { + *f.v = (int8)(v) + } + return err +} + +func (f *int8Value) Get() interface{} { return (int8)(*f.v) } + +func (f *int8Value) String() string { return fmt.Sprintf("%v", *f) } + +// Int8 parses the next command-line value as int8. +func (p *parserMixin) Int8() (target *int8) { + target = new(int8) + p.Int8Var(target) + return +} + +func (p *parserMixin) Int8Var(target *int8) { + p.SetValue(newInt8Value(target)) +} + +// Int8List accumulates int8 values into a slice. +func (p *parserMixin) Int8List() (target *[]int8) { + target = new([]int8) + p.Int8ListVar(target) + return +} + +func (p *parserMixin) Int8ListVar(target *[]int8) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newInt8Value(v.(*int8)) + })) +} + +// -- int16 Value +type int16Value struct{ v *int16 } + +func newInt16Value(p *int16) *int16Value { + return &int16Value{p} +} + +func (f *int16Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 16) + if err == nil { + *f.v = (int16)(v) + } + return err +} + +func (f *int16Value) Get() interface{} { return (int16)(*f.v) } + +func (f *int16Value) String() string { return fmt.Sprintf("%v", *f) } + +// Int16 parses the next command-line value as int16. +func (p *parserMixin) Int16() (target *int16) { + target = new(int16) + p.Int16Var(target) + return +} + +func (p *parserMixin) Int16Var(target *int16) { + p.SetValue(newInt16Value(target)) +} + +// Int16List accumulates int16 values into a slice. +func (p *parserMixin) Int16List() (target *[]int16) { + target = new([]int16) + p.Int16ListVar(target) + return +} + +func (p *parserMixin) Int16ListVar(target *[]int16) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newInt16Value(v.(*int16)) + })) +} + +// -- int32 Value +type int32Value struct{ v *int32 } + +func newInt32Value(p *int32) *int32Value { + return &int32Value{p} +} + +func (f *int32Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 32) + if err == nil { + *f.v = (int32)(v) + } + return err +} + +func (f *int32Value) Get() interface{} { return (int32)(*f.v) } + +func (f *int32Value) String() string { return fmt.Sprintf("%v", *f) } + +// Int32 parses the next command-line value as int32. +func (p *parserMixin) Int32() (target *int32) { + target = new(int32) + p.Int32Var(target) + return +} + +func (p *parserMixin) Int32Var(target *int32) { + p.SetValue(newInt32Value(target)) +} + +// Int32List accumulates int32 values into a slice. 
+func (p *parserMixin) Int32List() (target *[]int32) { + target = new([]int32) + p.Int32ListVar(target) + return +} + +func (p *parserMixin) Int32ListVar(target *[]int32) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newInt32Value(v.(*int32)) + })) +} + +// -- int64 Value +type int64Value struct{ v *int64 } + +func newInt64Value(p *int64) *int64Value { + return &int64Value{p} +} + +func (f *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + if err == nil { + *f.v = (int64)(v) + } + return err +} + +func (f *int64Value) Get() interface{} { return (int64)(*f.v) } + +func (f *int64Value) String() string { return fmt.Sprintf("%v", *f) } + +// Int64 parses the next command-line value as int64. +func (p *parserMixin) Int64() (target *int64) { + target = new(int64) + p.Int64Var(target) + return +} + +func (p *parserMixin) Int64Var(target *int64) { + p.SetValue(newInt64Value(target)) +} + +// Int64List accumulates int64 values into a slice. +func (p *parserMixin) Int64List() (target *[]int64) { + target = new([]int64) + p.Int64ListVar(target) + return +} + +func (p *parserMixin) Int64ListVar(target *[]int64) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newInt64Value(v.(*int64)) + })) +} + +// -- float64 Value +type float64Value struct{ v *float64 } + +func newFloat64Value(p *float64) *float64Value { + return &float64Value{p} +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + if err == nil { + *f.v = (float64)(v) + } + return err +} + +func (f *float64Value) Get() interface{} { return (float64)(*f.v) } + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// Float64 parses the next command-line value as float64. +func (p *parserMixin) Float64() (target *float64) { + target = new(float64) + p.Float64Var(target) + return +} + +func (p *parserMixin) Float64Var(target *float64) { + p.SetValue(newFloat64Value(target)) +} + +// Float64List accumulates float64 values into a slice. +func (p *parserMixin) Float64List() (target *[]float64) { + target = new([]float64) + p.Float64ListVar(target) + return +} + +func (p *parserMixin) Float64ListVar(target *[]float64) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newFloat64Value(v.(*float64)) + })) +} + +// -- float32 Value +type float32Value struct{ v *float32 } + +func newFloat32Value(p *float32) *float32Value { + return &float32Value{p} +} + +func (f *float32Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + if err == nil { + *f.v = (float32)(v) + } + return err +} + +func (f *float32Value) Get() interface{} { return (float32)(*f.v) } + +func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) } + +// Float32 parses the next command-line value as float32. +func (p *parserMixin) Float32() (target *float32) { + target = new(float32) + p.Float32Var(target) + return +} + +func (p *parserMixin) Float32Var(target *float32) { + p.SetValue(newFloat32Value(target)) +} + +// Float32List accumulates float32 values into a slice. +func (p *parserMixin) Float32List() (target *[]float32) { + target = new([]float32) + p.Float32ListVar(target) + return +} + +func (p *parserMixin) Float32ListVar(target *[]float32) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newFloat32Value(v.(*float32)) + })) +} + +// DurationList accumulates time.Duration values into a slice. 
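+// Values are parsed with time.ParseDuration (see durationValue in values.go);
+// a sketch (illustrative, not part of the generated file):
+//
+//     waits := kingpin.Flag("wait", "Delay between retries.").DurationList()
+//     // --wait=500ms --wait=2s → *waits holds 500*time.Millisecond, 2*time.Second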
+func (p *parserMixin) DurationList() (target *[]time.Duration) { + target = new([]time.Duration) + p.DurationListVar(target) + return +} + +func (p *parserMixin) DurationListVar(target *[]time.Duration) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newDurationValue(v.(*time.Duration)) + })) +} + +// IPList accumulates net.IP values into a slice. +func (p *parserMixin) IPList() (target *[]net.IP) { + target = new([]net.IP) + p.IPListVar(target) + return +} + +func (p *parserMixin) IPListVar(target *[]net.IP) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newIPValue(v.(*net.IP)) + })) +} + +// TCPList accumulates *net.TCPAddr values into a slice. +func (p *parserMixin) TCPList() (target *[]*net.TCPAddr) { + target = new([]*net.TCPAddr) + p.TCPListVar(target) + return +} + +func (p *parserMixin) TCPListVar(target *[]*net.TCPAddr) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newTCPAddrValue(v.(**net.TCPAddr)) + })) +} + +// ExistingFiles accumulates string values into a slice. +func (p *parserMixin) ExistingFiles() (target *[]string) { + target = new([]string) + p.ExistingFilesVar(target) + return +} + +func (p *parserMixin) ExistingFilesVar(target *[]string) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newExistingFileValue(v.(*string)) + })) +} + +// ExistingDirs accumulates string values into a slice. +func (p *parserMixin) ExistingDirs() (target *[]string) { + target = new([]string) + p.ExistingDirsVar(target) + return +} + +func (p *parserMixin) ExistingDirsVar(target *[]string) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newExistingDirValue(v.(*string)) + })) +} + +// ExistingFilesOrDirs accumulates string values into a slice. +func (p *parserMixin) ExistingFilesOrDirs() (target *[]string) { + target = new([]string) + p.ExistingFilesOrDirsVar(target) + return +} + +func (p *parserMixin) ExistingFilesOrDirsVar(target *[]string) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newExistingFileOrDirValue(v.(*string)) + })) +} + +// -- *regexp.Regexp Value +type regexpValue struct{ v **regexp.Regexp } + +func newRegexpValue(p **regexp.Regexp) *regexpValue { + return ®expValue{p} +} + +func (f *regexpValue) Set(s string) error { + v, err := regexp.Compile(s) + if err == nil { + *f.v = (*regexp.Regexp)(v) + } + return err +} + +func (f *regexpValue) Get() interface{} { return (*regexp.Regexp)(*f.v) } + +func (f *regexpValue) String() string { return fmt.Sprintf("%v", *f) } + +// Regexp parses the next command-line value as *regexp.Regexp. +func (p *parserMixin) Regexp() (target **regexp.Regexp) { + target = new(*regexp.Regexp) + p.RegexpVar(target) + return +} + +func (p *parserMixin) RegexpVar(target **regexp.Regexp) { + p.SetValue(newRegexpValue(target)) +} + +// RegexpList accumulates *regexp.Regexp values into a slice. 
+func (p *parserMixin) RegexpList() (target *[]*regexp.Regexp) { + target = new([]*regexp.Regexp) + p.RegexpListVar(target) + return +} + +func (p *parserMixin) RegexpListVar(target *[]*regexp.Regexp) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newRegexpValue(v.(**regexp.Regexp)) + })) +} + +// -- net.IP Value +type resolvedIPValue struct{ v *net.IP } + +func newResolvedIPValue(p *net.IP) *resolvedIPValue { + return &resolvedIPValue{p} +} + +func (f *resolvedIPValue) Set(s string) error { + v, err := resolveHost(s) + if err == nil { + *f.v = (net.IP)(v) + } + return err +} + +func (f *resolvedIPValue) Get() interface{} { return (net.IP)(*f.v) } + +func (f *resolvedIPValue) String() string { return fmt.Sprintf("%v", *f) } + +// Resolve a hostname or IP to an IP. +func (p *parserMixin) ResolvedIP() (target *net.IP) { + target = new(net.IP) + p.ResolvedIPVar(target) + return +} + +func (p *parserMixin) ResolvedIPVar(target *net.IP) { + p.SetValue(newResolvedIPValue(target)) +} + +// ResolvedIPList accumulates net.IP values into a slice. +func (p *parserMixin) ResolvedIPList() (target *[]net.IP) { + target = new([]net.IP) + p.ResolvedIPListVar(target) + return +} + +func (p *parserMixin) ResolvedIPListVar(target *[]net.IP) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newResolvedIPValue(v.(*net.IP)) + })) +} + +// -- []byte Value +type hexBytesValue struct{ v *[]byte } + +func newHexBytesValue(p *[]byte) *hexBytesValue { + return &hexBytesValue{p} +} + +func (f *hexBytesValue) Set(s string) error { + v, err := hex.DecodeString(s) + if err == nil { + *f.v = ([]byte)(v) + } + return err +} + +func (f *hexBytesValue) Get() interface{} { return ([]byte)(*f.v) } + +func (f *hexBytesValue) String() string { return fmt.Sprintf("%v", *f) } + +// Bytes as a hex string. +func (p *parserMixin) HexBytes() (target *[]byte) { + target = new([]byte) + p.HexBytesVar(target) + return +} + +func (p *parserMixin) HexBytesVar(target *[]byte) { + p.SetValue(newHexBytesValue(target)) +} + +// HexBytesList accumulates []byte values into a slice. 
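+// A sketch of the list form (illustrative, not part of the generated file):
+//
+//     keys := kingpin.Flag("key", "Key as hex.").HexBytesList()
+//     // --key=dead --key=beef → *keys == [][]byte{{0xde, 0xad}, {0xbe, 0xef}}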
+func (p *parserMixin) HexBytesList() (target *[][]byte) { + target = new([][]byte) + p.HexBytesListVar(target) + return +} + +func (p *parserMixin) HexBytesListVar(target *[][]byte) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newHexBytesValue(v.(*[]byte)) + })) +} diff --git a/vendor/vendor.json b/vendor/vendor.json new file mode 100644 index 0000000..140c2ef --- /dev/null +++ b/vendor/vendor.json @@ -0,0 +1,91 @@ +{ + "comment": "", + "ignore": "test", + "package": [ + { + "checksumSHA1": "KmjnydoAbofMieIWm+it5OWERaM=", + "path": "github.com/alecthomas/template", + "revision": "a0175ee3bccc567396460bf5acd36800cb10c49c", + "revisionTime": "2016-04-05T07:15:01Z" + }, + { + "checksumSHA1": "3wt0pTXXeS+S93unwhGoLIyGX/Q=", + "path": "github.com/alecthomas/template/parse", + "revision": "a0175ee3bccc567396460bf5acd36800cb10c49c", + "revisionTime": "2016-04-05T07:15:01Z" + }, + { + "checksumSHA1": "fCc3grA7vIxfBru7R3SqjcW+oLI=", + "path": "github.com/alecthomas/units", + "revision": "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a", + "revisionTime": "2015-10-22T06:55:26Z" + }, + { + "checksumSHA1": "+CqJGh7NIDMnHgScq9sl9tPrnVM=", + "path": "github.com/klauspost/compress/flate", + "revision": "14c9a76e3c95e47f8ccce949bba2c1101a8b85e6", + "revisionTime": "2017-02-18T08:16:04Z" + }, + { + "checksumSHA1": "V1lQwkoDR1fPmZBSgkmZjgZofeU=", + "path": "github.com/klauspost/compress/gzip", + "revision": "14c9a76e3c95e47f8ccce949bba2c1101a8b85e6", + "revisionTime": "2017-02-18T08:16:04Z" + }, + { + "checksumSHA1": "+azPXaZpPF14YHRghNAer13ThQU=", + "path": "github.com/klauspost/compress/zlib", + "revision": "14c9a76e3c95e47f8ccce949bba2c1101a8b85e6", + "revisionTime": "2017-02-18T08:16:04Z" + }, + { + "checksumSHA1": "iKPMvbAueGfdyHcWCgzwKzm8WVo=", + "path": "github.com/klauspost/cpuid", + "revision": "09cded8978dc9e80714c4d85b0322337b0a1e5e0", + "revisionTime": "2016-03-02T07:53:16Z" + }, + { + "checksumSHA1": "BM6ZlNJmtKy3GBoWwg2X55gnZ4A=", + "path": "github.com/klauspost/crc32", + "revision": "1bab8b35b6bb565f92cbc97939610af9369f942a", + "revisionTime": "2017-02-10T14:05:23Z" + }, + { + "checksumSHA1": "+mB8aEvEg2wl3PoWZjAVfhGxtJA=", + "path": "github.com/qiangxue/fasthttp-routing", + "revision": "6ccdc2a18d8712f842de50ddcb8c9a3d025ddc71", + "revisionTime": "2016-02-25T05:06:29Z" + }, + { + "checksumSHA1": "LTOa3BADhwvT0wFCknPueQALm8I=", + "path": "github.com/valyala/bytebufferpool", + "revision": "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7", + "revisionTime": "2016-08-17T18:16:52Z" + }, + { + "checksumSHA1": "bLa60Y/jg1DAFoiUWxfJJ4fh4F4=", + "path": "github.com/valyala/fasthttp", + "revision": "fc109d6887b5edb43510d924d14d735f3975fb51", + "revisionTime": "2017-02-22T16:45:09Z" + }, + { + "checksumSHA1": "nMWLZCTKLciURGG8o/KeEPUExkY=", + "path": "github.com/valyala/fasthttp/fasthttputil", + "revision": "fc109d6887b5edb43510d924d14d735f3975fb51", + "revisionTime": "2017-02-22T16:45:09Z" + }, + { + "checksumSHA1": "YXXy4b1yOQx/iL3Icv6svTmcGss=", + "path": "github.com/valyala/fasthttp/stackless", + "revision": "fc109d6887b5edb43510d924d14d735f3975fb51", + "revisionTime": "2017-02-22T16:45:09Z" + }, + { + "checksumSHA1": "SeYI7DRWrd0Ku+CLavuwIz3EEmQ=", + "path": "gopkg.in/alecthomas/kingpin.v2", + "revision": "e9044be3ab2a8e11d4e1f418d12f0790d57e8d70", + "revisionTime": "2016-08-29T10:30:05Z" + } + ], + "rootPath": "github.com/jimeh/casecmp" +}