Mirror of https://github.com/jimeh/casecmp.git, synced 2026-02-19 10:26:40 +00:00
Compare commits
47 Commits
| SHA1 |
|---|
| 732c588dbc |
| 1816e93170 |
| 29902b2e8e |
| 36e0863cc3 |
| 5471ff95a7 |
| 4c89c92f75 |
| d19eed020c |
| aef3e832f5 |
| 3bae8acc62 |
| eaf738c49f |
| e83e324867 |
| a6a959d5cf |
| da12cbabde |
| 58c0fea692 |
| ee6700367f |
| 93589c3261 |
| 874fcfdfe5 |
| 48dd4d7e7d |
| 645ba113b4 |
| 232ba27c86 |
| 75542c3748 |
| 63ed2d9b30 |
| 66de20267f |
| 6ac719e953 |
| 2e02f76886 |
| 15fb686368 |
| c16b23b685 |
| 47ef4267fa |
| 0a09a1eaf0 |
| f882e14b51 |
| a4dcfa68d5 |
| 1c8b1a4d9d |
| 3e321d8ef5 |
| c614bd9642 |
| cf03ebb20b |
| a61b49f860 |
| e4a5beb70d |
| c4d0ff8c39 |
| d3d8881bbf |
| bb698fcad1 |
| 57a85b35e7 |
| b4adc35cac |
| 2937ad2b9a |
| 112c68bb91 |
| faecd3e962 |
| 2ed2b5c349 |
| 3265dc9f1e |
3 .dockerignore Normal file

@@ -0,0 +1,3 @@
bin/*
dist/*
releases/*
48 .github/workflows/ci.yml vendored Normal file

@@ -0,0 +1,48 @@
---
name: CI
on: [push]

jobs:
  build:
    name: build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: 1.16
      - name: Install dependencies
        run: go mod download
      - name: Build binary
        run: make
      - name: Run and make request
        run: |
          ./bin/casecmp --port=8080 &
          curl --silent --retry 10 --retry-delay 1 --retry-connrefused \
            http://localhost:8080/

  release:
    name: Release
    runs-on: ubuntu-latest
    needs: [build]
    if: startsWith(github.ref, 'refs/tags/v')
    steps:
      - uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Docker Login
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - uses: actions/setup-go@v2
        with:
          go-version: 1.16
      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v2
        with:
          version: latest
          args: release --rm-dist
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
3 .gitignore vendored

@@ -1 +1,4 @@
bin/*
dist/*
releases/*
dist/
109 .goreleaser.yml Normal file

@@ -0,0 +1,109 @@
before:
  hooks:
    - go mod tidy
builds:
  - env:
      - CGO_ENABLED=0
    mod_timestamp: "{{ .CommitTimestamp }}"
    flags:
      - -trimpath
    ldflags:
      - "-s -w -X main.version={{ .Version }} -X main.commit={{ .Commit }}"
    goos:
      - "darwin"
      - "freebsd"
      - "linux"
      - "windows"
    goarch:
      - "amd64"
      - "386"
      - "arm"
      - "arm64"
    goarm:
      - "6"
      - "7"

archives:
  - format: tar.gz
    name_template: |-
      {{ .ProjectName }}-{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}
    wrap_in_directory: true
    format_overrides:
      - goos: windows
        format: zip

checksum:
  name_template: "checksums.txt"
snapshot:
  name_template: "{{ .Tag }}-next"
changelog:
  sort: asc
  filters:
    exclude:
      - "^docs:"
      - "^test:"

dockers:
  - image_templates:
      - "ghcr.io/jimeh/casecmp:{{ .Version }}-amd64"
      - "ghcr.io/jimeh/casecmp:latest-amd64"
    dockerfile: Dockerfile
    use: buildx
    goos: linux
    goarch: amd64
    build_flag_templates:
      - "--platform=linux/amd64"
  - image_templates:
      - "ghcr.io/jimeh/casecmp:{{ .Version }}-386"
      - "ghcr.io/jimeh/casecmp:latest-386"
    dockerfile: Dockerfile
    use: buildx
    goos: linux
    goarch: "386"
    build_flag_templates:
      - "--platform=linux/386"
  - image_templates:
      - "ghcr.io/jimeh/casecmp:{{ .Version }}-arm64"
      - "ghcr.io/jimeh/casecmp:latest-arm64"
    use: buildx
    goos: linux
    goarch: arm64
    dockerfile: Dockerfile
    build_flag_templates:
      - "--platform=linux/arm64"
  - image_templates:
      - "ghcr.io/jimeh/casecmp:{{ .Version }}-armv6"
      - "ghcr.io/jimeh/casecmp:latest-armv6"
    use: buildx
    goos: linux
    goarch: arm
    goarm: "6"
    dockerfile: Dockerfile
    build_flag_templates:
      - "--platform=linux/arm/v6"
  - image_templates:
      - "ghcr.io/jimeh/casecmp:{{ .Version }}-armv7"
      - "ghcr.io/jimeh/casecmp:latest-armv7"
    use: buildx
    goos: linux
    goarch: arm
    goarm: "7"
    dockerfile: Dockerfile
    build_flag_templates:
      - "--platform=linux/arm/v7"

docker_manifests:
  - name_template: ghcr.io/jimeh/casecmp:{{ .Version }}
    image_templates:
      - ghcr.io/jimeh/casecmp:{{ .Version }}-amd64
      - ghcr.io/jimeh/casecmp:{{ .Version }}-386
      - ghcr.io/jimeh/casecmp:{{ .Version }}-arm64
      - ghcr.io/jimeh/casecmp:{{ .Version }}-armv6
      - ghcr.io/jimeh/casecmp:{{ .Version }}-armv7
  - name_template: ghcr.io/jimeh/casecmp:latest
    image_templates:
      - ghcr.io/jimeh/casecmp:latest-amd64
      - ghcr.io/jimeh/casecmp:latest-386
      - ghcr.io/jimeh/casecmp:latest-arm64
      - ghcr.io/jimeh/casecmp:latest-armv6
      - ghcr.io/jimeh/casecmp:latest-armv7
10 CHANGELOG.md Normal file

@@ -0,0 +1,10 @@
# Changelog

All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.

## [1.3.0](https://github.com/jimeh/casecmp/compare/v1.2.3...v1.3.0) (2022-02-13)


### Features

* **release:** setup for publishing docker images to ghcr ([1816e93](https://github.com/jimeh/casecmp/commit/1816e93170bb725f8da073b91070a981dd039fad))
21 Dockerfile

@@ -1,17 +1,6 @@
FROM golang:alpine

ADD . /go/src/github.com/jimeh/casecmp

RUN go install github.com/jimeh/casecmp

FROM scratch
COPY ./casecmp /
ENV PORT 8080
EXPOSE 8080
CMD ["/go/bin/casecmp", "--port", "8080"]

# FROM scratch
# ADD bin/casecmp_linux_amd64 /casecmp
# EXPOSE 8080
# VOLUME /data
# WORKDIR /
# CMD ["/casecmp", "--port", "8080"]
WORKDIR /
ENTRYPOINT ["/casecmp"]
13 LICENSE Normal file

@@ -0,0 +1,13 @@
        DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
                Version 2, December 2004

Copyright (C) 2018 Jim Myhrberg

Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.

        DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
  TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

 0. You just DO WHAT THE FUCK YOU WANT TO.
76 Makefile

@@ -1,70 +1,28 @@
DEV_DEPS = github.com/kardianos/govendor \
    github.com/mitchellh/gox

BINNAME = casecmp
BINARY = bin/${BINNAME}
DOCKERREPO = jimeh/casecmp
BINDIR = $(shell dirname ${BINARY})
SOURCES = $(shell find . -name '*.go' -o -name 'VERSION')
VERSION = $(shell cat VERSION)
OSARCH = "darwin/386 darwin/amd64 linux/386 linux/amd64 linux/arm"
RELEASEDIR = releases
NAME = casecmp
BINARY = bin/${NAME}
VERSION ?= $(shell cat VERSION)
SOURCES = $(shell find . -name '*.go' -o -name 'Makefile')

$(BINARY): $(SOURCES)
    go build -o ${BINARY} -ldflags "-X main.Version=${VERSION}"
    CGO_ENABLED=0 go build -a -o ${BINARY} -ldflags "\
        -s -w \
        -X main.version=${VERSION} \
        -X main.commit=$(shell git show --format="%h" --no-patch) \
        -X main.date=$(shell date +%Y-%m-%dT%T%z)"

.PHONY: build
build: $(BINARY)

.PHONY: clean
clean:
    if [ -f ${BINARY} ]; then rm ${BINARY}; fi; \
    if [ -d ${BINDIR} ]; then rmdir ${BINDIR}; fi

.PHONY: run
run: $(BINARY)
    $(BINARY)

.PHONY: install
install: dev-deps
    @govendor install +local +program
.PHONY: clean
clean:
    $(eval BIN_DIR := $(shell dirname ${BINARY}))
    if [ -f ${BINARY} ]; then rm ${BINARY}; fi
    if [ -d ${BIN_DIR} ]; then rmdir ${BIN_DIR}; fi

.PHONY: vendor-sync
vendor-sync: dev-deps
    @govendor sync

.PHONY: vendor-fetch
vendor-fetch: dev-deps
    @govendor fetch +external +missing

.PHONY: vendor-install
vendor-install: dev-deps
    @govendor install +vendor

.PHONY: dev-deps
dev-deps:
    @$(foreach DEP,$(DEV_DEPS),go get $(DEP);)

.PHONY: update-dev-deps
update-dev-deps:
    @$(foreach DEP,$(DEV_DEPS),go get -u $(DEP);)

.PHONY: release-build
release-build:
    gox -output "${RELEASEDIR}/${BINNAME}_${VERSION}_{{.OS}}_{{.Arch}}" \
        -osarch=${OSARCH} \
        -ldflags "-X main.Version=${VERSION}"

.SILENT: release
.PHONY: release
release: release-build
    $(eval BINS := $(shell cd ${RELEASEDIR} && find . \
        -name "${BINNAME}_${VERSION}_*" -not -name "*.tar.gz"))
    cd $(RELEASEDIR); \
    $(foreach BIN,$(BINS),tar -cvzf $(BIN).tar.gz $(BIN) && rm $(BIN);)

.PHONY: build-docker
build-docker: clean
    govendor sync \
    && docker build -t "${DOCKERREPO}:latest" . \
    && docker tag "${DOCKERREPO}:latest" "${DOCKERREPO}:${VERSION}"
.PHONY: docker
docker:
    docker build -t "$(shell whoami)/$(NAME)" .
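The new build recipe stamps version metadata into the binary with `-X` linker flags instead of reading a VERSION file at runtime. A minimal sketch (not a file in this repo) of the receiving side, mirroring the package-level variables main.go declares: `-X` can only overwrite package-level string variables, so a plain `go build` without the flags keeps the defaults.

```go
// version.go (hypothetical, for illustration): the linker overwrites
// these strings when built via the Makefile or GoReleaser, e.g.
//   go build -ldflags "-X main.version=1.3.0 -X main.commit=<sha>"
package main

import "fmt"

var (
	version = "dev"     // overridden by -X main.version=...
	commit  = "unknown" // overridden by -X main.commit=...
	date    = "unknown" // overridden by -X main.date=...
)

func main() {
	fmt.Printf("casecmp %s (%s, built %s)\n", version, commit, date)
}
```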
README.md

@@ -9,7 +9,7 @@ Case-insensitive string comparison, as an API. Because ¯\_(ツ)_/¯
    DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
            Version 2, December 2004

    Copyright (C) 2017 Jim Myhrberg
    Copyright (C) 2018 Jim Myhrberg

    Everyone is permitted to copy and distribute verbatim or modified
    copies of this license document, and changing it is allowed as long
||||
10
go.mod
Normal file
10
go.mod
Normal file
@@ -0,0 +1,10 @@
|
||||
module github.com/jimeh/casecmp
|
||||
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc // indirect
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf // indirect
|
||||
github.com/stretchr/testify v1.7.0 // indirect
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6
|
||||
)
|
||||
16 go.sum Normal file

@@ -0,0 +1,16 @@
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
117 main.go

@@ -1,80 +1,119 @@
package main

import (
    "bytes"
    "fmt"
    "log"
    "net/http"
    "os"
    "strings"

    "github.com/qiangxue/fasthttp-routing"
    "github.com/valyala/fasthttp"
    "gopkg.in/alecthomas/kingpin.v2"
)

// Version gets populated with version at build-time.
var Version string
var defaultPort = "8080"

var (
    port = kingpin.Flag("port", "Port to listen to.").Short('p').
        Default(defaultPort).String()
    bind = kingpin.Flag("bind", "Bind address.").Short('b').
        Default("0.0.0.0").String()
    version = kingpin.Flag("version", "Print version info.").
        Short('v').Bool()
    name        = "casecmp"
    version     = "dev"
    commit      = "unknown"
    date        = "unknown"
    defaultPort = "8080"
)

func indexHandler(c *routing.Context) error {
    c.Write([]byte(
        "Case-insensitive string comparison, as an API. Because ¯\\_(ツ)_/¯\n" +
            "\n" +
            "Example:\n" +
            "curl -X POST -F \"a=Foo Bar\" -F \"b=FOO BAR\" " +
            "http://" + string(c.Host()) + "/",
    ))
    return nil
// Argument parsing setup.
var (
    portFlag = kingpin.Flag("port", "Port to listen to.").Short('p').
        Default("").String()
    bindFlag = kingpin.Flag("bind", "Bind address.").Short('b').
        Default("0.0.0.0").String()
    forceHTTPSFlag = kingpin.Flag(
        "force-https", "Use https:// in example curl commands",
    ).Bool()
    versionFlag = kingpin.Flag("version", "Print version info.").
        Short('v').Bool()
)

func indexHandler(w http.ResponseWriter, r *http.Request) {
    scheme := "http"
    if r.TLS != nil || *forceHTTPSFlag {
        scheme = "https"
    }

    _, err := fmt.Fprintf(w, `%s %s

Case-insensitive string comparison, as an API. Because ¯\_(ツ)_/¯

Example usage:
curl -X POST -F "a=Foo Bar" -F "b=FOO BAR" %s://%s/
curl -X POST "%s://%s/?a=Foo+Bar&b=FOO+BAR"`,
        name, version, scheme, r.Host, scheme, r.Host)
    if err != nil {
        log.Fatal(err)
    }
}

func casecmpHandler(c *routing.Context) error {
    a := c.FormValue("a")
    b := c.FormValue("b")

func casecmpHandler(w http.ResponseWriter, r *http.Request) {
    a := r.FormValue("a")
    b := r.FormValue("b")
    resp := "0"

    if strings.EqualFold(string(a), string(b)) {
        resp = "1"
    }
    _, err := fmt.Fprintf(w, resp)
    if err != nil {
        log.Fatal(err)
    }
}

    c.Write([]byte(resp))
    return nil
func rootHandler(w http.ResponseWriter, r *http.Request) {
    if r.URL.Path != "/" {
        http.NotFound(w, r)
        return
    }

    if r.Method == "GET" {
        indexHandler(w, r)
    } else {
        casecmpHandler(w, r)
    }
}

func printVersion() {
    fmt.Println("casecmp " + Version)
    var buffer bytes.Buffer
    buffer.WriteString(fmt.Sprintf("%s %s", name, version))

    if commit != "unknown" {
        buffer.WriteString(fmt.Sprintf(" (%s)", commit))
    }

    fmt.Println(buffer.String())
}

func startServer() {
    r := routing.New()
    r.Get("/", indexHandler)
    r.Post("/", casecmpHandler)
    http.HandleFunc("/", rootHandler)

    server := fasthttp.Server{Handler: r.HandleRequest}

    if *port == defaultPort {
    if *portFlag == "" {
        envPort := os.Getenv("PORT")
        if envPort != "" {
            *port = envPort
            *portFlag = envPort
        } else {
            *portFlag = defaultPort
        }
    }

    address := *bind + ":" + *port
    fmt.Println("Listening on " + address)
    log.Fatal(server.ListenAndServe(address))
    if !*forceHTTPSFlag && os.Getenv("FORCE_HTTPS") != "" {
        *forceHTTPSFlag = true
    }

    address := *bindFlag + ":" + *portFlag
    fmt.Printf("Listening on %s\n", address)
    log.Fatal(http.ListenAndServe(address, nil))
}

func main() {
    kingpin.Parse()

    if *version {
    if *versionFlag {
        printVersion()
    } else {
        startServer()
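The rewrite drops fasthttp for the standard library: rootHandler sends GET / to the usage text and any other method to the comparison, which returns "1" when strings.EqualFold matches and "0" otherwise. A minimal test sketch (not part of this diff) exercising the new handler through net/http/httptest, mirroring the curl example from the index page:

```go
package main

import (
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strings"
	"testing"
)

// TestCasecmpHandler POSTs form fields "a" and "b" that differ only in
// case and expects casecmpHandler (via rootHandler) to answer "1".
func TestCasecmpHandler(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(rootHandler))
	defer srv.Close()

	form := url.Values{"a": {"Foo Bar"}, "b": {"FOO BAR"}}
	resp, err := http.Post(srv.URL+"/", "application/x-www-form-urlencoded",
		strings.NewReader(form.Encode()))
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body) // io.ReadAll is available as of Go 1.16
	if err != nil {
		t.Fatal(err)
	}
	if string(body) != "1" {
		t.Fatalf(`want "1", got %q`, body)
	}
}
```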
27 vendor/github.com/alecthomas/template/LICENSE generated vendored

@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 vendor/github.com/alecthomas/template/README.md generated vendored

@@ -1,25 +0,0 @@
# Go's `text/template` package with newline elision

This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.

eg.

```
{{if true}}\
hello
{{end}}\
```

Will result in:

```
hello\n
```

Rather than:

```
\n
hello\n
\n
```
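A small runnable sketch (not part of the vendored file) of the elision feature that README describes, assuming the fork's import path github.com/alecthomas/template and its text/template-compatible API:

```go
package main

import (
	"os"

	"github.com/alecthomas/template"
)

func main() {
	// The backslash right after {{if true}} and {{end}} swallows the
	// following newline, so only "hello\n" reaches the output.
	const src = `{{if true}}\
hello
{{end}}\
`
	t := template.Must(template.New("demo").Parse(src))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```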
406 vendor/github.com/alecthomas/template/doc.go generated vendored

@@ -1,406 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package template implements data-driven templates for generating textual output.

To generate HTML output, see package html/template, which has the same interface
as this package but automatically secures HTML output against certain attacks.

Templates are executed by applying them to a data structure. Annotations in the
template refer to elements of the data structure (typically a field of a struct
or a key in a map) to control execution and derive values to be displayed.
Execution of the template walks the structure and sets the cursor, represented
by a period '.' and called "dot", to the value at the current location in the
structure as execution proceeds.

The input text for a template is UTF-8-encoded text in any format.
"Actions"--data evaluations or control structures--are delimited by
"{{" and "}}"; all text outside actions is copied to the output unchanged.
Actions may not span newlines, although comments can.

Once parsed, a template may be executed safely in parallel.

Here is a trivial example that prints "17 items are made of wool".

    type Inventory struct {
        Material string
        Count    uint
    }
    sweaters := Inventory{"wool", 17}
    tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
    if err != nil { panic(err) }
    err = tmpl.Execute(os.Stdout, sweaters)
    if err != nil { panic(err) }

More intricate examples appear below.

Actions

Here is the list of actions. "Arguments" and "pipelines" are evaluations of
data, defined in detail below.

*/
//  {{/* a comment */}}
//      A comment; discarded. May contain newlines.
//      Comments do not nest and must start and end at the
//      delimiters, as shown here.
/*

    {{pipeline}}
        The default textual representation of the value of the pipeline
        is copied to the output.

    {{if pipeline}} T1 {{end}}
        If the value of the pipeline is empty, no output is generated;
        otherwise, T1 is executed. The empty values are false, 0, any
        nil pointer or interface value, and any array, slice, map, or
        string of length zero.
        Dot is unaffected.

    {{if pipeline}} T1 {{else}} T0 {{end}}
        If the value of the pipeline is empty, T0 is executed;
        otherwise, T1 is executed. Dot is unaffected.

    {{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
        To simplify the appearance of if-else chains, the else action
        of an if may include another if directly; the effect is exactly
        the same as writing
            {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}

    {{range pipeline}} T1 {{end}}
        The value of the pipeline must be an array, slice, map, or channel.
        If the value of the pipeline has length zero, nothing is output;
        otherwise, dot is set to the successive elements of the array,
        slice, or map and T1 is executed. If the value is a map and the
        keys are of basic type with a defined order ("comparable"), the
        elements will be visited in sorted key order.

    {{range pipeline}} T1 {{else}} T0 {{end}}
        The value of the pipeline must be an array, slice, map, or channel.
        If the value of the pipeline has length zero, dot is unaffected and
        T0 is executed; otherwise, dot is set to the successive elements
        of the array, slice, or map and T1 is executed.

    {{template "name"}}
        The template with the specified name is executed with nil data.

    {{template "name" pipeline}}
        The template with the specified name is executed with dot set
        to the value of the pipeline.

    {{with pipeline}} T1 {{end}}
        If the value of the pipeline is empty, no output is generated;
        otherwise, dot is set to the value of the pipeline and T1 is
        executed.

    {{with pipeline}} T1 {{else}} T0 {{end}}
        If the value of the pipeline is empty, dot is unaffected and T0
        is executed; otherwise, dot is set to the value of the pipeline
        and T1 is executed.

Arguments

An argument is a simple value, denoted by one of the following.

    - A boolean, string, character, integer, floating-point, imaginary
      or complex constant in Go syntax. These behave like Go's untyped
      constants, although raw strings may not span newlines.
    - The keyword nil, representing an untyped Go nil.
    - The character '.' (period):
        .
      The result is the value of dot.
    - A variable name, which is a (possibly empty) alphanumeric string
      preceded by a dollar sign, such as
        $piOver2
      or
        $
      The result is the value of the variable.
      Variables are described below.
    - The name of a field of the data, which must be a struct, preceded
      by a period, such as
        .Field
      The result is the value of the field. Field invocations may be
      chained:
        .Field1.Field2
      Fields can also be evaluated on variables, including chaining:
        $x.Field1.Field2
    - The name of a key of the data, which must be a map, preceded
      by a period, such as
        .Key
      The result is the map element value indexed by the key.
      Key invocations may be chained and combined with fields to any
      depth:
        .Field1.Key1.Field2.Key2
      Although the key must be an alphanumeric identifier, unlike with
      field names they do not need to start with an upper case letter.
      Keys can also be evaluated on variables, including chaining:
        $x.key1.key2
    - The name of a niladic method of the data, preceded by a period,
      such as
        .Method
      The result is the value of invoking the method with dot as the
      receiver, dot.Method(). Such a method must have one return value (of
      any type) or two return values, the second of which is an error.
      If it has two and the returned error is non-nil, execution terminates
      and an error is returned to the caller as the value of Execute.
      Method invocations may be chained and combined with fields and keys
      to any depth:
        .Field1.Key1.Method1.Field2.Key2.Method2
      Methods can also be evaluated on variables, including chaining:
        $x.Method1.Field
    - The name of a niladic function, such as
        fun
      The result is the value of invoking the function, fun(). The return
      types and values behave as in methods. Functions and function
      names are described below.
    - A parenthesized instance of one the above, for grouping. The result
      may be accessed by a field or map key invocation.
        print (.F1 arg1) (.F2 arg2)
        (.StructValuedMethod "arg").Field

Arguments may evaluate to any type; if they are pointers the implementation
automatically indirects to the base type when required.
If an evaluation yields a function value, such as a function-valued
field of a struct, the function is not invoked automatically, but it
can be used as a truth value for an if action and the like. To invoke
it, use the call function, defined below.

A pipeline is a possibly chained sequence of "commands". A command is a simple
value (argument) or a function or method call, possibly with multiple arguments:

    Argument
        The result is the value of evaluating the argument.
    .Method [Argument...]
        The method can be alone or the last element of a chain but,
        unlike methods in the middle of a chain, it can take arguments.
        The result is the value of calling the method with the
        arguments:
            dot.Method(Argument1, etc.)
    functionName [Argument...]
        The result is the value of calling the function associated
        with the name:
            function(Argument1, etc.)
        Functions and function names are described below.

Pipelines

A pipeline may be "chained" by separating a sequence of commands with pipeline
characters '|'. In a chained pipeline, the result of the each command is
passed as the last argument of the following command. The output of the final
command in the pipeline is the value of the pipeline.

The output of a command will be either one value or two values, the second of
which has type error. If that second value is present and evaluates to
non-nil, execution terminates and the error is returned to the caller of
Execute.

Variables

A pipeline inside an action may initialize a variable to capture the result.
The initialization has syntax

    $variable := pipeline

where $variable is the name of the variable. An action that declares a
variable produces no output.

If a "range" action initializes a variable, the variable is set to the
successive elements of the iteration. Also, a "range" may declare two
variables, separated by a comma:

    range $index, $element := pipeline

in which case $index and $element are set to the successive values of the
array/slice index or map key and element, respectively. Note that if there is
only one variable, it is assigned the element; this is opposite to the
convention in Go range clauses.

A variable's scope extends to the "end" action of the control structure ("if",
"with", or "range") in which it is declared, or to the end of the template if
there is no such control structure. A template invocation does not inherit
variables from the point of its invocation.

When execution begins, $ is set to the data argument passed to Execute, that is,
to the starting value of dot.

Examples

Here are some example one-line templates demonstrating pipelines and variables.
All produce the quoted word "output":

    {{"\"output\""}}
        A string constant.
    {{`"output"`}}
        A raw string constant.
    {{printf "%q" "output"}}
        A function call.
    {{"output" | printf "%q"}}
        A function call whose final argument comes from the previous
        command.
    {{printf "%q" (print "out" "put")}}
        A parenthesized argument.
    {{"put" | printf "%s%s" "out" | printf "%q"}}
        A more elaborate call.
    {{"output" | printf "%s" | printf "%q"}}
        A longer chain.
    {{with "output"}}{{printf "%q" .}}{{end}}
        A with action using dot.
    {{with $x := "output" | printf "%q"}}{{$x}}{{end}}
        A with action that creates and uses a variable.
    {{with $x := "output"}}{{printf "%q" $x}}{{end}}
        A with action that uses the variable in another action.
    {{with $x := "output"}}{{$x | printf "%q"}}{{end}}
        The same, but pipelined.

Functions

During execution functions are found in two function maps: first in the
template, then in the global function map. By default, no functions are defined
in the template but the Funcs method can be used to add them.

Predefined global functions are named as follows.

    and
        Returns the boolean AND of its arguments by returning the
        first empty argument or the last argument, that is,
        "and x y" behaves as "if x then y else x". All the
        arguments are evaluated.
    call
        Returns the result of calling the first argument, which
        must be a function, with the remaining arguments as parameters.
        Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
        Y is a func-valued field, map entry, or the like.
        The first argument must be the result of an evaluation
        that yields a value of function type (as distinct from
        a predefined function such as print). The function must
        return either one or two result values, the second of which
        is of type error. If the arguments don't match the function
        or the returned error value is non-nil, execution stops.
    html
        Returns the escaped HTML equivalent of the textual
        representation of its arguments.
    index
        Returns the result of indexing its first argument by the
        following arguments. Thus "index x 1 2 3" is, in Go syntax,
        x[1][2][3]. Each indexed item must be a map, slice, or array.
    js
        Returns the escaped JavaScript equivalent of the textual
        representation of its arguments.
    len
        Returns the integer length of its argument.
    not
        Returns the boolean negation of its single argument.
    or
        Returns the boolean OR of its arguments by returning the
        first non-empty argument or the last argument, that is,
        "or x y" behaves as "if x then x else y". All the
        arguments are evaluated.
    print
        An alias for fmt.Sprint
    printf
        An alias for fmt.Sprintf
    println
        An alias for fmt.Sprintln
    urlquery
        Returns the escaped value of the textual representation of
        its arguments in a form suitable for embedding in a URL query.

The boolean functions take any zero value to be false and a non-zero
value to be true.

There is also a set of binary comparison operators defined as
functions:

    eq
        Returns the boolean truth of arg1 == arg2
    ne
        Returns the boolean truth of arg1 != arg2
    lt
        Returns the boolean truth of arg1 < arg2
    le
        Returns the boolean truth of arg1 <= arg2
    gt
        Returns the boolean truth of arg1 > arg2
    ge
        Returns the boolean truth of arg1 >= arg2

For simpler multi-way equality tests, eq (only) accepts two or more
arguments and compares the second and subsequent to the first,
returning in effect

    arg1==arg2 || arg1==arg3 || arg1==arg4 ...

(Unlike with || in Go, however, eq is a function call and all the
arguments will be evaluated.)

The comparison functions work on basic types only (or named basic
types, such as "type Celsius float32"). They implement the Go rules
for comparison of values, except that size and exact type are
ignored, so any integer value, signed or unsigned, may be compared
with any other integer value. (The arithmetic value is compared,
not the bit pattern, so all negative integers are less than all
unsigned integers.) However, as usual, one may not compare an int
with a float32 and so on.

Associated templates

Each template is named by a string specified when it is created. Also, each
template is associated with zero or more other templates that it may invoke by
name; such associations are transitive and form a name space of templates.

A template may use a template invocation to instantiate another associated
template; see the explanation of the "template" action above. The name must be
that of a template associated with the template that contains the invocation.

Nested template definitions

When parsing a template, another template may be defined and associated with the
template being parsed. Template definitions must appear at the top level of the
template, much like global variables in a Go program.

The syntax of such definitions is to surround each template declaration with a
"define" and "end" action.

The define action names the template being created by providing a string
constant. Here is a simple example:

    `{{define "T1"}}ONE{{end}}
    {{define "T2"}}TWO{{end}}
    {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
    {{template "T3"}}`

This defines two templates, T1 and T2, and a third T3 that invokes the other two
when it is executed. Finally it invokes T3. If executed this template will
produce the text

    ONE TWO

By construction, a template may reside in only one association. If it's
necessary to have a template addressable from multiple associations, the
template definition must be parsed multiple times to create distinct *Template
values, or must be copied with the Clone or AddParseTree method.

Parse may be called multiple times to assemble the various associated templates;
see the ParseFiles and ParseGlob functions and methods for simple ways to parse
related templates stored in files.

A template may be executed directly or through ExecuteTemplate, which executes
an associated template identified by name. To invoke our example above, we
might write,

    err := tmpl.Execute(os.Stdout, "no data needed")
    if err != nil {
        log.Fatalf("execution failed: %s", err)
    }

or to invoke a particular template explicitly by name,

    err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
    if err != nil {
        log.Fatalf("execution failed: %s", err)
    }

*/
package template
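The deleted package documentation above covers actions, pipelines, and variables. A small self-contained sketch of those pieces working together, written against the standard library's text/template (whose API this fork mirrors):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// A two-variable range ($i, $m) feeding a printf pipeline, as
	// described in the "Variables" and "Pipelines" sections above.
	const src = "{{range $i, $m := .}}{{$i}}: {{$m | printf \"%q\"}}\n{{end}}"
	t := template.Must(template.New("demo").Parse(src))
	if err := t.Execute(os.Stdout, []string{"wool", "cotton"}); err != nil {
		panic(err)
	}
	// Output:
	// 0: "wool"
	// 1: "cotton"
}
```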
845
vendor/github.com/alecthomas/template/exec.go
generated
vendored
845
vendor/github.com/alecthomas/template/exec.go
generated
vendored
@@ -1,845 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// state represents the state of an execution. It's not part of the
|
||||
// template so that multiple executions of the same template
|
||||
// can execute in parallel.
|
||||
type state struct {
|
||||
tmpl *Template
|
||||
wr io.Writer
|
||||
node parse.Node // current node, for errors
|
||||
vars []variable // push-down stack of variable values.
|
||||
}
|
||||
|
||||
// variable holds the dynamic value of a variable such as $, $x etc.
|
||||
type variable struct {
|
||||
name string
|
||||
value reflect.Value
|
||||
}
|
||||
|
||||
// push pushes a new variable on the stack.
|
||||
func (s *state) push(name string, value reflect.Value) {
|
||||
s.vars = append(s.vars, variable{name, value})
|
||||
}
|
||||
|
||||
// mark returns the length of the variable stack.
|
||||
func (s *state) mark() int {
|
||||
return len(s.vars)
|
||||
}
|
||||
|
||||
// pop pops the variable stack up to the mark.
|
||||
func (s *state) pop(mark int) {
|
||||
s.vars = s.vars[0:mark]
|
||||
}
|
||||
|
||||
// setVar overwrites the top-nth variable on the stack. Used by range iterations.
|
||||
func (s *state) setVar(n int, value reflect.Value) {
|
||||
s.vars[len(s.vars)-n].value = value
|
||||
}
|
||||
|
||||
// varValue returns the value of the named variable.
|
||||
func (s *state) varValue(name string) reflect.Value {
|
||||
for i := s.mark() - 1; i >= 0; i-- {
|
||||
if s.vars[i].name == name {
|
||||
return s.vars[i].value
|
||||
}
|
||||
}
|
||||
s.errorf("undefined variable: %s", name)
|
||||
return zero
|
||||
}
|
||||
|
||||
var zero reflect.Value
|
||||
|
||||
// at marks the state to be on node n, for error reporting.
|
||||
func (s *state) at(node parse.Node) {
|
||||
s.node = node
|
||||
}
|
||||
|
||||
// doublePercent returns the string with %'s replaced by %%, if necessary,
|
||||
// so it can be used safely inside a Printf format string.
|
||||
func doublePercent(str string) string {
|
||||
if strings.Contains(str, "%") {
|
||||
str = strings.Replace(str, "%", "%%", -1)
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (s *state) errorf(format string, args ...interface{}) {
|
||||
name := doublePercent(s.tmpl.Name())
|
||||
if s.node == nil {
|
||||
format = fmt.Sprintf("template: %s: %s", name, format)
|
||||
} else {
|
||||
location, context := s.tmpl.ErrorContext(s.node)
|
||||
format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
|
||||
}
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// errRecover is the handler that turns panics into returns from the top
|
||||
// level of Parse.
|
||||
func errRecover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
switch err := e.(type) {
|
||||
case runtime.Error:
|
||||
panic(e)
|
||||
case error:
|
||||
*errp = err
|
||||
default:
|
||||
panic(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ExecuteTemplate applies the template associated with t that has the given name
|
||||
// to the specified data object and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
|
||||
tmpl := t.tmpl[name]
|
||||
if tmpl == nil {
|
||||
return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
|
||||
}
|
||||
return tmpl.Execute(wr, data)
|
||||
}
|
||||
|
||||
// Execute applies a parsed template to the specified data object,
|
||||
// and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
|
||||
defer errRecover(&err)
|
||||
value := reflect.ValueOf(data)
|
||||
state := &state{
|
||||
tmpl: t,
|
||||
wr: wr,
|
||||
vars: []variable{{"$", value}},
|
||||
}
|
||||
t.init()
|
||||
if t.Tree == nil || t.Root == nil {
|
||||
var b bytes.Buffer
|
||||
for name, tmpl := range t.tmpl {
|
||||
if tmpl.Tree == nil || tmpl.Root == nil {
|
||||
continue
|
||||
}
|
||||
if b.Len() > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
fmt.Fprintf(&b, "%q", name)
|
||||
}
|
||||
var s string
|
||||
if b.Len() > 0 {
|
||||
s = "; defined templates are: " + b.String()
|
||||
}
|
||||
state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
|
||||
}
|
||||
state.walk(value, t.Root)
|
||||
return
|
||||
}
|
||||
|
||||
// Walk functions step through the major pieces of the template structure,
|
||||
// generating output as they go.
|
||||
func (s *state) walk(dot reflect.Value, node parse.Node) {
|
||||
s.at(node)
|
||||
switch node := node.(type) {
|
||||
case *parse.ActionNode:
|
||||
// Do not pop variables so they persist until next end.
|
||||
// Also, if the action declares variables, don't print the result.
|
||||
val := s.evalPipeline(dot, node.Pipe)
|
||||
if len(node.Pipe.Decl) == 0 {
|
||||
s.printValue(node, val)
|
||||
}
|
||||
case *parse.IfNode:
|
||||
s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
|
||||
case *parse.ListNode:
|
||||
for _, node := range node.Nodes {
|
||||
s.walk(dot, node)
|
||||
}
|
||||
case *parse.RangeNode:
|
||||
s.walkRange(dot, node)
|
||||
case *parse.TemplateNode:
|
||||
s.walkTemplate(dot, node)
|
||||
case *parse.TextNode:
|
||||
if _, err := s.wr.Write(node.Text); err != nil {
|
||||
s.errorf("%s", err)
|
||||
}
|
||||
case *parse.WithNode:
|
||||
s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
|
||||
default:
|
||||
s.errorf("unknown node: %s", node)
|
||||
}
|
||||
}
|
||||
|
||||
// walkIfOrWith walks an 'if' or 'with' node. The two control structures
|
||||
// are identical in behavior except that 'with' sets dot.
|
||||
func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
|
||||
defer s.pop(s.mark())
|
||||
val := s.evalPipeline(dot, pipe)
|
||||
truth, ok := isTrue(val)
|
||||
if !ok {
|
||||
s.errorf("if/with can't use %v", val)
|
||||
}
|
||||
if truth {
|
||||
if typ == parse.NodeWith {
|
||||
s.walk(val, list)
|
||||
} else {
|
||||
s.walk(dot, list)
|
||||
}
|
||||
} else if elseList != nil {
|
||||
s.walk(dot, elseList)
|
||||
}
|
||||
}
|
||||
|
||||
// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
|
||||
// and whether the value has a meaningful truth value.
|
||||
func isTrue(val reflect.Value) (truth, ok bool) {
|
||||
if !val.IsValid() {
|
||||
// Something like var x interface{}, never set. It's a form of nil.
|
||||
return false, true
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
truth = val.Len() > 0
|
||||
case reflect.Bool:
|
||||
truth = val.Bool()
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
truth = val.Complex() != 0
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
|
||||
truth = !val.IsNil()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
truth = val.Int() != 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
truth = val.Float() != 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
truth = val.Uint() != 0
|
||||
case reflect.Struct:
|
||||
truth = true // Struct values are always true.
|
||||
default:
|
||||
return
|
||||
}
|
||||
return truth, true
|
||||
}
|
||||
|
||||
func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
|
||||
s.at(r)
|
||||
defer s.pop(s.mark())
|
||||
val, _ := indirect(s.evalPipeline(dot, r.Pipe))
|
||||
// mark top of stack before any variables in the body are pushed.
|
||||
mark := s.mark()
|
||||
oneIteration := func(index, elem reflect.Value) {
|
||||
// Set top var (lexically the second if there are two) to the element.
|
||||
if len(r.Pipe.Decl) > 0 {
|
||||
s.setVar(1, elem)
|
||||
}
|
||||
// Set next var (lexically the first if there are two) to the index.
|
||||
if len(r.Pipe.Decl) > 1 {
|
||||
s.setVar(2, index)
|
||||
}
|
||||
s.walk(elem, r.List)
|
||||
s.pop(mark)
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for i := 0; i < val.Len(); i++ {
|
||||
oneIteration(reflect.ValueOf(i), val.Index(i))
|
||||
}
|
||||
return
|
||||
case reflect.Map:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for _, key := range sortKeys(val.MapKeys()) {
|
||||
oneIteration(key, val.MapIndex(key))
|
||||
}
|
||||
return
|
||||
case reflect.Chan:
|
||||
if val.IsNil() {
|
||||
break
|
||||
}
|
||||
i := 0
|
||||
for ; ; i++ {
|
||||
elem, ok := val.Recv()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
oneIteration(reflect.ValueOf(i), elem)
|
||||
}
|
||||
if i == 0 {
|
||||
break
|
||||
}
|
||||
return
|
||||
case reflect.Invalid:
|
||||
break // An invalid value is likely a nil map, etc. and acts like an empty map.
|
||||
default:
|
||||
s.errorf("range can't iterate over %v", val)
|
||||
}
|
||||
if r.ElseList != nil {
|
||||
s.walk(dot, r.ElseList)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
|
||||
s.at(t)
|
||||
tmpl := s.tmpl.tmpl[t.Name]
|
||||
if tmpl == nil {
|
||||
s.errorf("template %q not defined", t.Name)
|
||||
}
|
||||
// Variables declared by the pipeline persist.
|
||||
dot = s.evalPipeline(dot, t.Pipe)
|
||||
newState := *s
|
||||
newState.tmpl = tmpl
|
||||
// No dynamic scoping: template invocations inherit no variables.
|
||||
newState.vars = []variable{{"$", dot}}
|
||||
newState.walk(dot, tmpl.Root)
|
||||
}
|
||||
|
||||
// Eval functions evaluate pipelines, commands, and their elements and extract
|
||||
// values from the data structure by examining fields, calling methods, and so on.
|
||||
// The printing of those values happens only through walk functions.
|
||||
|
||||
// evalPipeline returns the value acquired by evaluating a pipeline. If the
|
||||
// pipeline has a variable declaration, the variable will be pushed on the
|
||||
// stack. Callers should therefore pop the stack after they are finished
|
||||
// executing commands depending on the pipeline value.
|
||||
func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
|
||||
if pipe == nil {
|
||||
return
|
||||
}
|
||||
s.at(pipe)
|
||||
for _, cmd := range pipe.Cmds {
|
||||
value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
|
||||
// If the object has type interface{}, dig down one level to the thing inside.
|
||||
if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
|
||||
value = reflect.ValueOf(value.Interface()) // lovely!
|
||||
}
|
||||
}
|
||||
for _, variable := range pipe.Decl {
|
||||
s.push(variable.Ident[0], value)
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
|
||||
if len(args) > 1 || final.IsValid() {
|
||||
s.errorf("can't give argument to non-function %s", args[0])
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
|
||||
firstWord := cmd.Args[0]
|
||||
switch n := firstWord.(type) {
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, cmd.Args, final)
|
||||
case *parse.ChainNode:
|
||||
return s.evalChainNode(dot, n, cmd.Args, final)
|
||||
case *parse.IdentifierNode:
|
||||
// Must be a function.
|
||||
return s.evalFunction(dot, n, cmd, cmd.Args, final)
|
||||
case *parse.PipeNode:
|
||||
// Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
|
||||
return s.evalPipeline(dot, n)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, cmd.Args, final)
|
||||
}
|
||||
s.at(firstWord)
|
||||
s.notAFunction(cmd.Args, final)
|
||||
switch word := firstWord.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(word.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.NilNode:
|
||||
s.errorf("nil is not a command")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(word)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(word.Text)
|
||||
}
|
||||
s.errorf("can't evaluate command %q", firstWord)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// idealConstant is called to return the value of a number in a context where
|
||||
// we don't know the type. In that case, the syntax of the number tells us
|
||||
// its type, and we use Go rules to resolve. Note there is no such thing as
|
||||
// a uint ideal constant in this situation - the value must be of int type.
|
||||
func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
|
||||
// These are ideal constants but we don't know the type
|
||||
// and we have no context. (If it was a method argument,
|
||||
// we'd know what we need.) The syntax guides us to some extent.
|
||||
s.at(constant)
|
||||
switch {
|
||||
case constant.IsComplex:
|
||||
return reflect.ValueOf(constant.Complex128) // incontrovertible.
|
||||
case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
|
||||
return reflect.ValueOf(constant.Float64)
|
||||
case constant.IsInt:
|
||||
n := int(constant.Int64)
|
||||
if int64(n) != constant.Int64 {
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return reflect.ValueOf(n)
|
||||
case constant.IsUint:
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return zero
|
||||
}
|
||||
|
||||
func isHexConstant(s string) bool {
|
||||
return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
|
||||
}
|
||||
|
||||
func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(field)
|
||||
return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(chain)
|
||||
// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
|
||||
pipe := s.evalArg(dot, nil, chain.Node)
|
||||
if len(chain.Field) == 0 {
|
||||
s.errorf("internal error: no fields in evalChainNode")
|
||||
}
|
||||
return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
}

func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
    // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
    s.at(variable)
    value := s.varValue(variable.Ident[0])
    if len(variable.Ident) == 1 {
        s.notAFunction(args, final)
        return value
    }
    return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
}

// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
// dot is the environment in which to evaluate arguments, while
// receiver is the value being walked along the chain.
func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
    n := len(ident)
    for i := 0; i < n-1; i++ {
        receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
    }
    // Now if it's a method, it gets the arguments.
    return s.evalField(dot, ident[n-1], node, args, final, receiver)
}

func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
    s.at(node)
    name := node.Ident
    function, ok := findFunction(name, s.tmpl)
    if !ok {
        s.errorf("%q is not a defined function", name)
    }
    return s.evalCall(dot, function, cmd, name, args, final)
}

// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
// The 'final' argument represents the return value from the preceding
// value of the pipeline, if any.
func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
    if !receiver.IsValid() {
        return zero
    }
    typ := receiver.Type()
    receiver, _ = indirect(receiver)
    // Unless it's an interface, need to get to a value of type *T to guarantee
    // we see all methods of T and *T.
    ptr := receiver
    if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
        ptr = ptr.Addr()
    }
    if method := ptr.MethodByName(fieldName); method.IsValid() {
        return s.evalCall(dot, method, node, fieldName, args, final)
    }
    hasArgs := len(args) > 1 || final.IsValid()
    // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
    receiver, isNil := indirect(receiver)
    if isNil {
        s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
    }
    switch receiver.Kind() {
    case reflect.Struct:
        tField, ok := receiver.Type().FieldByName(fieldName)
        if ok {
            field := receiver.FieldByIndex(tField.Index)
            if tField.PkgPath != "" { // field is unexported
                s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
            }
            // If it's a function, we must call it.
            if hasArgs {
                s.errorf("%s has arguments but cannot be invoked as function", fieldName)
            }
            return field
        }
        s.errorf("%s is not a field of struct type %s", fieldName, typ)
    case reflect.Map:
        // If it's a map, attempt to use the field name as a key.
        nameVal := reflect.ValueOf(fieldName)
        if nameVal.Type().AssignableTo(receiver.Type().Key()) {
            if hasArgs {
                s.errorf("%s is not a method but has arguments", fieldName)
            }
            return receiver.MapIndex(nameVal)
        }
    }
    s.errorf("can't evaluate field %s in type %s", fieldName, typ)
    panic("not reached")
}
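
For reference, a minimal sketch of the lookup order evalField implements (method via MethodByName first, then struct field, then map key), written against the standard library's text/template, whose API this vendored fork mirrors; the User type and template text are invented for illustration.

package main

import (
    "os"
    "text/template" // stand-in for the vendored fork; same lookup rules
)

type User struct {
    Name  string
    Extra map[string]string
}

// Upper wins over any field or map key of the same name, because
// evalField consults methods before anything else.
func (u User) Upper() string { return "MS. " + u.Name }

func main() {
    t := template.Must(template.New("demo").Parse(
        "{{.Name}} / {{.Upper}} / {{.Extra.city}}\n"))
    u := User{Name: "Ada", Extra: map[string]string{"city": "London"}}
    _ = t.Execute(os.Stdout, u) // Ada / MS. Ada / London
}
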
var (
    errorType       = reflect.TypeOf((*error)(nil)).Elem()
    fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
)

// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
// as the function itself.
func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
    if args != nil {
        args = args[1:] // Zeroth arg is function name/node; not passed to function.
    }
    typ := fun.Type()
    numIn := len(args)
    if final.IsValid() {
        numIn++
    }
    numFixed := len(args)
    if typ.IsVariadic() {
        numFixed = typ.NumIn() - 1 // last arg is the variadic one.
        if numIn < numFixed {
            s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
        }
    } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
        s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
    }
    if !goodFunc(typ) {
        // TODO: This could still be a confusing error; maybe goodFunc should provide info.
        s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
    }
    // Build the arg list.
    argv := make([]reflect.Value, numIn)
    // Args must be evaluated. Fixed args first.
    i := 0
    for ; i < numFixed && i < len(args); i++ {
        argv[i] = s.evalArg(dot, typ.In(i), args[i])
    }
    // Now the ... args.
    if typ.IsVariadic() {
        argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
        for ; i < len(args); i++ {
            argv[i] = s.evalArg(dot, argType, args[i])
        }
    }
    // Add final value if necessary.
    if final.IsValid() {
        t := typ.In(typ.NumIn() - 1)
        if typ.IsVariadic() {
            t = t.Elem()
        }
        argv[i] = s.validateType(final, t)
    }
    result := fun.Call(argv)
    // If we have an error that is not nil, stop execution and return that error to the caller.
    if len(result) == 2 && !result[1].IsNil() {
        s.at(node)
        s.errorf("error calling %s: %s", name, result[1].Interface().(error))
    }
    return result[0]
}
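
evalCall is what gives FuncMap functions their (value, error) contract and what makes a pipeline's previous value arrive as the final argument. A hedged sketch against stdlib text/template, which shares this contract; the repeat function is invented.

package main

import (
    "fmt"
    "os"
    "text/template" // stand-in for the vendored fork; same FuncMap contract
)

func main() {
    funcs := template.FuncMap{
        // Two results, the second an error: evalCall stops execution
        // and surfaces the error if it is non-nil.
        "repeat": func(n int, s string) (string, error) {
            if n < 0 {
                return "", fmt.Errorf("repeat: negative count %d", n)
            }
            out := ""
            for i := 0; i < n; i++ {
                out += s
            }
            return out, nil
        },
    }
    // In a pipeline, {{"ab" | repeat 3}} passes "ab" as the final argument.
    t := template.Must(template.New("x").Funcs(funcs).Parse(`{{"ab" | repeat 3}}`))
    _ = t.Execute(os.Stdout, nil) // ababab
}
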
// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
func canBeNil(typ reflect.Type) bool {
    switch typ.Kind() {
    case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
        return true
    }
    return false
}

// validateType guarantees that the value is valid and assignable to the type.
func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
    if !value.IsValid() {
        if typ == nil || canBeNil(typ) {
            // An untyped nil interface{}. Accept as a proper nil value.
            return reflect.Zero(typ)
        }
        s.errorf("invalid value; expected %s", typ)
    }
    if typ != nil && !value.Type().AssignableTo(typ) {
        if value.Kind() == reflect.Interface && !value.IsNil() {
            value = value.Elem()
            if value.Type().AssignableTo(typ) {
                return value
            }
            // fallthrough
        }
        // Does one dereference or indirection work? We could do more, as we
        // do with method receivers, but that gets messy and method receivers
        // are much more constrained, so it makes more sense there than here.
        // Besides, one is almost always all you need.
        switch {
        case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
            value = value.Elem()
            if !value.IsValid() {
                s.errorf("dereference of nil pointer of type %s", typ)
            }
        case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
            value = value.Addr()
        default:
            s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
        }
    }
    return value
}

func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
    s.at(n)
    switch arg := n.(type) {
    case *parse.DotNode:
        return s.validateType(dot, typ)
    case *parse.NilNode:
        if canBeNil(typ) {
            return reflect.Zero(typ)
        }
        s.errorf("cannot assign nil to %s", typ)
    case *parse.FieldNode:
        return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
    case *parse.VariableNode:
        return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
    case *parse.PipeNode:
        return s.validateType(s.evalPipeline(dot, arg), typ)
    case *parse.IdentifierNode:
        return s.evalFunction(dot, arg, arg, nil, zero)
    case *parse.ChainNode:
        return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
    }
    switch typ.Kind() {
    case reflect.Bool:
        return s.evalBool(typ, n)
    case reflect.Complex64, reflect.Complex128:
        return s.evalComplex(typ, n)
    case reflect.Float32, reflect.Float64:
        return s.evalFloat(typ, n)
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return s.evalInteger(typ, n)
    case reflect.Interface:
        if typ.NumMethod() == 0 {
            return s.evalEmptyInterface(dot, n)
        }
    case reflect.String:
        return s.evalString(typ, n)
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        return s.evalUnsignedInteger(typ, n)
    }
    s.errorf("can't handle %s for arg of type %s", n, typ)
    panic("not reached")
}

func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
    s.at(n)
    if n, ok := n.(*parse.BoolNode); ok {
        value := reflect.New(typ).Elem()
        value.SetBool(n.True)
        return value
    }
    s.errorf("expected bool; found %s", n)
    panic("not reached")
}

func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
    s.at(n)
    if n, ok := n.(*parse.StringNode); ok {
        value := reflect.New(typ).Elem()
        value.SetString(n.Text)
        return value
    }
    s.errorf("expected string; found %s", n)
    panic("not reached")
}

func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
    s.at(n)
    if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
        value := reflect.New(typ).Elem()
        value.SetInt(n.Int64)
        return value
    }
    s.errorf("expected integer; found %s", n)
    panic("not reached")
}

func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
    s.at(n)
    if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
        value := reflect.New(typ).Elem()
        value.SetUint(n.Uint64)
        return value
    }
    s.errorf("expected unsigned integer; found %s", n)
    panic("not reached")
}

func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
    s.at(n)
    if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
        value := reflect.New(typ).Elem()
        value.SetFloat(n.Float64)
        return value
    }
    s.errorf("expected float; found %s", n)
    panic("not reached")
}

func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
    if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
        value := reflect.New(typ).Elem()
        value.SetComplex(n.Complex128)
        return value
    }
    s.errorf("expected complex; found %s", n)
    panic("not reached")
}

func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
    s.at(n)
    switch n := n.(type) {
    case *parse.BoolNode:
        return reflect.ValueOf(n.True)
    case *parse.DotNode:
        return dot
    case *parse.FieldNode:
        return s.evalFieldNode(dot, n, nil, zero)
    case *parse.IdentifierNode:
        return s.evalFunction(dot, n, n, nil, zero)
    case *parse.NilNode:
        // NilNode is handled in evalArg, the only place that calls here.
        s.errorf("evalEmptyInterface: nil (can't happen)")
    case *parse.NumberNode:
        return s.idealConstant(n)
    case *parse.StringNode:
        return reflect.ValueOf(n.Text)
    case *parse.VariableNode:
        return s.evalVariableNode(dot, n, nil, zero)
    case *parse.PipeNode:
        return s.evalPipeline(dot, n)
    }
    s.errorf("can't handle assignment of %s to empty interface argument", n)
    panic("not reached")
}

// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
// We indirect through pointers and empty interfaces (only) because
// non-empty interfaces have methods we might need.
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
    for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
        if v.IsNil() {
            return v, true
        }
        if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
            break
        }
    }
    return v, false
}

// printValue writes the textual representation of the value to the output of
// the template.
func (s *state) printValue(n parse.Node, v reflect.Value) {
    s.at(n)
    iface, ok := printableValue(v)
    if !ok {
        s.errorf("can't print %s of type %s", n, v.Type())
    }
    fmt.Fprint(s.wr, iface)
}

// printableValue returns the, possibly indirected, interface value inside v that
// is best for a call to formatted printer.
func printableValue(v reflect.Value) (interface{}, bool) {
    if v.Kind() == reflect.Ptr {
        v, _ = indirect(v) // fmt.Fprint handles nil.
    }
    if !v.IsValid() {
        return "<no value>", true
    }

    if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
        if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
            v = v.Addr()
        } else {
            switch v.Kind() {
            case reflect.Chan, reflect.Func:
                return nil, false
            }
        }
    }
    return v.Interface(), true
}

// Types to help sort the keys in a map for reproducible output.

type rvs []reflect.Value

func (x rvs) Len() int      { return len(x) }
func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

type rvInts struct{ rvs }

func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }

type rvUints struct{ rvs }

func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }

type rvFloats struct{ rvs }

func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }

type rvStrings struct{ rvs }

func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }

// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
func sortKeys(v []reflect.Value) []reflect.Value {
    if len(v) <= 1 {
        return v
    }
    switch v[0].Kind() {
    case reflect.Float32, reflect.Float64:
        sort.Sort(rvFloats{v})
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        sort.Sort(rvInts{v})
    case reflect.String:
        sort.Sort(rvStrings{v})
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        sort.Sort(rvUints{v})
    }
    return v
}
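
sortKeys is the reason {{range}} over a map visits keys in sorted order, making template output reproducible. A small demonstration against stdlib text/template, which behaves the same way:

package main

import (
    "os"
    "text/template" // map-key sorting behaves the same here
)

func main() {
    // Keys come out sorted, so this always prints a=1 b=2 c=3.
    t := template.Must(template.New("m").Parse(
        `{{range $k, $v := .}}{{$k}}={{$v}} {{end}}`))
    _ = t.Execute(os.Stdout, map[string]int{"c": 3, "a": 1, "b": 2})
}
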

598 vendor/github.com/alecthomas/template/funcs.go generated vendored
@@ -1,598 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package template

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "net/url"
    "reflect"
    "strings"
    "unicode"
    "unicode/utf8"
)

// FuncMap is the type of the map defining the mapping from names to functions.
// Each function must have either a single return value, or two return values of
// which the second has type error. In that case, if the second (error)
// return value evaluates to non-nil during execution, execution terminates and
// Execute returns that error.
type FuncMap map[string]interface{}

var builtins = FuncMap{
    "and":      and,
    "call":     call,
    "html":     HTMLEscaper,
    "index":    index,
    "js":       JSEscaper,
    "len":      length,
    "not":      not,
    "or":       or,
    "print":    fmt.Sprint,
    "printf":   fmt.Sprintf,
    "println":  fmt.Sprintln,
    "urlquery": URLQueryEscaper,

    // Comparisons
    "eq": eq, // ==
    "ge": ge, // >=
    "gt": gt, // >
    "le": le, // <=
    "lt": lt, // <
    "ne": ne, // !=
}
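
Everything in this builtins map is available in any template without registering a FuncMap. A quick sketch (stdlib text/template shares the same builtin set):

package main

import (
    "os"
    "text/template" // same builtins as the vendored fork
)

func main() {
    // and, gt, len, ne, index and printf compose freely inside actions.
    src := `{{if and (gt (len .) 0) (ne (index . 0) "")}}first={{printf "%q" (index . 0)}}{{end}}`
    t := template.Must(template.New("b").Parse(src))
    _ = t.Execute(os.Stdout, []string{"hello", "world"}) // first="hello"
}
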
var builtinFuncs = createValueFuncs(builtins)

// createValueFuncs turns a FuncMap into a map[string]reflect.Value
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
    m := make(map[string]reflect.Value)
    addValueFuncs(m, funcMap)
    return m
}

// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
    for name, fn := range in {
        v := reflect.ValueOf(fn)
        if v.Kind() != reflect.Func {
            panic("value for " + name + " not a function")
        }
        if !goodFunc(v.Type()) {
            panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
        }
        out[name] = v
    }
}

// addFuncs adds to values the functions in funcs. It does no checking of the input -
// call addValueFuncs first.
func addFuncs(out, in FuncMap) {
    for name, fn := range in {
        out[name] = fn
    }
}

// goodFunc checks that the function or method has the right result signature.
func goodFunc(typ reflect.Type) bool {
    // We allow functions with 1 result or 2 results where the second is an error.
    switch {
    case typ.NumOut() == 1:
        return true
    case typ.NumOut() == 2 && typ.Out(1) == errorType:
        return true
    }
    return false
}

// findFunction looks for a function in the template, and global map.
func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
    if tmpl != nil && tmpl.common != nil {
        if fn := tmpl.execFuncs[name]; fn.IsValid() {
            return fn, true
        }
    }
    if fn := builtinFuncs[name]; fn.IsValid() {
        return fn, true
    }
    return reflect.Value{}, false
}

// Indexing.

// index returns the result of indexing its first argument by the following
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
// indexed item must be a map, slice, or array.
func index(item interface{}, indices ...interface{}) (interface{}, error) {
    v := reflect.ValueOf(item)
    for _, i := range indices {
        index := reflect.ValueOf(i)
        var isNil bool
        if v, isNil = indirect(v); isNil {
            return nil, fmt.Errorf("index of nil pointer")
        }
        switch v.Kind() {
        case reflect.Array, reflect.Slice, reflect.String:
            var x int64
            switch index.Kind() {
            case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
                x = index.Int()
            case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
                x = int64(index.Uint())
            default:
                return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
            }
            if x < 0 || x >= int64(v.Len()) {
                return nil, fmt.Errorf("index out of range: %d", x)
            }
            v = v.Index(int(x))
        case reflect.Map:
            if !index.IsValid() {
                index = reflect.Zero(v.Type().Key())
            }
            if !index.Type().AssignableTo(v.Type().Key()) {
                return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
            }
            if x := v.MapIndex(index); x.IsValid() {
                v = x
            } else {
                v = reflect.Zero(v.Type().Elem())
            }
        default:
            return nil, fmt.Errorf("can't index item of type %s", v.Type())
        }
    }
    return v.Interface(), nil
}
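
A usage sketch for index: "index x 1 2" is x[1][2], and map and slice levels can be mixed. The data below is invented; stdlib text/template behaves identically.

package main

import (
    "os"
    "text/template" // index behaves the same in the vendored fork
)

func main() {
    // data["grid"][1][0] == "c"
    t := template.Must(template.New("i").Parse(`{{index . "grid" 1 0}}`))
    data := map[string]interface{}{
        "grid": [][]string{{"a", "b"}, {"c", "d"}},
    }
    _ = t.Execute(os.Stdout, data) // c
}
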
// Length

// length returns the length of the item, with an error if it has no defined length.
func length(item interface{}) (int, error) {
    v, isNil := indirect(reflect.ValueOf(item))
    if isNil {
        return 0, fmt.Errorf("len of nil pointer")
    }
    switch v.Kind() {
    case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
        return v.Len(), nil
    }
    return 0, fmt.Errorf("len of type %s", v.Type())
}

// Function invocation

// call returns the result of evaluating the first argument as a function.
// The function must return 1 result, or 2 results, the second of which is an error.
func call(fn interface{}, args ...interface{}) (interface{}, error) {
    v := reflect.ValueOf(fn)
    typ := v.Type()
    if typ.Kind() != reflect.Func {
        return nil, fmt.Errorf("non-function of type %s", typ)
    }
    if !goodFunc(typ) {
        return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
    }
    numIn := typ.NumIn()
    var dddType reflect.Type
    if typ.IsVariadic() {
        if len(args) < numIn-1 {
            return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
        }
        dddType = typ.In(numIn - 1).Elem()
    } else {
        if len(args) != numIn {
            return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
        }
    }
    argv := make([]reflect.Value, len(args))
    for i, arg := range args {
        value := reflect.ValueOf(arg)
        // Compute the expected type. Clumsy because of variadics.
        var argType reflect.Type
        if !typ.IsVariadic() || i < numIn-1 {
            argType = typ.In(i)
        } else {
            argType = dddType
        }
        if !value.IsValid() && canBeNil(argType) {
            value = reflect.Zero(argType)
        }
        if !value.Type().AssignableTo(argType) {
            return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
        }
        argv[i] = value
    }
    result := v.Call(argv)
    if len(result) == 2 && !result[1].IsNil() {
        return result[0].Interface(), result[1].Interface().(error)
    }
    return result[0].Interface(), nil
}

// Boolean logic.

func truth(a interface{}) bool {
    t, _ := isTrue(reflect.ValueOf(a))
    return t
}

// and computes the Boolean AND of its arguments, returning
// the first false argument it encounters, or the last argument.
func and(arg0 interface{}, args ...interface{}) interface{} {
    if !truth(arg0) {
        return arg0
    }
    for i := range args {
        arg0 = args[i]
        if !truth(arg0) {
            break
        }
    }
    return arg0
}

// or computes the Boolean OR of its arguments, returning
// the first true argument it encounters, or the last argument.
func or(arg0 interface{}, args ...interface{}) interface{} {
    if truth(arg0) {
        return arg0
    }
    for i := range args {
        arg0 = args[i]
        if truth(arg0) {
            break
        }
    }
    return arg0
}

// not returns the Boolean negation of its argument.
func not(arg interface{}) (truth bool) {
    truth, _ = isTrue(reflect.ValueOf(arg))
    return !truth
}

// Comparison.

// TODO: Perhaps allow comparison between signed and unsigned integers.

var (
    errBadComparisonType = errors.New("invalid type for comparison")
    errBadComparison     = errors.New("incompatible types for comparison")
    errNoComparison      = errors.New("missing argument for comparison")
)

type kind int

const (
    invalidKind kind = iota
    boolKind
    complexKind
    intKind
    floatKind
    integerKind
    stringKind
    uintKind
)

func basicKind(v reflect.Value) (kind, error) {
    switch v.Kind() {
    case reflect.Bool:
        return boolKind, nil
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return intKind, nil
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        return uintKind, nil
    case reflect.Float32, reflect.Float64:
        return floatKind, nil
    case reflect.Complex64, reflect.Complex128:
        return complexKind, nil
    case reflect.String:
        return stringKind, nil
    }
    return invalidKind, errBadComparisonType
}

// eq evaluates the comparison a == b || a == c || ...
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
    v1 := reflect.ValueOf(arg1)
    k1, err := basicKind(v1)
    if err != nil {
        return false, err
    }
    if len(arg2) == 0 {
        return false, errNoComparison
    }
    for _, arg := range arg2 {
        v2 := reflect.ValueOf(arg)
        k2, err := basicKind(v2)
        if err != nil {
            return false, err
        }
        truth := false
        if k1 != k2 {
            // Special case: Can compare integer values regardless of type's sign.
            switch {
            case k1 == intKind && k2 == uintKind:
                truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
            case k1 == uintKind && k2 == intKind:
                truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
            default:
                return false, errBadComparison
            }
        } else {
            switch k1 {
            case boolKind:
                truth = v1.Bool() == v2.Bool()
            case complexKind:
                truth = v1.Complex() == v2.Complex()
            case floatKind:
                truth = v1.Float() == v2.Float()
            case intKind:
                truth = v1.Int() == v2.Int()
            case stringKind:
                truth = v1.String() == v2.String()
            case uintKind:
                truth = v1.Uint() == v2.Uint()
            default:
                panic("invalid kind")
            }
        }
        if truth {
            return true, nil
        }
    }
    return false, nil
}
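
Note that eq is variadic: {{eq x a b c}} is x == a || x == b || x == c, and signed and unsigned integers compare by value. A sketch with stdlib text/template, which implements the same semantics:

package main

import (
    "os"
    "text/template" // eq semantics match the vendored fork
)

func main() {
    // True when dot equals any of the listed operands.
    t := template.Must(template.New("eq").Parse(
        `{{if eq . "red" "green" "blue"}}primary{{else}}other{{end}}`))
    _ = t.Execute(os.Stdout, "green") // primary
}
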
// ne evaluates the comparison a != b.
func ne(arg1, arg2 interface{}) (bool, error) {
    // != is the inverse of ==.
    equal, err := eq(arg1, arg2)
    return !equal, err
}

// lt evaluates the comparison a < b.
func lt(arg1, arg2 interface{}) (bool, error) {
    v1 := reflect.ValueOf(arg1)
    k1, err := basicKind(v1)
    if err != nil {
        return false, err
    }
    v2 := reflect.ValueOf(arg2)
    k2, err := basicKind(v2)
    if err != nil {
        return false, err
    }
    truth := false
    if k1 != k2 {
        // Special case: Can compare integer values regardless of type's sign.
        switch {
        case k1 == intKind && k2 == uintKind:
            truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
        case k1 == uintKind && k2 == intKind:
            truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
        default:
            return false, errBadComparison
        }
    } else {
        switch k1 {
        case boolKind, complexKind:
            return false, errBadComparisonType
        case floatKind:
            truth = v1.Float() < v2.Float()
        case intKind:
            truth = v1.Int() < v2.Int()
        case stringKind:
            truth = v1.String() < v2.String()
        case uintKind:
            truth = v1.Uint() < v2.Uint()
        default:
            panic("invalid kind")
        }
    }
    return truth, nil
}

// le evaluates the comparison a <= b.
func le(arg1, arg2 interface{}) (bool, error) {
    // <= is < or ==.
    lessThan, err := lt(arg1, arg2)
    if lessThan || err != nil {
        return lessThan, err
    }
    return eq(arg1, arg2)
}

// gt evaluates the comparison a > b.
func gt(arg1, arg2 interface{}) (bool, error) {
    // > is the inverse of <=.
    lessOrEqual, err := le(arg1, arg2)
    if err != nil {
        return false, err
    }
    return !lessOrEqual, nil
}

// ge evaluates the comparison a >= b.
func ge(arg1, arg2 interface{}) (bool, error) {
    // >= is the inverse of <.
    lessThan, err := lt(arg1, arg2)
    if err != nil {
        return false, err
    }
    return !lessThan, nil
}

// HTML escaping.

var (
    htmlQuot = []byte("&#34;") // shorter than "&quot;"
    htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
    htmlAmp  = []byte("&amp;")
    htmlLt   = []byte("&lt;")
    htmlGt   = []byte("&gt;")
)

// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
func HTMLEscape(w io.Writer, b []byte) {
    last := 0
    for i, c := range b {
        var html []byte
        switch c {
        case '"':
            html = htmlQuot
        case '\'':
            html = htmlApos
        case '&':
            html = htmlAmp
        case '<':
            html = htmlLt
        case '>':
            html = htmlGt
        default:
            continue
        }
        w.Write(b[last:i])
        w.Write(html)
        last = i + 1
    }
    w.Write(b[last:])
}

// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
func HTMLEscapeString(s string) string {
    // Avoid allocation if we can.
    if strings.IndexAny(s, `'"&<>`) < 0 {
        return s
    }
    var b bytes.Buffer
    HTMLEscape(&b, []byte(s))
    return b.String()
}

// HTMLEscaper returns the escaped HTML equivalent of the textual
// representation of its arguments.
func HTMLEscaper(args ...interface{}) string {
    return HTMLEscapeString(evalArgs(args))
}
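
Each special byte maps to the short numeric entities defined above; everything else is copied through untouched. A sketch via the exported HTMLEscapeString (present in both this fork and stdlib text/template):

package main

import (
    "fmt"
    "text/template" // HTMLEscapeString is exported in both implementations
)

func main() {
    fmt.Println(template.HTMLEscapeString(`<a href="x">O'Brien & co</a>`))
    // &lt;a href=&#34;x&#34;&gt;O&#39;Brien &amp; co&lt;/a&gt;
}
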
// JavaScript escaping.

var (
    jsLowUni = []byte(`\u00`)
    hex      = []byte("0123456789ABCDEF")

    jsBackslash = []byte(`\\`)
    jsApos      = []byte(`\'`)
    jsQuot      = []byte(`\"`)
    jsLt        = []byte(`\x3C`)
    jsGt        = []byte(`\x3E`)
)

// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
func JSEscape(w io.Writer, b []byte) {
    last := 0
    for i := 0; i < len(b); i++ {
        c := b[i]

        if !jsIsSpecial(rune(c)) {
            // fast path: nothing to do
            continue
        }
        w.Write(b[last:i])

        if c < utf8.RuneSelf {
            // Quotes, slashes and angle brackets get quoted.
            // Control characters get written as \u00XX.
            switch c {
            case '\\':
                w.Write(jsBackslash)
            case '\'':
                w.Write(jsApos)
            case '"':
                w.Write(jsQuot)
            case '<':
                w.Write(jsLt)
            case '>':
                w.Write(jsGt)
            default:
                w.Write(jsLowUni)
                t, b := c>>4, c&0x0f
                w.Write(hex[t : t+1])
                w.Write(hex[b : b+1])
            }
        } else {
            // Unicode rune.
            r, size := utf8.DecodeRune(b[i:])
            if unicode.IsPrint(r) {
                w.Write(b[i : i+size])
            } else {
                fmt.Fprintf(w, "\\u%04X", r)
            }
            i += size - 1
        }
        last = i + 1
    }
    w.Write(b[last:])
}

// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
func JSEscapeString(s string) string {
    // Avoid allocation if we can.
    if strings.IndexFunc(s, jsIsSpecial) < 0 {
        return s
    }
    var b bytes.Buffer
    JSEscape(&b, []byte(s))
    return b.String()
}

func jsIsSpecial(r rune) bool {
    switch r {
    case '\\', '\'', '"', '<', '>':
        return true
    }
    return r < ' ' || utf8.RuneSelf <= r
}

// JSEscaper returns the escaped JavaScript equivalent of the textual
// representation of its arguments.
func JSEscaper(args ...interface{}) string {
    return JSEscapeString(evalArgs(args))
}
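
The escaped output is safe inside a single- or double-quoted JavaScript string literal. A sketch (stdlib text/template exports the same function):

package main

import (
    "fmt"
    "text/template" // JSEscapeString is exported in both implementations
)

func main() {
    // Quotes become \" and angle brackets become \x3C / \x3E.
    fmt.Println(template.JSEscapeString(`alert("hi"); x < 1`))
    // alert(\"hi\"); x \x3C 1
}
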
// URLQueryEscaper returns the escaped value of the textual representation of
// its arguments in a form suitable for embedding in a URL query.
func URLQueryEscaper(args ...interface{}) string {
    return url.QueryEscape(evalArgs(args))
}

// evalArgs formats the list of arguments into a string. It is therefore equivalent to
//    fmt.Sprint(args...)
// except that each argument is indirected (if a pointer), as required,
// using the same rules as the default string evaluation during template
// execution.
func evalArgs(args []interface{}) string {
    ok := false
    var s string
    // Fast path for simple common case.
    if len(args) == 1 {
        s, ok = args[0].(string)
    }
    if !ok {
        for i, arg := range args {
            a, ok := printableValue(reflect.ValueOf(arg))
            if ok {
                args[i] = a
            } // else let fmt do its thing
        }
        s = fmt.Sprint(args...)
    }
    return s
}

108 vendor/github.com/alecthomas/template/helper.go generated vendored
@@ -1,108 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Helper functions to make constructing templates easier.

package template

import (
    "fmt"
    "io/ioutil"
    "path/filepath"
)

// Functions and methods to parse templates.

// Must is a helper that wraps a call to a function returning (*Template, error)
// and panics if the error is non-nil. It is intended for use in variable
// initializations such as
//    var t = template.Must(template.New("name").Parse("text"))
func Must(t *Template, err error) *Template {
    if err != nil {
        panic(err)
    }
    return t
}

// ParseFiles creates a new Template and parses the template definitions from
// the named files. The returned template's name will have the (base) name and
// (parsed) contents of the first file. There must be at least one file.
// If an error occurs, parsing stops and the returned *Template is nil.
func ParseFiles(filenames ...string) (*Template, error) {
    return parseFiles(nil, filenames...)
}

// ParseFiles parses the named files and associates the resulting templates with
// t. If an error occurs, parsing stops and the returned template is nil;
// otherwise it is t. There must be at least one file.
func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
    return parseFiles(t, filenames...)
}

// parseFiles is the helper for the method and function. If the argument
// template is nil, it is created from the first file.
func parseFiles(t *Template, filenames ...string) (*Template, error) {
    if len(filenames) == 0 {
        // Not really a problem, but be consistent.
        return nil, fmt.Errorf("template: no files named in call to ParseFiles")
    }
    for _, filename := range filenames {
        b, err := ioutil.ReadFile(filename)
        if err != nil {
            return nil, err
        }
        s := string(b)
        name := filepath.Base(filename)
        // First template becomes return value if not already defined,
        // and we use that one for subsequent New calls to associate
        // all the templates together. Also, if this file has the same name
        // as t, this file becomes the contents of t, so
        //    t, err := New(name).Funcs(xxx).ParseFiles(name)
        // works. Otherwise we create a new template associated with t.
        var tmpl *Template
        if t == nil {
            t = New(name)
        }
        if name == t.Name() {
            tmpl = t
        } else {
            tmpl = t.New(name)
        }
        _, err = tmpl.Parse(s)
        if err != nil {
            return nil, err
        }
    }
    return t, nil
}

// ParseGlob creates a new Template and parses the template definitions from the
// files identified by the pattern, which must match at least one file. The
// returned template will have the (base) name and (parsed) contents of the
// first file matched by the pattern. ParseGlob is equivalent to calling
// ParseFiles with the list of files matched by the pattern.
func ParseGlob(pattern string) (*Template, error) {
    return parseGlob(nil, pattern)
}

// ParseGlob parses the template definitions in the files identified by the
// pattern and associates the resulting templates with t. The pattern is
// processed by filepath.Glob and must match at least one file. ParseGlob is
// equivalent to calling t.ParseFiles with the list of files matched by the
// pattern.
func (t *Template) ParseGlob(pattern string) (*Template, error) {
    return parseGlob(t, pattern)
}

// parseGlob is the implementation of the function and method ParseGlob.
func parseGlob(t *Template, pattern string) (*Template, error) {
    filenames, err := filepath.Glob(pattern)
    if err != nil {
        return nil, err
    }
    if len(filenames) == 0 {
        return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
    }
    return parseFiles(t, filenames...)
}
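
Must plus ParseGlob is the usual package-initialization pairing for these helpers. A hedged sketch: the glob path and template name below are hypothetical, and Must panics at startup if the pattern matches no files.

package main

import (
    "os"
    "text/template" // Must/ParseGlob work the same in the vendored fork
)

func main() {
    // Each matched file becomes an associated template named after
    // its base filename.
    t := template.Must(template.ParseGlob("templates/*.tmpl")) // hypothetical path
    _ = t.ExecuteTemplate(os.Stdout, "index.tmpl", nil)        // hypothetical name
}
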

556 vendor/github.com/alecthomas/template/parse/lex.go generated vendored
@@ -1,556 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package parse

import (
    "fmt"
    "strings"
    "unicode"
    "unicode/utf8"
)

// item represents a token or text string returned from the scanner.
type item struct {
    typ itemType // The type of this item.
    pos Pos      // The starting position, in bytes, of this item in the input string.
    val string   // The value of this item.
}

func (i item) String() string {
    switch {
    case i.typ == itemEOF:
        return "EOF"
    case i.typ == itemError:
        return i.val
    case i.typ > itemKeyword:
        return fmt.Sprintf("<%s>", i.val)
    case len(i.val) > 10:
        return fmt.Sprintf("%.10q...", i.val)
    }
    return fmt.Sprintf("%q", i.val)
}

// itemType identifies the type of lex items.
type itemType int

const (
    itemError        itemType = iota // error occurred; value is text of error
    itemBool                         // boolean constant
    itemChar                         // printable ASCII character; grab bag for comma etc.
    itemCharConstant                 // character constant
    itemComplex                      // complex constant (1+2i); imaginary is just a number
    itemColonEquals                  // colon-equals (':=') introducing a declaration
    itemEOF
    itemField        // alphanumeric identifier starting with '.'
    itemIdentifier   // alphanumeric identifier not starting with '.'
    itemLeftDelim    // left action delimiter
    itemLeftParen    // '(' inside action
    itemNumber       // simple number, including imaginary
    itemPipe         // pipe symbol
    itemRawString    // raw quoted string (includes quotes)
    itemRightDelim   // right action delimiter
    itemElideNewline // elide newline after right delim
    itemRightParen   // ')' inside action
    itemSpace        // run of spaces separating arguments
    itemString       // quoted string (includes quotes)
    itemText         // plain text
    itemVariable     // variable starting with '$', such as '$' or '$1' or '$hello'
    // Keywords appear after all the rest.
    itemKeyword  // used only to delimit the keywords
    itemDot      // the cursor, spelled '.'
    itemDefine   // define keyword
    itemElse     // else keyword
    itemEnd      // end keyword
    itemIf       // if keyword
    itemNil      // the untyped nil constant, easiest to treat as a keyword
    itemRange    // range keyword
    itemTemplate // template keyword
    itemWith     // with keyword
)

var key = map[string]itemType{
    ".":        itemDot,
    "define":   itemDefine,
    "else":     itemElse,
    "end":      itemEnd,
    "if":       itemIf,
    "range":    itemRange,
    "nil":      itemNil,
    "template": itemTemplate,
    "with":     itemWith,
}

const eof = -1

// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn

// lexer holds the state of the scanner.
type lexer struct {
    name       string    // the name of the input; used only for error reports
    input      string    // the string being scanned
    leftDelim  string    // start of action
    rightDelim string    // end of action
    state      stateFn   // the next lexing function to enter
    pos        Pos       // current position in the input
    start      Pos       // start position of this item
    width      Pos       // width of last rune read from input
    lastPos    Pos       // position of most recent item returned by nextItem
    items      chan item // channel of scanned items
    parenDepth int       // nesting depth of ( ) exprs
}

// next returns the next rune in the input.
func (l *lexer) next() rune {
    if int(l.pos) >= len(l.input) {
        l.width = 0
        return eof
    }
    r, w := utf8.DecodeRuneInString(l.input[l.pos:])
    l.width = Pos(w)
    l.pos += l.width
    return r
}

// peek returns but does not consume the next rune in the input.
func (l *lexer) peek() rune {
    r := l.next()
    l.backup()
    return r
}

// backup steps back one rune. Can only be called once per call of next.
func (l *lexer) backup() {
    l.pos -= l.width
}

// emit passes an item back to the client.
func (l *lexer) emit(t itemType) {
    l.items <- item{t, l.start, l.input[l.start:l.pos]}
    l.start = l.pos
}

// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
    l.start = l.pos
}

// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
    if strings.IndexRune(valid, l.next()) >= 0 {
        return true
    }
    l.backup()
    return false
}

// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
    for strings.IndexRune(valid, l.next()) >= 0 {
    }
    l.backup()
}

// lineNumber reports which line we're on, based on the position of
// the previous item returned by nextItem. Doing it this way
// means we don't have to worry about peek double counting.
func (l *lexer) lineNumber() int {
    return 1 + strings.Count(l.input[:l.lastPos], "\n")
}

// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
    l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
    return nil
}

// nextItem returns the next item from the input.
func (l *lexer) nextItem() item {
    item := <-l.items
    l.lastPos = item.pos
    return item
}

// lex creates a new scanner for the input string.
func lex(name, input, left, right string) *lexer {
    if left == "" {
        left = leftDelim
    }
    if right == "" {
        right = rightDelim
    }
    l := &lexer{
        name:       name,
        input:      input,
        leftDelim:  left,
        rightDelim: right,
        items:      make(chan item),
    }
    go l.run()
    return l
}

// run runs the state machine for the lexer.
func (l *lexer) run() {
    for l.state = lexText; l.state != nil; {
        l.state = l.state(l)
    }
}
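
The lexer above is the classic functional state machine: each stateFn consumes some input and returns the next state, and run loops until a state returns nil. A toy standalone version of just that control flow, with the concurrency stripped out; machine, stateEven and stateOdd are invented names for illustration.

package main

import "fmt"

type machine struct {
    input string
    pos   int
}

// stateFn mirrors the pattern above: a state is a function that
// returns the next state, or nil to stop.
type stateFn func(*machine) stateFn

func stateEven(m *machine) stateFn {
    if m.pos >= len(m.input) {
        return nil
    }
    fmt.Printf("even: %c\n", m.input[m.pos])
    m.pos++
    return stateOdd
}

func stateOdd(m *machine) stateFn {
    if m.pos >= len(m.input) {
        return nil
    }
    fmt.Printf("odd:  %c\n", m.input[m.pos])
    m.pos++
    return stateEven
}

func main() {
    m := &machine{input: "abcd"}
    // The same driving loop as lexer.run above.
    for state := stateEven; state != nil; {
        state = state(m)
    }
}
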
// state functions

const (
    leftDelim    = "{{"
    rightDelim   = "}}"
    leftComment  = "/*"
    rightComment = "*/"
)

// lexText scans until an opening action delimiter, "{{".
func lexText(l *lexer) stateFn {
    for {
        if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
            if l.pos > l.start {
                l.emit(itemText)
            }
            return lexLeftDelim
        }
        if l.next() == eof {
            break
        }
    }
    // Correctly reached EOF.
    if l.pos > l.start {
        l.emit(itemText)
    }
    l.emit(itemEOF)
    return nil
}

// lexLeftDelim scans the left delimiter, which is known to be present.
func lexLeftDelim(l *lexer) stateFn {
    l.pos += Pos(len(l.leftDelim))
    if strings.HasPrefix(l.input[l.pos:], leftComment) {
        return lexComment
    }
    l.emit(itemLeftDelim)
    l.parenDepth = 0
    return lexInsideAction
}

// lexComment scans a comment. The left comment marker is known to be present.
func lexComment(l *lexer) stateFn {
    l.pos += Pos(len(leftComment))
    i := strings.Index(l.input[l.pos:], rightComment)
    if i < 0 {
        return l.errorf("unclosed comment")
    }
    l.pos += Pos(i + len(rightComment))
    if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
        return l.errorf("comment ends before closing delimiter")
    }
    l.pos += Pos(len(l.rightDelim))
    l.ignore()
    return lexText
}

// lexRightDelim scans the right delimiter, which is known to be present.
func lexRightDelim(l *lexer) stateFn {
    l.pos += Pos(len(l.rightDelim))
    l.emit(itemRightDelim)
    if l.peek() == '\\' {
        l.pos++
        l.emit(itemElideNewline)
    }
    return lexText
}

// lexInsideAction scans the elements inside action delimiters.
func lexInsideAction(l *lexer) stateFn {
    // Either number, quoted string, or identifier.
    // Spaces separate arguments; runs of spaces turn into itemSpace.
    // Pipe symbols separate and are emitted.
    if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
        if l.parenDepth == 0 {
            return lexRightDelim
        }
        return l.errorf("unclosed left paren")
    }
    switch r := l.next(); {
    case r == eof || isEndOfLine(r):
        return l.errorf("unclosed action")
    case isSpace(r):
        return lexSpace
    case r == ':':
        if l.next() != '=' {
            return l.errorf("expected :=")
        }
        l.emit(itemColonEquals)
    case r == '|':
        l.emit(itemPipe)
    case r == '"':
        return lexQuote
    case r == '`':
        return lexRawQuote
    case r == '$':
        return lexVariable
    case r == '\'':
        return lexChar
    case r == '.':
        // special look-ahead for ".field" so we don't break l.backup().
        if l.pos < Pos(len(l.input)) {
            r := l.input[l.pos]
            if r < '0' || '9' < r {
                return lexField
            }
        }
        fallthrough // '.' can start a number.
    case r == '+' || r == '-' || ('0' <= r && r <= '9'):
        l.backup()
        return lexNumber
    case isAlphaNumeric(r):
        l.backup()
        return lexIdentifier
    case r == '(':
        l.emit(itemLeftParen)
        l.parenDepth++
        return lexInsideAction
    case r == ')':
        l.emit(itemRightParen)
        l.parenDepth--
        if l.parenDepth < 0 {
            return l.errorf("unexpected right paren %#U", r)
        }
        return lexInsideAction
    case r <= unicode.MaxASCII && unicode.IsPrint(r):
        l.emit(itemChar)
        return lexInsideAction
    default:
        return l.errorf("unrecognized character in action: %#U", r)
    }
    return lexInsideAction
}

// lexSpace scans a run of space characters.
// One space has already been seen.
func lexSpace(l *lexer) stateFn {
    for isSpace(l.peek()) {
        l.next()
    }
    l.emit(itemSpace)
    return lexInsideAction
}

// lexIdentifier scans an alphanumeric.
func lexIdentifier(l *lexer) stateFn {
Loop:
    for {
        switch r := l.next(); {
        case isAlphaNumeric(r):
            // absorb.
        default:
            l.backup()
            word := l.input[l.start:l.pos]
            if !l.atTerminator() {
                return l.errorf("bad character %#U", r)
            }
            switch {
            case key[word] > itemKeyword:
                l.emit(key[word])
            case word[0] == '.':
                l.emit(itemField)
            case word == "true", word == "false":
                l.emit(itemBool)
            default:
                l.emit(itemIdentifier)
            }
            break Loop
        }
    }
    return lexInsideAction
}

// lexField scans a field: .Alphanumeric.
// The . has been scanned.
func lexField(l *lexer) stateFn {
    return lexFieldOrVariable(l, itemField)
}

// lexVariable scans a Variable: $Alphanumeric.
// The $ has been scanned.
func lexVariable(l *lexer) stateFn {
    if l.atTerminator() { // Nothing interesting follows -> "$".
        l.emit(itemVariable)
        return lexInsideAction
    }
    return lexFieldOrVariable(l, itemVariable)
}

// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
// The . or $ has been scanned.
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
    if l.atTerminator() { // Nothing interesting follows -> "." or "$".
        if typ == itemVariable {
            l.emit(itemVariable)
        } else {
            l.emit(itemDot)
        }
        return lexInsideAction
    }
    var r rune
    for {
        r = l.next()
        if !isAlphaNumeric(r) {
            l.backup()
            break
        }
    }
    if !l.atTerminator() {
        return l.errorf("bad character %#U", r)
    }
    l.emit(typ)
    return lexInsideAction
}

// atTerminator reports whether the input is at valid termination character to
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
// like "$x+2" not being acceptable without a space, in case we decide one
// day to implement arithmetic.
func (l *lexer) atTerminator() bool {
    r := l.peek()
    if isSpace(r) || isEndOfLine(r) {
        return true
    }
    switch r {
    case eof, '.', ',', '|', ':', ')', '(':
        return true
    }
    // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
    // succeed but should fail) but only in extremely rare cases caused by willfully
    // bad choice of delimiter.
    if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
        return true
    }
    return false
}

// lexChar scans a character constant. The initial quote is already
// scanned. Syntax checking is done by the parser.
func lexChar(l *lexer) stateFn {
Loop:
    for {
        switch l.next() {
        case '\\':
            if r := l.next(); r != eof && r != '\n' {
                break
            }
            fallthrough
        case eof, '\n':
            return l.errorf("unterminated character constant")
        case '\'':
            break Loop
        }
    }
    l.emit(itemCharConstant)
    return lexInsideAction
}

// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
// and "089" - but when it's wrong the input is invalid and the parser (via
// strconv) will notice.
func lexNumber(l *lexer) stateFn {
    if !l.scanNumber() {
        return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
    }
    if sign := l.peek(); sign == '+' || sign == '-' {
        // Complex: 1+2i. No spaces, must end in 'i'.
        if !l.scanNumber() || l.input[l.pos-1] != 'i' {
            return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
        }
        l.emit(itemComplex)
    } else {
        l.emit(itemNumber)
    }
    return lexInsideAction
}

func (l *lexer) scanNumber() bool {
    // Optional leading sign.
    l.accept("+-")
    // Is it hex?
    digits := "0123456789"
    if l.accept("0") && l.accept("xX") {
        digits = "0123456789abcdefABCDEF"
    }
    l.acceptRun(digits)
    if l.accept(".") {
        l.acceptRun(digits)
    }
    if l.accept("eE") {
        l.accept("+-")
        l.acceptRun("0123456789")
    }
    // Is it imaginary?
    l.accept("i")
    // Next thing mustn't be alphanumeric.
    if isAlphaNumeric(l.peek()) {
        l.next()
        return false
    }
    return true
}
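
scanNumber deliberately over-accepts and lets strconv reject nonsense later, so decimal, hex, float, scientific and imaginary forms all lex cleanly. A sketch with stdlib text/template, which uses the same scanner:

package main

import (
    "os"
    "text/template" // same number grammar as the scanner above
)

func main() {
    // 42, 0x2A, 3.14, 1e3 and 1+2i all parse; 1+2i becomes itemComplex.
    t := template.Must(template.New("n").Parse(
        `{{print 42 0x2A 3.14 1e3 1+2i}}`))
    _ = t.Execute(os.Stdout, nil) // 42 42 3.14 1000 (1+2i)
}
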
// lexQuote scans a quoted string.
func lexQuote(l *lexer) stateFn {
Loop:
    for {
        switch l.next() {
        case '\\':
            if r := l.next(); r != eof && r != '\n' {
                break
            }
            fallthrough
        case eof, '\n':
            return l.errorf("unterminated quoted string")
        case '"':
            break Loop
        }
    }
    l.emit(itemString)
    return lexInsideAction
}

// lexRawQuote scans a raw quoted string.
func lexRawQuote(l *lexer) stateFn {
Loop:
    for {
        switch l.next() {
        case eof, '\n':
            return l.errorf("unterminated raw quoted string")
        case '`':
            break Loop
        }
    }
    l.emit(itemRawString)
    return lexInsideAction
}

// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
    return r == ' ' || r == '\t'
}

// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
    return r == '\r' || r == '\n'
}

// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
    return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}

834 vendor/github.com/alecthomas/template/parse/node.go generated vendored
@@ -1,834 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Parse nodes.

package parse

import (
    "bytes"
    "fmt"
    "strconv"
    "strings"
)

var textFormat = "%s" // Changed to "%q" in tests for better error messages.

// A Node is an element in the parse tree. The interface is trivial.
// The interface contains an unexported method so that only
// types local to this package can satisfy it.
type Node interface {
    Type() NodeType
    String() string
    // Copy does a deep copy of the Node and all its components.
    // To avoid type assertions, some XxxNodes also have specialized
    // CopyXxx methods that return *XxxNode.
    Copy() Node
    Position() Pos // byte position of start of node in full original input string
    // tree returns the containing *Tree.
    // It is unexported so all implementations of Node are in this package.
    tree() *Tree
}

// NodeType identifies the type of a parse tree node.
type NodeType int

// Pos represents a byte position in the original input text from which
// this template was parsed.
type Pos int

func (p Pos) Position() Pos {
    return p
}

// Type returns itself and provides an easy default implementation
// for embedding in a Node. Embedded in all non-trivial Nodes.
func (t NodeType) Type() NodeType {
    return t
}

const (
    NodeText       NodeType = iota // Plain text.
    NodeAction                     // A non-control action such as a field evaluation.
    NodeBool                       // A boolean constant.
    NodeChain                      // A sequence of field accesses.
    NodeCommand                    // An element of a pipeline.
    NodeDot                        // The cursor, dot.
    nodeElse                       // An else action. Not added to tree.
    nodeEnd                        // An end action. Not added to tree.
    NodeField                      // A field or method name.
    NodeIdentifier                 // An identifier; always a function name.
    NodeIf                         // An if action.
    NodeList                       // A list of Nodes.
    NodeNil                        // An untyped nil constant.
    NodeNumber                     // A numerical constant.
    NodePipe                       // A pipeline of commands.
    NodeRange                      // A range action.
    NodeString                     // A string constant.
    NodeTemplate                   // A template invocation action.
    NodeVariable                   // A $ variable.
    NodeWith                       // A with action.
)
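
These NodeType values tag the concrete node structs that follow. A sketch of inspecting a parse tree with the standard library's text/template/parse, the package this vendored copy tracks:

package main

import (
    "fmt"
    "text/template/parse" // stdlib twin of this vendored parse package
)

func main() {
    // Parse returns a map of template name to *Tree; Tree.Root is a
    // *ListNode whose children carry the NodeType tags defined above.
    trees, err := parse.Parse("demo", "Hi {{.Name}}!", "{{", "}}")
    if err != nil {
        panic(err)
    }
    for _, n := range trees["demo"].Root.Nodes {
        fmt.Printf("%-18T %s\n", n, n)
    }
    // *parse.TextNode    Hi
    // *parse.ActionNode  {{.Name}}
    // *parse.TextNode    !
}
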
// Nodes.

// ListNode holds a sequence of nodes.
type ListNode struct {
    NodeType
    Pos
    tr    *Tree
    Nodes []Node // The element nodes in lexical order.
}

func (t *Tree) newList(pos Pos) *ListNode {
    return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
}

func (l *ListNode) append(n Node) {
    l.Nodes = append(l.Nodes, n)
}

func (l *ListNode) tree() *Tree {
    return l.tr
}

func (l *ListNode) String() string {
    b := new(bytes.Buffer)
    for _, n := range l.Nodes {
        fmt.Fprint(b, n)
    }
    return b.String()
}

func (l *ListNode) CopyList() *ListNode {
    if l == nil {
        return l
    }
    n := l.tr.newList(l.Pos)
    for _, elem := range l.Nodes {
        n.append(elem.Copy())
    }
    return n
}

func (l *ListNode) Copy() Node {
    return l.CopyList()
}

// TextNode holds plain text.
type TextNode struct {
    NodeType
    Pos
    tr   *Tree
    Text []byte // The text; may span newlines.
}

func (t *Tree) newText(pos Pos, text string) *TextNode {
    return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
}

func (t *TextNode) String() string {
    return fmt.Sprintf(textFormat, t.Text)
}

func (t *TextNode) tree() *Tree {
    return t.tr
}

func (t *TextNode) Copy() Node {
    return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
}

// PipeNode holds a pipeline with optional declaration
type PipeNode struct {
    NodeType
    Pos
    tr   *Tree
    Line int             // The line number in the input (deprecated; kept for compatibility)
    Decl []*VariableNode // Variable declarations in lexical order.
    Cmds []*CommandNode  // The commands in lexical order.
}

func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
    return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
}

func (p *PipeNode) append(command *CommandNode) {
    p.Cmds = append(p.Cmds, command)
}

func (p *PipeNode) String() string {
    s := ""
    if len(p.Decl) > 0 {
        for i, v := range p.Decl {
            if i > 0 {
                s += ", "
            }
            s += v.String()
        }
        s += " := "
    }
    for i, c := range p.Cmds {
        if i > 0 {
            s += " | "
        }
        s += c.String()
    }
    return s
}

func (p *PipeNode) tree() *Tree {
    return p.tr
}

func (p *PipeNode) CopyPipe() *PipeNode {
    if p == nil {
        return p
    }
    var decl []*VariableNode
    for _, d := range p.Decl {
        decl = append(decl, d.Copy().(*VariableNode))
    }
    n := p.tr.newPipeline(p.Pos, p.Line, decl)
    for _, c := range p.Cmds {
        n.append(c.Copy().(*CommandNode))
    }
    return n
}

func (p *PipeNode) Copy() Node {
    return p.CopyPipe()
}

// ActionNode holds an action (something bounded by delimiters).
// Control actions have their own nodes; ActionNode represents simple
// ones such as field evaluations and parenthesized pipelines.
type ActionNode struct {
    NodeType
    Pos
    tr   *Tree
    Line int       // The line number in the input (deprecated; kept for compatibility)
    Pipe *PipeNode // The pipeline in the action.
}

func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
    return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
}

func (a *ActionNode) String() string {
    return fmt.Sprintf("{{%s}}", a.Pipe)
}

func (a *ActionNode) tree() *Tree {
    return a.tr
}

func (a *ActionNode) Copy() Node {
    return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
}

// CommandNode holds a command (a pipeline inside an evaluating action).
type CommandNode struct {
    NodeType
    Pos
    tr   *Tree
    Args []Node // Arguments in lexical order: Identifier, field, or constant.
}

func (t *Tree) newCommand(pos Pos) *CommandNode {
    return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
}

func (c *CommandNode) append(arg Node) {
    c.Args = append(c.Args, arg)
}

func (c *CommandNode) String() string {
    s := ""
    for i, arg := range c.Args {
        if i > 0 {
            s += " "
        }
        if arg, ok := arg.(*PipeNode); ok {
            s += "(" + arg.String() + ")"
            continue
        }
        s += arg.String()
    }
    return s
}

func (c *CommandNode) tree() *Tree {
    return c.tr
}

func (c *CommandNode) Copy() Node {
    if c == nil {
        return c
    }
    n := c.tr.newCommand(c.Pos)
    for _, c := range c.Args {
        n.append(c.Copy())
    }
||||
return n
|
||||
}
|
||||
|
||||
// IdentifierNode holds an identifier.
|
||||
type IdentifierNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident string // The identifier's name.
|
||||
}
|
||||
|
||||
// NewIdentifier returns a new IdentifierNode with the given identifier name.
|
||||
func NewIdentifier(ident string) *IdentifierNode {
|
||||
return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
|
||||
}
|
||||
|
||||
// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
|
||||
i.Pos = pos
|
||||
return i
|
||||
}
|
||||
|
||||
// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
|
||||
i.tr = t
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) String() string {
|
||||
return i.Ident
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) tree() *Tree {
|
||||
return i.tr
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) Copy() Node {
|
||||
return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
|
||||
}
|
||||
|
||||
// VariableNode holds a list of variable names, possibly with chained field
|
||||
// accesses. The dollar sign is part of the (first) name.
|
||||
type VariableNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // Variable name and fields in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
|
||||
return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
|
||||
}
|
||||
|
||||
func (v *VariableNode) String() string {
|
||||
s := ""
|
||||
for i, id := range v.Ident {
|
||||
if i > 0 {
|
||||
s += "."
|
||||
}
|
||||
s += id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (v *VariableNode) tree() *Tree {
|
||||
return v.tr
|
||||
}
|
||||
|
||||
func (v *VariableNode) Copy() Node {
|
||||
return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
|
||||
}
|
||||
|
||||
// DotNode holds the special identifier '.'.
|
||||
type DotNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newDot(pos Pos) *DotNode {
|
||||
return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
|
||||
}
|
||||
|
||||
func (d *DotNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeDot
|
||||
}
|
||||
|
||||
func (d *DotNode) String() string {
|
||||
return "."
|
||||
}
|
||||
|
||||
func (d *DotNode) tree() *Tree {
|
||||
return d.tr
|
||||
}
|
||||
|
||||
func (d *DotNode) Copy() Node {
|
||||
return d.tr.newDot(d.Pos)
|
||||
}
|
||||
|
||||
// NilNode holds the special identifier 'nil' representing an untyped nil constant.
|
||||
type NilNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newNil(pos Pos) *NilNode {
|
||||
return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
|
||||
}
|
||||
|
||||
func (n *NilNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeNil
|
||||
}
|
||||
|
||||
func (n *NilNode) String() string {
|
||||
return "nil"
|
||||
}
|
||||
|
||||
func (n *NilNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NilNode) Copy() Node {
|
||||
return n.tr.newNil(n.Pos)
|
||||
}
|
||||
|
||||
// FieldNode holds a field (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The period is dropped from each ident.
|
||||
type FieldNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newField(pos Pos, ident string) *FieldNode {
|
||||
return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
|
||||
}
|
||||
|
||||
func (f *FieldNode) String() string {
|
||||
s := ""
|
||||
for _, id := range f.Ident {
|
||||
s += "." + id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (f *FieldNode) tree() *Tree {
|
||||
return f.tr
|
||||
}
|
||||
|
||||
func (f *FieldNode) Copy() Node {
|
||||
return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
|
||||
}
|
||||
|
||||
// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The periods are dropped from each ident.
|
||||
type ChainNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Node Node
|
||||
Field []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
|
||||
return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
|
||||
}
|
||||
|
||||
// Add adds the named field (which should start with a period) to the end of the chain.
|
||||
func (c *ChainNode) Add(field string) {
|
||||
if len(field) == 0 || field[0] != '.' {
|
||||
panic("no dot in field")
|
||||
}
|
||||
field = field[1:] // Remove leading dot.
|
||||
if field == "" {
|
||||
panic("empty field")
|
||||
}
|
||||
c.Field = append(c.Field, field)
|
||||
}
|
||||
|
||||
func (c *ChainNode) String() string {
|
||||
s := c.Node.String()
|
||||
if _, ok := c.Node.(*PipeNode); ok {
|
||||
s = "(" + s + ")"
|
||||
}
|
||||
for _, field := range c.Field {
|
||||
s += "." + field
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *ChainNode) tree() *Tree {
|
||||
return c.tr
|
||||
}
|
||||
|
||||
func (c *ChainNode) Copy() Node {
|
||||
return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
|
||||
}
|
||||
|
||||
// BoolNode holds a boolean constant.
|
||||
type BoolNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
True bool // The value of the boolean constant.
|
||||
}
|
||||
|
||||
func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
|
||||
return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
|
||||
}
|
||||
|
||||
func (b *BoolNode) String() string {
|
||||
if b.True {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
}
|
||||
|
||||
func (b *BoolNode) tree() *Tree {
|
||||
return b.tr
|
||||
}
|
||||
|
||||
func (b *BoolNode) Copy() Node {
|
||||
return b.tr.newBool(b.Pos, b.True)
|
||||
}
|
||||
|
||||
// NumberNode holds a number: signed or unsigned integer, float, or complex.
|
||||
// The value is parsed and stored under all the types that can represent the value.
|
||||
// This simulates in a small amount of code the behavior of Go's ideal constants.
|
||||
type NumberNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
IsInt bool // Number has an integral value.
|
||||
IsUint bool // Number has an unsigned integral value.
|
||||
IsFloat bool // Number has a floating-point value.
|
||||
IsComplex bool // Number is complex.
|
||||
Int64 int64 // The signed integer value.
|
||||
Uint64 uint64 // The unsigned integer value.
|
||||
Float64 float64 // The floating-point value.
|
||||
Complex128 complex128 // The complex value.
|
||||
Text string // The original textual representation from the input.
|
||||
}
|
||||
|
||||
func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
|
||||
n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
|
||||
switch typ {
|
||||
case itemCharConstant:
|
||||
rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tail != "'" {
|
||||
return nil, fmt.Errorf("malformed character constant: %s", text)
|
||||
}
|
||||
n.Int64 = int64(rune)
|
||||
n.IsInt = true
|
||||
n.Uint64 = uint64(rune)
|
||||
n.IsUint = true
|
||||
n.Float64 = float64(rune) // odd but those are the rules.
|
||||
n.IsFloat = true
|
||||
return n, nil
|
||||
case itemComplex:
|
||||
// fmt.Sscan can parse the pair, so let it do the work.
|
||||
if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n.IsComplex = true
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
// Imaginary constants can only be complex unless they are zero.
|
||||
if len(text) > 0 && text[len(text)-1] == 'i' {
|
||||
f, err := strconv.ParseFloat(text[:len(text)-1], 64)
|
||||
if err == nil {
|
||||
n.IsComplex = true
|
||||
n.Complex128 = complex(0, f)
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
// Do integer test first so we get 0x123 etc.
|
||||
u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
|
||||
if err == nil {
|
||||
n.IsUint = true
|
||||
n.Uint64 = u
|
||||
}
|
||||
i, err := strconv.ParseInt(text, 0, 64)
|
||||
if err == nil {
|
||||
n.IsInt = true
|
||||
n.Int64 = i
|
||||
if i == 0 {
|
||||
n.IsUint = true // in case of -0.
|
||||
n.Uint64 = u
|
||||
}
|
||||
}
|
||||
// If an integer extraction succeeded, promote the float.
|
||||
if n.IsInt {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Int64)
|
||||
} else if n.IsUint {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Uint64)
|
||||
} else {
|
||||
f, err := strconv.ParseFloat(text, 64)
|
||||
if err == nil {
|
||||
n.IsFloat = true
|
||||
n.Float64 = f
|
||||
// If a floating-point extraction succeeded, extract the int if needed.
|
||||
if !n.IsInt && float64(int64(f)) == f {
|
||||
n.IsInt = true
|
||||
n.Int64 = int64(f)
|
||||
}
|
||||
if !n.IsUint && float64(uint64(f)) == f {
|
||||
n.IsUint = true
|
||||
n.Uint64 = uint64(f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !n.IsInt && !n.IsUint && !n.IsFloat {
|
||||
return nil, fmt.Errorf("illegal number syntax: %q", text)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// simplifyComplex pulls out any other types that are represented by the complex number.
|
||||
// These all require that the imaginary part be zero.
|
||||
func (n *NumberNode) simplifyComplex() {
|
||||
n.IsFloat = imag(n.Complex128) == 0
|
||||
if n.IsFloat {
|
||||
n.Float64 = real(n.Complex128)
|
||||
n.IsInt = float64(int64(n.Float64)) == n.Float64
|
||||
if n.IsInt {
|
||||
n.Int64 = int64(n.Float64)
|
||||
}
|
||||
n.IsUint = float64(uint64(n.Float64)) == n.Float64
|
||||
if n.IsUint {
|
||||
n.Uint64 = uint64(n.Float64)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NumberNode) String() string {
|
||||
return n.Text
|
||||
}
|
||||
|
||||
func (n *NumberNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NumberNode) Copy() Node {
|
||||
nn := new(NumberNode)
|
||||
*nn = *n // Easy, fast, correct.
|
||||
return nn
|
||||
}
|
||||
|
||||
// StringNode holds a string constant. The value has been "unquoted".
type StringNode struct {
	NodeType
	Pos
	tr     *Tree
	Quoted string // The original text of the string, with quotes.
	Text   string // The string, after quote processing.
}

func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
	return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
}

func (s *StringNode) String() string {
	return s.Quoted
}

func (s *StringNode) tree() *Tree {
	return s.tr
}

func (s *StringNode) Copy() Node {
	return s.tr.newString(s.Pos, s.Quoted, s.Text)
}

// endNode represents an {{end}} action.
// It does not appear in the final parse tree.
type endNode struct {
	NodeType
	Pos
	tr *Tree
}

func (t *Tree) newEnd(pos Pos) *endNode {
	return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
}

func (e *endNode) String() string {
	return "{{end}}"
}

func (e *endNode) tree() *Tree {
	return e.tr
}

func (e *endNode) Copy() Node {
	return e.tr.newEnd(e.Pos)
}

// elseNode represents an {{else}} action. Does not appear in the final tree.
type elseNode struct {
	NodeType
	Pos
	tr   *Tree
	Line int // The line number in the input (deprecated; kept for compatibility)
}

func (t *Tree) newElse(pos Pos, line int) *elseNode {
	return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
}

func (e *elseNode) Type() NodeType {
	return nodeElse
}

func (e *elseNode) String() string {
	return "{{else}}"
}

func (e *elseNode) tree() *Tree {
	return e.tr
}

func (e *elseNode) Copy() Node {
	return e.tr.newElse(e.Pos, e.Line)
}

// BranchNode is the common representation of if, range, and with.
type BranchNode struct {
	NodeType
	Pos
	tr       *Tree
	Line     int       // The line number in the input (deprecated; kept for compatibility)
	Pipe     *PipeNode // The pipeline to be evaluated.
	List     *ListNode // What to execute if the value is non-empty.
	ElseList *ListNode // What to execute if the value is empty (nil if absent).
}

func (b *BranchNode) String() string {
	name := ""
	switch b.NodeType {
	case NodeIf:
		name = "if"
	case NodeRange:
		name = "range"
	case NodeWith:
		name = "with"
	default:
		panic("unknown branch type")
	}
	if b.ElseList != nil {
		return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
	}
	return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
}

func (b *BranchNode) tree() *Tree {
	return b.tr
}

func (b *BranchNode) Copy() Node {
	switch b.NodeType {
	case NodeIf:
		return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
	case NodeRange:
		return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
	case NodeWith:
		return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
	default:
		panic("unknown branch type")
	}
}

// IfNode represents an {{if}} action and its commands.
type IfNode struct {
	BranchNode
}

func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
	return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
}

func (i *IfNode) Copy() Node {
	return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
}

// RangeNode represents a {{range}} action and its commands.
type RangeNode struct {
	BranchNode
}

func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
	return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
}

func (r *RangeNode) Copy() Node {
	return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
}

// WithNode represents a {{with}} action and its commands.
type WithNode struct {
	BranchNode
}

func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
	return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
}

func (w *WithNode) Copy() Node {
	return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
}

// TemplateNode represents a {{template}} action.
type TemplateNode struct {
	NodeType
	Pos
	tr   *Tree
	Line int       // The line number in the input (deprecated; kept for compatibility)
	Name string    // The name of the template (unquoted).
	Pipe *PipeNode // The command to evaluate as dot for the template.
}

func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
	return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
}

func (t *TemplateNode) String() string {
	if t.Pipe == nil {
		return fmt.Sprintf("{{template %q}}", t.Name)
	}
	return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
}

func (t *TemplateNode) tree() *Tree {
	return t.tr
}

func (t *TemplateNode) Copy() Node {
	return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
}
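Only a handful of the constructors above are exported; NewIdentifier is the main one. A minimal sketch of the chained-setter and Copy pattern the comments describe, using only the exported API and the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	// SetPos is chained because NewIdentifier's public signature cannot
	// grow a position parameter.
	id := parse.NewIdentifier("printf").SetPos(0)

	// Copy returns the Node interface; the concrete type is recovered
	// with a type assertion, which is what the specialized CopyXxx
	// methods exist to avoid on the node types that have them.
	dup := id.Copy().(*parse.IdentifierNode)
	fmt.Println(dup.String() == id.String(), dup != id) // true true
}
```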
700
vendor/github.com/alecthomas/template/parse/parse.go
generated
vendored
@@ -1,700 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package parse builds parse trees for templates as defined by text/template
// and html/template. Clients should use those packages to construct templates
// rather than this one, which provides shared internal data structures not
// intended for general use.
package parse

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
	"strings"
)

// Tree is the representation of a single parsed template.
type Tree struct {
	Name      string    // name of the template represented by the tree.
	ParseName string    // name of the top-level template during parsing, for error messages.
	Root      *ListNode // top-level root of the tree.
	text      string    // text parsed to create the template (or its parent)
	// Parsing only; cleared after parse.
	funcs     []map[string]interface{}
	lex       *lexer
	token     [3]item // three-token lookahead for parser.
	peekCount int
	vars      []string // variables defined at the moment.
}

// Copy returns a copy of the Tree. Any parsing state is discarded.
func (t *Tree) Copy() *Tree {
	if t == nil {
		return nil
	}
	return &Tree{
		Name:      t.Name,
		ParseName: t.ParseName,
		Root:      t.Root.CopyList(),
		text:      t.text,
	}
}

// Parse returns a map from template name to parse.Tree, created by parsing the
// templates described in the argument string. The top-level template will be
// given the specified name. If an error is encountered, parsing stops and an
// empty map is returned with the error.
func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
	treeSet = make(map[string]*Tree)
	t := New(name)
	t.text = text
	_, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
	return
}

// next returns the next token.
func (t *Tree) next() item {
	if t.peekCount > 0 {
		t.peekCount--
	} else {
		t.token[0] = t.lex.nextItem()
	}
	return t.token[t.peekCount]
}

// backup backs the input stream up one token.
func (t *Tree) backup() {
	t.peekCount++
}

// backup2 backs the input stream up two tokens.
// The zeroth token is already there.
func (t *Tree) backup2(t1 item) {
	t.token[1] = t1
	t.peekCount = 2
}

// backup3 backs the input stream up three tokens.
// The zeroth token is already there.
func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
	t.token[1] = t1
	t.token[2] = t2
	t.peekCount = 3
}

// peek returns but does not consume the next token.
func (t *Tree) peek() item {
	if t.peekCount > 0 {
		return t.token[t.peekCount-1]
	}
	t.peekCount = 1
	t.token[0] = t.lex.nextItem()
	return t.token[0]
}

// nextNonSpace returns the next non-space token.
func (t *Tree) nextNonSpace() (token item) {
	for {
		token = t.next()
		if token.typ != itemSpace {
			break
		}
	}
	return token
}

// peekNonSpace returns but does not consume the next non-space token.
func (t *Tree) peekNonSpace() (token item) {
	for {
		token = t.next()
		if token.typ != itemSpace {
			break
		}
	}
	t.backup()
	return token
}

// Parsing.

// New allocates a new parse tree with the given name.
func New(name string, funcs ...map[string]interface{}) *Tree {
	return &Tree{
		Name:  name,
		funcs: funcs,
	}
}

// ErrorContext returns a textual representation of the location of the node in the input text.
// The receiver is only used when the node does not have a pointer to the tree inside,
// which can occur in old code.
func (t *Tree) ErrorContext(n Node) (location, context string) {
	pos := int(n.Position())
	tree := n.tree()
	if tree == nil {
		tree = t
	}
	text := tree.text[:pos]
	byteNum := strings.LastIndex(text, "\n")
	if byteNum == -1 {
		byteNum = pos // On first line.
	} else {
		byteNum++ // After the newline.
		byteNum = pos - byteNum
	}
	lineNum := 1 + strings.Count(text, "\n")
	context = n.String()
	if len(context) > 20 {
		context = fmt.Sprintf("%.20s...", context)
	}
	return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
}

// errorf formats the error and terminates processing.
func (t *Tree) errorf(format string, args ...interface{}) {
	t.Root = nil
	format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
	panic(fmt.Errorf(format, args...))
}

// error terminates processing.
func (t *Tree) error(err error) {
	t.errorf("%s", err)
}

// expect consumes the next token and guarantees it has the required type.
func (t *Tree) expect(expected itemType, context string) item {
	token := t.nextNonSpace()
	if token.typ != expected {
		t.unexpected(token, context)
	}
	return token
}

// expectOneOf consumes the next token and guarantees it has one of the required types.
func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
	token := t.nextNonSpace()
	if token.typ != expected1 && token.typ != expected2 {
		t.unexpected(token, context)
	}
	return token
}

// unexpected complains about the token and terminates processing.
func (t *Tree) unexpected(token item, context string) {
	t.errorf("unexpected %s in %s", token, context)
}

// recover is the handler that turns panics into returns from the top level of Parse.
func (t *Tree) recover(errp *error) {
	e := recover()
	if e != nil {
		if _, ok := e.(runtime.Error); ok {
			panic(e)
		}
		if t != nil {
			t.stopParse()
		}
		*errp = e.(error)
	}
	return
}

// startParse initializes the parser, using the lexer.
func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
	t.Root = nil
	t.lex = lex
	t.vars = []string{"$"}
	t.funcs = funcs
}

// stopParse terminates parsing.
func (t *Tree) stopParse() {
	t.lex = nil
	t.vars = nil
	t.funcs = nil
}

// Parse parses the template definition string to construct a representation of
// the template for execution. If either action delimiter string is empty, the
// default ("{{" or "}}") is used. Embedded template definitions are added to
// the treeSet map.
func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
	defer t.recover(&err)
	t.ParseName = t.Name
	t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
	t.text = text
	t.parse(treeSet)
	t.add(treeSet)
	t.stopParse()
	return t, nil
}

// add adds tree to the treeSet.
func (t *Tree) add(treeSet map[string]*Tree) {
	tree := treeSet[t.Name]
	if tree == nil || IsEmptyTree(tree.Root) {
		treeSet[t.Name] = t
		return
	}
	if !IsEmptyTree(t.Root) {
		t.errorf("template: multiple definition of template %q", t.Name)
	}
}

// IsEmptyTree reports whether this tree (node) is empty of everything but space.
func IsEmptyTree(n Node) bool {
	switch n := n.(type) {
	case nil:
		return true
	case *ActionNode:
	case *IfNode:
	case *ListNode:
		for _, node := range n.Nodes {
			if !IsEmptyTree(node) {
				return false
			}
		}
		return true
	case *RangeNode:
	case *TemplateNode:
	case *TextNode:
		return len(bytes.TrimSpace(n.Text)) == 0
	case *WithNode:
	default:
		panic("unknown node: " + n.String())
	}
	return false
}

// parse is the top-level parser for a template, essentially the same
// as itemList except it also parses {{define}} actions.
// It runs to EOF.
func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
	t.Root = t.newList(t.peek().pos)
	for t.peek().typ != itemEOF {
		if t.peek().typ == itemLeftDelim {
			delim := t.next()
			if t.nextNonSpace().typ == itemDefine {
				newT := New("definition") // name will be updated once we know it.
				newT.text = t.text
				newT.ParseName = t.ParseName
				newT.startParse(t.funcs, t.lex)
				newT.parseDefinition(treeSet)
				continue
			}
			t.backup2(delim)
		}
		n := t.textOrAction()
		if n.Type() == nodeEnd {
			t.errorf("unexpected %s", n)
		}
		t.Root.append(n)
	}
	return nil
}

// parseDefinition parses a {{define}} ... {{end}} template definition and
// installs the definition in the treeSet map. The "define" keyword has already
// been scanned.
func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
	const context = "define clause"
	name := t.expectOneOf(itemString, itemRawString, context)
	var err error
	t.Name, err = strconv.Unquote(name.val)
	if err != nil {
		t.error(err)
	}
	t.expect(itemRightDelim, context)
	var end Node
	t.Root, end = t.itemList()
	if end.Type() != nodeEnd {
		t.errorf("unexpected %s in %s", end, context)
	}
	t.add(treeSet)
	t.stopParse()
}

// itemList:
//	textOrAction*
// Terminates at {{end}} or {{else}}, returned separately.
func (t *Tree) itemList() (list *ListNode, next Node) {
	list = t.newList(t.peekNonSpace().pos)
	for t.peekNonSpace().typ != itemEOF {
		n := t.textOrAction()
		switch n.Type() {
		case nodeEnd, nodeElse:
			return list, n
		}
		list.append(n)
	}
	t.errorf("unexpected EOF")
	return
}

// textOrAction:
//	text | action
func (t *Tree) textOrAction() Node {
	switch token := t.nextNonSpace(); token.typ {
	case itemElideNewline:
		return t.elideNewline()
	case itemText:
		return t.newText(token.pos, token.val)
	case itemLeftDelim:
		return t.action()
	default:
		t.unexpected(token, "input")
	}
	return nil
}

// elideNewline:
// Remove newlines trailing rightDelim if \\ is present.
func (t *Tree) elideNewline() Node {
	token := t.peek()
	if token.typ != itemText {
		t.unexpected(token, "input")
		return nil
	}

	t.next()
	stripped := strings.TrimLeft(token.val, "\n\r")
	diff := len(token.val) - len(stripped)
	if diff > 0 {
		// This is a bit nasty. We mutate the token in-place to remove
		// preceding newlines.
		token.pos += Pos(diff)
		token.val = stripped
	}
	return t.newText(token.pos, token.val)
}

// Action:
//	control
//	command ("|" command)*
// Left delim is past. Now get actions.
// First word could be a keyword such as range.
func (t *Tree) action() (n Node) {
	switch token := t.nextNonSpace(); token.typ {
	case itemElse:
		return t.elseControl()
	case itemEnd:
		return t.endControl()
	case itemIf:
		return t.ifControl()
	case itemRange:
		return t.rangeControl()
	case itemTemplate:
		return t.templateControl()
	case itemWith:
		return t.withControl()
	}
	t.backup()
	// Do not pop variables; they persist until "end".
	return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
}

// Pipeline:
//	declarations? command ('|' command)*
func (t *Tree) pipeline(context string) (pipe *PipeNode) {
	var decl []*VariableNode
	pos := t.peekNonSpace().pos
	// Are there declarations?
	for {
		if v := t.peekNonSpace(); v.typ == itemVariable {
			t.next()
			// Since space is a token, we need 3-token look-ahead here in the worst case:
			// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
			// argument variable rather than a declaration. So remember the token
			// adjacent to the variable so we can push it back if necessary.
			tokenAfterVariable := t.peek()
			if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
				t.nextNonSpace()
				variable := t.newVariable(v.pos, v.val)
				decl = append(decl, variable)
				t.vars = append(t.vars, v.val)
				if next.typ == itemChar && next.val == "," {
					if context == "range" && len(decl) < 2 {
						continue
					}
					t.errorf("too many declarations in %s", context)
				}
			} else if tokenAfterVariable.typ == itemSpace {
				t.backup3(v, tokenAfterVariable)
			} else {
				t.backup2(v)
			}
		}
		break
	}
	pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
	for {
		switch token := t.nextNonSpace(); token.typ {
		case itemRightDelim, itemRightParen:
			if len(pipe.Cmds) == 0 {
				t.errorf("missing value for %s", context)
			}
			if token.typ == itemRightParen {
				t.backup()
			}
			return
		case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
			itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
			t.backup()
			pipe.append(t.command())
		default:
			t.unexpected(token, context)
		}
	}
}

func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
	defer t.popVars(len(t.vars))
	line = t.lex.lineNumber()
	pipe = t.pipeline(context)
	var next Node
	list, next = t.itemList()
	switch next.Type() {
	case nodeEnd: // done
	case nodeElse:
		if allowElseIf {
			// Special case for "else if". If the "else" is followed immediately by an "if",
			// the elseControl will have left the "if" token pending. Treat
			//	{{if a}}_{{else if b}}_{{end}}
			// as
			//	{{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
			// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
			// is assumed. This technique works even for long if-else-if chains.
			// TODO: Should we allow else-if in with and range?
			if t.peek().typ == itemIf {
				t.next() // Consume the "if" token.
				elseList = t.newList(next.Position())
				elseList.append(t.ifControl())
				// Do not consume the next item - only one {{end}} required.
				break
			}
		}
		elseList, next = t.itemList()
		if next.Type() != nodeEnd {
			t.errorf("expected end; found %s", next)
		}
	}
	return pipe.Position(), line, pipe, list, elseList
}

// If:
//	{{if pipeline}} itemList {{end}}
//	{{if pipeline}} itemList {{else}} itemList {{end}}
// If keyword is past.
func (t *Tree) ifControl() Node {
	return t.newIf(t.parseControl(true, "if"))
}

// Range:
//	{{range pipeline}} itemList {{end}}
//	{{range pipeline}} itemList {{else}} itemList {{end}}
// Range keyword is past.
func (t *Tree) rangeControl() Node {
	return t.newRange(t.parseControl(false, "range"))
}

// With:
//	{{with pipeline}} itemList {{end}}
//	{{with pipeline}} itemList {{else}} itemList {{end}}
// With keyword is past.
func (t *Tree) withControl() Node {
	return t.newWith(t.parseControl(false, "with"))
}

// End:
//	{{end}}
// End keyword is past.
func (t *Tree) endControl() Node {
	return t.newEnd(t.expect(itemRightDelim, "end").pos)
}

// Else:
//	{{else}}
// Else keyword is past.
func (t *Tree) elseControl() Node {
	// Special case for "else if".
	peek := t.peekNonSpace()
	if peek.typ == itemIf {
		// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
		return t.newElse(peek.pos, t.lex.lineNumber())
	}
	return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
}

// Template:
//	{{template stringValue pipeline}}
// Template keyword is past. The name must be something that can evaluate
// to a string.
func (t *Tree) templateControl() Node {
	var name string
	token := t.nextNonSpace()
	switch token.typ {
	case itemString, itemRawString:
		s, err := strconv.Unquote(token.val)
		if err != nil {
			t.error(err)
		}
		name = s
	default:
		t.unexpected(token, "template invocation")
	}
	var pipe *PipeNode
	if t.nextNonSpace().typ != itemRightDelim {
		t.backup()
		// Do not pop variables; they persist until "end".
		pipe = t.pipeline("template")
	}
	return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
}

// command:
//	operand (space operand)*
// Space-separated arguments up to a pipeline character or right delimiter.
// We consume the pipe character but leave the right delim to terminate the action.
func (t *Tree) command() *CommandNode {
	cmd := t.newCommand(t.peekNonSpace().pos)
	for {
		t.peekNonSpace() // skip leading spaces.
		operand := t.operand()
		if operand != nil {
			cmd.append(operand)
		}
		switch token := t.next(); token.typ {
		case itemSpace:
			continue
		case itemError:
			t.errorf("%s", token.val)
		case itemRightDelim, itemRightParen:
			t.backup()
		case itemPipe:
		default:
			t.errorf("unexpected %s in operand; missing space?", token)
		}
		break
	}
	if len(cmd.Args) == 0 {
		t.errorf("empty command")
	}
	return cmd
}

// operand:
//	term .Field*
// An operand is a space-separated component of a command,
// a term possibly followed by field accesses.
// A nil return means the next item is not an operand.
func (t *Tree) operand() Node {
	node := t.term()
	if node == nil {
		return nil
	}
	if t.peek().typ == itemField {
		chain := t.newChain(t.peek().pos, node)
		for t.peek().typ == itemField {
			chain.Add(t.next().val)
		}
		// Compatibility with original API: If the term is of type NodeField
		// or NodeVariable, just put more fields on the original.
		// Otherwise, keep the Chain node.
		// TODO: Switch to Chains always when we can.
		switch node.Type() {
		case NodeField:
			node = t.newField(chain.Position(), chain.String())
		case NodeVariable:
			node = t.newVariable(chain.Position(), chain.String())
		default:
			node = chain
		}
	}
	return node
}

// term:
//	literal (number, string, nil, boolean)
//	function (identifier)
//	.
//	.Field
//	$
//	'(' pipeline ')'
// A term is a simple "expression".
// A nil return means the next item is not a term.
func (t *Tree) term() Node {
	switch token := t.nextNonSpace(); token.typ {
	case itemError:
		t.errorf("%s", token.val)
	case itemIdentifier:
		if !t.hasFunction(token.val) {
			t.errorf("function %q not defined", token.val)
		}
		return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
	case itemDot:
		return t.newDot(token.pos)
	case itemNil:
		return t.newNil(token.pos)
	case itemVariable:
		return t.useVar(token.pos, token.val)
	case itemField:
		return t.newField(token.pos, token.val)
	case itemBool:
		return t.newBool(token.pos, token.val == "true")
	case itemCharConstant, itemComplex, itemNumber:
		number, err := t.newNumber(token.pos, token.val, token.typ)
		if err != nil {
			t.error(err)
		}
		return number
	case itemLeftParen:
		pipe := t.pipeline("parenthesized pipeline")
		if token := t.next(); token.typ != itemRightParen {
			t.errorf("unclosed right paren: unexpected %s", token)
		}
		return pipe
	case itemString, itemRawString:
		s, err := strconv.Unquote(token.val)
		if err != nil {
			t.error(err)
		}
		return t.newString(token.pos, token.val, s)
	}
	t.backup()
	return nil
}

// hasFunction reports if a function name exists in the Tree's maps.
func (t *Tree) hasFunction(name string) bool {
	for _, funcMap := range t.funcs {
		if funcMap == nil {
			continue
		}
		if funcMap[name] != nil {
			return true
		}
	}
	return false
}

// popVars trims the variable list to the specified length.
func (t *Tree) popVars(n int) {
	t.vars = t.vars[:n]
}

// useVar returns a node for a variable reference. It errors if the
// variable is not defined.
func (t *Tree) useVar(pos Pos, name string) Node {
	v := t.newVariable(pos, name)
	for _, varName := range t.vars {
		if varName == v.Ident[0] {
			return v
		}
	}
	t.errorf("undefined variable %q", v.Ident[0])
	return nil
}
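The exported entry point above is Parse, which returns the tree set keyed by template name. A small sketch, assuming the vendored import path (the String methods on the node types re-render the parsed text, so simple templates round-trip):

```go
package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	treeSet, err := parse.Parse("page", "Hello, {{.Name}}!", "", "", nil)
	if err != nil {
		panic(err)
	}
	// Each tree's Root is a *ListNode; printing it re-renders the input.
	fmt.Println(treeSet["page"].Root.String()) // Hello, {{.Name}}!
}
```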
218
vendor/github.com/alecthomas/template/template.go
generated
vendored
@@ -1,218 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package template

import (
	"fmt"
	"reflect"

	"github.com/alecthomas/template/parse"
)

// common holds the information shared by related templates.
type common struct {
	tmpl map[string]*Template
	// We use two maps, one for parsing and one for execution.
	// This separation makes the API cleaner since it doesn't
	// expose reflection to the client.
	parseFuncs FuncMap
	execFuncs  map[string]reflect.Value
}

// Template is the representation of a parsed template. The *parse.Tree
// field is exported only for use by html/template and should be treated
// as unexported by all other clients.
type Template struct {
	name string
	*parse.Tree
	*common
	leftDelim  string
	rightDelim string
}

// New allocates a new template with the given name.
func New(name string) *Template {
	return &Template{
		name: name,
	}
}

// Name returns the name of the template.
func (t *Template) Name() string {
	return t.name
}

// New allocates a new template associated with the given one and with the same
// delimiters. The association, which is transitive, allows one template to
// invoke another with a {{template}} action.
func (t *Template) New(name string) *Template {
	t.init()
	return &Template{
		name:       name,
		common:     t.common,
		leftDelim:  t.leftDelim,
		rightDelim: t.rightDelim,
	}
}

func (t *Template) init() {
	if t.common == nil {
		t.common = new(common)
		t.tmpl = make(map[string]*Template)
		t.parseFuncs = make(FuncMap)
		t.execFuncs = make(map[string]reflect.Value)
	}
}

// Clone returns a duplicate of the template, including all associated
// templates. The actual representation is not copied, but the name space of
// associated templates is, so further calls to Parse in the copy will add
// templates to the copy but not to the original. Clone can be used to prepare
// common templates and use them with variant definitions for other templates
// by adding the variants after the clone is made.
func (t *Template) Clone() (*Template, error) {
	nt := t.copy(nil)
	nt.init()
	nt.tmpl[t.name] = nt
	for k, v := range t.tmpl {
		if k == t.name { // Already installed.
			continue
		}
		// The associated templates share nt's common structure.
		tmpl := v.copy(nt.common)
		nt.tmpl[k] = tmpl
	}
	for k, v := range t.parseFuncs {
		nt.parseFuncs[k] = v
	}
	for k, v := range t.execFuncs {
		nt.execFuncs[k] = v
	}
	return nt, nil
}

// copy returns a shallow copy of t, with common set to the argument.
func (t *Template) copy(c *common) *Template {
	nt := New(t.name)
	nt.Tree = t.Tree
	nt.common = c
	nt.leftDelim = t.leftDelim
	nt.rightDelim = t.rightDelim
	return nt
}

// AddParseTree creates a new template with the name and parse tree
// and associates it with t.
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
	if t.common != nil && t.tmpl[name] != nil {
		return nil, fmt.Errorf("template: redefinition of template %q", name)
	}
	nt := t.New(name)
	nt.Tree = tree
	t.tmpl[name] = nt
	return nt, nil
}

// Templates returns a slice of the templates associated with t, including t
// itself.
func (t *Template) Templates() []*Template {
	if t.common == nil {
		return nil
	}
	// Return a slice so we don't expose the map.
	m := make([]*Template, 0, len(t.tmpl))
	for _, v := range t.tmpl {
		m = append(m, v)
	}
	return m
}

// Delims sets the action delimiters to the specified strings, to be used in
// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
// definitions will inherit the settings. An empty delimiter stands for the
// corresponding default: {{ or }}.
// The return value is the template, so calls can be chained.
func (t *Template) Delims(left, right string) *Template {
	t.leftDelim = left
	t.rightDelim = right
	return t
}

// Funcs adds the elements of the argument map to the template's function map.
// It panics if a value in the map is not a function with appropriate return
// type. However, it is legal to overwrite elements of the map. The return
// value is the template, so calls can be chained.
func (t *Template) Funcs(funcMap FuncMap) *Template {
	t.init()
	addValueFuncs(t.execFuncs, funcMap)
	addFuncs(t.parseFuncs, funcMap)
	return t
}

// Lookup returns the template with the given name that is associated with t,
// or nil if there is no such template.
func (t *Template) Lookup(name string) *Template {
	if t.common == nil {
		return nil
	}
	return t.tmpl[name]
}

// Parse parses a string into a template. Nested template definitions will be
// associated with the top-level template t. Parse may be called multiple times
// to parse definitions of templates to associate with t. It is an error if a
// resulting template is non-empty (contains content other than template
// definitions) and would replace a non-empty template with the same name.
// (In multiple calls to Parse with the same receiver template, only one call
// can contain text other than space, comments, and template definitions.)
func (t *Template) Parse(text string) (*Template, error) {
	t.init()
	trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
	if err != nil {
		return nil, err
	}
	// Add the newly parsed trees, including the one for t, into our common structure.
	for name, tree := range trees {
		// If the name we parsed is the name of this template, overwrite this template.
		// The associate method checks it's not a redefinition.
		tmpl := t
		if name != t.name {
			tmpl = t.New(name)
		}
		// Even if t == tmpl, we need to install it in the common.tmpl map.
		if replace, err := t.associate(tmpl, tree); err != nil {
			return nil, err
		} else if replace {
			tmpl.Tree = tree
		}
		tmpl.leftDelim = t.leftDelim
		tmpl.rightDelim = t.rightDelim
	}
	return t, nil
}

// associate installs the new template into the group of templates associated
// with t. It is an error to reuse a name except to overwrite an empty
// template. The two are already known to share the common structure.
// The boolean return value reports whether to store this tree as t.Tree.
func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
	if new.common != t.common {
		panic("internal error: associate not common")
	}
	name := new.name
	if old := t.tmpl[name]; old != nil {
		oldIsEmpty := parse.IsEmptyTree(old.Root)
		newIsEmpty := parse.IsEmptyTree(tree.Root)
		if newIsEmpty {
			// Whether old is empty or not, new is empty; no reason to replace old.
			return false, nil
		}
		if !oldIsEmpty {
			return false, fmt.Errorf("template: redefinition of template %q", name)
		}
	}
	t.tmpl[name] = new
	return true, nil
}
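The Clone doc comment above describes the intended workflow: prepare the shared templates, clone, then add variant definitions to each clone. A minimal sketch using only the methods defined in this file (execution is omitted, since it lives elsewhere in the package):

```go
package main

import "github.com/alecthomas/template"

func main() {
	// Shared skeleton that invokes a "body" template defined later.
	base := template.New("page")
	if _, err := base.Parse(`<main>{{template "body" .}}</main>`); err != nil {
		panic(err)
	}

	// Each clone gets its own namespace of associated templates, so
	// defining "body" in one clone does not affect the other or the
	// original.
	a, _ := base.Clone()
	a.Parse(`{{define "body"}}variant A{{end}}`)

	b, _ := base.Clone()
	b.Parse(`{{define "body"}}variant B{{end}}`)
}
```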
19
vendor/github.com/alecthomas/units/COPYING
generated
vendored
@@ -1,19 +0,0 @@
Copyright (C) 2014 Alec Thomas

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
11
vendor/github.com/alecthomas/units/README.md
generated
vendored
@@ -1,11 +0,0 @@
# Units - Helpful unit multipliers and functions for Go

The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package.

It allows for code like this:

```go
n, err := ParseBase2Bytes("1KB")
// n == 1024
n = units.Mebibyte * 512
```
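For the reverse direction, the package's String methods render a value back into mixed units. A small sketch based on the ToString implementation further below (the exact output format follows from that code):

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// 1536 bytes = 1 KiB + 512 B; ToString emits one component per
	// non-zero remainder at each scale step.
	fmt.Println(units.Base2Bytes(1536).String()) // 1KiB512B
}
```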
83
vendor/github.com/alecthomas/units/bytes.go
generated
vendored
@@ -1,83 +0,0 @@
package units

// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
// etc.).
type Base2Bytes int64

// Base-2 byte units.
const (
	Kibibyte Base2Bytes = 1024
	KiB                 = Kibibyte
	Mebibyte            = Kibibyte * 1024
	MiB                 = Mebibyte
	Gibibyte            = Mebibyte * 1024
	GiB                 = Gibibyte
	Tebibyte            = Gibibyte * 1024
	TiB                 = Tebibyte
	Pebibyte            = Tebibyte * 1024
	PiB                 = Pebibyte
	Exbibyte            = Pebibyte * 1024
	EiB                 = Exbibyte
)

var (
	bytesUnitMap    = MakeUnitMap("iB", "B", 1024)
	oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
)

// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
// and KiB are both 1024.
func ParseBase2Bytes(s string) (Base2Bytes, error) {
	n, err := ParseUnit(s, bytesUnitMap)
	if err != nil {
		n, err = ParseUnit(s, oldBytesUnitMap)
	}
	return Base2Bytes(n), err
}

func (b Base2Bytes) String() string {
	return ToString(int64(b), 1024, "iB", "B")
}

var (
	metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
)

// MetricBytes are SI byte units (1000 bytes in a kilobyte).
type MetricBytes SI

// SI base-10 byte units.
const (
	Kilobyte MetricBytes = 1000
	KB                   = Kilobyte
	Megabyte             = Kilobyte * 1000
	MB                   = Megabyte
	Gigabyte             = Megabyte * 1000
	GB                   = Gigabyte
	Terabyte             = Gigabyte * 1000
	TB                   = Terabyte
	Petabyte             = Terabyte * 1000
	PB                   = Petabyte
	Exabyte              = Petabyte * 1000
	EB                   = Exabyte
)

// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
func ParseMetricBytes(s string) (MetricBytes, error) {
	n, err := ParseUnit(s, metricBytesUnitMap)
	return MetricBytes(n), err
}

func (m MetricBytes) String() string {
	return ToString(int64(m), 1000, "B", "B")
}

// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
// respectively. That is, KiB represents 1024 and KB represents 1000.
func ParseStrictBytes(s string) (int64, error) {
	n, err := ParseUnit(s, bytesUnitMap)
	if err != nil {
		n, err = ParseUnit(s, metricBytesUnitMap)
	}
	return int64(n), err
}
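The three parsers differ only in which unit maps they consult and in what order they fall back. A short sketch of the resulting behaviour, derived from the code above:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	b, _ := units.ParseBase2Bytes("1KB")   // falls back to the old map: 1024
	m, _ := units.ParseMetricBytes("1KB")  // SI map only: 1000
	s, _ := units.ParseStrictBytes("1KB")  // strict: KB is metric, so 1000
	k, _ := units.ParseStrictBytes("1KiB") // strict: KiB is base-2, so 1024
	fmt.Printf("%d %d %d %d\n", b, m, s, k) // 1024 1000 1000 1024
}
```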
13
vendor/github.com/alecthomas/units/doc.go
generated
vendored
@@ -1,13 +0,0 @@
// Package units provides helpful unit multipliers and functions for Go.
//
// The goal of this package is to have functionality similar to the time [1] package.
//
//
// [1] http://golang.org/pkg/time/
//
// It allows for code like this:
//
//	n, err := ParseBase2Bytes("1KB")
//	// n == 1024
//	n = units.Mebibyte * 512
package units
26
vendor/github.com/alecthomas/units/si.go
generated
vendored
@@ -1,26 +0,0 @@
package units

// SI units.
type SI int64

// SI unit multiples.
const (
	Kilo SI = 1000
	Mega    = Kilo * 1000
	Giga    = Mega * 1000
	Tera    = Giga * 1000
	Peta    = Tera * 1000
	Exa     = Peta * 1000
)

func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
	return map[string]float64{
		shortSuffix:  1,
		"K" + suffix: float64(scale),
		"M" + suffix: float64(scale * scale),
		"G" + suffix: float64(scale * scale * scale),
		"T" + suffix: float64(scale * scale * scale * scale),
		"P" + suffix: float64(scale * scale * scale * scale * scale),
		"E" + suffix: float64(scale * scale * scale * scale * scale * scale),
	}
}
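To see what `MakeUnitMap` above produces, here is a standalone sketch (not part of the diff) with the function inlined, showing the two suffix tables that `bytes.go` builds from it:

```go
package main

import "fmt"

// makeUnitMap is an inlined copy of MakeUnitMap from si.go above, so this
// sketch runs on its own.
func makeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
	return map[string]float64{
		shortSuffix:  1,
		"K" + suffix: float64(scale),
		"M" + suffix: float64(scale * scale),
		"G" + suffix: float64(scale * scale * scale),
		"T" + suffix: float64(scale * scale * scale * scale),
		"P" + suffix: float64(scale * scale * scale * scale * scale),
		"E" + suffix: float64(scale * scale * scale * scale * scale * scale),
	}
}

func main() {
	// The base-2 table used by ParseBase2Bytes: "KiB" -> 1024, "MiB" -> 1024².
	fmt.Println(makeUnitMap("iB", "B", 1024)["MiB"]) // 1.048576e+06

	// The metric table used by ParseMetricBytes: "MB" -> 1000².
	fmt.Println(makeUnitMap("B", "B", 1000)["MB"]) // 1e+06
}
```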
138 vendor/github.com/alecthomas/units/util.go generated vendored
@@ -1,138 +0,0 @@
package units

import (
	"errors"
	"fmt"
	"strings"
)

var (
	siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
)

func ToString(n int64, scale int64, suffix, baseSuffix string) string {
	mn := len(siUnits)
	out := make([]string, mn)
	for i, m := range siUnits {
		if n%scale != 0 || i == 0 && n == 0 {
			s := suffix
			if i == 0 {
				s = baseSuffix
			}
			out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
		}
		n /= scale
		if n == 0 {
			break
		}
	}
	return strings.Join(out, "")
}

// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
var errLeadingInt = errors.New("units: bad [0-9]*") // never printed

// leadingInt consumes the leading [0-9]* from s.
func leadingInt(s string) (x int64, rem string, err error) {
	i := 0
	for ; i < len(s); i++ {
		c := s[i]
		if c < '0' || c > '9' {
			break
		}
		if x >= (1<<63-10)/10 {
			// overflow
			return 0, "", errLeadingInt
		}
		x = x*10 + int64(c) - '0'
	}
	return x, s[i:], nil
}

func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
	// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
	orig := s
	f := float64(0)
	neg := false

	// Consume [-+]?
	if s != "" {
		c := s[0]
		if c == '-' || c == '+' {
			neg = c == '-'
			s = s[1:]
		}
	}
	// Special case: if all that is left is "0", this is zero.
	if s == "0" {
		return 0, nil
	}
	if s == "" {
		return 0, errors.New("units: invalid " + orig)
	}
	for s != "" {
		g := float64(0) // this element of the sequence

		var x int64
		var err error

		// The next character must be [0-9.]
		if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
			return 0, errors.New("units: invalid " + orig)
		}
		// Consume [0-9]*
		pl := len(s)
		x, s, err = leadingInt(s)
		if err != nil {
			return 0, errors.New("units: invalid " + orig)
		}
		g = float64(x)
		pre := pl != len(s) // whether we consumed anything before a period

		// Consume (\.[0-9]*)?
		post := false
		if s != "" && s[0] == '.' {
			s = s[1:]
			pl := len(s)
			x, s, err = leadingInt(s)
			if err != nil {
				return 0, errors.New("units: invalid " + orig)
			}
			scale := 1.0
			for n := pl - len(s); n > 0; n-- {
				scale *= 10
			}
			g += float64(x) / scale
			post = pl != len(s)
		}
		if !pre && !post {
			// no digits (e.g. ".s" or "-.s")
			return 0, errors.New("units: invalid " + orig)
		}

		// Consume unit.
		i := 0
		for ; i < len(s); i++ {
			c := s[i]
			if c == '.' || ('0' <= c && c <= '9') {
				break
			}
		}
		u := s[:i]
		s = s[i:]
		unit, ok := unitMap[u]
		if !ok {
			return 0, errors.New("units: unknown unit " + u + " in " + orig)
		}

		f += g * unit
	}

	if neg {
		f = -f
	}
	if f < float64(-1<<63) || f > float64(1<<63-1) {
		return 0, errors.New("units: overflow parsing unit")
	}
	return int64(f), nil
}
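The grammar comment in `ParseUnit` above, `[-+]?([0-9]*(\.[0-9]*)?[a-z]+)+`, allows several coefficient/unit terms to be chained and summed, much like `time.ParseDuration` accepts `"1h30m"`. A sketch of what that accepts (illustrative only; it assumes the vendored import path):

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// One term with a fractional coefficient.
	n, _ := units.ParseStrictBytes("1.5MB")
	fmt.Println(n) // 1500000

	// Chained terms are summed: 1 MiB + 512 KiB.
	n, _ = units.ParseStrictBytes("1MiB512KiB")
	fmt.Println(n) // 1572864

	// A single leading sign applies to the whole value.
	n, _ = units.ParseStrictBytes("-1KB")
	fmt.Println(n) // -1000
}
```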
27 vendor/github.com/klauspost/compress/LICENSE generated vendored
@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 vendor/github.com/klauspost/compress/flate/copy.go generated vendored
@@ -1,32 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

// forwardCopy is like the built-in copy function except that it always goes
// forward from the start, even if the dst and src overlap.
// It is equivalent to:
//	for i := 0; i < n; i++ {
//		mem[dst+i] = mem[src+i]
//	}
func forwardCopy(mem []byte, dst, src, n int) {
	if dst <= src {
		copy(mem[dst:dst+n], mem[src:src+n])
		return
	}
	for {
		if dst >= src+n {
			copy(mem[dst:dst+n], mem[src:src+n])
			return
		}
		// There is some forward overlap. The destination
		// will be filled with a repeated pattern of mem[src:src+k].
		// We copy one instance of the pattern here, then repeat.
		// Each time around this loop k will double.
		k := dst - src
		copy(mem[dst:dst+k], mem[src:src+k])
		n -= k
		dst += k
	}
}
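A quick standalone illustration (not part of the diff) of the overlap behavior the comments above describe: when `dst > src`, the copied region acts as run-length expansion, with the pattern width `k` doubling each iteration:

```go
package main

import "fmt"

// forwardCopy is reproduced from copy.go above so the sketch runs standalone.
func forwardCopy(mem []byte, dst, src, n int) {
	if dst <= src {
		copy(mem[dst:dst+n], mem[src:src+n])
		return
	}
	for {
		if dst >= src+n {
			copy(mem[dst:dst+n], mem[src:src+n])
			return
		}
		k := dst - src
		copy(mem[dst:dst+k], mem[src:src+k])
		n -= k
		dst += k
	}
}

func main() {
	mem := []byte("ab______")
	// Overlapping forward copy: the two-byte pattern "ab" repeats forward.
	forwardCopy(mem, 2, 0, 6)
	fmt.Println(string(mem)) // "abababab"
}
```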
41 vendor/github.com/klauspost/compress/flate/crc32_amd64.go generated vendored
@@ -1,41 +0,0 @@
//+build !noasm
//+build !appengine

// Copyright 2015, Klaus Post, see LICENSE for details.

package flate

import (
	"github.com/klauspost/cpuid"
)

// crc32sse returns a hash for the first 4 bytes of the slice.
// len(a) must be >= 4.
//go:noescape
func crc32sse(a []byte) uint32

// crc32sseAll calculates hashes for each 4-byte set in a.
// dst must be at least len(a) - 4 in size.
// The size is not checked by the assembly.
//go:noescape
func crc32sseAll(a []byte, dst []uint32)

// matchLenSSE4 returns the number of matching bytes in a and b
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
//
// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions.
//
//go:noescape
func matchLenSSE4(a, b []byte, max int) int

// histogram accumulates a histogram of b in h.
// h must be at least 256 entries in length,
// and must be cleared before calling this function.
//go:noescape
func histogram(b []byte, h []int32)

// Detect SSE 4.2 feature.
func init() {
	useSSE42 = cpuid.CPU.SSE42()
}
213 vendor/github.com/klauspost/compress/flate/crc32_amd64.s generated vendored
@@ -1,213 +0,0 @@
//+build !noasm
//+build !appengine

// Copyright 2015, Klaus Post, see LICENSE for details.

// func crc32sse(a []byte) uint32
TEXT ·crc32sse(SB), 4, $0
	MOVQ a+0(FP), R10
	XORQ BX, BX

	// CRC32 dword (R10), EBX
	BYTE $0xF2; BYTE $0x41; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0x1a

	MOVL BX, ret+24(FP)
	RET

// func crc32sseAll(a []byte, dst []uint32)
TEXT ·crc32sseAll(SB), 4, $0
	MOVQ  a+0(FP), R10     // R8: src
	MOVQ  a_len+8(FP), R10 // input length
	MOVQ  dst+24(FP), R9   // R9: dst
	SUBQ  $4, R10
	JS    end
	JZ    one_crc
	MOVQ  R10, R13
	SHRQ  $2, R10 // len/4
	ANDQ  $3, R13 // len&3
	XORQ  BX, BX
	ADDQ  $1, R13
	TESTQ R10, R10
	JZ    rem_loop

crc_loop:
	MOVQ (R8), R11
	XORQ BX, BX
	XORQ DX, DX
	XORQ DI, DI
	MOVQ R11, R12
	SHRQ $8, R11
	MOVQ R12, AX
	MOVQ R11, CX
	SHRQ $16, R12
	SHRQ $16, R11
	MOVQ R12, SI

	// CRC32 EAX, EBX
	BYTE $0xF2; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0xd8

	// CRC32 ECX, EDX
	BYTE $0xF2; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0xd1

	// CRC32 ESI, EDI
	BYTE $0xF2; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0xfe
	MOVL BX, (R9)
	MOVL DX, 4(R9)
	MOVL DI, 8(R9)

	XORQ BX, BX
	MOVL R11, AX

	// CRC32 EAX, EBX
	BYTE $0xF2; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0xd8
	MOVL BX, 12(R9)

	ADDQ $16, R9
	ADDQ $4, R8
	XORQ BX, BX
	SUBQ $1, R10
	JNZ  crc_loop

rem_loop:
	MOVL (R8), AX

	// CRC32 EAX, EBX
	BYTE $0xF2; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0xd8

	MOVL BX, (R9)
	ADDQ $4, R9
	ADDQ $1, R8
	XORQ BX, BX
	SUBQ $1, R13
	JNZ  rem_loop

end:
	RET

one_crc:
	MOVQ $1, R13
	XORQ BX, BX
	JMP  rem_loop

// func matchLenSSE4(a, b []byte, max int) int
TEXT ·matchLenSSE4(SB), 4, $0
	MOVQ a_base+0(FP), SI
	MOVQ b_base+24(FP), DI
	MOVQ DI, DX
	MOVQ max+48(FP), CX

cmp8:
	// As long as we are 8 or more bytes before the end of max, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMPQ CX, $8
	JLT  cmp1
	MOVQ (SI), AX
	MOVQ (DI), BX
	CMPQ AX, BX
	JNE  bsf
	ADDQ $8, SI
	ADDQ $8, DI
	SUBQ $8, CX
	JMP  cmp8

bsf:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs. The BSF instruction finds the
	// least significant 1 bit, the amd64 architecture is little-endian, and
	// the shift by 3 converts a bit index to a byte index.
	XORQ AX, BX
	BSFQ BX, BX
	SHRQ $3, BX
	ADDQ BX, DI

	// Subtract off &b[0] to convert from &b[ret] to ret, and return.
	SUBQ DX, DI
	MOVQ DI, ret+56(FP)
	RET

cmp1:
	// In the slices' tail, compare 1 byte at a time.
	CMPQ CX, $0
	JEQ  matchLenEnd
	MOVB (SI), AX
	MOVB (DI), BX
	CMPB AX, BX
	JNE  matchLenEnd
	ADDQ $1, SI
	ADDQ $1, DI
	SUBQ $1, CX
	JMP  cmp1

matchLenEnd:
	// Subtract off &b[0] to convert from &b[ret] to ret, and return.
	SUBQ DX, DI
	MOVQ DI, ret+56(FP)
	RET

// func histogram(b []byte, h []int32)
TEXT ·histogram(SB), 4, $0
	MOVQ b+0(FP), SI     // SI: &b
	MOVQ b_len+8(FP), R9 // R9: len(b)
	MOVQ h+24(FP), DI    // DI: Histogram
	MOVQ R9, R8
	SHRQ $3, R8
	JZ   hist1
	XORQ R11, R11

loop_hist8:
	MOVQ (SI), R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	INCL (DI)(R10*4)

	ADDQ $8, SI
	DECQ R8
	JNZ  loop_hist8

hist1:
	ANDQ $7, R9
	JZ   end_hist
	XORQ R10, R10

loop_hist1:
	MOVB (SI), R10
	INCL (DI)(R10*4)
	INCQ SI
	DECQ R9
	JNZ  loop_hist1

end_hist:
	RET
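The `bsf` label above relies on a neat trick: XOR two 8-byte words, then the index of the lowest set bit (BSF), divided by 8, is the index of the first differing byte on a little-endian machine. A pure-Go sketch of the same idea (illustrative only; `matchLen8` is a hypothetical helper, not part of the package):

```go
package main

import (
	"fmt"
	"math/bits"
)

// matchLen8 mirrors the XOR+BSF trick from the assembly above in pure Go.
func matchLen8(a, b uint64) int {
	x := a ^ b
	if x == 0 {
		return 8 // all eight bytes match
	}
	// Lowest set bit index / 8 = first differing byte (little-endian).
	return bits.TrailingZeros64(x) >> 3
}

func main() {
	// Bytes 0-2 (0x11, 0x22, 0x33) match; byte 3 differs (0x44 vs 0x55).
	fmt.Println(matchLen8(0x44332211, 0x55332211)) // 3
}
```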
35 vendor/github.com/klauspost/compress/flate/crc32_noasm.go generated vendored
@@ -1,35 +0,0 @@
//+build !amd64 noasm appengine

// Copyright 2015, Klaus Post, see LICENSE for details.

package flate

func init() {
	useSSE42 = false
}

// crc32sse should never be called.
func crc32sse(a []byte) uint32 {
	panic("no assembler")
}

// crc32sseAll should never be called.
func crc32sseAll(a []byte, dst []uint32) {
	panic("no assembler")
}

// matchLenSSE4 should never be called.
func matchLenSSE4(a, b []byte, max int) int {
	panic("no assembler")
	return 0
}

// histogram accumulates a histogram of b in h.
//
// len(h) must be >= 256, and h's elements must be all zeroes.
func histogram(b []byte, h []int32) {
	h = h[:256]
	for _, t := range b {
		h[t]++
	}
}
1353 vendor/github.com/klauspost/compress/flate/deflate.go generated vendored
File diff suppressed because it is too large
184 vendor/github.com/klauspost/compress/flate/dict_decoder.go generated vendored
@@ -1,184 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
// * Literal insertions: Runs of one or more symbols are inserted into the data
// stream as is. This is accomplished through the writeByte method for a
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
// Any valid stream must start with a literal insertion if no preset dictionary
// is used.
//
// * Backward copies: Runs of one or more symbols are copied from previously
// emitted data. Backward copies come as the tuple (dist, length) where dist
// determines how far back in the stream to copy from and length determines how
// many bytes to copy. Note that it is valid for the length to be greater than
// the distance. Since LZ77 uses forward copies, that situation is used to
// perform a form of run-length encoding on repeated runs of symbols.
// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
type dictDecoder struct {
	hist []byte // Sliding window history

	// Invariant: 0 <= rdPos <= wrPos <= len(hist)
	wrPos int  // Current output position in buffer
	rdPos int  // Have emitted hist[:rdPos] already
	full  bool // Has a full window length been written yet?
}

// init initializes dictDecoder to have a sliding window dictionary of the given
// size. If a preset dict is provided, it will initialize the dictionary with
// the contents of dict.
func (dd *dictDecoder) init(size int, dict []byte) {
	*dd = dictDecoder{hist: dd.hist}

	if cap(dd.hist) < size {
		dd.hist = make([]byte, size)
	}
	dd.hist = dd.hist[:size]

	if len(dict) > len(dd.hist) {
		dict = dict[len(dict)-len(dd.hist):]
	}
	dd.wrPos = copy(dd.hist, dict)
	if dd.wrPos == len(dd.hist) {
		dd.wrPos = 0
		dd.full = true
	}
	dd.rdPos = dd.wrPos
}

// histSize reports the total amount of historical data in the dictionary.
func (dd *dictDecoder) histSize() int {
	if dd.full {
		return len(dd.hist)
	}
	return dd.wrPos
}

// availRead reports the number of bytes that can be flushed by readFlush.
func (dd *dictDecoder) availRead() int {
	return dd.wrPos - dd.rdPos
}

// availWrite reports the available amount of output buffer space.
func (dd *dictDecoder) availWrite() int {
	return len(dd.hist) - dd.wrPos
}

// writeSlice returns a slice of the available buffer to write data to.
//
// This invariant will be kept: len(s) <= availWrite()
func (dd *dictDecoder) writeSlice() []byte {
	return dd.hist[dd.wrPos:]
}

// writeMark advances the writer pointer by cnt.
//
// This invariant must be kept: 0 <= cnt <= availWrite()
func (dd *dictDecoder) writeMark(cnt int) {
	dd.wrPos += cnt
}

// writeByte writes a single byte to the dictionary.
//
// This invariant must be kept: 0 < availWrite()
func (dd *dictDecoder) writeByte(c byte) {
	dd.hist[dd.wrPos] = c
	dd.wrPos++
}

// writeCopy copies a string at a given (dist, length) to the output.
// This returns the number of bytes copied and may be less than the requested
// length if the available space in the output buffer is too small.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) writeCopy(dist, length int) int {
	dstBase := dd.wrPos
	dstPos := dstBase
	srcPos := dstPos - dist
	endPos := dstPos + length
	if endPos > len(dd.hist) {
		endPos = len(dd.hist)
	}

	// Copy non-overlapping section after destination position.
	//
	// This section is non-overlapping in that the copy length for this section
	// is always less than or equal to the backwards distance. This can occur
	// if a distance refers to data that wraps-around in the buffer.
	// Thus, a backwards copy is performed here; that is, the exact bytes in
	// the source prior to the copy is placed in the destination.
	if srcPos < 0 {
		srcPos += len(dd.hist)
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
		srcPos = 0
	}

	// Copy possibly overlapping section before destination position.
	//
	// This section can overlap if the copy length for this section is larger
	// than the backwards distance. This is allowed by LZ77 so that repeated
	// strings can be succinctly represented using (dist, length) pairs.
	// Thus, a forwards copy is performed here; that is, the bytes copied is
	// possibly dependent on the resulting bytes in the destination as the copy
	// progresses along. This is functionally equivalent to the following:
	//
	//	for i := 0; i < endPos-dstPos; i++ {
	//		dd.hist[dstPos+i] = dd.hist[srcPos+i]
	//	}
	//	dstPos = endPos
	//
	for dstPos < endPos {
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	}

	dd.wrPos = dstPos
	return dstPos - dstBase
}

// tryWriteCopy tries to copy a string at a given (distance, length) to the
// output. This specialized version is optimized for short distances.
//
// This method is designed to be inlined for performance reasons.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
	dstPos := dd.wrPos
	endPos := dstPos + length
	if dstPos < dist || endPos > len(dd.hist) {
		return 0
	}
	dstBase := dstPos
	srcPos := dstPos - dist

	// Copy possibly overlapping section before destination position.
loop:
	dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	if dstPos < endPos {
		goto loop // Avoid for-loop so that this function can be inlined
	}

	dd.wrPos = dstPos
	return dstPos - dstBase
}

// readFlush returns a slice of the historical buffer that is ready to be
// emitted to the user. The data returned by readFlush must be fully consumed
// before calling any other dictDecoder methods.
func (dd *dictDecoder) readFlush() []byte {
	toRead := dd.hist[dd.rdPos:dd.wrPos]
	dd.rdPos = dd.wrPos
	if dd.wrPos == len(dd.hist) {
		dd.wrPos, dd.rdPos = 0, 0
		dd.full = true
	}
	return toRead
}
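The "length greater than distance" case that the dictDecoder comments above describe is the core of LZ77 run-length expansion. A tiny standalone sketch of the semantics (not the package's code, just an illustration):

```go
package main

import "fmt"

func main() {
	// An LZ77 backward copy with dist=1, length=5 starting from a single "a":
	// each copied byte is immediately available as a source for the next,
	// so the run expands forward, exactly as writeCopy's forward loop does.
	out := []byte{'a'}
	dist, length := 1, 5
	for i := 0; i < length; i++ {
		out = append(out, out[len(out)-dist])
	}
	fmt.Println(string(out)) // "aaaaaa"
}
```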
265 vendor/github.com/klauspost/compress/flate/gen.go generated vendored
@@ -1,265 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

// This program generates fixedhuff.go
// Invoke as
//
//	go run gen.go -output fixedhuff.go

package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"io/ioutil"
	"log"
)

var filename = flag.String("output", "fixedhuff.go", "output file name")

const maxCodeLen = 16

// Note: the definition of the huffmanDecoder struct is copied from
// inflate.go, as it is private to the implementation.

// chunk & 15 is number of bits
// chunk >> 4 is value, including table link

const (
	huffmanChunkBits  = 9
	huffmanNumChunks  = 1 << huffmanChunkBits
	huffmanCountMask  = 15
	huffmanValueShift = 4
)

type huffmanDecoder struct {
	min      int                      // the minimum code length
	chunks   [huffmanNumChunks]uint32 // chunks as described above
	links    [][]uint32               // overflow links
	linkMask uint32                   // mask the width of the link table
}

// Initialize Huffman decoding tables from array of code lengths.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted.
func (h *huffmanDecoder) init(bits []int) bool {
	// Sanity enables additional runtime tests during Huffman
	// table construction. It's intended to be used during
	// development to supplement the currently ad-hoc unit tests.
	const sanity = false

	if h.min != 0 {
		*h = huffmanDecoder{}
	}

	// Count number of codes of each length,
	// compute min and max length.
	var count [maxCodeLen]int
	var min, max int
	for _, n := range bits {
		if n == 0 {
			continue
		}
		if min == 0 || n < min {
			min = n
		}
		if n > max {
			max = n
		}
		count[n]++
	}

	// Empty tree. The decompressor.huffSym function will fail later if the tree
	// is used. Technically, an empty tree is only valid for the HDIST tree and
	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
	// is guaranteed to fail since it will attempt to use the tree to decode the
	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
	// guaranteed to fail later since the compressed data section must be
	// composed of at least one symbol (the end-of-block marker).
	if max == 0 {
		return true
	}

	code := 0
	var nextcode [maxCodeLen]int
	for i := min; i <= max; i++ {
		code <<= 1
		nextcode[i] = code
		code += count[i]
	}

	// Check that the coding is complete (i.e., that we've
	// assigned all 2-to-the-max possible bit sequences).
	// Exception: To be compatible with zlib, we also need to
	// accept degenerate single-code codings. See also
	// TestDegenerateHuffmanCoding.
	if code != 1<<uint(max) && !(code == 1 && max == 1) {
		return false
	}

	h.min = min
	if max > huffmanChunkBits {
		numLinks := 1 << (uint(max) - huffmanChunkBits)
		h.linkMask = uint32(numLinks - 1)

		// create link tables
		link := nextcode[huffmanChunkBits+1] >> 1
		h.links = make([][]uint32, huffmanNumChunks-link)
		for j := uint(link); j < huffmanNumChunks; j++ {
			reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
			reverse >>= uint(16 - huffmanChunkBits)
			off := j - uint(link)
			if sanity && h.chunks[reverse] != 0 {
				panic("impossible: overwriting existing chunk")
			}
			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
			h.links[off] = make([]uint32, numLinks)
		}
	}

	for i, n := range bits {
		if n == 0 {
			continue
		}
		code := nextcode[n]
		nextcode[n]++
		chunk := uint32(i<<huffmanValueShift | n)
		reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
		reverse >>= uint(16 - n)
		if n <= huffmanChunkBits {
			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
				// We should never need to overwrite
				// an existing chunk. Also, 0 is
				// never a valid chunk, because the
				// lower 4 "count" bits should be
				// between 1 and 15.
				if sanity && h.chunks[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				h.chunks[off] = chunk
			}
		} else {
			j := reverse & (huffmanNumChunks - 1)
			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
				// Longer codes should have been
				// associated with a link table above.
				panic("impossible: not an indirect chunk")
			}
			value := h.chunks[j] >> huffmanValueShift
			linktab := h.links[value]
			reverse >>= huffmanChunkBits
			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
				if sanity && linktab[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				linktab[off] = chunk
			}
		}
	}

	if sanity {
		// Above we've sanity checked that we never overwrote
		// an existing entry. Here we additionally check that
		// we filled the tables completely.
		for i, chunk := range h.chunks {
			if chunk == 0 {
				// As an exception, in the degenerate
				// single-code case, we allow odd
				// chunks to be missing.
				if code == 1 && i%2 == 1 {
					continue
				}
				panic("impossible: missing chunk")
			}
		}
		for _, linktab := range h.links {
			for _, chunk := range linktab {
				if chunk == 0 {
					panic("impossible: missing chunk")
				}
			}
		}
	}

	return true
}

func main() {
	flag.Parse()

	var h huffmanDecoder
	var bits [288]int
	initReverseByte()
	for i := 0; i < 144; i++ {
		bits[i] = 8
	}
	for i := 144; i < 256; i++ {
		bits[i] = 9
	}
	for i := 256; i < 280; i++ {
		bits[i] = 7
	}
	for i := 280; i < 288; i++ {
		bits[i] = 8
	}
	h.init(bits[:])
	if h.links != nil {
		log.Fatal("Unexpected links table in fixed Huffman decoder")
	}

	var buf bytes.Buffer

	fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.`+"\n\n")

	fmt.Fprintln(&buf, "package flate")
	fmt.Fprintln(&buf)
	fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT")
	fmt.Fprintln(&buf)
	fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{")
	fmt.Fprintf(&buf, "\t%d,\n", h.min)
	fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{")
	for i := 0; i < huffmanNumChunks; i++ {
		if i&7 == 0 {
			fmt.Fprintf(&buf, "\t\t")
		} else {
			fmt.Fprintf(&buf, " ")
		}
		fmt.Fprintf(&buf, "0x%04x,", h.chunks[i])
		if i&7 == 7 {
			fmt.Fprintln(&buf)
		}
	}
	fmt.Fprintln(&buf, "\t},")
	fmt.Fprintln(&buf, "\tnil, 0,")
	fmt.Fprintln(&buf, "}")

	data, err := format.Source(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	err = ioutil.WriteFile(*filename, data, 0644)
	if err != nil {
		log.Fatal(err)
	}
}

var reverseByte [256]byte

func initReverseByte() {
	for x := 0; x < 256; x++ {
		var result byte
		for i := uint(0); i < 8; i++ {
			result |= byte(((x >> i) & 1) << (7 - i))
		}
		reverseByte[x] = result
	}
}
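A side note on `initReverseByte` above: it builds a 256-entry bit-reversal table, needed because DEFLATE Huffman codes are packed most-significant-bit first while the decoder indexes tables least-significant-bit first. Since Go 1.9 the same per-byte mapping is available as `math/bits.Reverse8` (a sketch, not part of the diff):

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Matches what initReverseByte computes for indexes 1 and 0x30.
	fmt.Printf("%08b\n", bits.Reverse8(0b00000001)) // 10000000
	fmt.Printf("%08b\n", bits.Reverse8(0b00110000)) // 00001100
}
```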
701 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go generated vendored
@@ -1,701 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"io"
)

const (
	// The largest offset code.
	offsetCodeCount = 30

	// The special code used to mark the end of a block.
	endBlockMarker = 256

	// The first length code.
	lengthCodesStart = 257

	// The number of codegen codes.
	codegenCodeCount = 19
	badCode          = 255

	// bufferFlushSize indicates the buffer size
	// after which bytes are flushed to the writer.
	// Should preferably be a multiple of 6, since
	// we accumulate 6 bytes between writes to the buffer.
	bufferFlushSize = 240

	// bufferSize is the actual output byte buffer size.
	// It must have additional headroom for a flush
	// which can contain up to 8 bytes.
	bufferSize = bufferFlushSize + 8
)

// The number of extra bits needed by length code X - LENGTH_CODES_START.
var lengthExtraBits = []int8{
	/* 257 */ 0, 0, 0,
	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
	/* 280 */ 4, 5, 5, 5, 5, 0,
}

// The length indicated by length code X - LENGTH_CODES_START.
var lengthBase = []uint32{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
	64, 80, 96, 112, 128, 160, 192, 224, 255,
}

// offset code word extra bits.
var offsetExtraBits = []int8{
	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
	/* extended window */
	14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
}

var offsetBase = []uint32{
	/* normal deflate */
	0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
	0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
	0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
	0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
	0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
	0x001800, 0x002000, 0x003000, 0x004000, 0x006000,

	/* extended window */
	0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
	0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
	0x100000, 0x180000, 0x200000, 0x300000,
}

// The odd order in which the codegen code sizes are written.
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}

type huffmanBitWriter struct {
	// writer is the underlying writer.
	// Do not use it directly; use the write method, which ensures
	// that Write errors are sticky.
	writer io.Writer

	// Data waiting to be written is bytes[0:nbytes]
	// and then the low nbits of bits.
	bits            uint64
	nbits           uint
	bytes           [bufferSize]byte
	codegenFreq     [codegenCodeCount]int32
	nbytes          int
	literalFreq     []int32
	offsetFreq      []int32
	codegen         []uint8
	literalEncoding *huffmanEncoder
	offsetEncoding  *huffmanEncoder
	codegenEncoding *huffmanEncoder
	err             error
}

func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
	return &huffmanBitWriter{
		writer:          w,
		literalFreq:     make([]int32, maxNumLit),
		offsetFreq:      make([]int32, offsetCodeCount),
		codegen:         make([]uint8, maxNumLit+offsetCodeCount+1),
		literalEncoding: newHuffmanEncoder(maxNumLit),
		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
	}
}

func (w *huffmanBitWriter) reset(writer io.Writer) {
	w.writer = writer
	w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
	w.bytes = [bufferSize]byte{}
}

func (w *huffmanBitWriter) flush() {
	if w.err != nil {
		w.nbits = 0
		return
	}
	n := w.nbytes
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		if w.nbits > 8 { // Avoid underflow
			w.nbits -= 8
		} else {
			w.nbits = 0
		}
		n++
	}
	w.bits = 0
	w.write(w.bytes[:n])
	w.nbytes = 0
}

func (w *huffmanBitWriter) write(b []byte) {
	if w.err != nil {
		return
	}
	_, w.err = w.writer.Write(b)
}

func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
	if w.err != nil {
		return
	}
	w.bits |= uint64(b) << w.nbits
	w.nbits += nb
	if w.nbits >= 48 {
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		n := w.nbytes
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n >= bufferFlushSize {
			w.write(w.bytes[:n])
			n = 0
		}
		w.nbytes = n
	}
}

func (w *huffmanBitWriter) writeBytes(bytes []byte) {
	if w.err != nil {
		return
	}
	n := w.nbytes
	if w.nbits&7 != 0 {
		w.err = InternalError("writeBytes with unfinished bits")
		return
	}
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		w.nbits -= 8
		n++
	}
	if n != 0 {
		w.write(w.bytes[:n])
	}
	w.nbytes = 0
	w.write(bytes)
}

// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
// the literal and offset lengths arrays (which are concatenated into a single
// array). This method generates that run-length encoding.
//
// The result is written into the codegen array, and the frequencies
// of each code are written into the codegenFreq array.
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker.
//
// numLiterals      The number of literals in literalEncoding
// numOffsets       The number of offsets in offsetEncoding
// litEnc, offEnc   The literal and offset encoder to use
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
	for i := range w.codegenFreq {
		w.codegenFreq[i] = 0
	}
	// Note that we are using codegen both as a temporary variable for holding
	// a copy of the frequencies, and as the place where we put the result.
	// This is fine because the output is always shorter than the input used
	// so far.
	codegen := w.codegen // cache
	// Copy the concatenated code sizes to codegen. Put a marker at the end.
	cgnl := codegen[:numLiterals]
	for i := range cgnl {
		cgnl[i] = uint8(litEnc.codes[i].len)
	}

	cgnl = codegen[numLiterals : numLiterals+numOffsets]
	for i := range cgnl {
		cgnl[i] = uint8(offEnc.codes[i].len)
	}
	codegen[numLiterals+numOffsets] = badCode

	size := codegen[0]
	count := 1
	outIndex := 0
	for inIndex := 1; size != badCode; inIndex++ {
		// INVARIANT: We have seen "count" copies of size that have not yet
		// had output generated for them.
		nextSize := codegen[inIndex]
		if nextSize == size {
			count++
			continue
		}
		// We need to generate codegen indicating "count" of size.
		if size != 0 {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
			count--
			for count >= 3 {
				n := 6
				if n > count {
					n = count
				}
				codegen[outIndex] = 16
				outIndex++
				codegen[outIndex] = uint8(n - 3)
				outIndex++
				w.codegenFreq[16]++
				count -= n
			}
		} else {
			for count >= 11 {
				n := 138
				if n > count {
					n = count
				}
				codegen[outIndex] = 18
				outIndex++
				codegen[outIndex] = uint8(n - 11)
				outIndex++
				w.codegenFreq[18]++
				count -= n
			}
			if count >= 3 {
				// count >= 3 && count <= 10
				codegen[outIndex] = 17
				outIndex++
				codegen[outIndex] = uint8(count - 3)
				outIndex++
				w.codegenFreq[17]++
				count = 0
			}
		}
		count--
		for ; count >= 0; count-- {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
		}
		// Set up invariant for next time through the loop.
		size = nextSize
		count = 1
	}
	// Marker indicating the end of the codegen.
	codegen[outIndex] = badCode
}

// dynamicSize returns the size of dynamically encoded data in bits.
func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
	numCodegens = len(w.codegenFreq)
	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
		numCodegens--
	}
	header := 3 + 5 + 5 + 4 + (3 * numCodegens) +
		w.codegenEncoding.bitLength(w.codegenFreq[:]) +
		int(w.codegenFreq[16])*2 +
		int(w.codegenFreq[17])*3 +
		int(w.codegenFreq[18])*7
	size = header +
		litEnc.bitLength(w.literalFreq) +
		offEnc.bitLength(w.offsetFreq) +
		extraBits

	return size, numCodegens
}

// fixedSize returns the size of fixed-Huffman encoded data in bits.
func (w *huffmanBitWriter) fixedSize(extraBits int) int {
	return 3 +
		fixedLiteralEncoding.bitLength(w.literalFreq) +
		fixedOffsetEncoding.bitLength(w.offsetFreq) +
		extraBits
}

// storedSize calculates the stored size, including header.
// The function returns the size in bits and whether the block
// fits inside a single block.
func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
	if in == nil {
		return 0, false
	}
	if len(in) <= maxStoreBlockSize {
		return (len(in) + 5) * 8, true
	}
	return 0, false
}

func (w *huffmanBitWriter) writeCode(c hcode) {
	if w.err != nil {
		return
	}
	w.bits |= uint64(c.code) << w.nbits
	w.nbits += uint(c.len)
	if w.nbits >= 48 {
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		n := w.nbytes
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n >= bufferFlushSize {
			w.write(w.bytes[:n])
			n = 0
		}
		w.nbytes = n
	}
}

// Write the header of a dynamic Huffman block to the output stream.
//
// numLiterals  The number of literals specified in codegen
// numOffsets   The number of offsets specified in codegen
// numCodegens  The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
	if w.err != nil {
		return
	}
	var firstBits int32 = 4
	if isEof {
		firstBits = 5
	}
	w.writeBits(firstBits, 3)
	w.writeBits(int32(numLiterals-257), 5)
	w.writeBits(int32(numOffsets-1), 5)
	w.writeBits(int32(numCodegens-4), 4)

	for i := 0; i < numCodegens; i++ {
		value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
		w.writeBits(int32(value), 3)
	}

	i := 0
	for {
		var codeWord int = int(w.codegen[i])
		i++
		if codeWord == badCode {
			break
		}
		w.writeCode(w.codegenEncoding.codes[uint32(codeWord)])

		switch codeWord {
		case 16:
			w.writeBits(int32(w.codegen[i]), 2)
			i++
		case 17:
			w.writeBits(int32(w.codegen[i]), 3)
			i++
		case 18:
			w.writeBits(int32(w.codegen[i]), 7)
			i++
		}
	}
}

func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
	if w.err != nil {
		return
	}
	var flag int32
	if isEof {
		flag = 1
	}
	w.writeBits(flag, 3)
	w.flush()
	w.writeBits(int32(length), 16)
	w.writeBits(int32(^uint16(length)), 16)
}

func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
	if w.err != nil {
		return
	}
	// Indicate that we are a fixed Huffman block
	var value int32 = 2
	if isEof {
		value = 3
	}
	w.writeBits(value, 3)
}

// writeBlock will write a block of tokens with the smallest encoding.
// The original input can be supplied, and if the huffman encoded data
// is larger than the original bytes, the data will be written as a
// stored block.
// If the input is nil, the tokens will always be Huffman encoded.
func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
	if w.err != nil {
		return
	}

	tokens = append(tokens, endBlockMarker)
	numLiterals, numOffsets := w.indexTokens(tokens)

	var extraBits int
	storedSize, storable := w.storedSize(input)
	if storable {
		// We only bother calculating the costs of the extra bits required by
		// the length of offset fields (which will be the same for both fixed
		// and dynamic encoding), if we need to compare those two encodings
		// against stored encoding.
		for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
			// First eight length codes have extra size = 0.
			extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
		}
		for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
			// First four offset codes have extra size = 0.
			extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode])
		}
	}

	// Figure out smallest code.
	// Fixed Huffman baseline.
	var literalEncoding = fixedLiteralEncoding
	var offsetEncoding = fixedOffsetEncoding
	var size = w.fixedSize(extraBits)

	// Dynamic Huffman?
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)

	if dynamicSize < size {
		size = dynamicSize
		literalEncoding = w.literalEncoding
		offsetEncoding = w.offsetEncoding
	}

	// Stored bytes?
	if storable && storedSize < size {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	if literalEncoding == fixedLiteralEncoding {
		w.writeFixedHeader(eof)
	} else {
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	}

	// Write the tokens.
	w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
}

// writeBlockDynamic encodes a block using a dynamic Huffman table.
// This should be used if the symbols used have a disproportionate
// histogram distribution.
// If input is supplied and the compression savings are below 1/16th of the
// input size the block is stored.
func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) {
	if w.err != nil {
		return
	}

	tokens = append(tokens, endBlockMarker)
	numLiterals, numOffsets := w.indexTokens(tokens)

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)

	// Store bytes, if we don't get a reasonable improvement.
	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Write Huffman table.
	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)

	// Write the tokens.
	w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes)
}

// indexTokens indexes a slice of tokens, updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
	for i := range w.literalFreq {
		w.literalFreq[i] = 0
	}
	for i := range w.offsetFreq {
		w.offsetFreq[i] = 0
	}

	for _, t := range tokens {
		if t < matchType {
			w.literalFreq[t.literal()]++
			continue
		}
		length := t.length()
		offset := t.offset()
		w.literalFreq[lengthCodesStart+lengthCode(length)]++
		w.offsetFreq[offsetCode(offset)]++
	}

	// get the number of literals
	numLiterals = len(w.literalFreq)
	for w.literalFreq[numLiterals-1] == 0 {
		numLiterals--
	}
	// get the number of offsets
	numOffsets = len(w.offsetFreq)
	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
		numOffsets--
	}
	if numOffsets == 0 {
		// We haven't found a single match. If we want to go with the dynamic encoding,
		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
		w.offsetFreq[0] = 1
		numOffsets = 1
	}
	w.literalEncoding.generate(w.literalFreq, 15)
	w.offsetEncoding.generate(w.offsetFreq, 15)
	return
}

// writeTokens writes a slice of tokens to the output.
// codes for literal and offset encoding must be supplied.
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
	if w.err != nil {
		return
	}
	for _, t := range tokens {
		if t < matchType {
			w.writeCode(leCodes[t.literal()])
			continue
		}
		// Write the length
		length := t.length()
		lengthCode := lengthCode(length)
		w.writeCode(leCodes[lengthCode+lengthCodesStart])
		extraLengthBits := uint(lengthExtraBits[lengthCode])
		if extraLengthBits > 0 {
			extraLength := int32(length - lengthBase[lengthCode])
			w.writeBits(extraLength, extraLengthBits)
		}
		// Write the offset
		offset := t.offset()
		offsetCode := offsetCode(offset)
		w.writeCode(oeCodes[offsetCode])
		extraOffsetBits := uint(offsetExtraBits[offsetCode])
		if extraOffsetBits > 0 {
			extraOffset := int32(offset - offsetBase[offsetCode])
			w.writeBits(extraOffset, extraOffsetBits)
		}
	}
}

// huffOffset is a static offset encoder used for huffman only encoding.
// It can be reused since we will not be encoding offset values.
var huffOffset *huffmanEncoder

func init() {
	w := newHuffmanBitWriter(nil)
	w.offsetFreq[0] = 1
	huffOffset = newHuffmanEncoder(offsetCodeCount)
	huffOffset.generate(w.offsetFreq, 15)
}

// writeBlockHuff encodes a block of bytes as either
// Huffman encoded literals or uncompressed bytes if the
// result gains very little from compression.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
	if w.err != nil {
		return
	}

	// Clear histogram
	for i := range w.literalFreq {
		w.literalFreq[i] = 0
	}

	// Add everything as literals
	histogram(input, w.literalFreq)

	w.literalFreq[endBlockMarker] = 1

	const numLiterals = endBlockMarker + 1
	const numOffsets = 1

	w.literalEncoding.generate(w.literalFreq, 15)

	// Figure out smallest code.
	// Always use dynamic Huffman or Store
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)

	// Store bytes, if we don't get a reasonable improvement.
	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	encoding := w.literalEncoding.codes[:257]
	n := w.nbytes
	for _, t := range input {
		// Bitwriting inlined, ~30% speedup
		c := encoding[t]
		w.bits |= uint64(c.code) << w.nbits
		w.nbits += uint(c.len)
		if w.nbits < 48 {
			continue
		}
		// Store 6 bytes
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n < bufferFlushSize {
			continue
		}
		w.write(w.bytes[:n])
		if w.err != nil {
			return // Return early in the event of write failures
		}
		n = 0
	}
	w.nbytes = n
	w.writeCode(encoding[endBlockMarker])
}
344 vendor/github.com/klauspost/compress/flate/huffman_code.go generated vendored
@@ -1,344 +0,0 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"math"
	"sort"
)

// hcode is a huffman code with a bit code and bit length.
type hcode struct {
	code, len uint16
}

type huffmanEncoder struct {
	codes     []hcode
	freqcache []literalNode
	bitCount  [17]int32
	lns       byLiteral // stored to avoid repeated allocation in generate
	lfs       byFreq    // stored to avoid repeated allocation in generate
}

type literalNode struct {
	literal uint16
	freq    int32
}

// A levelInfo describes the state of the constructed tree for a given depth.
type levelInfo struct {
	// Our level. for better printing
	level int32

	// The frequency of the last node at this level
	lastFreq int32

	// The frequency of the next character to add to this level
	nextCharFreq int32

	// The frequency of the next pair (from level below) to add to this level.
	// Only valid if the "needed" value of the next lower level is 0.
	nextPairFreq int32

	// The number of chains remaining to generate for this level before moving
	// up to the next level
	needed int32
}

// set sets the code and length of an hcode.
func (h *hcode) set(code uint16, length uint16) {
	h.len = length
	h.code = code
}

func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }

func newHuffmanEncoder(size int) *huffmanEncoder {
	return &huffmanEncoder{codes: make([]hcode, size)}
}

// Generates a HuffmanCode corresponding to the fixed literal table
func generateFixedLiteralEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(maxNumLit)
	codes := h.codes
	var ch uint16
	for ch = 0; ch < maxNumLit; ch++ {
		var bits uint16
		var size uint16
		switch {
		case ch < 144:
			// size 8, 00110000 .. 10111111
			bits = ch + 48
			size = 8
			break
		case ch < 256:
			// size 9, 110010000 .. 111111111
			bits = ch + 400 - 144
			size = 9
			break
		case ch < 280:
			// size 7, 0000000 .. 0010111
			bits = ch - 256
			size = 7
			break
		default:
			// size 8, 11000000 .. 11000111
			bits = ch + 192 - 280
			size = 8
		}
		codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
	}
	return h
}

func generateFixedOffsetEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(30)
	codes := h.codes
	for ch := range codes {
		codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
	}
	return h
}

var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()

func (h *huffmanEncoder) bitLength(freq []int32) int {
	var total int
	for i, f := range freq {
		if f != 0 {
			total += int(f) * int(h.codes[i].len)
		}
	}
	return total
}

const maxBitsLimit = 16

// Return the number of literals assigned to each bit size in the Huffman encoding
//
// This method is only called when list.length >= 3
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list     An array of the literals with non-zero frequencies
//          and their associated frequencies. The array is in order of increasing
//          frequency, and has as its last element a special element with frequency
//          MaxInt32
// maxBits  The maximum number of bits that should be used to encode any literal.
//          Must be less than 16.
// return   An integer array in which array[i] indicates the number of literals
//          that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
	if maxBits >= maxBitsLimit {
		panic("flate: maxBits too large")
	}
	n := int32(len(list))
	list = list[0 : n+1]
	list[n] = maxNode()

	// The tree can't have greater depth than n - 1, no matter what. This
	// saves a little bit of work in some small cases
	if maxBits > n-1 {
		maxBits = n - 1
	}

	// Create information about each of the levels.
	// A bogus "Level 0" whose sole purpose is so that
	// level1.prev.needed==0. This makes level1.nextPairFreq
	// be a legitimate value that never gets chosen.
	var levels [maxBitsLimit]levelInfo
	// leafCounts[i] counts the number of literals at the left
	// of ancestors of the rightmost node at level i.
	// leafCounts[i][j] is the number of literals at the left
	// of the level j ancestor.
	var leafCounts [maxBitsLimit][maxBitsLimit]int32

	for level := int32(1); level <= maxBits; level++ {
		// For every level, the first two items are the first two characters.
		// We initialize the levels as if we had already figured this out.
		levels[level] = levelInfo{
			level:        level,
			lastFreq:     list[1].freq,
			nextCharFreq: list[2].freq,
			nextPairFreq: list[0].freq + list[1].freq,
		}
		leafCounts[level][level] = 2
		if level == 1 {
			levels[level].nextPairFreq = math.MaxInt32
		}
	}

	// We need a total of 2*n - 2 items at top level and have already generated 2.
	levels[maxBits].needed = 2*n - 4

	level := maxBits
	for {
		l := &levels[level]
		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
			// We've run out of both leafs and pairs.
			// End all calculations for this level.
			// To make sure we never come back to this level or any lower level,
			// set nextPairFreq impossibly large.
			l.needed = 0
			levels[level+1].nextPairFreq = math.MaxInt32
			level++
			continue
		}

		prevFreq := l.lastFreq
		if l.nextCharFreq < l.nextPairFreq {
			// The next item on this row is a leaf node.
			n := leafCounts[level][level] + 1
			l.lastFreq = l.nextCharFreq
			// Lower leafCounts are the same as the previous node.
			leafCounts[level][level] = n
			l.nextCharFreq = list[n].freq
		} else {
			// The next item on this row is a pair from the previous row.
			// nextPairFreq isn't valid until we generate two
			// more values in the level below
			l.lastFreq = l.nextPairFreq
			// Take leaf counts from the lower level, except counts[level] remains the same.
			copy(leafCounts[level][:level], leafCounts[level-1][:level])
			levels[l.level-1].needed = 2
		}

		if l.needed--; l.needed == 0 {
			// We've done everything we need to do for this level.
			// Continue calculating one level up. Fill in nextPairFreq
			// of that level with the sum of the two nodes we've just calculated on
			// this level.
			if l.level == maxBits {
				// All done!
				break
			}
			levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
			level++
		} else {
			// If we stole from below, move down temporarily to replenish it.
			for levels[level-1].needed > 0 {
				level--
			}
		}
	}

	// Something is wrong if, at the end, the top level is null or hasn't used
	// all of the leaves.
	if leafCounts[maxBits][maxBits] != n {
		panic("leafCounts[maxBits][maxBits] != n")
	}

	bitCount := h.bitCount[:maxBits+1]
	bits := 1
	counts := &leafCounts[maxBits]
	for level := maxBits; level > 0; level-- {
		// chain.leafCount gives the number of literals requiring at least "bits"
		// bits to encode.
		bitCount[bits] = counts[level] - counts[level-1]
		bits++
	}
	return bitCount
}

// Look at the leaves and assign them a bit count and an encoding as specified
// in RFC 1951 3.2.2
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
	code := uint16(0)
	for n, bits := range bitCount {
		code <<= 1
		if n == 0 || bits == 0 {
			continue
		}
		// The literals list[len(list)-bits] .. list[len(list)-bits]
		// are encoded using "bits" bits, and get the values
		// code, code + 1, .... The code values are
		// assigned in literal order (not frequency order).
		chunk := list[len(list)-int(bits):]

		h.lns.sort(chunk)
		for _, node := range chunk {
			h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
			code++
		}
		list = list[0 : len(list)-int(bits)]
	}
}

// Update this Huffman Code object to be the minimum code for the specified frequency count.
//
// freq     An array of frequencies, in which frequency[i] gives the frequency of literal i.
// maxBits  The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
	if h.freqcache == nil {
		// Allocate a reusable buffer with the longest possible frequency table.
		// Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
		// The largest of these is maxNumLit, so we allocate for that case.
		h.freqcache = make([]literalNode, maxNumLit+1)
	}
	list := h.freqcache[:len(freq)+1]
	// Number of non-zero literals
	count := 0
	// Set list to be the set of all non-zero literals and their frequencies
	for i, f := range freq {
		if f != 0 {
			list[count] = literalNode{uint16(i), f}
			count++
		} else {
			list[count] = literalNode{}
			h.codes[i].len = 0
		}
	}
	list[len(freq)] = literalNode{}

	list = list[:count]
	if count <= 2 {
		// Handle the small cases here, because they are awkward for the general case code. With
		// two or fewer literals, everything has bit length 1.
		for i, node := range list {
			// "list" is in order of increasing literal value.
			h.codes[node.literal].set(uint16(i), 1)
		}
		return
	}
	h.lfs.sort(list)

	// Get the number of literals for each bit count
	bitCount := h.bitCounts(list, maxBits)
	// And do the assignment
	h.assignEncodingAndSize(bitCount, list)
}

type byLiteral []literalNode

func (s *byLiteral) sort(a []literalNode) {
	*s = byLiteral(a)
	sort.Sort(s)
}

func (s byLiteral) Len() int { return len(s) }

func (s byLiteral) Less(i, j int) bool {
	return s[i].literal < s[j].literal
}

func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

type byFreq []literalNode

func (s *byFreq) sort(a []literalNode) {
	*s = byFreq(a)
	sort.Sort(s)
}

func (s byFreq) Len() int { return len(s) }

func (s byFreq) Less(i, j int) bool {
	if s[i].freq == s[j].freq {
		return s[i].literal < s[j].literal
	}
	return s[i].freq < s[j].freq
}

func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
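The assignEncodingAndSize step in the file above is the canonical-code construction from RFC 1951 section 3.2.2: once every symbol has a bit length, codes are handed out in order of increasing length, and in symbol order within each length. A self-contained sketch of that rule; the lens input is the worked example from the RFC, and canonicalCodes is a made-up name, not the package's API:

package main

import "fmt"

// canonicalCodes assigns RFC 1951 canonical Huffman codes from a slice
// of per-symbol bit lengths (0 means the symbol is unused).
func canonicalCodes(lens []int) []uint16 {
	maxLen := 0
	count := make(map[int]int)
	for _, l := range lens {
		count[l]++
		if l > maxLen {
			maxLen = l
		}
	}
	// First code of each length: double the previous length's first
	// code and skip past the codes already assigned at that length.
	next := make([]uint16, maxLen+1)
	code := uint16(0)
	for l := 1; l <= maxLen; l++ {
		code = (code + uint16(count[l-1])) << 1
		next[l] = code
	}
	codes := make([]uint16, len(lens))
	for sym, l := range lens {
		if l == 0 {
			continue
		}
		codes[sym] = next[l]
		next[l]++
	}
	return codes
}

func main() {
	// The worked example from RFC 1951 section 3.2.2.
	lens := []int{3, 3, 3, 3, 3, 2, 4, 4}
	for sym, c := range canonicalCodes(lens) {
		fmt.Printf("symbol %d -> %0*b\n", sym, lens[sym], c)
	}
}

The package does one extra thing on top of this: it passes each code through reverseBits, because DEFLATE packs code bits into the stream LSB-first.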
846 vendor/github.com/klauspost/compress/flate/inflate.go (generated, vendored)
@@ -1,846 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package flate implements the DEFLATE compressed data format, described in
// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
// formats.
package flate

import (
	"bufio"
	"io"
	"strconv"
	"sync"
)

const (
	maxCodeLen = 16 // max length of Huffman code
	// The next three numbers come from the RFC section 3.2.7, with the
	// additional proviso in section 3.2.5 which implies that distance codes
	// 30 and 31 should never occur in compressed data.
	maxNumLit  = 286
	maxNumDist = 30
	numCodes   = 19 // number of codes in Huffman meta-code
)

// Initialize the fixedHuffmanDecoder only once upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder

// A CorruptInputError reports the presence of corrupt input at a given offset.
type CorruptInputError int64

func (e CorruptInputError) Error() string {
	return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10)
}

// An InternalError reports an error in the flate code itself.
type InternalError string

func (e InternalError) Error() string { return "flate: internal error: " + string(e) }

// A ReadError reports an error encountered while reading input.
//
// Deprecated: No longer returned.
type ReadError struct {
	Offset int64 // byte offset where error occurred
	Err    error // error returned by underlying Read
}

func (e *ReadError) Error() string {
	return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
}

// A WriteError reports an error encountered while writing output.
//
// Deprecated: No longer returned.
type WriteError struct {
	Offset int64 // byte offset where error occurred
	Err    error // error returned by underlying Write
}

func (e *WriteError) Error() string {
	return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
}

// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
	// Reset discards any buffered data and resets the Resetter as if it was
	// newly initialized with the given reader.
	Reset(r io.Reader, dict []byte) error
}

// The data structure for decoding Huffman tables is based on that of
// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
// For codes smaller than the table width, there are multiple entries
// (each combination of trailing bits has the same value). For codes
// larger than the table width, the table contains a link to an overflow
// table. The width of each entry in the link table is the maximum code
// size minus the chunk width.
//
// Note that you can do a lookup in the table even without all bits
// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
// have the property that shorter codes come before longer ones, the
// bit length estimate in the result is a lower bound on the actual
// number of bits.
//
// See the following:
//	http://www.gzip.org/algorithm.txt

// chunk & 15 is number of bits
// chunk >> 4 is value, including table link

const (
	huffmanChunkBits  = 9
	huffmanNumChunks  = 1 << huffmanChunkBits
	huffmanCountMask  = 15
	huffmanValueShift = 4
)

type huffmanDecoder struct {
	min      int                      // the minimum code length
	chunks   [huffmanNumChunks]uint32 // chunks as described above
	links    [][]uint32               // overflow links
	linkMask uint32                   // mask the width of the link table
}

// Initialize Huffman decoding tables from array of code lengths.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted.
func (h *huffmanDecoder) init(bits []int) bool {
	// Sanity enables additional runtime tests during Huffman
	// table construction. It's intended to be used during
	// development to supplement the currently ad-hoc unit tests.
	const sanity = false

	if h.min != 0 {
		*h = huffmanDecoder{}
	}

	// Count number of codes of each length,
	// compute min and max length.
	var count [maxCodeLen]int
	var min, max int
	for _, n := range bits {
		if n == 0 {
			continue
		}
		if min == 0 || n < min {
			min = n
		}
		if n > max {
			max = n
		}
		count[n]++
	}

	// Empty tree. The decompressor.huffSym function will fail later if the tree
	// is used. Technically, an empty tree is only valid for the HDIST tree and
	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
	// is guaranteed to fail since it will attempt to use the tree to decode the
	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
	// guaranteed to fail later since the compressed data section must be
	// composed of at least one symbol (the end-of-block marker).
	if max == 0 {
		return true
	}

	code := 0
	var nextcode [maxCodeLen]int
	for i := min; i <= max; i++ {
		code <<= 1
		nextcode[i] = code
		code += count[i]
	}

	// Check that the coding is complete (i.e., that we've
	// assigned all 2-to-the-max possible bit sequences).
	// Exception: To be compatible with zlib, we also need to
	// accept degenerate single-code codings. See also
	// TestDegenerateHuffmanCoding.
	if code != 1<<uint(max) && !(code == 1 && max == 1) {
		return false
	}

	h.min = min
	if max > huffmanChunkBits {
		numLinks := 1 << (uint(max) - huffmanChunkBits)
		h.linkMask = uint32(numLinks - 1)

		// create link tables
		link := nextcode[huffmanChunkBits+1] >> 1
		h.links = make([][]uint32, huffmanNumChunks-link)
		for j := uint(link); j < huffmanNumChunks; j++ {
			reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
			reverse >>= uint(16 - huffmanChunkBits)
			off := j - uint(link)
			if sanity && h.chunks[reverse] != 0 {
				panic("impossible: overwriting existing chunk")
			}
			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
			h.links[off] = make([]uint32, numLinks)
		}
	}

	for i, n := range bits {
		if n == 0 {
			continue
		}
		code := nextcode[n]
		nextcode[n]++
		chunk := uint32(i<<huffmanValueShift | n)
		reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
		reverse >>= uint(16 - n)
		if n <= huffmanChunkBits {
			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
				// We should never need to overwrite
				// an existing chunk. Also, 0 is
				// never a valid chunk, because the
				// lower 4 "count" bits should be
				// between 1 and 15.
				if sanity && h.chunks[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				h.chunks[off] = chunk
			}
		} else {
			j := reverse & (huffmanNumChunks - 1)
			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
				// Longer codes should have been
				// associated with a link table above.
				panic("impossible: not an indirect chunk")
			}
			value := h.chunks[j] >> huffmanValueShift
			linktab := h.links[value]
			reverse >>= huffmanChunkBits
			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
				if sanity && linktab[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				linktab[off] = chunk
			}
		}
	}

	if sanity {
		// Above we've sanity checked that we never overwrote
		// an existing entry. Here we additionally check that
		// we filled the tables completely.
		for i, chunk := range h.chunks {
			if chunk == 0 {
				// As an exception, in the degenerate
				// single-code case, we allow odd
				// chunks to be missing.
				if code == 1 && i%2 == 1 {
					continue
				}
				panic("impossible: missing chunk")
			}
		}
		for _, linktab := range h.links {
			for _, chunk := range linktab {
				if chunk == 0 {
					panic("impossible: missing chunk")
				}
			}
		}
	}

	return true
}
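The chunk layout that init builds is the packing the earlier "chunk & 15 / chunk >> 4" comment describes: the low four bits hold the code length, and the rest hold either the decoded value or a link-table index. A toy illustration of just that packing, independent of the decoder state (the constants mirror huffmanCountMask and huffmanValueShift):

package main

import "fmt"

const countMask = 15 // low 4 bits: code length in bits
const valueShift = 4 // remaining bits: decoded value or link index

func main() {
	// Pack a decoded value of 257 with a 7-bit code, as init does.
	chunk := uint32(257<<valueShift | 7)

	// Unpack, as huffSym does after indexing chunks with buffered bits.
	fmt.Println("bits: ", chunk&countMask)   // 7
	fmt.Println("value:", chunk>>valueShift) // 257
}

Because the table is indexed with huffmanChunkBits of input regardless of the actual code length, short codes simply occupy every slot whose low bits match, which is why init fills entries in strides of 1<<n.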

// The actual read interface needed by NewReader.
// If the passed in io.Reader does not also have ReadByte,
// the NewReader will introduce its own buffering.
type Reader interface {
	io.Reader
	io.ByteReader
}

// Decompress state.
type decompressor struct {
	// Input source.
	r       Reader
	roffset int64

	// Input bits, in top of b.
	b  uint32
	nb uint

	// Huffman decoders for literal/length, distance.
	h1, h2 huffmanDecoder

	// Length arrays used to define Huffman codes.
	bits     *[maxNumLit + maxNumDist]int
	codebits *[numCodes]int

	// Output history, buffer.
	dict dictDecoder

	// Temporary buffer (avoids repeated allocation).
	buf [4]byte

	// Next step in the decompression,
	// and decompression state.
	step      func(*decompressor)
	stepState int
	final     bool
	err       error
	toRead    []byte
	hl, hd    *huffmanDecoder
	copyLen   int
	copyDist  int
}

func (f *decompressor) nextBlock() {
	for f.nb < 1+2 {
		if f.err = f.moreBits(); f.err != nil {
			return
		}
	}
	f.final = f.b&1 == 1
	f.b >>= 1
	typ := f.b & 3
	f.b >>= 2
	f.nb -= 1 + 2
	switch typ {
	case 0:
		f.dataBlock()
	case 1:
		// compressed, fixed Huffman tables
		f.hl = &fixedHuffmanDecoder
		f.hd = nil
		f.huffmanBlock()
	case 2:
		// compressed, dynamic Huffman tables
		if f.err = f.readHuffman(); f.err != nil {
			break
		}
		f.hl = &f.h1
		f.hd = &f.h2
		f.huffmanBlock()
	default:
		// 3 is reserved.
		f.err = CorruptInputError(f.roffset)
	}
}

func (f *decompressor) Read(b []byte) (int, error) {
	for {
		if len(f.toRead) > 0 {
			n := copy(b, f.toRead)
			f.toRead = f.toRead[n:]
			if len(f.toRead) == 0 {
				return n, f.err
			}
			return n, nil
		}
		if f.err != nil {
			return 0, f.err
		}
		f.step(f)
		if f.err != nil && len(f.toRead) == 0 {
			f.toRead = f.dict.readFlush() // Flush what's left in case of error
		}
	}
}

// Support the io.WriteTo interface for io.Copy and friends.
func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
	total := int64(0)
	flushed := false
	for {
		if len(f.toRead) > 0 {
			n, err := w.Write(f.toRead)
			total += int64(n)
			if err != nil {
				f.err = err
				return total, err
			}
			if n != len(f.toRead) {
				return total, io.ErrShortWrite
			}
			f.toRead = f.toRead[:0]
		}
		if f.err != nil && flushed {
			if f.err == io.EOF {
				return total, nil
			}
			return total, f.err
		}
		if f.err == nil {
			f.step(f)
		}
		if len(f.toRead) == 0 && f.err != nil && !flushed {
			f.toRead = f.dict.readFlush() // Flush what's left in case of error
			flushed = true
		}
	}
}

func (f *decompressor) Close() error {
	if f.err == io.EOF {
		return nil
	}
	return f.err
}

// RFC 1951 section 3.2.7.
// Compression with dynamic Huffman codes

var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}

func (f *decompressor) readHuffman() error {
	// HLIT[5], HDIST[5], HCLEN[4].
	for f.nb < 5+5+4 {
		if err := f.moreBits(); err != nil {
			return err
		}
	}
	nlit := int(f.b&0x1F) + 257
	if nlit > maxNumLit {
		return CorruptInputError(f.roffset)
	}
	f.b >>= 5
	ndist := int(f.b&0x1F) + 1
	if ndist > maxNumDist {
		return CorruptInputError(f.roffset)
	}
	f.b >>= 5
	nclen := int(f.b&0xF) + 4
	// numCodes is 19, so nclen is always valid.
	f.b >>= 4
	f.nb -= 5 + 5 + 4

	// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
	for i := 0; i < nclen; i++ {
		for f.nb < 3 {
			if err := f.moreBits(); err != nil {
				return err
			}
		}
		f.codebits[codeOrder[i]] = int(f.b & 0x7)
		f.b >>= 3
		f.nb -= 3
	}
	for i := nclen; i < len(codeOrder); i++ {
		f.codebits[codeOrder[i]] = 0
	}
	if !f.h1.init(f.codebits[0:]) {
		return CorruptInputError(f.roffset)
	}

	// HLIT + 257 code lengths, HDIST + 1 code lengths,
	// using the code length Huffman code.
	for i, n := 0, nlit+ndist; i < n; {
		x, err := f.huffSym(&f.h1)
		if err != nil {
			return err
		}
		if x < 16 {
			// Actual length.
			f.bits[i] = x
			i++
			continue
		}
		// Repeat previous length or zero.
		var rep int
		var nb uint
		var b int
		switch x {
		default:
			return InternalError("unexpected length code")
		case 16:
			rep = 3
			nb = 2
			if i == 0 {
				return CorruptInputError(f.roffset)
			}
			b = f.bits[i-1]
		case 17:
			rep = 3
			nb = 3
			b = 0
		case 18:
			rep = 11
			nb = 7
			b = 0
		}
		for f.nb < nb {
			if err := f.moreBits(); err != nil {
				return err
			}
		}
		rep += int(f.b & uint32(1<<nb-1))
		f.b >>= nb
		f.nb -= nb
		if i+rep > n {
			return CorruptInputError(f.roffset)
		}
		for j := 0; j < rep; j++ {
			f.bits[i] = b
			i++
		}
	}

	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
		return CorruptInputError(f.roffset)
	}

	// As an optimization, we can initialize the min bits to read at a time
	// for the HLIT tree to the length of the EOB marker since we know that
	// every block must terminate with one. This preserves the property that
	// we never read any extra bytes after the end of the DEFLATE stream.
	if f.h1.min < f.bits[endBlockMarker] {
		f.h1.min = f.bits[endBlockMarker]
	}

	return nil
}
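The switch on x in readHuffman implements the code-length alphabet from RFC 1951 section 3.2.7: symbols 0..15 are literal lengths, while 16, 17 and 18 repeat a previous length or a run of zeros. A compact restatement of the three repeat codes, as a standalone helper (repeatSpec is a made-up name for illustration):

package main

import "fmt"

// repeatSpec returns (base repeat count, number of extra bits) for the
// RFC 1951 code-length repeat symbols handled by the switch above.
// Symbol 16 copies the previous length; 17 and 18 write zero lengths.
func repeatSpec(sym int) (rep int, extraBits uint) {
	switch sym {
	case 16: // copy the previous code length 3..6 times
		return 3, 2
	case 17: // repeat a zero length 3..10 times
		return 3, 3
	case 18: // repeat a zero length 11..138 times
		return 11, 7
	}
	panic("not a repeat symbol")
}

func main() {
	for _, sym := range []int{16, 17, 18} {
		rep, nb := repeatSpec(sym)
		fmt.Printf("symbol %d: %d..%d repeats\n", sym, rep, rep+(1<<nb)-1)
	}
}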

// Decode a single Huffman block from f.
// hl and hd are the Huffman states for the lit/length values
// and the distance values, respectively. If hd == nil, the fixed
// distance encoding associated with fixed Huffman blocks is used.
func (f *decompressor) huffmanBlock() {
	const (
		stateInit = iota // Zero value must be stateInit
		stateDict
	)

	switch f.stepState {
	case stateInit:
		goto readLiteral
	case stateDict:
		goto copyHistory
	}

readLiteral:
	// Read literal and/or (length, distance) according to RFC section 3.2.3.
	{
		v, err := f.huffSym(f.hl)
		if err != nil {
			f.err = err
			return
		}
		var n uint // number of bits extra
		var length int
		switch {
		case v < 256:
			f.dict.writeByte(byte(v))
			if f.dict.availWrite() == 0 {
				f.toRead = f.dict.readFlush()
				f.step = (*decompressor).huffmanBlock
				f.stepState = stateInit
				return
			}
			goto readLiteral
		case v == 256:
			f.finishBlock()
			return
		// otherwise, reference to older data
		case v < 265:
			length = v - (257 - 3)
			n = 0
		case v < 269:
			length = v*2 - (265*2 - 11)
			n = 1
		case v < 273:
			length = v*4 - (269*4 - 19)
			n = 2
		case v < 277:
			length = v*8 - (273*8 - 35)
			n = 3
		case v < 281:
			length = v*16 - (277*16 - 67)
			n = 4
		case v < 285:
			length = v*32 - (281*32 - 131)
			n = 5
		case v < maxNumLit:
			length = 258
			n = 0
		default:
			f.err = CorruptInputError(f.roffset)
			return
		}
		if n > 0 {
			for f.nb < n {
				if err = f.moreBits(); err != nil {
					f.err = err
					return
				}
			}
			length += int(f.b & uint32(1<<n-1))
			f.b >>= n
			f.nb -= n
		}

		var dist int
		if f.hd == nil {
			for f.nb < 5 {
				if err = f.moreBits(); err != nil {
					f.err = err
					return
				}
			}
			dist = int(reverseByte[(f.b&0x1F)<<3])
			f.b >>= 5
			f.nb -= 5
		} else {
			if dist, err = f.huffSym(f.hd); err != nil {
				f.err = err
				return
			}
		}

		switch {
		case dist < 4:
			dist++
		case dist < maxNumDist:
			nb := uint(dist-2) >> 1
			// have 1 bit in bottom of dist, need nb more.
			extra := (dist & 1) << nb
			for f.nb < nb {
				if err = f.moreBits(); err != nil {
					f.err = err
					return
				}
			}
			extra |= int(f.b & uint32(1<<nb-1))
			f.b >>= nb
			f.nb -= nb
			dist = 1<<(nb+1) + 1 + extra
		default:
			f.err = CorruptInputError(f.roffset)
			return
		}

		// No check on length; encoding can be prescient.
		if dist > f.dict.histSize() {
			f.err = CorruptInputError(f.roffset)
			return
		}

		f.copyLen, f.copyDist = length, dist
		goto copyHistory
	}

copyHistory:
	// Perform a backwards copy according to RFC section 3.2.3.
	{
		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
		if cnt == 0 {
			cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
		}
		f.copyLen -= cnt

		if f.dict.availWrite() == 0 || f.copyLen > 0 {
			f.toRead = f.dict.readFlush()
			f.step = (*decompressor).huffmanBlock // We need to continue this work
			f.stepState = stateDict
			return
		}
		goto readLiteral
	}
}

// Copy a single uncompressed data block from input to output.
func (f *decompressor) dataBlock() {
	// Uncompressed.
	// Discard current half-byte.
	f.nb = 0
	f.b = 0

	// Length then ones-complement of length.
	nr, err := io.ReadFull(f.r, f.buf[0:4])
	f.roffset += int64(nr)
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		f.err = err
		return
	}
	n := int(f.buf[0]) | int(f.buf[1])<<8
	nn := int(f.buf[2]) | int(f.buf[3])<<8
	if uint16(nn) != uint16(^n) {
		f.err = CorruptInputError(f.roffset)
		return
	}

	if n == 0 {
		f.toRead = f.dict.readFlush()
		f.finishBlock()
		return
	}

	f.copyLen = n
	f.copyData()
}
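dataBlock's ones-complement comparison is the stored-block header check from RFC 1951 section 3.2.4: a 16-bit little-endian LEN followed by NLEN, its bitwise complement. The same check in isolation; the header bytes here are a fabricated example, not data from the stream:

package main

import "fmt"

func main() {
	// A stored-block header for LEN = 5: LEN little-endian, then NLEN = ^LEN.
	header := []byte{0x05, 0x00, 0xfa, 0xff}

	n := int(header[0]) | int(header[1])<<8
	nn := int(header[2]) | int(header[3])<<8
	if uint16(nn) != uint16(^n) {
		fmt.Println("corrupt stored block header")
		return
	}
	fmt.Println("stored block of", n, "bytes") // stored block of 5 bytes
}

The redundant NLEN field exists purely so that a corrupted or misaligned stream fails fast before n bytes are copied through verbatim.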

// copyData copies f.copyLen bytes from the underlying reader into f.hist.
// It pauses for reads when f.hist is full.
func (f *decompressor) copyData() {
	buf := f.dict.writeSlice()
	if len(buf) > f.copyLen {
		buf = buf[:f.copyLen]
	}

	cnt, err := io.ReadFull(f.r, buf)
	f.roffset += int64(cnt)
	f.copyLen -= cnt
	f.dict.writeMark(cnt)
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		f.err = err
		return
	}

	if f.dict.availWrite() == 0 || f.copyLen > 0 {
		f.toRead = f.dict.readFlush()
		f.step = (*decompressor).copyData
		return
	}
	f.finishBlock()
}

func (f *decompressor) finishBlock() {
	if f.final {
		if f.dict.availRead() > 0 {
			f.toRead = f.dict.readFlush()
		}
		f.err = io.EOF
	}
	f.step = (*decompressor).nextBlock
}

func (f *decompressor) moreBits() error {
	c, err := f.r.ReadByte()
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return err
	}
	f.roffset++
	f.b |= uint32(c) << f.nb
	f.nb += 8
	return nil
}

// Read the next Huffman-encoded symbol from f according to h.
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
	// Since a huffmanDecoder can be empty or be composed of a degenerate tree
	// with single element, huffSym must error on these two edge cases. In both
	// cases, the chunks slice will be 0 for the invalid sequence, leading it to
	// satisfy the n == 0 check below.
	n := uint(h.min)
	for {
		for f.nb < n {
			if err := f.moreBits(); err != nil {
				return 0, err
			}
		}
		chunk := h.chunks[f.b&(huffmanNumChunks-1)]
		n = uint(chunk & huffmanCountMask)
		if n > huffmanChunkBits {
			chunk = h.links[chunk>>huffmanValueShift][(f.b>>huffmanChunkBits)&h.linkMask]
			n = uint(chunk & huffmanCountMask)
		}
		if n <= f.nb {
			if n == 0 {
				f.err = CorruptInputError(f.roffset)
				return 0, f.err
			}
			f.b >>= n
			f.nb -= n
			return int(chunk >> huffmanValueShift), nil
		}
	}
}

func makeReader(r io.Reader) Reader {
	if rr, ok := r.(Reader); ok {
		return rr
	}
	return bufio.NewReader(r)
}

func fixedHuffmanDecoderInit() {
	fixedOnce.Do(func() {
		// These come from the RFC section 3.2.6.
		var bits [288]int
		for i := 0; i < 144; i++ {
			bits[i] = 8
		}
		for i := 144; i < 256; i++ {
			bits[i] = 9
		}
		for i := 256; i < 280; i++ {
			bits[i] = 7
		}
		for i := 280; i < 288; i++ {
			bits[i] = 8
		}
		fixedHuffmanDecoder.init(bits[:])
	})
}

func (f *decompressor) Reset(r io.Reader, dict []byte) error {
	*f = decompressor{
		r:        makeReader(r),
		bits:     f.bits,
		codebits: f.codebits,
		dict:     f.dict,
		step:     (*decompressor).nextBlock,
	}
	f.dict.init(maxMatchOffset, dict)
	return nil
}

// NewReader returns a new ReadCloser that can be used
// to read the uncompressed version of r.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser
// when finished reading.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) io.ReadCloser {
	fixedHuffmanDecoderInit()

	var f decompressor
	f.r = makeReader(r)
	f.bits = new([maxNumLit + maxNumDist]int)
	f.codebits = new([numCodes]int)
	f.step = (*decompressor).nextBlock
	f.dict.init(maxMatchOffset, nil)
	return &f
}

// NewReaderDict is like NewReader but initializes the reader
// with a preset dictionary. The returned Reader behaves as if
// the uncompressed data stream started with the given dictionary,
// which has already been read. NewReaderDict is typically used
// to read data compressed by NewWriterDict.
//
// The ReadCloser returned by NewReaderDict also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
	fixedHuffmanDecoderInit()

	var f decompressor
	f.r = makeReader(r)
	f.bits = new([maxNumLit + maxNumDist]int)
	f.codebits = new([numCodes]int)
	f.step = (*decompressor).nextBlock
	f.dict.init(maxMatchOffset, dict)
	return &f
}
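The public surface of the file boils down to NewReader/NewReaderDict plus the Resetter optimization. The standard library's compress/flate exposes the same shape, so typical usage looks like this round trip:

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
	"log"
)

func main() {
	// Compress some data, then read it back through a flate reader.
	var buf bytes.Buffer
	w, err := flate.NewWriter(&buf, flate.BestSpeed)
	if err != nil {
		log.Fatal(err)
	}
	w.Write([]byte("hello, hello, hello"))
	w.Close()

	r := flate.NewReader(&buf)
	defer r.Close()
	out, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out)
}

When decompressing many streams, the returned ReadCloser can be type-asserted to flate.Resetter and pointed at a new source instead of allocating a fresh reader, which is exactly what the Reset method above enables.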
48 vendor/github.com/klauspost/compress/flate/reverse_bits.go (generated, vendored)
@@ -1,48 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

var reverseByte = [256]byte{
	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
	0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
	0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
	0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
	0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
	0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
	0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
	0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
	0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
	0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
	0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
	0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
	0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
	0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
	0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
	0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
	0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
	0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
	0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
	0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
	0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
	0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
}

func reverseUint16(v uint16) uint16 {
	return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8
}

func reverseBits(number uint16, bitLength byte) uint16 {
	return reverseUint16(number << uint8(16-bitLength))
}
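The table-driven reversal above predates the math/bits package; on modern Go the same operation is bits.Reverse16 applied to the code left-aligned to 16 bits, which is a handy way to sanity-check the table. A minimal sketch (reverseBitsStd is a made-up name, not the package's):

package main

import (
	"fmt"
	"math/bits"
)

// reverseBitsStd mirrors the package's reverseBits using math/bits:
// left-align the bitLength-wide code to 16 bits, then reverse all 16.
func reverseBitsStd(number uint16, bitLength byte) uint16 {
	return bits.Reverse16(number << (16 - bitLength))
}

func main() {
	// The 5-bit code 0b00001 reversed within 5 bits is 0b10000.
	fmt.Printf("%05b\n", reverseBitsStd(0b00001, 5)) // 10000
}

The reversal matters because canonical Huffman codes are defined MSB-first, while DEFLATE packs code bits into the byte stream LSB-first.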
900 vendor/github.com/klauspost/compress/flate/snappy.go (generated, vendored)
@@ -1,900 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Modified for deflate by Klaus Post (c) 2015.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst *tokens, lit []byte) {
	ol := int(dst.n)
	for i, v := range lit {
		dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
	}
	dst.n += uint16(len(lit))
}

// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst *tokens, offset, length int) {
	dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize))
	dst.n++
}

type snappyEnc interface {
	Encode(dst *tokens, src []byte)
	Reset()
}

func newSnappy(level int) snappyEnc {
	switch level {
	case 1:
		return &snappyL1{}
	case 2:
		return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
	case 3:
		return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
	case 4:
		return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}}
	default:
		panic("invalid level specified")
	}
}

const (
	tableBits       = 14             // Bits used in the table
	tableSize       = 1 << tableBits // Size of the table
	tableMask       = tableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
	tableShift      = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
	baseMatchOffset = 1              // The smallest match offset
	baseMatchLength = 3              // The smallest match length per the RFC section 3.2.5
	maxMatchOffset  = 1 << 15        // The largest match offset
)

func load32(b []byte, i int) uint32 {
	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func load64(b []byte, i int) uint64 {
	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

func hash(u uint32) uint32 {
	return (u * 0x1e35a7bd) >> tableShift
}

// snappyL1 encapsulates level 1 compression
type snappyL1 struct{}

func (e *snappyL1) Reset() {}

func (e *snappyL1) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 16 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Initialize the hash table.
	//
	// The table element type is uint16, as s < sLimit and sLimit < len(src)
	// and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535.
	var table [tableSize]uint16

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	nextHash := hash(load32(src, s))

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := 32

		nextS := s
		candidate := 0
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = int(table[nextHash&tableMask])
			table[nextHash&tableMask] = uint16(s)
			nextHash = hash(load32(src, nextS))
			if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) {
				break
			}
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s

			// Extend the 4-byte match as long as possible.
			//
			// This is an inlined version of Snappy's:
			//	s = extendMatch(src, candidate+4, s+4)
			s += 4
			s1 := base + maxMatchLength
			if s1 > len(src) {
				s1 = len(src)
			}
			a := src[s:s1]
			b := src[candidate+4:]
			b = b[:len(a)]
			l := len(a)
			for i := range a {
				if a[i] != b[i] {
					l = i
					break
				}
			}
			s += l

			// matchToken is flate's equivalent of Snappy's emitCopy.
			dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset))
			dst.n++
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load64(src, s-1)
			prevHash := hash(uint32(x >> 0))
			table[prevHash&tableMask] = uint16(s - 1)
			currHash := hash(uint32(x >> 8))
			candidate = int(table[currHash&tableMask])
			table[currHash&tableMask] = uint16(s)
			if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
				nextHash = hash(uint32(x >> 16))
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
}
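The skip>>5 schedule in the match-search loop above is easy to see in isolation: the step between hash probes stays at 1 for the first 32 probes, then grows, so incompressible input is abandoned quickly while compressible input is still scanned densely. A standalone trace of the schedule (not package code):

package main

import "fmt"

func main() {
	// Reproduce the probe schedule: step = skip >> 5, skip += step.
	skip, pos := 32, 0
	for i := 0; i < 96; i++ {
		step := skip >> 5
		pos += step
		skip += step
	}
	// The first 32 probes advance one byte each; after that the step
	// grows roughly every 32 probes, so 96 probes already cover a few
	// hundred bytes of input.
	fmt.Println("bytes covered by 96 probes:", pos)
}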

type tableEntry struct {
	val    uint32
	offset int32
}

func load3232(b []byte, i int32) uint32 {
	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func load6432(b []byte, i int32) uint64 {
	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

// snappyGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type snappyGen struct {
	prev []byte
	cur  int32
}

// snappyL2 maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type snappyL2 struct {
	snappyGen
	table [tableSize]tableEntry
}

// Encode uses a similar algorithm to level 1, but is capable
// of matching across blocks giving better compression at a small slowdown.
func (e *snappyL2) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 8 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	// Protect against e.cur wraparound.
	if e.cur > 1<<30 {
		for i := range e.table[:] {
			e.table[i] = tableEntry{}
		}
		e.cur = maxStoreBlockSize
	}

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		e.cur += maxStoreBlockSize
		e.prev = e.prev[:0]
		return
	}

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := int32(0)
	s := int32(0)
	cv := load3232(src, s)
	nextHash := hash(cv)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := int32(32)

		nextS := s
		var candidate tableEntry
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = e.table[nextHash&tableMask]
			now := load3232(src, nextS)
			e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
			nextHash = hash(now)

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || cv != candidate.val {
				// Out of range or not matched.
				cv = now
				continue
			}
			break
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			//
			s += 4
			t := candidate.offset - e.cur + 4
			l := e.matchlen(s, t, src)

			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
			dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
			dst.n++
			s += l
			nextEmit = s
			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+4) < len(src) && t > 0 {
					cv := load3232(src, t)
					e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-1)
			prevHash := hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
			x >>= 8
			currHash := hash(uint32(x))
			candidate = e.table[currHash&tableMask]
			e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x) != candidate.val {
				cv = uint32(x >> 8)
				nextHash = hash(cv)
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
	e.cur += int32(len(src))
	e.prev = e.prev[:len(src)]
	copy(e.prev, src)
}

type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}

// snappyL3
type snappyL3 struct {
	snappyGen
	table [tableSize]tableEntryPrev
}

// Encode uses a similar algorithm to level 2, but will check up to two candidates.
func (e *snappyL3) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 8 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	// Protect against e.cur wraparound.
	if e.cur > 1<<30 {
		for i := range e.table[:] {
			e.table[i] = tableEntryPrev{}
		}
		e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
	}

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		e.cur += maxStoreBlockSize
		e.prev = e.prev[:0]
		return
	}

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := int32(0)
	s := int32(0)
	cv := load3232(src, s)
	nextHash := hash(cv)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := int32(32)

		nextS := s
		var candidate tableEntry
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidates := e.table[nextHash&tableMask]
			now := load3232(src, nextS)
			e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
			nextHash = hash(now)

			// Check both candidates
			candidate = candidates.Cur
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset <= maxMatchOffset {
					break
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset <= maxMatchOffset {
						break
					}
				}
			}
			cv = now
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			//
			s += 4
			t := candidate.offset - e.cur + 4
			l := e.matchlen(s, t, src)

			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
			dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
			dst.n++
			s += l
			nextEmit = s
			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+4) < len(src) && t > 0 {
					cv := load3232(src, t)
					nextHash = hash(cv)
					e.table[nextHash&tableMask] = tableEntryPrev{
						Prev: e.table[nextHash&tableMask].Cur,
						Cur:  tableEntry{offset: e.cur + t, val: cv},
					}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-3 to s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-3)
			prevHash := hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 3, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))

			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 2, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))

			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 1, val: uint32(x)},
			}
			x >>= 8
			currHash := hash(uint32(x))
			candidates := e.table[currHash&tableMask]
			cv = uint32(x)
			e.table[currHash&tableMask] = tableEntryPrev{
				Prev: candidates.Cur,
				Cur:  tableEntry{offset: s + e.cur, val: cv},
			}

			// Check both candidates
			candidate = candidates.Cur
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset <= maxMatchOffset {
					continue
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset <= maxMatchOffset {
						continue
					}
				}
			}
			cv = uint32(x >> 8)
			nextHash = hash(cv)
			s++
			break
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
	e.cur += int32(len(src))
	e.prev = e.prev[:len(src)]
	copy(e.prev, src)
}

// snappyL4
type snappyL4 struct {
	snappyL3
}

// Encode uses a similar algorithm to level 3,
// but will check up to two candidates if the first isn't long enough.
func (e *snappyL4) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 8 - 3
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		matchLenGood           = 12
	)

	// Protect against e.cur wraparound.
	if e.cur > 1<<30 {
		for i := range e.table[:] {
			e.table[i] = tableEntryPrev{}
		}
		e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
	}

	// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
e.cur += maxStoreBlockSize
|
||||
e.prev = e.prev[:0]
|
||||
return
|
||||
}
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := int32(0)
|
||||
s := int32(0)
|
||||
cv := load3232(src, s)
|
||||
nextHash := hash(cv)
|
||||
|
||||
for {
|
||||
// Copied from the C++ snappy implementation:
|
||||
//
|
||||
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
||||
// found, start looking only at every other byte. If 32 more bytes are
|
||||
// scanned (or skipped), look at every third byte, etc.. When a match
|
||||
// is found, immediately go back to looking at every byte. This is a
|
||||
// small loss (~5% performance, ~0.1% density) for compressible data
|
||||
// due to more bookkeeping, but for non-compressible data (such as
|
||||
// JPEG) it's a huge win since the compressor quickly "realizes" the
|
||||
// data is incompressible and doesn't bother looking for matches
|
||||
// everywhere.
|
||||
//
|
||||
// The "skip" variable keeps track of how many bytes there are since
|
||||
// the last match; dividing it by 32 (ie. right-shifting by five) gives
|
||||
// the number of bytes to move ahead for each iteration.
|
||||
skip := int32(32)
|
||||
|
||||
nextS := s
|
||||
var candidate tableEntry
|
||||
var candidateAlt tableEntry
|
||||
for {
|
||||
s = nextS
|
||||
bytesBetweenHashLookups := skip >> 5
|
||||
nextS = s + bytesBetweenHashLookups
|
||||
skip += bytesBetweenHashLookups
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
candidates := e.table[nextHash&tableMask]
|
||||
now := load3232(src, nextS)
|
||||
e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
|
||||
nextHash = hash(now)
|
||||
|
||||
// Check both candidates
|
||||
candidate = candidates.Cur
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset < maxMatchOffset {
|
||||
offset = s - (candidates.Prev.offset - e.cur)
|
||||
if cv == candidates.Prev.val && offset < maxMatchOffset {
|
||||
candidateAlt = candidates.Prev
|
||||
}
|
||||
break
|
||||
}
|
||||
} else {
|
||||
// We only check if value mismatches.
|
||||
// Offset will always be invalid in other cases.
|
||||
candidate = candidates.Prev
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset < maxMatchOffset {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
cv = now
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
|
||||
// Call emitCopy, and then see if another emitCopy could be our next
|
||||
// move. Repeat until we find no match for the input immediately after
|
||||
// what was consumed by the last emitCopy call.
|
||||
//
|
||||
// If we exit this loop normally then we need to call emitLiteral next,
|
||||
// though we don't yet know how big the literal will be. We handle that
|
||||
// by proceeding to the next iteration of the main loop. We also can
|
||||
// exit this loop via goto if we get close to exhausting the input.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
//
|
||||
s += 4
|
||||
t := candidate.offset - e.cur + 4
|
||||
l := e.matchlen(s, t, src)
|
||||
// Try alternative candidate if match length < matchLenGood.
|
||||
if l < matchLenGood-4 && candidateAlt.offset != 0 {
|
||||
t2 := candidateAlt.offset - e.cur + 4
|
||||
l2 := e.matchlen(s, t2, src)
|
||||
if l2 > l {
|
||||
l = l2
|
||||
t = t2
|
||||
}
|
||||
}
|
||||
// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
|
||||
dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
|
||||
dst.n++
|
||||
s += l
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
t += l
|
||||
// Index first pair after match end.
|
||||
if int(t+4) < len(src) && t > 0 {
|
||||
cv := load3232(src, t)
|
||||
nextHash = hash(cv)
|
||||
e.table[nextHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[nextHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + t, val: cv},
|
||||
}
|
||||
}
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-3 to s. If
|
||||
// another emitCopy is not our next move, also calculate nextHash
|
||||
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
||||
// are faster as one load64 call (with some shifts) instead of
|
||||
// three load32 calls.
|
||||
x := load6432(src, s-3)
|
||||
prevHash := hash(uint32(x))
|
||||
e.table[prevHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[prevHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
|
||||
}
|
||||
x >>= 8
|
||||
prevHash = hash(uint32(x))
|
||||
|
||||
e.table[prevHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[prevHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
|
||||
}
|
||||
x >>= 8
|
||||
prevHash = hash(uint32(x))
|
||||
|
||||
e.table[prevHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[prevHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
|
||||
}
|
||||
x >>= 8
|
||||
currHash := hash(uint32(x))
|
||||
candidates := e.table[currHash&tableMask]
|
||||
cv = uint32(x)
|
||||
e.table[currHash&tableMask] = tableEntryPrev{
|
||||
Prev: candidates.Cur,
|
||||
Cur: tableEntry{offset: s + e.cur, val: cv},
|
||||
}
|
||||
|
||||
// Check both candidates
|
||||
candidate = candidates.Cur
|
||||
candidateAlt = tableEntry{}
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset <= maxMatchOffset {
|
||||
offset = s - (candidates.Prev.offset - e.cur)
|
||||
if cv == candidates.Prev.val && offset <= maxMatchOffset {
|
||||
candidateAlt = candidates.Prev
|
||||
}
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// We only check if value mismatches.
|
||||
// Offset will always be invalid in other cases.
|
||||
candidate = candidates.Prev
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset <= maxMatchOffset {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
cv = uint32(x >> 8)
|
||||
nextHash = hash(cv)
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
e.cur += int32(len(src))
|
||||
e.prev = e.prev[:len(src)]
|
||||
copy(e.prev, src)
|
||||
}
|
||||
|
||||
func (e *snappyGen) matchlen(s, t int32, src []byte) int32 {
|
||||
s1 := int(s) + maxMatchLength - 4
|
||||
if s1 > len(src) {
|
||||
s1 = len(src)
|
||||
}
|
||||
|
||||
// If we are inside the current block
|
||||
if t >= 0 {
|
||||
b := src[t:]
|
||||
a := src[s:s1]
|
||||
b = b[:len(a)]
|
||||
// Extend the match to be as long as possible.
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return int32(i)
|
||||
}
|
||||
}
|
||||
return int32(len(a))
|
||||
}
|
||||
|
||||
// We found a match in the previous block.
|
||||
tp := int32(len(e.prev)) + t
|
||||
if tp < 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Extend the match to be as long as possible.
|
||||
a := src[s:s1]
|
||||
b := e.prev[tp:]
|
||||
if len(b) > len(a) {
|
||||
b = b[:len(a)]
|
||||
}
|
||||
a = a[:len(b)]
|
||||
for i := range b {
|
||||
if a[i] != b[i] {
|
||||
return int32(i)
|
||||
}
|
||||
}
|
||||
|
||||
// If we reached our limit, we matched everything we are
|
||||
// allowed to in the previous block and we return.
|
||||
n := int32(len(b))
|
||||
if int(s+n) == s1 {
|
||||
return n
|
||||
}
|
||||
|
||||
// Continue looking for more matches in the current block.
|
||||
a = src[s+n : s1]
|
||||
b = src[:len(a)]
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return int32(i) + n
|
||||
}
|
||||
}
|
||||
return int32(len(a)) + n
|
||||
}
|
||||
|
||||
// Reset the encoding table.
|
||||
func (e *snappyGen) Reset() {
|
||||
e.prev = e.prev[:0]
|
||||
e.cur += maxMatchOffset
|
||||
}
|
||||
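The match-skipping heuristic used by both encoders above is easier to see in isolation. The sketch below is editorial (hypothetical names; not part of the vendored file) and reproduces just the skip arithmetic, showing how probing thins out on data that never matches:

```go
package main

import "fmt"

// scanPositions returns the byte offsets a scanner would probe when no
// match is ever found: every byte at first, every other byte after 32
// misses, every third after 64, and so on (skip>>5, as in the code above).
func scanPositions(n int) []int {
	var probes []int
	skip := 32
	s := 0
	for s < n {
		probes = append(probes, s)
		step := skip >> 5 // 1 at first, grows by 1 every 32 misses
		s += step
		skip += step
	}
	return probes
}

func main() {
	// On incompressible data the step keeps growing, so far fewer
	// positions are probed than there are bytes.
	fmt.Println(len(scanPositions(4096)), "probes over 4096 bytes")
}
```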
115
vendor/github.com/klauspost/compress/flate/token.go
generated
vendored
@@ -1,115 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import "fmt"

const (
	// 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
	// 8 bits: xlength = length - MIN_MATCH_LENGTH
	// 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	typeMask    = 3 << 30
	literalType = 0 << 30
	matchType   = 1 << 30
)

// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
// is lengthCodes[length - MIN_MATCH_LENGTH]
var lengthCodes = [...]uint32{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
	13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
	15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
	17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
	18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
	19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
	20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
	21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
	22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
	23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 28,
}

var offsetCodes = [...]uint32{
	0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
	8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
	10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
	11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
}

type token uint32

type tokens struct {
	tokens [maxStoreBlockSize + 1]token
	n      uint16 // Must be able to contain maxStoreBlockSize
}

// Convert a literal into a literal token.
func literalToken(literal uint32) token { return token(literalType + literal) }

// Convert a < xlength, xoffset > pair into a match token.
func matchToken(xlength uint32, xoffset uint32) token {
	return token(matchType + xlength<<lengthShift + xoffset)
}

func matchTokend(xlength uint32, xoffset uint32) token {
	if xlength > maxMatchLength || xoffset > maxMatchOffset {
		panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset))
		return token(matchType)
	}
	return token(matchType + xlength<<lengthShift + xoffset)
}

// Returns the type of a token
func (t token) typ() uint32 { return uint32(t) & typeMask }

// Returns the literal of a literal token
func (t token) literal() uint32 { return uint32(t - literalType) }

// Returns the extra offset of a match token
func (t token) offset() uint32 { return uint32(t) & offsetMask }

func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }

func lengthCode(len uint32) uint32 { return lengthCodes[len] }

// Returns the offset code corresponding to a specific offset
func offsetCode(off uint32) uint32 {
	if off < uint32(len(offsetCodes)) {
		return offsetCodes[off]
	} else if off>>7 < uint32(len(offsetCodes)) {
		return offsetCodes[off>>7] + 14
	} else {
		return offsetCodes[off>>14] + 28
	}
}
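The bit layout documented at the top of token.go (2 type bits, 8 length bits, 22 offset bits in one uint32) can be checked with a few lines. A minimal editorial sketch, with the relevant constants copied so it stands alone; the helpers here mirror, but are not, the vendored definitions:

```go
package main

import "fmt"

const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	matchType   = 1 << 30
)

type token uint32

// matchToken packs an (xlength, xoffset) pair exactly as in token.go.
func matchToken(xlength, xoffset uint32) token {
	return token(matchType + xlength<<lengthShift + xoffset)
}

func main() {
	t := matchToken(10, 300)
	fmt.Println(uint32(t) & offsetMask)             // 300 (extra offset)
	fmt.Println(uint32(t-matchType) >> lengthShift) // 10  (extra length)
}
```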
344
vendor/github.com/klauspost/compress/gzip/gunzip.go
generated
vendored
@@ -1,344 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package gzip implements reading and writing of gzip format compressed files,
// as specified in RFC 1952.
package gzip

import (
	"bufio"
	"encoding/binary"
	"errors"
	"io"
	"time"

	"github.com/klauspost/compress/flate"
	"github.com/klauspost/crc32"
)

const (
	gzipID1     = 0x1f
	gzipID2     = 0x8b
	gzipDeflate = 8
	flagText    = 1 << 0
	flagHdrCrc  = 1 << 1
	flagExtra   = 1 << 2
	flagName    = 1 << 3
	flagComment = 1 << 4
)

var (
	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
	ErrChecksum = errors.New("gzip: invalid checksum")
	// ErrHeader is returned when reading GZIP data that has an invalid header.
	ErrHeader = errors.New("gzip: invalid header")
)

var le = binary.LittleEndian

// noEOF converts io.EOF to io.ErrUnexpectedEOF.
func noEOF(err error) error {
	if err == io.EOF {
		return io.ErrUnexpectedEOF
	}
	return err
}

// The gzip file stores a header giving metadata about the compressed file.
// That header is exposed as the fields of the Writer and Reader structs.
//
// Strings must be UTF-8 encoded and may only contain Unicode code points
// U+0001 through U+00FF, due to limitations of the GZIP file format.
type Header struct {
	Comment string    // comment
	Extra   []byte    // "extra data"
	ModTime time.Time // modification time
	Name    string    // file name
	OS      byte      // operating system type
}

// A Reader is an io.Reader that can be read to retrieve
// uncompressed data from a gzip-format compressed file.
//
// In general, a gzip file can be a concatenation of gzip files,
// each with its own header. Reads from the Reader
// return the concatenation of the uncompressed data of each.
// Only the first header is recorded in the Reader fields.
//
// Gzip files store a length and checksum of the uncompressed data.
// The Reader will return an ErrChecksum when Read
// reaches the end of the uncompressed data if it does not
// have the expected length or checksum. Clients should treat data
// returned by Read as tentative until they receive the io.EOF
// marking the end of the data.
type Reader struct {
	Header       // valid after NewReader or Reader.Reset
	r            flate.Reader
	decompressor io.ReadCloser
	digest       uint32 // CRC-32, IEEE polynomial (section 8)
	size         uint32 // Uncompressed size (section 2.3.1)
	buf          [512]byte
	err          error
	multistream  bool
}

// NewReader creates a new Reader reading the given reader.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
//
// It is the caller's responsibility to call Close on the Reader when done.
//
// The Reader.Header fields will be valid in the Reader returned.
func NewReader(r io.Reader) (*Reader, error) {
	z := new(Reader)
	if err := z.Reset(r); err != nil {
		return nil, err
	}
	return z, nil
}

// Reset discards the Reader z's state and makes it equivalent to the
// result of its original state from NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) error {
	*z = Reader{
		decompressor: z.decompressor,
		multistream:  true,
	}
	if rr, ok := r.(flate.Reader); ok {
		z.r = rr
	} else {
		z.r = bufio.NewReader(r)
	}
	z.Header, z.err = z.readHeader()
	return z.err
}

// Multistream controls whether the reader supports multistream files.
//
// If enabled (the default), the Reader expects the input to be a sequence
// of individually gzipped data streams, each with its own header and
// trailer, ending at EOF. The effect is that the concatenation of a sequence
// of gzipped files is treated as equivalent to the gzip of the concatenation
// of the sequence. This is standard behavior for gzip readers.
//
// Calling Multistream(false) disables this behavior; disabling the behavior
// can be useful when reading file formats that distinguish individual gzip
// data streams or mix gzip data streams with other data streams.
// In this mode, when the Reader reaches the end of the data stream,
// Read returns io.EOF. If the underlying reader implements io.ByteReader,
// it will be left positioned just after the gzip stream.
// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
// If there is no next stream, z.Reset(r) will return io.EOF.
func (z *Reader) Multistream(ok bool) {
	z.multistream = ok
}

// readString reads a NUL-terminated string from z.r.
// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
// will output a string encoded using UTF-8.
// This method always updates z.digest with the data read.
func (z *Reader) readString() (string, error) {
	var err error
	needConv := false
	for i := 0; ; i++ {
		if i >= len(z.buf) {
			return "", ErrHeader
		}
		z.buf[i], err = z.r.ReadByte()
		if err != nil {
			return "", err
		}
		if z.buf[i] > 0x7f {
			needConv = true
		}
		if z.buf[i] == 0 {
			// Digest covers the NUL terminator.
			z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1])

			// Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1).
			if needConv {
				s := make([]rune, 0, i)
				for _, v := range z.buf[:i] {
					s = append(s, rune(v))
				}
				return string(s), nil
			}
			return string(z.buf[:i]), nil
		}
	}
}

// readHeader reads the GZIP header according to section 2.3.1.
// This method does not set z.err.
func (z *Reader) readHeader() (hdr Header, err error) {
	if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
		// RFC 1952, section 2.2, says the following:
		//	A gzip file consists of a series of "members" (compressed data sets).
		//
		// Other than this, the specification does not clarify whether a
		// "series" is defined as "one or more" or "zero or more". To err on the
		// side of caution, Go interprets this to mean "zero or more".
		// Thus, it is okay to return io.EOF here.
		return hdr, err
	}
	if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
		return hdr, ErrHeader
	}
	flg := z.buf[3]
	hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
	// z.buf[8] is XFL and is currently ignored.
	hdr.OS = z.buf[9]
	z.digest = crc32.ChecksumIEEE(z.buf[:10])

	if flg&flagExtra != 0 {
		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
			return hdr, noEOF(err)
		}
		z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
		data := make([]byte, le.Uint16(z.buf[:2]))
		if _, err = io.ReadFull(z.r, data); err != nil {
			return hdr, noEOF(err)
		}
		z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
		hdr.Extra = data
	}

	var s string
	if flg&flagName != 0 {
		if s, err = z.readString(); err != nil {
			return hdr, err
		}
		hdr.Name = s
	}

	if flg&flagComment != 0 {
		if s, err = z.readString(); err != nil {
			return hdr, err
		}
		hdr.Comment = s
	}

	if flg&flagHdrCrc != 0 {
		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
			return hdr, noEOF(err)
		}
		digest := le.Uint16(z.buf[:2])
		if digest != uint16(z.digest) {
			return hdr, ErrHeader
		}
	}

	z.digest = 0
	if z.decompressor == nil {
		z.decompressor = flate.NewReader(z.r)
	} else {
		z.decompressor.(flate.Resetter).Reset(z.r, nil)
	}
	return hdr, nil
}

// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
func (z *Reader) Read(p []byte) (n int, err error) {
	if z.err != nil {
		return 0, z.err
	}

	n, z.err = z.decompressor.Read(p)
	z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
	z.size += uint32(n)
	if z.err != io.EOF {
		// In the normal case we return here.
		return n, z.err
	}

	// Finished file; check checksum and size.
	if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
		z.err = noEOF(err)
		return n, z.err
	}
	digest := le.Uint32(z.buf[:4])
	size := le.Uint32(z.buf[4:8])
	if digest != z.digest || size != z.size {
		z.err = ErrChecksum
		return n, z.err
	}
	z.digest, z.size = 0, 0

	// File is ok; check if there is another.
	if !z.multistream {
		return n, io.EOF
	}
	z.err = nil // Remove io.EOF

	if _, z.err = z.readHeader(); z.err != nil {
		return n, z.err
	}

	// Read from next file, if necessary.
	if n > 0 {
		return n, nil
	}
	return z.Read(p)
}

// Support the io.WriteTo interface for io.Copy and friends.
func (z *Reader) WriteTo(w io.Writer) (int64, error) {
	total := int64(0)
	crcWriter := crc32.NewIEEE()
	for {
		if z.err != nil {
			if z.err == io.EOF {
				return total, nil
			}
			return total, z.err
		}

		// We write both to output and digest.
		mw := io.MultiWriter(w, crcWriter)
		n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
		total += n
		z.size += uint32(n)
		if err != nil {
			z.err = err
			return total, z.err
		}

		// Finished file; check checksum + size.
		if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			z.err = err
			return total, err
		}
		z.digest = crcWriter.Sum32()
		digest := le.Uint32(z.buf[:4])
		size := le.Uint32(z.buf[4:8])
		if digest != z.digest || size != z.size {
			z.err = ErrChecksum
			return total, z.err
		}
		z.digest, z.size = 0, 0

		// File is ok; check if there is another.
		if !z.multistream {
			return total, nil
		}
		crcWriter.Reset()
		z.err = nil // Remove io.EOF

		if _, z.err = z.readHeader(); z.err != nil {
			if z.err == io.EOF {
				return total, nil
			}
			return total, z.err
		}
	}
}

// Close closes the Reader. It does not close the underlying io.Reader.
// In order for the GZIP checksum to be verified, the reader must be
// fully consumed until the io.EOF.
func (z *Reader) Close() error { return z.decompressor.Close() }
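Since this vendored package keeps the standard compress/gzip API, the multistream behaviour documented on Multistream above can be exercised with the standard library as a stand-in. A small editorial sketch, assuming two gzip members concatenated back to back:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// gz compresses s into a single gzip member.
func gz(s string) []byte {
	var b bytes.Buffer
	w := gzip.NewWriter(&b)
	w.Write([]byte(s))
	w.Close()
	return b.Bytes()
}

func main() {
	// Two gzip members back to back.
	data := append(gz("hello "), gz("world")...)

	// Default multistream mode: both members read as one stream,
	// with only the first header recorded in the Reader fields.
	r, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(r)
	fmt.Printf("%s\n", out) // hello world
}
```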
251
vendor/github.com/klauspost/compress/gzip/gzip.go
generated
vendored
@@ -1,251 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gzip

import (
	"errors"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate"
	"github.com/klauspost/crc32"
)

// These constants are copied from the flate package, so that code that imports
// "compress/gzip" does not also have to import "compress/flate".
const (
	NoCompression       = flate.NoCompression
	BestSpeed           = flate.BestSpeed
	BestCompression     = flate.BestCompression
	DefaultCompression  = flate.DefaultCompression
	ConstantCompression = flate.ConstantCompression
	HuffmanOnly         = flate.HuffmanOnly
)

// A Writer is an io.WriteCloser.
// Writes to a Writer are compressed and written to w.
type Writer struct {
	Header      // written at first call to Write, Flush, or Close
	w           io.Writer
	level       int
	wroteHeader bool
	compressor  *flate.Writer
	digest      uint32 // CRC-32, IEEE polynomial (section 8)
	size        uint32 // Uncompressed size (section 2.3.1)
	closed      bool
	buf         [10]byte
	err         error
}

// NewWriter returns a new Writer.
// Writes to the returned writer are compressed and written to w.
//
// It is the caller's responsibility to call Close on the WriteCloser when done.
// Writes may be buffered and not flushed until Close.
//
// Callers that wish to set the fields in Writer.Header must do so before
// the first call to Write, Flush, or Close.
func NewWriter(w io.Writer) *Writer {
	z, _ := NewWriterLevel(w, DefaultCompression)
	return z
}

// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
//
// The compression level can be DefaultCompression, NoCompression, or any
// integer value between BestSpeed and BestCompression inclusive. The error
// returned will be nil if the level is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
	if level < HuffmanOnly || level > BestCompression {
		return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
	}
	z := new(Writer)
	z.init(w, level)
	return z, nil
}

func (z *Writer) init(w io.Writer, level int) {
	compressor := z.compressor
	if compressor != nil {
		compressor.Reset(w)
	}
	*z = Writer{
		Header: Header{
			OS: 255, // unknown
		},
		w:          w,
		level:      level,
		compressor: compressor,
	}
}

// Reset discards the Writer z's state and makes it equivalent to the
// result of its original state from NewWriter or NewWriterLevel, but
// writing to w instead. This permits reusing a Writer rather than
// allocating a new one.
func (z *Writer) Reset(w io.Writer) {
	z.init(w, z.level)
}

// writeBytes writes a length-prefixed byte slice to z.w.
func (z *Writer) writeBytes(b []byte) error {
	if len(b) > 0xffff {
		return errors.New("gzip.Write: Extra data is too large")
	}
	le.PutUint16(z.buf[:2], uint16(len(b)))
	_, err := z.w.Write(z.buf[:2])
	if err != nil {
		return err
	}
	_, err = z.w.Write(b)
	return err
}

// writeString writes a UTF-8 string s in GZIP's format to z.w.
// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
func (z *Writer) writeString(s string) (err error) {
	// GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
	needconv := false
	for _, v := range s {
		if v == 0 || v > 0xff {
			return errors.New("gzip.Write: non-Latin-1 header string")
		}
		if v > 0x7f {
			needconv = true
		}
	}
	if needconv {
		b := make([]byte, 0, len(s))
		for _, v := range s {
			b = append(b, byte(v))
		}
		_, err = z.w.Write(b)
	} else {
		_, err = io.WriteString(z.w, s)
	}
	if err != nil {
		return err
	}
	// GZIP strings are NUL-terminated.
	z.buf[0] = 0
	_, err = z.w.Write(z.buf[:1])
	return err
}

// Write writes a compressed form of p to the underlying io.Writer. The
// compressed bytes are not necessarily flushed until the Writer is closed.
func (z *Writer) Write(p []byte) (int, error) {
	if z.err != nil {
		return 0, z.err
	}
	var n int
	// Write the GZIP header lazily.
	if !z.wroteHeader {
		z.wroteHeader = true
		z.buf[0] = gzipID1
		z.buf[1] = gzipID2
		z.buf[2] = gzipDeflate
		z.buf[3] = 0
		if z.Extra != nil {
			z.buf[3] |= 0x04
		}
		if z.Name != "" {
			z.buf[3] |= 0x08
		}
		if z.Comment != "" {
			z.buf[3] |= 0x10
		}
		le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
		if z.level == BestCompression {
			z.buf[8] = 2
		} else if z.level == BestSpeed {
			z.buf[8] = 4
		} else {
			z.buf[8] = 0
		}
		z.buf[9] = z.OS
		n, z.err = z.w.Write(z.buf[:10])
		if z.err != nil {
			return n, z.err
		}
		if z.Extra != nil {
			z.err = z.writeBytes(z.Extra)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.Name != "" {
			z.err = z.writeString(z.Name)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.Comment != "" {
			z.err = z.writeString(z.Comment)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.compressor == nil {
			z.compressor, _ = flate.NewWriter(z.w, z.level)
		}
	}
	z.size += uint32(len(p))
	z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
	n, z.err = z.compressor.Write(p)
	return n, z.err
}

// Flush flushes any pending compressed data to the underlying writer.
//
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet. Flush does
// not return until the data has been written. If the underlying
// writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (z *Writer) Flush() error {
	if z.err != nil {
		return z.err
	}
	if z.closed {
		return nil
	}
	if !z.wroteHeader {
		z.Write(nil)
		if z.err != nil {
			return z.err
		}
	}
	z.err = z.compressor.Flush()
	return z.err
}

// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
	if z.err != nil {
		return z.err
	}
	if z.closed {
		return nil
	}
	z.closed = true
	if !z.wroteHeader {
		z.Write(nil)
		if z.err != nil {
			return z.err
		}
	}
	z.err = z.compressor.Close()
	if z.err != nil {
		return z.err
	}
	le.PutUint32(z.buf[:4], z.digest)
	le.PutUint32(z.buf[4:8], z.size)
	_, z.err = z.w.Write(z.buf[:8])
	return z.err
}
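Write above emits the gzip header lazily on the first call, which is why Header fields must be set before any Write, Flush, or Close. A usage sketch against the identical standard compress/gzip API (field values here are arbitrary examples):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"time"
)

func main() {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	// Header fields are only picked up before the first Write,
	// because the header is written lazily (as in the code above).
	w.Name = "greeting.txt"
	w.Comment = "demo"
	w.ModTime = time.Unix(1600000000, 0)
	w.Write([]byte("hello"))
	w.Close()

	r, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Name, r.Comment) // greeting.txt demo
}
```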
178
vendor/github.com/klauspost/compress/zlib/reader.go
generated
vendored
@@ -1,178 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package zlib implements reading and writing of zlib format compressed data,
as specified in RFC 1950.

The implementation provides filters that uncompress during reading
and compress during writing. For example, to write compressed data
to a buffer:

	var b bytes.Buffer
	w := zlib.NewWriter(&b)
	w.Write([]byte("hello, world\n"))
	w.Close()

and to read that data back:

	r, err := zlib.NewReader(&b)
	io.Copy(os.Stdout, r)
	r.Close()
*/
package zlib

import (
	"bufio"
	"errors"
	"hash"
	"hash/adler32"
	"io"

	"github.com/klauspost/compress/flate"
)

const zlibDeflate = 8

var (
	// ErrChecksum is returned when reading ZLIB data that has an invalid checksum.
	ErrChecksum = errors.New("zlib: invalid checksum")
	// ErrDictionary is returned when reading ZLIB data that has an invalid dictionary.
	ErrDictionary = errors.New("zlib: invalid dictionary")
	// ErrHeader is returned when reading ZLIB data that has an invalid header.
	ErrHeader = errors.New("zlib: invalid header")
)

type reader struct {
	r            flate.Reader
	decompressor io.ReadCloser
	digest       hash.Hash32
	err          error
	scratch      [4]byte
}

// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
	// Reset discards any buffered data and resets the Resetter as if it was
	// newly initialized with the given reader.
	Reset(r io.Reader, dict []byte) error
}

// NewReader creates a new ReadCloser.
// Reads from the returned ReadCloser read and decompress data from r.
// If r does not implement io.ByteReader, the decompressor may read more
// data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser when done.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) (io.ReadCloser, error) {
	return NewReaderDict(r, nil)
}

// NewReaderDict is like NewReader but uses a preset dictionary.
// NewReaderDict ignores the dictionary if the compressed data does not refer to it.
// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary.
//
// The ReadCloser returned by NewReaderDict also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) {
	z := new(reader)
	err := z.Reset(r, dict)
	if err != nil {
		return nil, err
	}
	return z, nil
}

func (z *reader) Read(p []byte) (int, error) {
	if z.err != nil {
		return 0, z.err
	}

	var n int
	n, z.err = z.decompressor.Read(p)
	z.digest.Write(p[0:n])
	if z.err != io.EOF {
		// In the normal case we return here.
		return n, z.err
	}

	// Finished file; check checksum.
	if _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		z.err = err
		return n, z.err
	}
	// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
	checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
	if checksum != z.digest.Sum32() {
		z.err = ErrChecksum
		return n, z.err
	}
	return n, io.EOF
}

// Calling Close does not close the wrapped io.Reader originally passed to NewReader.
// In order for the ZLIB checksum to be verified, the reader must be
// fully consumed until the io.EOF.
func (z *reader) Close() error {
	if z.err != nil && z.err != io.EOF {
		return z.err
	}
	z.err = z.decompressor.Close()
	return z.err
}

func (z *reader) Reset(r io.Reader, dict []byte) error {
	*z = reader{decompressor: z.decompressor}
	if fr, ok := r.(flate.Reader); ok {
		z.r = fr
	} else {
		z.r = bufio.NewReader(r)
	}

	// Read the header (RFC 1950 section 2.2.).
	_, z.err = io.ReadFull(z.r, z.scratch[0:2])
	if z.err != nil {
		if z.err == io.EOF {
			z.err = io.ErrUnexpectedEOF
		}
		return z.err
	}
	h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
	if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
		z.err = ErrHeader
		return z.err
	}
	haveDict := z.scratch[1]&0x20 != 0
	if haveDict {
		_, z.err = io.ReadFull(z.r, z.scratch[0:4])
		if z.err != nil {
			if z.err == io.EOF {
				z.err = io.ErrUnexpectedEOF
			}
			return z.err
		}
		checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
		if checksum != adler32.Checksum(dict) {
			z.err = ErrDictionary
			return z.err
		}
	}

	if z.decompressor == nil {
		if haveDict {
			z.decompressor = flate.NewReaderDict(z.r, dict)
		} else {
			z.decompressor = flate.NewReader(z.r)
		}
	} else {
		z.decompressor.(flate.Resetter).Reset(z.r, dict)
	}
	z.digest = adler32.New()
	return nil
}
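Reset above verifies the FDICT Adler-32 checksum against the caller-supplied dictionary and returns ErrDictionary on mismatch. A round-trip sketch using the matching standard compress/zlib API (NewWriterLevelDict / NewReaderDict); the dictionary contents here are arbitrary:

```go
package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
)

func main() {
	dict := []byte("hello world")

	var buf bytes.Buffer
	w, _ := zlib.NewWriterLevelDict(&buf, zlib.BestCompression, dict)
	w.Write([]byte("hello world hello world"))
	w.Close()

	// The reader must be given the same dictionary; a different one
	// fails the Adler-32 check performed in Reset above.
	r, err := zlib.NewReaderDict(&buf, dict)
	if err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(r)
	fmt.Printf("%s\n", out)
}
```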
201
vendor/github.com/klauspost/compress/zlib/writer.go
generated
vendored
@@ -1,201 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package zlib

import (
	"fmt"
	"hash"
	"hash/adler32"
	"io"

	"github.com/klauspost/compress/flate"
)

// These constants are copied from the flate package, so that code that imports
// "compress/zlib" does not also have to import "compress/flate".
const (
	NoCompression       = flate.NoCompression
	BestSpeed           = flate.BestSpeed
	BestCompression     = flate.BestCompression
	DefaultCompression  = flate.DefaultCompression
	ConstantCompression = flate.ConstantCompression
	HuffmanOnly         = flate.HuffmanOnly
)

// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	w           io.Writer
	level       int
	dict        []byte
	compressor  *flate.Writer
	digest      hash.Hash32
	err         error
	scratch     [4]byte
	wroteHeader bool
}

// NewWriter creates a new Writer.
// Writes to the returned Writer are compressed and written to w.
//
// It is the caller's responsibility to call Close on the WriteCloser when done.
// Writes may be buffered and not flushed until Close.
func NewWriter(w io.Writer) *Writer {
	z, _ := NewWriterLevelDict(w, DefaultCompression, nil)
	return z
}

// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
//
// The compression level can be DefaultCompression, NoCompression, HuffmanOnly
// or any integer value between BestSpeed and BestCompression inclusive.
// The error returned will be nil if the level is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
	return NewWriterLevelDict(w, level, nil)
}

// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to
// compress with.
//
// The dictionary may be nil. If not, its contents should not be modified until
// the Writer is closed.
func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	if level < HuffmanOnly || level > BestCompression {
		return nil, fmt.Errorf("zlib: invalid compression level: %d", level)
	}
	return &Writer{
		w:     w,
		level: level,
		dict:  dict,
	}, nil
}

// Reset clears the state of the Writer z such that it is equivalent to its
// initial state from NewWriterLevel or NewWriterLevelDict, but instead writing
// to w.
func (z *Writer) Reset(w io.Writer) {
	z.w = w
	// z.level and z.dict left unchanged.
	if z.compressor != nil {
		z.compressor.Reset(w)
	}
	if z.digest != nil {
		z.digest.Reset()
	}
	z.err = nil
	z.scratch = [4]byte{}
	z.wroteHeader = false
}

// writeHeader writes the ZLIB header.
func (z *Writer) writeHeader() (err error) {
	z.wroteHeader = true
	// ZLIB has a two-byte header (as documented in RFC 1950).
	// The first four bits are the CINFO (compression info), which is 7 for the default deflate window size.
	// The next four bits are the CM (compression method), which is 8 for deflate.
	z.scratch[0] = 0x78
	// The next two bits are the FLEVEL (compression level). The four values are:
	// 0=fastest, 1=fast, 2=default, 3=best.
	// The next bit, FDICT, is set if a dictionary is given.
	// The final five FCHECK bits form a mod-31 checksum.
	switch z.level {
	case -2, 0, 1:
		z.scratch[1] = 0 << 6
	case 2, 3, 4, 5:
		z.scratch[1] = 1 << 6
	case 6, -1:
		z.scratch[1] = 2 << 6
	case 7, 8, 9:
		z.scratch[1] = 3 << 6
	default:
		panic("unreachable")
	}
	if z.dict != nil {
		z.scratch[1] |= 1 << 5
	}
	z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31)
	if _, err = z.w.Write(z.scratch[0:2]); err != nil {
		return err
	}
	if z.dict != nil {
		// The next four bytes are the Adler-32 checksum of the dictionary.
		checksum := adler32.Checksum(z.dict)
		z.scratch[0] = uint8(checksum >> 24)
		z.scratch[1] = uint8(checksum >> 16)
		z.scratch[2] = uint8(checksum >> 8)
		z.scratch[3] = uint8(checksum >> 0)
		if _, err = z.w.Write(z.scratch[0:4]); err != nil {
			return err
		}
	}
	if z.compressor == nil {
		// Initialize deflater unless the Writer is being reused
		// after a Reset call.
		z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)
		if err != nil {
			return err
		}
		z.digest = adler32.New()
	}
	return nil
}

// Write writes a compressed form of p to the underlying io.Writer. The
// compressed bytes are not necessarily flushed until the Writer is closed or
// explicitly flushed.
func (z *Writer) Write(p []byte) (n int, err error) {
	if !z.wroteHeader {
		z.err = z.writeHeader()
	}
	if z.err != nil {
		return 0, z.err
	}
	if len(p) == 0 {
		return 0, nil
	}
	n, err = z.compressor.Write(p)
	if err != nil {
		z.err = err
		return
	}
	z.digest.Write(p)
	return
}

// Flush flushes the Writer to its underlying io.Writer.
func (z *Writer) Flush() error {
	if !z.wroteHeader {
		z.err = z.writeHeader()
	}
	if z.err != nil {
		return z.err
	}
	z.err = z.compressor.Flush()
	return z.err
}

// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
	if !z.wroteHeader {
		z.err = z.writeHeader()
	}
	if z.err != nil {
		return z.err
	}
	z.err = z.compressor.Close()
	if z.err != nil {
		return z.err
	}
	checksum := z.digest.Sum32()
	// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
	z.scratch[0] = uint8(checksum >> 24)
	z.scratch[1] = uint8(checksum >> 16)
	z.scratch[2] = uint8(checksum >> 8)
	z.scratch[3] = uint8(checksum >> 0)
	_, z.err = z.w.Write(z.scratch[0:4])
	return z.err
}
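writeHeader above pads the second header byte so that the 16-bit big-endian value is divisible by 31 (the FCHECK field of RFC 1950). A quick worked check of the familiar 0x78 0x9c default-compression header:

```go
package main

import "fmt"

func main() {
	// Default compression (level 6 / -1): CMF=0x78, FLG starts with
	// FLEVEL=2 in the top two bits, then FCHECK padding is added so the
	// combined value is a multiple of 31, exactly as in writeHeader.
	cmf, flg := byte(0x78), byte(2<<6)
	flg += byte(31 - (uint16(cmf)<<8+uint16(flg))%31)
	fmt.Printf("%#02x %#02x, mod 31 = %d\n",
		cmf, flg, (uint16(cmf)<<8+uint16(flg))%31) // 0x78 0x9c, mod 31 = 0
}
```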
22
vendor/github.com/klauspost/cpuid/LICENSE
generated
vendored
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015 Klaus Post

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
145
vendor/github.com/klauspost/cpuid/README.md
generated
vendored
@@ -1,145 +0,0 @@
|
||||
# cpuid
|
||||
Package cpuid provides information about the CPU running the current program.
|
||||
|
||||
CPU features are detected on startup, and kept for fast access through the life of the application.
|
||||
Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use.
|
||||
|
||||
You can access the CPU information by accessing the shared CPU variable of the cpuid library.
|
||||
|
||||
Package home: https://github.com/klauspost/cpuid
|
||||
|
||||
[![GoDoc][1]][2] [![Build Status][3]][4]
|
||||
|
||||
[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
|
||||
[2]: https://godoc.org/github.com/klauspost/cpuid
|
||||
[3]: https://travis-ci.org/klauspost/cpuid.svg
|
||||
[4]: https://travis-ci.org/klauspost/cpuid
|
||||
|
||||
# features
|
||||
## CPU Instructions
|
||||
* **CMOV** (i686 CMOV)
|
||||
* **NX** (NX (No-Execute) bit)
|
||||
* **AMD3DNOW** (AMD 3DNOW)
|
||||
* **AMD3DNOWEXT** (AMD 3DNowExt)
|
||||
* **MMX** (standard MMX)
|
||||
* **MMXEXT** (SSE integer functions or AMD MMX ext)
|
||||
* **SSE** (SSE functions)
|
||||
* **SSE2** (P4 SSE functions)
|
||||
* **SSE3** (Prescott SSE3 functions)
|
||||
* **SSSE3** (Conroe SSSE3 functions)
|
||||
* **SSE4** (Penryn SSE4.1 functions)
|
||||
* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions)
|
||||
* **SSE42** (Nehalem SSE4.2 functions)
|
||||
* **AVX** (AVX functions)
|
||||
* **AVX2** (AVX2 functions)
|
||||
* **FMA3** (Intel FMA 3)
|
||||
* **FMA4** (Bulldozer FMA4 functions)
|
||||
* **XOP** (Bulldozer XOP functions)
|
||||
* **F16C** (Half-precision floating-point conversion)
|
||||
* **BMI1** (Bit Manipulation Instruction Set 1)
|
||||
* **BMI2** (Bit Manipulation Instruction Set 2)
|
||||
* **TBM** (AMD Trailing Bit Manipulation)
|
||||
* **LZCNT** (LZCNT instruction)
|
||||
* **POPCNT** (POPCNT instruction)
|
||||
* **AESNI** (Advanced Encryption Standard New Instructions)
|
||||
* **CLMUL** (Carry-less Multiplication)
|
||||
* **HTT** (Hyperthreading (enabled))
|
||||
* **HLE** (Hardware Lock Elision)
|
||||
* **RTM** (Restricted Transactional Memory)
|
||||
* **RDRAND** (RDRAND instruction is available)
|
||||
* **RDSEED** (RDSEED instruction is available)
|
||||
* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
|
||||
* **SHA** (Intel SHA Extensions)
|
||||
* **AVX512F** (AVX-512 Foundation)
|
||||
* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions)
|
||||
* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions)
|
||||
* **AVX512PF** (AVX-512 Prefetch Instructions)
|
||||
* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions)
|
||||
* **AVX512CD** (AVX-512 Conflict Detection Instructions)
|
||||
* **AVX512BW** (AVX-512 Byte and Word Instructions)
|
||||
* **AVX512VL** (AVX-512 Vector Length Extensions)
|
||||
* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions)
|
||||
* **MPX** (Intel MPX (Memory Protection Extensions))
|
||||
* **ERMS** (Enhanced REP MOVSB/STOSB)
|
||||
* **RDTSCP** (RDTSCP Instruction)
|
||||
* **CX16** (CMPXCHG16B Instruction)
|
||||
* **SGX** (Software Guard Extensions, with activation details)
|
||||
|
||||
## Performance
|
||||
* **RDTSCP()** Returns current cycle count. Can be used for benchmarking.
|
||||
* **SSE2SLOW** (SSE2 is supported, but usually not faster)
|
||||
* **SSE3SLOW** (SSE3 is supported, but usually not faster)
|
||||
* **ATOM** (Atom processor, some SSSE3 instructions are slower)
|
||||
* **Cache line** (Probable size of a cache line).
|
||||
* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.
|
||||
|
||||
## Cpu Vendor/VM
|
||||
* **Intel**
|
||||
* **AMD**
|
||||
* **VIA**
|
||||
* **Transmeta**
|
||||
* **NSC**
|
||||
* **KVM** (Kernel-based Virtual Machine)
|
||||
* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
|
||||
* **VMware**
|
||||
* **XenHVM**
|
||||
|
||||
# installing
|
||||
|
||||
```go get github.com/klauspost/cpuid```
|
||||
|
||||
# example
|
||||
|
||||
```Go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/klauspost/cpuid"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Print basic CPU information:
|
||||
fmt.Println("Name:", cpuid.CPU.BrandName)
|
||||
fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
|
||||
fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
|
||||
fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
|
||||
fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
|
||||
fmt.Println("Features:", cpuid.CPU.Features)
|
||||
fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
|
||||
fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes")
|
||||
fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes")
|
||||
fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes")
|
||||
fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes")
|
||||
|
||||
// Test if we have a specific feature:
|
||||
if cpuid.CPU.SSE() {
|
||||
fmt.Println("We have Streaming SIMD Extensions")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Sample output:
```
>go run main.go
Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
PhysicalCores: 2
ThreadsPerCore: 2
LogicalCores: 4
Family 6 Model: 42
Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
Cacheline bytes: 64
We have Streaming SIMD Extensions
```

# private package

In the "private" folder you can find an autogenerated version of the library that you can include in your own packages.

For this purpose all exports are removed, and functions and constants are lowercased.

This is not the recommended way to use the library, but it is provided for convenience if depending on external packages is difficult for you.

# license

This code is published under an MIT license. See LICENSE file for more information.

1022 vendor/github.com/klauspost/cpuid/cpuid.go (generated, vendored)
File diff suppressed because it is too large.

42 vendor/github.com/klauspost/cpuid/cpuid_386.s (generated, vendored)
@@ -1,42 +0,0 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build 386,!gccgo

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORL CX, CX
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+4(FP)
	MOVL BX, ebx+8(FP)
	MOVL CX, ecx+12(FP)
	MOVL DX, edx+16(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+4(FP)
	MOVL DX, edx+8(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET

42 vendor/github.com/klauspost/cpuid/cpuid_amd64.s (generated, vendored)
@@ -1,42 +0,0 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build amd64,!gccgo

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORQ CX, CX
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+8(FP)
	MOVL DX, edx+12(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET

17 vendor/github.com/klauspost/cpuid/detect_intel.go (generated, vendored)
@@ -1,17 +0,0 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build 386,!gccgo amd64,!gccgo

package cpuid

func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)

func initCPU() {
	cpuid = asmCpuid
	cpuidex = asmCpuidex
	xgetbv = asmXgetbv
	rdtscpAsm = asmRdtscpAsm
}

23 vendor/github.com/klauspost/cpuid/detect_ref.go (generated, vendored)
@@ -1,23 +0,0 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build !amd64,!386 gccgo

package cpuid

func initCPU() {
	cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}

	cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}

	xgetbv = func(index uint32) (eax, edx uint32) {
		return 0, 0
	}

	rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}
}

3 vendor/github.com/klauspost/cpuid/generate.go (generated, vendored)
@@ -1,3 +0,0 @@
package cpuid

//go:generate go run private-gen.go

476 vendor/github.com/klauspost/cpuid/private-gen.go (generated, vendored)
@@ -1,476 +0,0 @@
// +build ignore

package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"io"
	"io/ioutil"
	"log"
	"os"
	"reflect"
	"strings"
	"unicode"
	"unicode/utf8"
)

var inFiles = []string{"cpuid.go", "cpuid_test.go"}
var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
var fileSet = token.NewFileSet()
var reWrites = []rewrite{
	initRewrite("CPUInfo -> cpuInfo"),
	initRewrite("Vendor -> vendor"),
	initRewrite("Flags -> flags"),
	initRewrite("Detect -> detect"),
	initRewrite("CPU -> cpu"),
}
var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
	// cpuid_test.go
	"t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
}

var excludePrefixes = []string{"test", "benchmark"}

func main() {
	Package := "private"
	parserMode := parser.ParseComments
	exported := make(map[string]rewrite)
	for _, file := range inFiles {
		in, err := os.Open(file)
		if err != nil {
			log.Fatalf("opening input: %s", err)
		}

		src, err := ioutil.ReadAll(in)
		if err != nil {
			log.Fatalf("reading input: %s", err)
		}

		astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
		if err != nil {
			log.Fatalf("parsing input: %s", err)
		}

		for _, rw := range reWrites {
			astfile = rw(astfile)
		}

		// Inspect the AST and print all identifiers and literals.
		var startDecl token.Pos
		var endDecl token.Pos
		ast.Inspect(astfile, func(n ast.Node) bool {
			var s string
			switch x := n.(type) {
			case *ast.Ident:
				if x.IsExported() {
					t := strings.ToLower(x.Name)
					for _, pre := range excludePrefixes {
						if strings.HasPrefix(t, pre) {
							return true
						}
					}
					if !excludeNames[t] {
						//if x.Pos() > startDecl && x.Pos() < endDecl {
						exported[x.Name] = initRewrite(x.Name + " -> " + t)
					}
				}

			case *ast.GenDecl:
				if x.Tok == token.CONST && x.Lparen > 0 {
					startDecl = x.Lparen
					endDecl = x.Rparen
					// fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
				}
			}
			if s != "" {
				fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
			}
			return true
		})

		for _, rw := range exported {
			astfile = rw(astfile)
		}

		var buf bytes.Buffer

		printer.Fprint(&buf, fileSet, astfile)

		// Remove package documentation and insert information
		s := buf.String()
		ind := strings.Index(buf.String(), "\npackage cpuid")
		s = s[ind:]
		s = "// Generated, DO NOT EDIT,\n" +
			"// but copy it to your own project and rename the package.\n" +
			"// See more at http://github.com/klauspost/cpuid\n" +
			s

		outputName := Package + string(os.PathSeparator) + file

		err = ioutil.WriteFile(outputName, []byte(s), 0644)
		if err != nil {
			log.Fatalf("writing output: %s", err)
		}
		log.Println("Generated", outputName)
	}

	for _, file := range copyFiles {
		dst := ""
		if strings.HasPrefix(file, "cpuid") {
			dst = Package + string(os.PathSeparator) + file
		} else {
			dst = Package + string(os.PathSeparator) + "cpuid_" + file
		}
		err := copyFile(file, dst)
		if err != nil {
			log.Fatalf("copying file: %s", err)
		}
		log.Println("Copied", dst)
	}
}

// copyFile copies a file from src to dst. If src and dst files exist, and are
// the same, then return success. Copy the file contents from src to dst.
func copyFile(src, dst string) (err error) {
	sfi, err := os.Stat(src)
	if err != nil {
		return
	}
	if !sfi.Mode().IsRegular() {
		// cannot copy non-regular files (e.g., directories,
		// symlinks, devices, etc.)
		return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
	}
	dfi, err := os.Stat(dst)
	if err != nil {
		if !os.IsNotExist(err) {
			return
		}
	} else {
		if !(dfi.Mode().IsRegular()) {
			return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
		}
		if os.SameFile(sfi, dfi) {
			return
		}
	}
	err = copyFileContents(src, dst)
	return
}

// copyFileContents copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all its contents will be replaced by the contents
// of the source file.
func copyFileContents(src, dst string) (err error) {
	in, err := os.Open(src)
	if err != nil {
		return
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return
	}
	defer func() {
		cerr := out.Close()
		if err == nil {
			err = cerr
		}
	}()
	if _, err = io.Copy(out, in); err != nil {
		return
	}
	err = out.Sync()
	return
}

type rewrite func(*ast.File) *ast.File

// Mostly copied from gofmt
func initRewrite(rewriteRule string) rewrite {
	f := strings.Split(rewriteRule, "->")
	if len(f) != 2 {
		fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
		os.Exit(2)
	}
	pattern := parseExpr(f[0], "pattern")
	replace := parseExpr(f[1], "replacement")
	return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
}

// parseExpr parses s as an expression.
// It might make sense to expand this to allow statement patterns,
// but there are problems with preserving formatting and also
// with what a wildcard for a statement looks like.
func parseExpr(s, what string) ast.Expr {
	x, err := parser.ParseExpr(s)
	if err != nil {
		fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
		os.Exit(2)
	}
	return x
}

// Keep this function for debugging.
/*
func dump(msg string, val reflect.Value) {
	fmt.Printf("%s:\n", msg)
	ast.Print(fileSet, val.Interface())
	fmt.Println()
}
*/

// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
	cmap := ast.NewCommentMap(fileSet, p, p.Comments)
	m := make(map[string]reflect.Value)
	pat := reflect.ValueOf(pattern)
	repl := reflect.ValueOf(replace)

	var rewriteVal func(val reflect.Value) reflect.Value
	rewriteVal = func(val reflect.Value) reflect.Value {
		// don't bother if val is invalid to start with
		if !val.IsValid() {
			return reflect.Value{}
		}
		for k := range m {
			delete(m, k)
		}
		val = apply(rewriteVal, val)
		if match(m, pat, val) {
			val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
		}
		return val
	}

	r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
	r.Comments = cmap.Filter(r).Comments() // recreate comments list
	return r
}

// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
func set(x, y reflect.Value) {
	// don't bother if x cannot be set or y is invalid
	if !x.CanSet() || !y.IsValid() {
		return
	}
	defer func() {
		if x := recover(); x != nil {
			if s, ok := x.(string); ok &&
				(strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
				// x cannot be set to y - ignore this rewrite
				return
			}
			panic(x)
		}
	}()
	x.Set(y)
}

// Values/types for special cases.
var (
	objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
	scopePtrNil  = reflect.ValueOf((*ast.Scope)(nil))

	identType     = reflect.TypeOf((*ast.Ident)(nil))
	objectPtrType = reflect.TypeOf((*ast.Object)(nil))
	positionType  = reflect.TypeOf(token.NoPos)
	callExprType  = reflect.TypeOf((*ast.CallExpr)(nil))
	scopePtrType  = reflect.TypeOf((*ast.Scope)(nil))
)

// apply replaces each AST field x in val with f(x), returning val.
// To avoid extra conversions, f operates on the reflect.Value form.
func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
	if !val.IsValid() {
		return reflect.Value{}
	}

	// *ast.Objects introduce cycles and are likely incorrect after
	// rewrite; don't follow them but replace with nil instead
	if val.Type() == objectPtrType {
		return objectPtrNil
	}

	// similarly for scopes: they are likely incorrect after a rewrite;
	// replace them with nil
	if val.Type() == scopePtrType {
		return scopePtrNil
	}

	switch v := reflect.Indirect(val); v.Kind() {
	case reflect.Slice:
		for i := 0; i < v.Len(); i++ {
			e := v.Index(i)
			set(e, f(e))
		}
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			e := v.Field(i)
			set(e, f(e))
		}
	case reflect.Interface:
		e := v.Elem()
		set(v, f(e))
	}
	return val
}

func isWildcard(s string) bool {
	rune, size := utf8.DecodeRuneInString(s)
	return size == len(s) && unicode.IsLower(rune)
}

// match returns true if pattern matches val,
// recording wildcard submatches in m.
// If m == nil, match checks whether pattern == val.
func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
	// Wildcard matches any expression. If it appears multiple
	// times in the pattern, it must match the same expression
	// each time.
	if m != nil && pattern.IsValid() && pattern.Type() == identType {
		name := pattern.Interface().(*ast.Ident).Name
		if isWildcard(name) && val.IsValid() {
			// wildcards only match valid (non-nil) expressions.
			if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
				if old, ok := m[name]; ok {
					return match(nil, old, val)
				}
				m[name] = val
				return true
			}
		}
	}

	// Otherwise, pattern and val must match recursively.
	if !pattern.IsValid() || !val.IsValid() {
		return !pattern.IsValid() && !val.IsValid()
	}
	if pattern.Type() != val.Type() {
		return false
	}

	// Special cases.
	switch pattern.Type() {
	case identType:
		// For identifiers, only the names need to match
		// (and none of the other *ast.Object information).
		// This is a common case, handle it all here instead
		// of recursing down any further via reflection.
		p := pattern.Interface().(*ast.Ident)
		v := val.Interface().(*ast.Ident)
		return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
	case objectPtrType, positionType:
		// object pointers and token positions always match
		return true
	case callExprType:
		// For calls, the Ellipsis fields (token.Position) must
		// match since that is how f(x) and f(x...) are different.
		// Check them here but fall through for the remaining fields.
		p := pattern.Interface().(*ast.CallExpr)
		v := val.Interface().(*ast.CallExpr)
		if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
			return false
		}
	}

	p := reflect.Indirect(pattern)
	v := reflect.Indirect(val)
	if !p.IsValid() || !v.IsValid() {
		return !p.IsValid() && !v.IsValid()
	}

	switch p.Kind() {
	case reflect.Slice:
		if p.Len() != v.Len() {
			return false
		}
		for i := 0; i < p.Len(); i++ {
			if !match(m, p.Index(i), v.Index(i)) {
				return false
			}
		}
		return true

	case reflect.Struct:
		for i := 0; i < p.NumField(); i++ {
			if !match(m, p.Field(i), v.Field(i)) {
				return false
			}
		}
		return true

	case reflect.Interface:
		return match(m, p.Elem(), v.Elem())
	}

	// Handle token integers, etc.
	return p.Interface() == v.Interface()
}

// subst returns a copy of pattern with values from m substituted in place
// of wildcards and pos used as the position of tokens from the pattern.
// if m == nil, subst returns a copy of pattern and doesn't change the line
// number information.
func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
	if !pattern.IsValid() {
		return reflect.Value{}
	}

	// Wildcard gets replaced with map value.
	if m != nil && pattern.Type() == identType {
		name := pattern.Interface().(*ast.Ident).Name
		if isWildcard(name) {
			if old, ok := m[name]; ok {
				return subst(nil, old, reflect.Value{})
			}
		}
	}

	if pos.IsValid() && pattern.Type() == positionType {
		// use new position only if old position was valid in the first place
		if old := pattern.Interface().(token.Pos); !old.IsValid() {
			return pattern
		}
		return pos
	}

	// Otherwise copy.
	switch p := pattern; p.Kind() {
	case reflect.Slice:
		v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
		for i := 0; i < p.Len(); i++ {
			v.Index(i).Set(subst(m, p.Index(i), pos))
		}
		return v

	case reflect.Struct:
		v := reflect.New(p.Type()).Elem()
		for i := 0; i < p.NumField(); i++ {
			v.Field(i).Set(subst(m, p.Field(i), pos))
		}
		return v

	case reflect.Ptr:
		v := reflect.New(p.Type()).Elem()
		if elem := p.Elem(); elem.IsValid() {
			v.Set(subst(m, elem, pos).Addr())
		}
		return v

	case reflect.Interface:
		v := reflect.New(p.Type()).Elem()
		if elem := p.Elem(); elem.IsValid() {
			v.Set(subst(m, elem, pos))
		}
		return v
	}

	return pattern
}

28 vendor/github.com/klauspost/crc32/LICENSE (generated, vendored)
@@ -1,28 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2015 Klaus Post

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

87 vendor/github.com/klauspost/crc32/README.md (generated, vendored)
@@ -1,87 +0,0 @@
# crc32
CRC32 hash with x64 optimizations

This package is a drop-in replacement for the standard library `hash/crc32` package that features SSE 4.2 optimizations on x64 platforms, for a 10x speedup.

[Build Status](https://travis-ci.org/klauspost/crc32)

# usage

Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer.

Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go.

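A quick sketch of that drop-in claim; `ChecksumIEEE` is defined in the removed `crc32.go` further below with the same signature as the standard library's:

```Go
package main

import (
	"fmt"

	crc32 "github.com/klauspost/crc32" // drop-in for hash/crc32
)

func main() {
	// Identical call and result to hash/crc32.ChecksumIEEE; only the
	// import path changes.
	fmt.Printf("%08x\n", crc32.ChecksumIEEE([]byte("hello world")))
}
```
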
# changes
* Oct 20, 2016: Changes have been merged to upstream Go. Package updated to match.
* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable.

# performance

For *Go 1.7* performance is equivalent to the standard library. So if you use this package for Go 1.7 you can switch back.

For IEEE tables (the most common), there is approximately a factor 10 speedup with the "CLMUL" (Carryless multiplication) instruction:
```
benchmark            old ns/op     new ns/op     delta
BenchmarkCrc32KB     99955         10258         -89.74%

benchmark            old MB/s     new MB/s     speedup
BenchmarkCrc32KB     327.83       3194.20      9.74x
```

For other tables and "CLMUL" capable machines the performance is the same as the standard library.

Here are some detailed benchmarks, comparing to the Go 1.5 standard library with and without assembler enabled.

```
Std:        Standard Go 1.5 library
Crc:        Indicates IEEE type CRC.
40B:        Size of each slice encoded.
NoAsm:      Assembler was disabled (ie. not an AMD64 or SSE 4.2+ capable machine).
Castagnoli: Castagnoli CRC type.

BenchmarkStdCrc40B-4               10000000     158 ns/op       252.88 MB/s
BenchmarkCrc40BNoAsm-4             20000000     105 ns/op       377.38 MB/s (slice8)
BenchmarkCrc40B-4                  20000000     105 ns/op       378.77 MB/s (slice8)

BenchmarkStdCrc1KB-4                 500000     3604 ns/op      284.10 MB/s
BenchmarkCrc1KBNoAsm-4              1000000     1463 ns/op      699.79 MB/s (slice8)
BenchmarkCrc1KB-4                   3000000     396 ns/op       2583.69 MB/s (asm)

BenchmarkStdCrc8KB-4                 200000     11417 ns/op     717.48 MB/s (slice8)
BenchmarkCrc8KBNoAsm-4               200000     11317 ns/op     723.85 MB/s (slice8)
BenchmarkCrc8KB-4                    500000     2919 ns/op      2805.73 MB/s (asm)

BenchmarkStdCrc32KB-4                 30000     45749 ns/op     716.24 MB/s (slice8)
BenchmarkCrc32KBNoAsm-4               30000     45109 ns/op     726.42 MB/s (slice8)
BenchmarkCrc32KB-4                   100000     11497 ns/op     2850.09 MB/s (asm)

BenchmarkStdNoAsmCastagnol40B-4    10000000     161 ns/op       246.94 MB/s
BenchmarkStdCastagnoli40B-4        50000000     28.4 ns/op      1410.69 MB/s (asm)
BenchmarkCastagnoli40BNoAsm-4      20000000     100 ns/op       398.01 MB/s (slice8)
BenchmarkCastagnoli40B-4           50000000     28.2 ns/op      1419.54 MB/s (asm)

BenchmarkStdNoAsmCastagnoli1KB-4     500000     3622 ns/op      282.67 MB/s
BenchmarkStdCastagnoli1KB-4        10000000     144 ns/op       7099.78 MB/s (asm)
BenchmarkCastagnoli1KBNoAsm-4       1000000     1475 ns/op      694.14 MB/s (slice8)
BenchmarkCastagnoli1KB-4           10000000     146 ns/op       6993.35 MB/s (asm)

BenchmarkStdNoAsmCastagnoli8KB-4      50000     28781 ns/op     284.63 MB/s
BenchmarkStdCastagnoli8KB-4         1000000     1029 ns/op      7957.89 MB/s (asm)
BenchmarkCastagnoli8KBNoAsm-4        200000     11410 ns/op     717.94 MB/s (slice8)
BenchmarkCastagnoli8KB-4            1000000     1000 ns/op      8188.71 MB/s (asm)

BenchmarkStdNoAsmCastagnoli32KB-4     10000     115426 ns/op    283.89 MB/s
BenchmarkStdCastagnoli32KB-4         300000     4065 ns/op      8059.13 MB/s (asm)
BenchmarkCastagnoli32KBNoAsm-4        30000     45171 ns/op     725.41 MB/s (slice8)
BenchmarkCastagnoli32KB-4            500000     4077 ns/op      8035.89 MB/s (asm)
```

The IEEE assembler optimizations have been submitted and will be part of the Go 1.6 standard library.

However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7.

# license

Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions.

207 vendor/github.com/klauspost/crc32/crc32.go (generated, vendored)
@@ -1,207 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32,
// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for
// information.
//
// Polynomials are represented in LSB-first form also known as reversed representation.
//
// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials
// for information.
package crc32

import (
	"hash"
	"sync"
)

// The size of a CRC-32 checksum in bytes.
const Size = 4

// Predefined polynomials.
const (
	// IEEE is by far and away the most common CRC-32 polynomial.
	// Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ...
	IEEE = 0xedb88320

	// Castagnoli's polynomial, used in iSCSI.
	// Has better error detection characteristics than IEEE.
	// http://dx.doi.org/10.1109/26.231911
	Castagnoli = 0x82f63b78

	// Koopman's polynomial.
	// Also has better error detection characteristics than IEEE.
	// http://dx.doi.org/10.1109/DSN.2002.1028931
	Koopman = 0xeb31d82e
)

// Table is a 256-word table representing the polynomial for efficient processing.
type Table [256]uint32

// This file makes use of functions implemented in architecture-specific files.
// The interface that they implement is as follows:
//
//    // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE
//    // algorithm is available.
//    archAvailableIEEE() bool
//
//    // archInitIEEE initializes the architecture-specific CRC32-IEEE algorithm.
//    // It can only be called if archAvailableIEEE() returns true.
//    archInitIEEE()
//
//    // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if
//    // archInitIEEE() was previously called.
//    archUpdateIEEE(crc uint32, p []byte) uint32
//
//    // archAvailableCastagnoli reports whether an architecture-specific
//    // CRC32-C algorithm is available.
//    archAvailableCastagnoli() bool
//
//    // archInitCastagnoli initializes the architecture-specific CRC32-C
//    // algorithm. It can only be called if archAvailableCastagnoli() returns
//    // true.
//    archInitCastagnoli()
//
//    // archUpdateCastagnoli updates the given CRC32-C. It can only be called
//    // if archInitCastagnoli() was previously called.
//    archUpdateCastagnoli(crc uint32, p []byte) uint32

// castagnoliTable points to a lazily initialized Table for the Castagnoli
// polynomial. MakeTable will always return this value when asked to make a
// Castagnoli table so we can compare against it to find when the caller is
// using this polynomial.
var castagnoliTable *Table
var castagnoliTable8 *slicing8Table
var castagnoliArchImpl bool
var updateCastagnoli func(crc uint32, p []byte) uint32
var castagnoliOnce sync.Once

func castagnoliInit() {
	castagnoliTable = simpleMakeTable(Castagnoli)
	castagnoliArchImpl = archAvailableCastagnoli()

	if castagnoliArchImpl {
		archInitCastagnoli()
		updateCastagnoli = archUpdateCastagnoli
	} else {
		// Initialize the slicing-by-8 table.
		castagnoliTable8 = slicingMakeTable(Castagnoli)
		updateCastagnoli = func(crc uint32, p []byte) uint32 {
			return slicingUpdate(crc, castagnoliTable8, p)
		}
	}
}

// IEEETable is the table for the IEEE polynomial.
var IEEETable = simpleMakeTable(IEEE)

// ieeeTable8 is the slicing8Table for IEEE
var ieeeTable8 *slicing8Table
var ieeeArchImpl bool
var updateIEEE func(crc uint32, p []byte) uint32
var ieeeOnce sync.Once

func ieeeInit() {
	ieeeArchImpl = archAvailableIEEE()

	if ieeeArchImpl {
		archInitIEEE()
		updateIEEE = archUpdateIEEE
	} else {
		// Initialize the slicing-by-8 table.
		ieeeTable8 = slicingMakeTable(IEEE)
		updateIEEE = func(crc uint32, p []byte) uint32 {
			return slicingUpdate(crc, ieeeTable8, p)
		}
	}
}

// MakeTable returns a Table constructed from the specified polynomial.
// The contents of this Table must not be modified.
func MakeTable(poly uint32) *Table {
	switch poly {
	case IEEE:
		ieeeOnce.Do(ieeeInit)
		return IEEETable
	case Castagnoli:
		castagnoliOnce.Do(castagnoliInit)
		return castagnoliTable
	}
	return simpleMakeTable(poly)
}

// digest represents the partial evaluation of a checksum.
type digest struct {
	crc uint32
	tab *Table
}

// New creates a new hash.Hash32 computing the CRC-32 checksum
// using the polynomial represented by the Table.
// Its Sum method will lay the value out in big-endian byte order.
func New(tab *Table) hash.Hash32 {
	if tab == IEEETable {
		ieeeOnce.Do(ieeeInit)
	}
	return &digest{0, tab}
}

// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum
// using the IEEE polynomial.
// Its Sum method will lay the value out in big-endian byte order.
func NewIEEE() hash.Hash32 { return New(IEEETable) }

func (d *digest) Size() int { return Size }

func (d *digest) BlockSize() int { return 1 }

func (d *digest) Reset() { d.crc = 0 }

// Update returns the result of adding the bytes in p to the crc.
func Update(crc uint32, tab *Table, p []byte) uint32 {
	switch tab {
	case castagnoliTable:
		return updateCastagnoli(crc, p)
	case IEEETable:
		// Unfortunately, because IEEETable is exported, IEEE may be used without a
		// call to MakeTable. We have to make sure it gets initialized in that case.
		ieeeOnce.Do(ieeeInit)
		return updateIEEE(crc, p)
	default:
		return simpleUpdate(crc, tab, p)
	}
}

func (d *digest) Write(p []byte) (n int, err error) {
	switch d.tab {
	case castagnoliTable:
		d.crc = updateCastagnoli(d.crc, p)
	case IEEETable:
		// We only create digest objects through New() which takes care of
		// initialization in this case.
		d.crc = updateIEEE(d.crc, p)
	default:
		d.crc = simpleUpdate(d.crc, d.tab, p)
	}
	return len(p), nil
}

func (d *digest) Sum32() uint32 { return d.crc }

func (d *digest) Sum(in []byte) []byte {
	s := d.Sum32()
	return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}

// Checksum returns the CRC-32 checksum of data
// using the polynomial represented by the Table.
func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) }

// ChecksumIEEE returns the CRC-32 checksum of data
// using the IEEE polynomial.
func ChecksumIEEE(data []byte) uint32 {
	ieeeOnce.Do(ieeeInit)
	return updateIEEE(0, data)
}
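
The file above implements the standard `hash.Hash32` surface (`New`, `Write`, `Sum32`) plus one-shot helpers. A minimal sketch of the streaming API, runnable against the standard library's `hash/crc32` since the package is a drop-in replacement:

```Go
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	// Streaming: New + Write accumulate the same checksum that the
	// one-shot ChecksumIEEE computes.
	h := crc32.NewIEEE()
	h.Write([]byte("hello "))
	h.Write([]byte("world"))

	one := crc32.ChecksumIEEE([]byte("hello world"))
	fmt.Println(h.Sum32() == one) // true
}
```
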
230 vendor/github.com/klauspost/crc32/crc32_amd64.go (generated, vendored)
@@ -1,230 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine,!gccgo

// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a
// description of the interface that each architecture-specific file
// implements.

package crc32

import "unsafe"

// This file contains the code to call the SSE 4.2 version of the Castagnoli
// and IEEE CRC.

// haveSSE41/haveSSE42/haveCLMUL are defined in crc32_amd64.s and use
// CPUID to test for SSE 4.1, 4.2 and CLMUL support.
func haveSSE41() bool
func haveSSE42() bool
func haveCLMUL() bool

// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32

// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42Triple(
	crcA, crcB, crcC uint32,
	a, b, c []byte,
	rounds uint32,
) (retA uint32, retB uint32, retC uint32)

// ieeeCLMUL is defined in crc32_amd64.s and uses the PCLMULQDQ
// instruction as well as SSE 4.1.
//go:noescape
func ieeeCLMUL(crc uint32, p []byte) uint32

var sse42 = haveSSE42()
var useFastIEEE = haveCLMUL() && haveSSE41()

const castagnoliK1 = 168
const castagnoliK2 = 1344

type sse42Table [4]Table

var castagnoliSSE42TableK1 *sse42Table
var castagnoliSSE42TableK2 *sse42Table

func archAvailableCastagnoli() bool {
	return sse42
}

func archInitCastagnoli() {
	if !sse42 {
		panic("arch-specific Castagnoli not available")
	}
	castagnoliSSE42TableK1 = new(sse42Table)
	castagnoliSSE42TableK2 = new(sse42Table)
	// See description in updateCastagnoli.
	//    t[0][i] = CRC(i000, O)
	//    t[1][i] = CRC(0i00, O)
	//    t[2][i] = CRC(00i0, O)
	//    t[3][i] = CRC(000i, O)
	// where O is a sequence of K zeros.
	var tmp [castagnoliK2]byte
	for b := 0; b < 4; b++ {
		for i := 0; i < 256; i++ {
			val := uint32(i) << uint32(b*8)
			castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1])
			castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:])
		}
	}
}

// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the
// table given) with the given initial crc value. This corresponds to
// CRC(crc, O) in the description in updateCastagnoli.
func castagnoliShift(table *sse42Table, crc uint32) uint32 {
	return table[3][crc>>24] ^
		table[2][(crc>>16)&0xFF] ^
		table[1][(crc>>8)&0xFF] ^
		table[0][crc&0xFF]
}

func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
	if !sse42 {
		panic("not available")
	}

	// This method is inspired from the algorithm in Intel's white paper:
	//    "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction"
	// The same strategy of splitting the buffer in three is used but the
	// combining calculation is different; the complete derivation is explained
	// below.
	//
	// -- The basic idea --
	//
	// The CRC32 instruction (available in SSE4.2) can process 8 bytes at a
	// time. In recent Intel architectures the instruction takes 3 cycles;
	// however the processor can pipeline up to three instructions if they
	// don't depend on each other.
	//
	// Roughly this means that we can process three buffers in about the same
	// time we can process one buffer.
	//
	// The idea is then to split the buffer in three, CRC the three pieces
	// separately and then combine the results.
	//
	// Combining the results requires precomputed tables, so we must choose a
	// fixed buffer length to optimize. The longer the length, the faster; but
	// only buffers longer than this length will use the optimization. We choose
	// two cutoffs and compute tables for both:
	//    - one around 512: 168*3=504
	//    - one around 4KB: 1344*3=4032
	//
	// -- The nitty gritty --
	//
	// Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with
	// initial non-inverted CRC I). This function has the following properties:
	//    (a) CRC(I, AB) = CRC(CRC(I, A), B)
	//    (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B)
	//
	// Say we want to compute CRC(I, ABC) where A, B, C are three sequences of
	// K bytes each, where K is a fixed constant. Let O be the sequence of K zero
	// bytes.
	//
	// CRC(I, ABC) = CRC(I, ABO xor C)
	//             = CRC(I, ABO) xor CRC(0, C)
	//             = CRC(CRC(I, AB), O) xor CRC(0, C)
	//             = CRC(CRC(I, AO xor B), O) xor CRC(0, C)
	//             = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C)
	//             = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C)
	//
	// The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B),
	// and CRC(0, C) efficiently. We just need to find a way to quickly compute
	// CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these
	// values; since we can't have a 32-bit table, we break it up into four
	// 8-bit tables:
	//
	//    CRC(uvwx, O) = CRC(u000, O) xor
	//                   CRC(0v00, O) xor
	//                   CRC(00w0, O) xor
	//                   CRC(000x, O)
	//
	// We can compute tables corresponding to the four terms for all 8-bit
	// values.

	crc = ^crc

	// If a buffer is long enough to use the optimization, process the first few
	// bytes to align the buffer to an 8 byte boundary (if necessary).
	if len(p) >= castagnoliK1*3 {
		delta := int(uintptr(unsafe.Pointer(&p[0])) & 7)
		if delta != 0 {
			delta = 8 - delta
			crc = castagnoliSSE42(crc, p[:delta])
			p = p[delta:]
		}
	}

	// Process 3*K2 at a time.
	for len(p) >= castagnoliK2*3 {
		// Compute CRC(I, A), CRC(0, B), and CRC(0, C).
		crcA, crcB, crcC := castagnoliSSE42Triple(
			crc, 0, 0,
			p, p[castagnoliK2:], p[castagnoliK2*2:],
			castagnoliK2/24)

		// CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
		crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB
		// CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
		crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC
		p = p[castagnoliK2*3:]
	}

	// Process 3*K1 at a time.
	for len(p) >= castagnoliK1*3 {
		// Compute CRC(I, A), CRC(0, B), and CRC(0, C).
		crcA, crcB, crcC := castagnoliSSE42Triple(
			crc, 0, 0,
			p, p[castagnoliK1:], p[castagnoliK1*2:],
			castagnoliK1/24)

		// CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
		crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB
		// CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
		crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC
		p = p[castagnoliK1*3:]
	}

	// Use the simple implementation for what's left.
	crc = castagnoliSSE42(crc, p)
	return ^crc
}

func archAvailableIEEE() bool {
	return useFastIEEE
}

var archIeeeTable8 *slicing8Table

func archInitIEEE() {
	if !useFastIEEE {
		panic("not available")
	}
	// We still use slicing-by-8 for small buffers.
	archIeeeTable8 = slicingMakeTable(IEEE)
}

func archUpdateIEEE(crc uint32, p []byte) uint32 {
	if !useFastIEEE {
		panic("not available")
	}

	if len(p) >= 64 {
		left := len(p) & 15
		do := len(p) - left
		crc = ^ieeeCLMUL(^crc, p[:do])
		p = p[do:]
	}
	if len(p) == 0 {
		return crc
	}
	return slicingUpdate(crc, archIeeeTable8, p)
}

319 vendor/github.com/klauspost/crc32/crc32_amd64.s (generated, vendored)
@@ -1,319 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build gc

#define NOSPLIT 4
#define RODATA 8

// castagnoliSSE42 updates the (non-inverted) crc with the given buffer.
//
// func castagnoliSSE42(crc uint32, p []byte) uint32
TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
	MOVL crc+0(FP), AX    // CRC value
	MOVQ p+8(FP), SI      // data pointer
	MOVQ p_len+16(FP), CX // len(p)

	// If there are fewer than 8 bytes to process, skip alignment.
	CMPQ CX, $8
	JL less_than_8

	MOVQ SI, BX
	ANDQ $7, BX
	JZ aligned

	// Process the first few bytes to 8-byte align the input.

	// BX = 8 - BX. We need to process this many bytes to align.
	SUBQ $1, BX
	XORQ $7, BX

	BTQ $0, BX
	JNC align_2

	CRC32B (SI), AX
	DECQ CX
	INCQ SI

align_2:
	BTQ $1, BX
	JNC align_4

	// CRC32W (SI), AX
	BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06

	SUBQ $2, CX
	ADDQ $2, SI

align_4:
	BTQ $2, BX
	JNC aligned

	// CRC32L (SI), AX
	BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06

	SUBQ $4, CX
	ADDQ $4, SI

aligned:
	// The input is now 8-byte aligned and we can process 8-byte chunks.
	CMPQ CX, $8
	JL less_than_8

	CRC32Q (SI), AX
	ADDQ $8, SI
	SUBQ $8, CX
	JMP aligned

less_than_8:
	// We may have some bytes left over; process 4 bytes, then 2, then 1.
	BTQ $2, CX
	JNC less_than_4

	// CRC32L (SI), AX
	BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
	ADDQ $4, SI

less_than_4:
	BTQ $1, CX
	JNC less_than_2

	// CRC32W (SI), AX
	BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
	ADDQ $2, SI

less_than_2:
	BTQ $0, CX
	JNC done

	CRC32B (SI), AX

done:
	MOVL AX, ret+32(FP)
	RET

// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds)
// bytes from each buffer.
//
// func castagnoliSSE42Triple(
//     crc1, crc2, crc3 uint32,
//     a, b, c []byte,
//     rounds uint32,
// ) (retA uint32, retB uint32, retC uint32)
TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0
	MOVL crcA+0(FP), AX
	MOVL crcB+4(FP), CX
	MOVL crcC+8(FP), DX

	MOVQ a+16(FP), R8  // data pointer
	MOVQ b+40(FP), R9  // data pointer
	MOVQ c+64(FP), R10 // data pointer

	MOVL rounds+88(FP), R11

loop:
	CRC32Q (R8), AX
	CRC32Q (R9), CX
	CRC32Q (R10), DX

	CRC32Q 8(R8), AX
	CRC32Q 8(R9), CX
	CRC32Q 8(R10), DX

	CRC32Q 16(R8), AX
	CRC32Q 16(R9), CX
	CRC32Q 16(R10), DX

	ADDQ $24, R8
	ADDQ $24, R9
	ADDQ $24, R10

	DECQ R11
	JNZ loop

	MOVL AX, retA+96(FP)
	MOVL CX, retB+100(FP)
	MOVL DX, retC+104(FP)
	RET

// func haveSSE42() bool
TEXT ·haveSSE42(SB), NOSPLIT, $0
	XORQ AX, AX
	INCL AX
	CPUID
	SHRQ $20, CX
	ANDQ $1, CX
	MOVB CX, ret+0(FP)
	RET

// func haveCLMUL() bool
TEXT ·haveCLMUL(SB), NOSPLIT, $0
	XORQ AX, AX
	INCL AX
	CPUID
	SHRQ $1, CX
	ANDQ $1, CX
	MOVB CX, ret+0(FP)
	RET

// func haveSSE41() bool
TEXT ·haveSSE41(SB), NOSPLIT, $0
	XORQ AX, AX
	INCL AX
	CPUID
	SHRQ $19, CX
	ANDQ $1, CX
	MOVB CX, ret+0(FP)
	RET

// CRC32 polynomial data
//
// These constants are lifted from the
// Linux kernel, since they avoid the costly
// PSHUFB 16 byte reversal proposed in the
// original Intel paper.
DATA r2r1kp<>+0(SB)/8, $0x154442bd4
DATA r2r1kp<>+8(SB)/8, $0x1c6e41596
DATA r4r3kp<>+0(SB)/8, $0x1751997d0
DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e
DATA rupolykp<>+0(SB)/8, $0x1db710641
DATA rupolykp<>+8(SB)/8, $0x1f7011641
DATA r5kp<>+0(SB)/8, $0x163cd6124

GLOBL r2r1kp<>(SB), RODATA, $16
GLOBL r4r3kp<>(SB), RODATA, $16
GLOBL rupolykp<>(SB), RODATA, $16
GLOBL r5kp<>(SB), RODATA, $8

// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
// len(p) must be at least 64, and must be a multiple of 16.

// func ieeeCLMUL(crc uint32, p []byte) uint32
TEXT ·ieeeCLMUL(SB), NOSPLIT, $0
	MOVL crc+0(FP), X0    // Initial CRC value
	MOVQ p+8(FP), SI      // data pointer
	MOVQ p_len+16(FP), CX // len(p)

	MOVOU (SI), X1
	MOVOU 16(SI), X2
	MOVOU 32(SI), X3
	MOVOU 48(SI), X4
	PXOR X0, X1
	ADDQ $64, SI // buf+=64
	SUBQ $64, CX // len-=64
	CMPQ CX, $64 // Less than 64 bytes left
	JB remain64

	MOVOA r2r1kp<>+0(SB), X0

loopback64:
	MOVOA X1, X5
	MOVOA X2, X6
	MOVOA X3, X7
	MOVOA X4, X8

	PCLMULQDQ $0, X0, X1
	PCLMULQDQ $0, X0, X2
	PCLMULQDQ $0, X0, X3
	PCLMULQDQ $0, X0, X4

	// Load next early
	MOVOU (SI), X11
	MOVOU 16(SI), X12
	MOVOU 32(SI), X13
	MOVOU 48(SI), X14

	PCLMULQDQ $0x11, X0, X5
	PCLMULQDQ $0x11, X0, X6
	PCLMULQDQ $0x11, X0, X7
	PCLMULQDQ $0x11, X0, X8

	PXOR X5, X1
	PXOR X6, X2
	PXOR X7, X3
	PXOR X8, X4

	PXOR X11, X1
	PXOR X12, X2
	PXOR X13, X3
	PXOR X14, X4

	ADDQ $0x40, DI
	ADDQ $64, SI // buf+=64
	SUBQ $64, CX // len-=64
	CMPQ CX, $64 // Less than 64 bytes left?
	JGE loopback64

// Fold result into a single register (X1)
remain64:
	MOVOA r4r3kp<>+0(SB), X0

	MOVOA X1, X5
	PCLMULQDQ $0, X0, X1
	PCLMULQDQ $0x11, X0, X5
	PXOR X5, X1
	PXOR X2, X1

	MOVOA X1, X5
	PCLMULQDQ $0, X0, X1
	PCLMULQDQ $0x11, X0, X5
	PXOR X5, X1
	PXOR X3, X1

	MOVOA X1, X5
	PCLMULQDQ $0, X0, X1
	PCLMULQDQ $0x11, X0, X5
	PXOR X5, X1
	PXOR X4, X1

	// If there is less than 16 bytes left we are done
	CMPQ CX, $16
	JB finish

// Encode 16 bytes
remain16:
	MOVOU (SI), X10
	MOVOA X1, X5
	PCLMULQDQ $0, X0, X1
	PCLMULQDQ $0x11, X0, X5
	PXOR X5, X1
	PXOR X10, X1
	SUBQ $16, CX
	ADDQ $16, SI
	CMPQ CX, $16
	JGE remain16

finish:
	// Fold final result into 32 bits and return it
	PCMPEQB X3, X3
	PCLMULQDQ $1, X1, X0
	PSRLDQ $8, X1
	PXOR X0, X1

	MOVOA X1, X2
	MOVQ r5kp<>+0(SB), X0

	// Creates 32 bit mask. Note that we don't care about upper half.
	PSRLQ $32, X3

	PSRLDQ $4, X2
	PAND X3, X1
	PCLMULQDQ $0, X0, X1
	PXOR X2, X1

	MOVOA rupolykp<>+0(SB), X0

	MOVOA X1, X2
	PAND X3, X1
	PCLMULQDQ $0x10, X0, X1
	PAND X3, X1
	PCLMULQDQ $0, X0, X1
	PXOR X2, X1

	// PEXTRD $1, X1, AX (SSE 4.1)
	BYTE $0x66; BYTE $0x0f; BYTE $0x3a
	BYTE $0x16; BYTE $0xc8; BYTE $0x01
	MOVL AX, ret+32(FP)

	RET

43 vendor/github.com/klauspost/crc32/crc32_amd64p32.go (generated, vendored)
@@ -1,43 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine,!gccgo

package crc32

// This file contains the code to call the SSE 4.2 version of the Castagnoli
// CRC.

// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for SSE 4.2
// support.
func haveSSE42() bool

// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32

var sse42 = haveSSE42()

func archAvailableCastagnoli() bool {
	return sse42
}

func archInitCastagnoli() {
	if !sse42 {
		panic("not available")
	}
	// No initialization necessary.
}

func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
	if !sse42 {
		panic("not available")
	}
	return castagnoliSSE42(crc, p)
}

func archAvailableIEEE() bool                    { return false }
func archInitIEEE()                              { panic("not available") }
func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") }

67 vendor/github.com/klauspost/crc32/crc32_amd64p32.s (generated, vendored)
@@ -1,67 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build gc

#define NOSPLIT 4
#define RODATA 8

// func castagnoliSSE42(crc uint32, p []byte) uint32
TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
	MOVL crc+0(FP), AX   // CRC value
	MOVL p+4(FP), SI     // data pointer
	MOVL p_len+8(FP), CX // len(p)

	NOTL AX

	// If there's less than 8 bytes to process, we do it byte-by-byte.
	CMPQ CX, $8
	JL cleanup

	// Process individual bytes until the input is 8-byte aligned.
startup:
	MOVQ SI, BX
	ANDQ $7, BX
	JZ aligned

	CRC32B (SI), AX
	DECQ CX
	INCQ SI
	JMP startup

aligned:
	// The input is now 8-byte aligned and we can process 8-byte chunks.
	CMPQ CX, $8
	JL cleanup

	CRC32Q (SI), AX
	ADDQ $8, SI
	SUBQ $8, CX
	JMP aligned

cleanup:
	// We may have some bytes left over that we process one at a time.
	CMPQ CX, $0
	JE done

	CRC32B (SI), AX
	INCQ SI
	DECQ CX
	JMP cleanup

done:
	NOTL AX
	MOVL AX, ret+16(FP)
	RET

// func haveSSE42() bool
TEXT ·haveSSE42(SB), NOSPLIT, $0
	XORQ AX, AX
	INCL AX
	CPUID
	SHRQ $20, CX
	ANDQ $1, CX
	MOVB CX, ret+0(FP)
	RET

89
vendor/github.com/klauspost/crc32/crc32_generic.go
generated
vendored
89
vendor/github.com/klauspost/crc32/crc32_generic.go
generated
vendored
@@ -1,89 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file contains CRC32 algorithms that are not specific to any architecture
// and don't use hardware acceleration.
//
// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table.
//
// The slicing-by-8 algorithm is a faster implementation that uses a bigger
// table (8*256*4 bytes).

package crc32

// simpleMakeTable allocates and constructs a Table for the specified
// polynomial. The table is suitable for use with the simple algorithm
// (simpleUpdate).
func simpleMakeTable(poly uint32) *Table {
	t := new(Table)
	simplePopulateTable(poly, t)
	return t
}

// simplePopulateTable constructs a Table for the specified polynomial, suitable
// for use with simpleUpdate.
func simplePopulateTable(poly uint32, t *Table) {
	for i := 0; i < 256; i++ {
		crc := uint32(i)
		for j := 0; j < 8; j++ {
			if crc&1 == 1 {
				crc = (crc >> 1) ^ poly
			} else {
				crc >>= 1
			}
		}
		t[i] = crc
	}
}

// simpleUpdate uses the simple algorithm to update the CRC, given a table that
// was previously computed using simpleMakeTable.
func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 {
	crc = ^crc
	for _, v := range p {
		crc = tab[byte(crc)^v] ^ (crc >> 8)
	}
	return ^crc
}

// Use slicing-by-8 when payload >= this value.
const slicing8Cutoff = 16

// slicing8Table is an array of 8 Tables, used by the slicing-by-8 algorithm.
type slicing8Table [8]Table

// slicingMakeTable constructs a slicing8Table for the specified polynomial. The
// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate).
func slicingMakeTable(poly uint32) *slicing8Table {
	t := new(slicing8Table)
	simplePopulateTable(poly, &t[0])
	for i := 0; i < 256; i++ {
		crc := t[0][i]
		for j := 1; j < 8; j++ {
			crc = t[0][crc&0xFF] ^ (crc >> 8)
			t[j][i] = crc
		}
	}
	return t
}

// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a
// table that was previously computed using slicingMakeTable.
func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 {
	if len(p) >= slicing8Cutoff {
		crc = ^crc
		for len(p) > 8 {
			crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
			crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^
				tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^
				tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF]
			p = p[8:]
		}
		crc = ^crc
	}
	if len(p) == 0 {
		return crc
	}
	return simpleUpdate(crc, &tab[0], p)
}
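As a quick illustration of how the two algorithms relate (a minimal sketch; `0xedb88320` is the bit-reflected IEEE polynomial, the same value `hash/crc32` uses for its `IEEE` constant):

```go
tab := simpleMakeTable(0xedb88320)
crc1 := simpleUpdate(0, tab, []byte("hello, crc"))

tab8 := slicingMakeTable(0xedb88320)
crc2 := slicingUpdate(0, tab8, []byte("hello, crc"))
// crc1 == crc2 for any input; slicingUpdate simply falls back to
// simpleUpdate for payloads shorter than slicing8Cutoff (16 bytes).
```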

15 vendor/github.com/klauspost/crc32/crc32_otherarch.go generated vendored
@@ -1,15 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !amd64,!amd64p32,!s390x

package crc32

func archAvailableIEEE() bool                    { return false }
func archInitIEEE()                              { panic("not available") }
func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") }

func archAvailableCastagnoli() bool                    { return false }
func archInitCastagnoli()                              { panic("not available") }
func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") }

91 vendor/github.com/klauspost/crc32/crc32_s390x.go generated vendored
@@ -1,91 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build s390x

package crc32

const (
	vxMinLen    = 64
	vxAlignMask = 15 // align to 16 bytes
)

// hasVectorFacility reports whether the machine has the z/Architecture
// vector facility installed and enabled.
func hasVectorFacility() bool

var hasVX = hasVectorFacility()

// vectorizedCastagnoli implements CRC32 using vector instructions.
// It is defined in crc32_s390x.s.
//go:noescape
func vectorizedCastagnoli(crc uint32, p []byte) uint32

// vectorizedIEEE implements CRC32 using vector instructions.
// It is defined in crc32_s390x.s.
//go:noescape
func vectorizedIEEE(crc uint32, p []byte) uint32

func archAvailableCastagnoli() bool {
	return hasVX
}

var archCastagnoliTable8 *slicing8Table

func archInitCastagnoli() {
	if !hasVX {
		panic("not available")
	}
	// We still use slicing-by-8 for small buffers.
	archCastagnoliTable8 = slicingMakeTable(Castagnoli)
}

// archUpdateCastagnoli calculates the checksum of p using
// vectorizedCastagnoli.
func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
	if !hasVX {
		panic("not available")
	}
	// Use vectorized function if data length is above threshold.
	if len(p) >= vxMinLen {
		aligned := len(p) & ^vxAlignMask
		crc = vectorizedCastagnoli(crc, p[:aligned])
		p = p[aligned:]
	}
	if len(p) == 0 {
		return crc
	}
	return slicingUpdate(crc, archCastagnoliTable8, p)
}

func archAvailableIEEE() bool {
	return hasVX
}

var archIeeeTable8 *slicing8Table

func archInitIEEE() {
	if !hasVX {
		panic("not available")
	}
	// We still use slicing-by-8 for small buffers.
	archIeeeTable8 = slicingMakeTable(IEEE)
}

// archUpdateIEEE calculates the checksum of p using vectorizedIEEE.
func archUpdateIEEE(crc uint32, p []byte) uint32 {
	if !hasVX {
		panic("not available")
	}
	// Use vectorized function if data length is above threshold.
	if len(p) >= vxMinLen {
		aligned := len(p) & ^vxAlignMask
		crc = vectorizedIEEE(crc, p[:aligned])
		p = p[aligned:]
	}
	if len(p) == 0 {
		return crc
	}
	return slicingUpdate(crc, archIeeeTable8, p)
}
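The alignment arithmetic in the two update functions is worth spelling out; a minimal sketch with illustrative numbers:

```go
const vxAlignMask = 15 // as above: align to 16 bytes

n := 100
aligned := n & ^vxAlignMask // 96: the largest multiple of 16 not exceeding n
tail := n - aligned         // 4 trailing bytes handled by slicingUpdate
```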

249 vendor/github.com/klauspost/crc32/crc32_s390x.s generated vendored
@@ -1,249 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build s390x

#include "textflag.h"

// Vector register range containing CRC-32 constants

#define CONST_PERM_LE2BE V9
#define CONST_R2R1       V10
#define CONST_R4R3       V11
#define CONST_R5         V12
#define CONST_RU_POLY    V13
#define CONST_CRC_POLY   V14

// The CRC-32 constant block contains reduction constants to fold and
// process particular chunks of the input data stream in parallel.
//
// Note that the constant definitions below are extended in order to compute
// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction.
// The rightmost doubleword can be 0 to prevent contribution to the result or
// can be multiplied by 1 to perform an XOR without the need for a separate
// VECTOR EXCLUSIVE OR instruction.
//
// The polynomials used are bit-reflected:
//
//   IEEE: P'(x) = 0x0edb88320
//   Castagnoli: P'(x) = 0x082f63b78

// IEEE polynomial constants
DATA ·crcleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask
DATA ·crcleconskp+8(SB)/8, $0x0706050403020100
DATA ·crcleconskp+16(SB)/8, $0x00000001c6e41596 // R2
DATA ·crcleconskp+24(SB)/8, $0x0000000154442bd4 // R1
DATA ·crcleconskp+32(SB)/8, $0x00000000ccaa009e // R4
DATA ·crcleconskp+40(SB)/8, $0x00000001751997d0 // R3
DATA ·crcleconskp+48(SB)/8, $0x0000000000000000
DATA ·crcleconskp+56(SB)/8, $0x0000000163cd6124 // R5
DATA ·crcleconskp+64(SB)/8, $0x0000000000000000
DATA ·crcleconskp+72(SB)/8, $0x00000001F7011641 // u'
DATA ·crcleconskp+80(SB)/8, $0x0000000000000000
DATA ·crcleconskp+88(SB)/8, $0x00000001DB710641 // P'(x) << 1

GLOBL ·crcleconskp(SB), RODATA, $144

// Castagnoli polynomial constants
DATA ·crccleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask
DATA ·crccleconskp+8(SB)/8, $0x0706050403020100
DATA ·crccleconskp+16(SB)/8, $0x000000009e4addf8 // R2
DATA ·crccleconskp+24(SB)/8, $0x00000000740eef02 // R1
DATA ·crccleconskp+32(SB)/8, $0x000000014cd00bd6 // R4
DATA ·crccleconskp+40(SB)/8, $0x00000000f20c0dfe // R3
DATA ·crccleconskp+48(SB)/8, $0x0000000000000000
DATA ·crccleconskp+56(SB)/8, $0x00000000dd45aab8 // R5
DATA ·crccleconskp+64(SB)/8, $0x0000000000000000
DATA ·crccleconskp+72(SB)/8, $0x00000000dea713f1 // u'
DATA ·crccleconskp+80(SB)/8, $0x0000000000000000
DATA ·crccleconskp+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1

GLOBL ·crccleconskp(SB), RODATA, $144

// func hasVectorFacility() bool
TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
	MOVD  $x-24(SP), R1
	XC    $24, 0(R1), 0(R1) // clear the storage
	MOVD  $2, R0            // R0 is the number of double words stored -1
	WORD  $0xB2B01000       // STFLE 0(R1)
	XOR   R0, R0            // reset the value of R0
	MOVBZ z-8(SP), R1
	AND   $0x40, R1
	BEQ   novector

vectorinstalled:
	// check if the vector instruction has been enabled
	VLEIB  $0, $0xF, V16
	VLGVB  $0, V16, R1
	CMPBNE R1, $0xF, novector
	MOVB   $1, ret+0(FP) // have vx
	RET

novector:
	MOVB $0, ret+0(FP) // no vx
	RET

// The CRC-32 function(s) use these calling conventions:
//
// Parameters:
//
//   R2: Initial CRC value, typically ~0; and final CRC (return) value.
//   R3: Input buffer pointer, performance might be improved if the
//       buffer is on a doubleword boundary.
//   R4: Length of the buffer, must be 64 bytes or greater.
//
// Register usage:
//
//   R5: CRC-32 constant pool base pointer.
//   V0: Initial CRC value and intermediate constants and results.
//   V1..V4: Data for CRC computation.
//   V5..V8: Next data chunks that are fetched from the input buffer.
//
//   V9..V14: CRC-32 constants.

// func vectorizedIEEE(crc uint32, p []byte) uint32
TEXT ·vectorizedIEEE(SB), NOSPLIT, $0
	MOVWZ crc+0(FP), R2    // R2 stores the CRC value
	MOVD  p+8(FP), R3      // data pointer
	MOVD  p_len+16(FP), R4 // len(p)

	MOVD $·crcleconskp(SB), R5
	BR   vectorizedBody<>(SB)

// func vectorizedCastagnoli(crc uint32, p []byte) uint32
TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0
	MOVWZ crc+0(FP), R2    // R2 stores the CRC value
	MOVD  p+8(FP), R3      // data pointer
	MOVD  p_len+16(FP), R4 // len(p)

	// R5: crc-32 constant pool base pointer, constant is used to reduce crc
	MOVD $·crccleconskp(SB), R5
	BR   vectorizedBody<>(SB)

TEXT vectorizedBody<>(SB), NOSPLIT, $0
	XOR $0xffffffff, R2 // NOTW R2
	VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY

	// Load the initial CRC value into the rightmost word of V0
	VZERO V0
	VLVGF $3, R2, V0

	// Crash if the input size is less than 64 bytes.
	CMP R4, $64
	BLT crash

	// Load a 64-byte data chunk and XOR with CRC
	VLM 0(R3), V1, V4 // 64 bytes into V1..V4

	// Reflect the data if the CRC operation is in the bit-reflected domain
	VPERM V1, V1, CONST_PERM_LE2BE, V1
	VPERM V2, V2, CONST_PERM_LE2BE, V2
	VPERM V3, V3, CONST_PERM_LE2BE, V3
	VPERM V4, V4, CONST_PERM_LE2BE, V4

	VX  V0, V1, V1 // V1 ^= CRC
	ADD $64, R3    // BUF = BUF + 64
	ADD $(-64), R4

	// Check remaining buffer size and jump to proper folding method
	CMP R4, $64
	BLT less_than_64bytes

fold_64bytes_loop:
	// Load the next 64-byte data chunk into V5 to V8
	VLM   0(R3), V5, V8
	VPERM V5, V5, CONST_PERM_LE2BE, V5
	VPERM V6, V6, CONST_PERM_LE2BE, V6
	VPERM V7, V7, CONST_PERM_LE2BE, V7
	VPERM V8, V8, CONST_PERM_LE2BE, V8

	// Perform a GF(2) multiplication of the doublewords in V1 with
	// the reduction constants in V0. The intermediate result is
	// then folded (accumulated) with the next data chunk in V5 and
	// stored in V1. Repeat this step for the register contents
	// in V2, V3, and V4 respectively.

	VGFMAG CONST_R2R1, V1, V5, V1
	VGFMAG CONST_R2R1, V2, V6, V2
	VGFMAG CONST_R2R1, V3, V7, V3
	VGFMAG CONST_R2R1, V4, V8, V4

	// Adjust buffer pointer and length for next loop
	ADD $64, R3    // BUF = BUF + 64
	ADD $(-64), R4 // LEN = LEN - 64

	CMP R4, $64
	BGE fold_64bytes_loop

less_than_64bytes:
	// Fold V1 to V4 into a single 128-bit value in V1
	VGFMAG CONST_R4R3, V1, V2, V1
	VGFMAG CONST_R4R3, V1, V3, V1
	VGFMAG CONST_R4R3, V1, V4, V1

	// Check whether to continue with 64-bit folding
	CMP R4, $16
	BLT final_fold

fold_16bytes_loop:
	VL    0(R3), V2 // Load next data chunk
	VPERM V2, V2, CONST_PERM_LE2BE, V2

	VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk

	// Adjust buffer pointer and size for folding next data chunk
	ADD $16, R3
	ADD $-16, R4

	// Process remaining data chunks
	CMP R4, $16
	BGE fold_16bytes_loop

final_fold:
	VLEIB $7, $0x40, V9
	VSRLB V9, CONST_R4R3, V0
	VLEIG $0, $1, V0

	VGFMG V0, V1, V1

	VLEIB  $7, $0x20, V9        // Shift by words
	VSRLB  V9, V1, V2           // Store remaining bits in V2
	VUPLLF V1, V1               // Split rightmost doubleword
	VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2

	// The input values to the Barrett reduction are the degree-63 polynomial
	// in V1 (R(x)), the degree-32 generator polynomial, and the reduction
	// constant u. The Barrett reduction result is the CRC value of R(x) mod
	// P(x).
	//
	// The Barrett reduction algorithm is defined as:
	//
	//    1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
	//    2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
	//    3. C(x)  = R(x) XOR T2(x) mod x^32
	//
	// Note: To compensate the division by x^32, use the vector unpack
	// instruction to move the leftmost word into the leftmost doubleword
	// of the vector register. The rightmost doubleword is multiplied
	// with zero to not contribute to the intermediate results.

	// T1(x) = floor( R(x) / x^32 ) GF2MUL u
	VUPLLF V1, V2
	VGFMG  CONST_RU_POLY, V2, V2

	// Compute the GF(2) product of the CRC polynomial in V0 with T1(x) in
	// V2 and XOR the intermediate result, T2(x), with the value in V1.
	// The final result is in the rightmost word of V2.

	VUPLLF V2, V2
	VGFMAG CONST_CRC_POLY, V2, V1, V2

done:
	VLGVF $2, V2, R2
	XOR   $0xffffffff, R2 // NOTW R2
	MOVWZ R2, ret+32(FP)
	RET

crash:
	MOVD $0, (R0) // input size is less than 64 bytes

22 vendor/github.com/qiangxue/fasthttp-routing/LICENSE generated vendored
@@ -1,22 +0,0 @@
The BSD 3-Clause License

Copyright (c) 2016, Qiang Xue
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided
that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions
   and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
   the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
   promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

244 vendor/github.com/qiangxue/fasthttp-routing/README.md generated vendored
@@ -1,244 +0,0 @@
# fasthttp-routing

[GoDoc](http://godoc.org/github.com/qiangxue/fasthttp-routing)
[Go Report Card](http://goreportcard.com/report/qiangxue/fasthttp-routing)

## Description

fasthttp-routing is a Go package that is adapted from [ozzo-routing](https://github.com/go-ozzo/ozzo-routing) to provide
fast and powerful routing features for the high-performance [fasthttp](https://github.com/valyala/fasthttp) server.
The package has the following features:

* middleware pipeline architecture, similar to that of the [Express framework](http://expressjs.com)
* extremely fast request routing with zero dynamic memory allocation
* modular code organization through route grouping
* flexible URL path matching, supporting URL parameters and regular expressions
* URL creation according to the predefined routes

## Requirements

Go 1.5 or above.

## Installation

Run the following command to install the package:

```
go get github.com/qiangxue/fasthttp-routing
```

## Getting Started

Create a `server.go` file with the following content:

```go
package main

import (
	"fmt"

	"github.com/qiangxue/fasthttp-routing"
	"github.com/valyala/fasthttp"
)

func main() {
	router := routing.New()

	router.Get("/", func(c *routing.Context) error {
		fmt.Fprintf(c, "Hello, world!")
		return nil
	})

	panic(fasthttp.ListenAndServe(":8080", router.HandleRequest))
}
```

Now run the following command to start the Web server:

```
go run server.go
```

You should be able to access URLs such as `http://localhost:8080`.


### Routes

fasthttp-routing works by building a routing table in a router and then dispatching HTTP requests to the matching handlers
found in the routing table. An intuitive illustration of a routing table is as follows:

Routes               | Handlers
---------------------|-----------------
`GET /users`         | m1, m2, h1, ...
`POST /users`        | m1, m2, h2, ...
`PUT /users/<id>`    | m1, m2, h3, ...
`DELETE /users/<id>` | m1, m2, h4, ...

For an incoming request `GET /users`, the first route would match and the handlers m1, m2, and h1 would be executed.
If the request is `PUT /users/123`, the third route would match and the corresponding handlers would be executed.
Note that the token `<id>` can match any number of non-slash characters and the matching part can be accessed as
a path parameter value in the handlers.

**If an incoming request matches multiple routes in the table, the route added first to the table will take precedence.
All other matching routes will be ignored.**

The actual implementation of the routing table uses a variant of the radix tree data structure, which makes the routing
process as fast as working with a hash table, thanks to the inspiration from [httprouter](https://github.com/julienschmidt/httprouter).

To add a new route and its handlers to the routing table, call the `To` method like the following:

```go
router := routing.New()
router.To("GET", "/users", m1, m2, h1)
router.To("POST", "/users", m1, m2, h2)
```

You can also use shortcut methods, such as `Get`, `Post`, `Put`, etc., which are named after the HTTP method names:

```go
router.Get("/users", m1, m2, h1)
router.Post("/users", m1, m2, h2)
```

If you have multiple routes with the same URL path but different HTTP methods, like the above example, you can
chain them together as follows:

```go
router.Get("/users", m1, m2, h1).Post(m1, m2, h2)
```

If you want to use the same set of handlers to handle the same URL path but different HTTP methods, you can take
the following shortcut:

```go
router.To("GET,POST", "/users", m1, m2, h)
```

A route may contain parameter tokens which are in the format of `<name:pattern>`, where `name` stands for the parameter
name, and `pattern` is a regular expression which the parameter value should match. A token `<name>` is equivalent
to `<name:[^/]*>`, i.e., it matches any number of non-slash characters. At the end of a route, an asterisk character
can be used to match any number of arbitrary characters. Below are some examples:

* `/users/<username>`: matches `/users/admin`
* `/users/accnt-<id:\d+>`: matches `/users/accnt-123`, but not `/users/accnt-admin`
* `/users/<username>/*`: matches `/users/admin/profile/address`

When a URL path matches a route, the matching parameters on the URL path can be accessed via `Context.Param()`:

```go
router := routing.New()

router.Get("/users/<username>", func(c *routing.Context) error {
	fmt.Fprintf(c, "Name: %v", c.Param("username"))
	return nil
})
```


### Route Groups

A route group is a way of grouping together routes which have the same route prefix. The routes in a group also
share the same handlers that are registered with the group via its `Use` method. For example:

```go
router := routing.New()
api := router.Group("/api")
api.Use(m1, m2)
api.Get("/users", h1).Post(h2)
api.Put("/users/<id>", h3).Delete(h4)
```

The above `/api` route group establishes the following routing table:

Routes                   | Handlers
-------------------------|-------------
`GET /api/users`         | m1, m2, h1, ...
`POST /api/users`        | m1, m2, h2, ...
`PUT /api/users/<id>`    | m1, m2, h3, ...
`DELETE /api/users/<id>` | m1, m2, h4, ...

As you can see, all these routes have the same route prefix `/api` and the handlers `m1` and `m2`. In other similar
routing frameworks, the handlers registered with a route group are also called *middlewares*.

Route groups can be nested. That is, a route group can create a child group by calling the `Group()` method. The router
serves as the top-level route group. A child group inherits the handlers registered with its parent group. For example:

```go
router := routing.New()
router.Use(m1)

api := router.Group("/api")
api.Use(m2)

users := api.Group("/users")
users.Use(m3)
users.Put("/<id>", h1)
```

Because the router serves as the parent of the `api` group, which is the parent of the `users` group,
the `PUT /api/users/<id>` route is associated with the handlers `m1`, `m2`, `m3`, and `h1`.


### Router

Router manages the routing table and dispatches incoming requests to the appropriate handlers. A router instance is created
by calling the `routing.New()` method.

To hook the router up with fasthttp, use the following code:

```go
router := routing.New()
fasthttp.ListenAndServe(":8080", router.HandleRequest)
```


### Handlers

A handler is a function with the signature `func(*routing.Context) error`. A handler is executed by the router if
the incoming request URL path matches the route that the handler is associated with. Through the `routing.Context`
parameter, you can access the request information in handlers.

A route may be associated with multiple handlers. These handlers will be executed in the order that they are registered
to the route. The execution sequence can be terminated in the middle using one of the following two methods:

* A handler returns an error: the router will skip the rest of the handlers and handle the returned error.
* A handler calls `Context.Abort()`: the router will simply skip the rest of the handlers. There is no error to be handled.

A handler can call `Context.Next()` to explicitly execute the rest of the unexecuted handlers and take actions after
they finish execution. For example, a response compression handler may start the output buffer, call `Context.Next()`,
and then compress and send the output to the response, as sketched below.
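Here is a minimal sketch of such a post-processing handler. It is hypothetical (a timing middleware rather than compression, and it assumes the standard `time` package is imported); only `Context.Next()` and the embedded fasthttp response header are used:

```go
// elapsedTime runs the remaining handlers via c.Next(), then stamps the
// response with how long they took.
func elapsedTime(c *routing.Context) error {
	start := time.Now()
	err := c.Next() // execute the rest of the handler chain
	c.Response.Header.Set("X-Elapsed", time.Since(start).String())
	return err
}
```

It would be registered like any other handler, e.g. `router.Use(elapsedTime)`.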

### Context

For each incoming request, a `routing.Context` object is passed through the relevant handlers. Because `routing.Context`
embeds `fasthttp.RequestCtx`, you can access all properties and methods provided by the latter.

Additionally, the `Context.Param()` method allows handlers to access the URL path parameters that match the current route.
Using `Context.Get()` and `Context.Set()`, handlers can share data with each other. For example, an authentication
handler can store the authenticated user identity by calling `Context.Set()`, and other handlers can retrieve
the identity information by calling `Context.Get()`.

Context also provides a handy `WriteData()` method that can be used to write data of arbitrary type to the response.
The `WriteData()` method can also be overridden (by replacement) to achieve more versatile response data writing.
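A minimal sketch of this data sharing (hypothetical handlers; the `X-User` header lookup is only a placeholder for a real authentication step):

```go
// auth is a hypothetical authentication handler: it resolves the user and
// stores it on the context for downstream handlers.
func auth(c *routing.Context) error {
	user := string(c.Request.Header.Peek("X-User")) // placeholder lookup
	c.Set("user", user)
	return nil
}

// profile retrieves what auth stored earlier in the chain.
func profile(c *routing.Context) error {
	user, _ := c.Get("user").(string)
	fmt.Fprintf(c, "Hello, %v", user)
	return nil
}
```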

### Error Handling

A handler may return an error indicating some erroneous condition. Sometimes, a handler or the code it calls may cause
a panic. Both should be handled properly to ensure the best user experience. It is recommended that you use
the `fault.Recover` handler or a similar error handler to handle these errors.

If an error is not handled by any handler, the router will handle it by calling its `handleError()` method, which
simply sets an appropriate HTTP status code and writes the error message to the response.

When an incoming request has no matching route, the router will call the handlers registered via the `Router.NotFound()`
method. All the handlers registered via `Router.Use()` will also be called in advance. By default, the following two
handlers are registered with `Router.NotFound()` (replacing them is sketched after this list):

* `routing.MethodNotAllowedHandler`: a handler that sends an `Allow` HTTP header indicating the allowed HTTP methods for a requested URL
* `routing.NotFoundHandler`: a handler that triggers a 404 HTTP error
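A minimal sketch of overriding those defaults (it assumes `net/http` is imported; the message text is illustrative):

```go
router.NotFound(func(c *routing.Context) error {
	// Replace the default 404/405 behavior with a custom message; the
	// router's handleError() turns this into the response.
	return routing.NewHTTPError(http.StatusNotFound, "no such endpoint")
})
```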

126 vendor/github.com/qiangxue/fasthttp-routing/context.go generated vendored
@@ -1,126 +0,0 @@
// Copyright 2016 Qiang Xue. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package routing

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

// SerializeFunc serializes the given data of arbitrary type into a byte array.
type SerializeFunc func(data interface{}) ([]byte, error)

// Context represents the contextual data and environment while processing an incoming HTTP request.
type Context struct {
	*fasthttp.RequestCtx

	Serialize SerializeFunc // the function serializing the given data of arbitrary type into a byte array.

	router   *Router
	pnames   []string               // list of route parameter names
	pvalues  []string               // list of parameter values corresponding to pnames
	data     map[string]interface{} // data items managed by Get and Set
	index    int                    // the index of the currently executing handler in handlers
	handlers []Handler              // the handlers associated with the current route
}

// Router returns the Router that is handling the incoming HTTP request.
func (c *Context) Router() *Router {
	return c.router
}

// Param returns the named parameter value that is found in the URL path matching the current route.
// If the named parameter cannot be found, an empty string will be returned.
func (c *Context) Param(name string) string {
	for i, n := range c.pnames {
		if n == name {
			return c.pvalues[i]
		}
	}
	return ""
}

// Get returns the named data item previously registered with the context by calling Set.
// If the named data item cannot be found, nil will be returned.
func (c *Context) Get(name string) interface{} {
	return c.data[name]
}

// Set stores the named data item in the context so that it can be retrieved later.
func (c *Context) Set(name string, value interface{}) {
	if c.data == nil {
		c.data = make(map[string]interface{})
	}
	c.data[name] = value
}

// Next calls the rest of the handlers associated with the current route.
// If any of these handlers returns an error, Next will return the error and skip the following handlers.
// Next is normally used when a handler needs to do some postprocessing after the rest of the handlers
// are executed.
func (c *Context) Next() error {
	c.index++
	for n := len(c.handlers); c.index < n; c.index++ {
		if err := c.handlers[c.index](c); err != nil {
			return err
		}
	}
	return nil
}

// Abort skips the rest of the handlers associated with the current route.
// Abort is normally used when a handler handles the request normally and wants to skip the rest of the handlers.
// If a handler wants to indicate an error condition, it should simply return the error without calling Abort.
func (c *Context) Abort() {
	c.index = len(c.handlers)
}

// URL creates a URL using the named route and the parameter values.
// The parameters should be given in the sequence of name1, value1, name2, value2, and so on.
// If a parameter in the route is not provided a value, the parameter token will remain in the resulting URL.
// Parameter values will be properly URL encoded.
// The method returns an empty string if the URL creation fails.
func (c *Context) URL(route string, pairs ...interface{}) string {
	if r := c.router.routes[route]; r != nil {
		return r.URL(pairs...)
	}
	return ""
}

// WriteData writes the given data of arbitrary type to the response.
// The method calls the Serialize() method to convert the data into a byte array and then writes
// the byte array to the response.
func (c *Context) WriteData(data interface{}) (err error) {
	var bytes []byte
	if bytes, err = c.Serialize(data); err == nil {
		_, err = c.Write(bytes)
	}
	return
}

// init sets the request and response of the context and resets all other properties.
func (c *Context) init(ctx *fasthttp.RequestCtx) {
	c.RequestCtx = ctx
	c.data = nil
	c.index = -1
	c.Serialize = Serialize
}

// Serialize converts the given data into a byte array.
// If the data is neither a byte array nor a string, it will call fmt.Sprint to convert it into a string.
func Serialize(data interface{}) (bytes []byte, err error) {
	switch data.(type) {
	case []byte:
		return data.([]byte), nil
	case string:
		return []byte(data.(string)), nil
	default:
		if data != nil {
			return []byte(fmt.Sprint(data)), nil
		}
	}
	return nil, nil
}

40 vendor/github.com/qiangxue/fasthttp-routing/error.go generated vendored
@@ -1,40 +0,0 @@
// Copyright 2016 Qiang Xue. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package routing

import "net/http"

// HTTPError represents an HTTP error with an HTTP status code and error message.
type HTTPError interface {
	error
	// StatusCode returns the HTTP status code of the error
	StatusCode() int
}

// httpError contains the error information reported by calling Context.Error().
type httpError struct {
	Status  int    `json:"status" xml:"status"`
	Message string `json:"message" xml:"message"`
}

// NewHTTPError creates a new HTTPError instance.
// If the error message is not given, http.StatusText() will be called
// to generate the message based on the status code.
func NewHTTPError(status int, message ...string) HTTPError {
	if len(message) > 0 {
		return &httpError{status, message[0]}
	}
	return &httpError{status, http.StatusText(status)}
}

// Error returns the error message.
func (e *httpError) Error() string {
	return e.Message
}

// StatusCode returns the HTTP status code.
func (e *httpError) StatusCode() int {
	return e.Status
}
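As a usage note, returning an `HTTPError` from any handler lets the router's `handleError()` (see router.go below) pick the right status code. A minimal, hypothetical sketch within this package:

```go
// adminOnly rejects non-admin users with a 403 carried as an HTTPError.
func adminOnly(c *Context) error {
	if c.Get("user") != "admin" {
		return NewHTTPError(http.StatusForbidden, "admin access required")
	}
	return c.Next()
}
```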

107 vendor/github.com/qiangxue/fasthttp-routing/group.go generated vendored
@@ -1,107 +0,0 @@
// Copyright 2016 Qiang Xue. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package routing

import (
	"strings"
)

// RouteGroup represents a group of routes that share the same path prefix.
type RouteGroup struct {
	prefix   string
	router   *Router
	handlers []Handler
}

// newRouteGroup creates a new RouteGroup with the given path prefix, router, and handlers.
func newRouteGroup(prefix string, router *Router, handlers []Handler) *RouteGroup {
	return &RouteGroup{
		prefix:   prefix,
		router:   router,
		handlers: handlers,
	}
}

// Get adds a GET route to the router with the given route path and handlers.
func (r *RouteGroup) Get(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Get(handlers...)
}

// Post adds a POST route to the router with the given route path and handlers.
func (r *RouteGroup) Post(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Post(handlers...)
}

// Put adds a PUT route to the router with the given route path and handlers.
func (r *RouteGroup) Put(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Put(handlers...)
}

// Patch adds a PATCH route to the router with the given route path and handlers.
func (r *RouteGroup) Patch(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Patch(handlers...)
}

// Delete adds a DELETE route to the router with the given route path and handlers.
func (r *RouteGroup) Delete(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Delete(handlers...)
}

// Connect adds a CONNECT route to the router with the given route path and handlers.
func (r *RouteGroup) Connect(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Connect(handlers...)
}

// Head adds a HEAD route to the router with the given route path and handlers.
func (r *RouteGroup) Head(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Head(handlers...)
}

// Options adds an OPTIONS route to the router with the given route path and handlers.
func (r *RouteGroup) Options(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Options(handlers...)
}

// Trace adds a TRACE route to the router with the given route path and handlers.
func (r *RouteGroup) Trace(path string, handlers ...Handler) *Route {
	return newRoute(path, r).Trace(handlers...)
}

// Any adds a route with the given route path and handlers for all HTTP methods listed in routing.Methods.
func (r *RouteGroup) Any(path string, handlers ...Handler) *Route {
	route := newRoute(path, r)
	for _, method := range Methods {
		route.add(method, handlers)
	}
	return route
}

// To adds a route to the router with the given HTTP methods, route path, and handlers.
// Multiple HTTP methods should be separated by commas (without any surrounding spaces).
func (r *RouteGroup) To(methods, path string, handlers ...Handler) *Route {
	route := newRoute(path, r)
	for _, method := range strings.Split(methods, ",") {
		route.add(method, handlers)
	}
	return route
}

// Group creates a RouteGroup with the given route path prefix and handlers.
// The new group will combine the existing path prefix with the new one.
// If no handler is provided, the new group will inherit the handlers registered
// with the current group.
func (r *RouteGroup) Group(prefix string, handlers ...Handler) *RouteGroup {
	if len(handlers) == 0 {
		handlers = make([]Handler, len(r.handlers))
		copy(handlers, r.handlers)
	}
	return newRouteGroup(r.prefix+prefix, r.router, handlers)
}

// Use registers one or multiple handlers with the current route group.
// These handlers will be shared by all routes belonging to this group and its subgroups.
func (r *RouteGroup) Use(handlers ...Handler) {
	r.handlers = append(r.handlers, handlers...)
}

161 vendor/github.com/qiangxue/fasthttp-routing/route.go generated vendored
@@ -1,161 +0,0 @@
// Copyright 2016 Qiang Xue. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package routing

import (
	"fmt"
	"net/url"
	"strings"
)

// Route represents a URL path pattern that can be used to match requested URLs.
type Route struct {
	group      *RouteGroup
	name, path string
	template   string
}

// newRoute creates a new Route with the given route path and route group.
func newRoute(path string, group *RouteGroup) *Route {
	path = group.prefix + path
	name := path

	// an asterisk at the end matches any number of characters
	if strings.HasSuffix(path, "*") {
		path = path[:len(path)-1] + "<:.*>"
	}

	route := &Route{
		group:    group,
		name:     name,
		path:     path,
		template: buildURLTemplate(path),
	}
	group.router.routes[name] = route

	return route
}

// Name sets the name of the route.
// This method will update the registration of the route in the router as well.
func (r *Route) Name(name string) *Route {
	r.name = name
	r.group.router.routes[name] = r
	return r
}

// Get adds the route to the router using the GET HTTP method.
func (r *Route) Get(handlers ...Handler) *Route {
	return r.add("GET", handlers)
}

// Post adds the route to the router using the POST HTTP method.
func (r *Route) Post(handlers ...Handler) *Route {
	return r.add("POST", handlers)
}

// Put adds the route to the router using the PUT HTTP method.
func (r *Route) Put(handlers ...Handler) *Route {
	return r.add("PUT", handlers)
}

// Patch adds the route to the router using the PATCH HTTP method.
func (r *Route) Patch(handlers ...Handler) *Route {
	return r.add("PATCH", handlers)
}

// Delete adds the route to the router using the DELETE HTTP method.
func (r *Route) Delete(handlers ...Handler) *Route {
	return r.add("DELETE", handlers)
}

// Connect adds the route to the router using the CONNECT HTTP method.
func (r *Route) Connect(handlers ...Handler) *Route {
	return r.add("CONNECT", handlers)
}

// Head adds the route to the router using the HEAD HTTP method.
func (r *Route) Head(handlers ...Handler) *Route {
	return r.add("HEAD", handlers)
}

// Options adds the route to the router using the OPTIONS HTTP method.
func (r *Route) Options(handlers ...Handler) *Route {
	return r.add("OPTIONS", handlers)
}

// Trace adds the route to the router using the TRACE HTTP method.
func (r *Route) Trace(handlers ...Handler) *Route {
	return r.add("TRACE", handlers)
}

// To adds the route to the router with the given HTTP methods and handlers.
// Multiple HTTP methods should be separated by commas (without any surrounding spaces).
func (r *Route) To(methods string, handlers ...Handler) *Route {
	for _, method := range strings.Split(methods, ",") {
		r.add(method, handlers)
	}
	return r
}

// URL creates a URL using the current route and the given parameters.
// The parameters should be given in the sequence of name1, value1, name2, value2, and so on.
// If a parameter in the route is not provided a value, the parameter token will remain in the resulting URL.
// The method will perform URL encoding for all given parameter values.
func (r *Route) URL(pairs ...interface{}) (s string) {
	s = r.template
	for i := 0; i < len(pairs); i++ {
		name := fmt.Sprintf("<%v>", pairs[i])
		value := ""
		if i < len(pairs)-1 {
			value = url.QueryEscape(fmt.Sprint(pairs[i+1]))
		}
		s = strings.Replace(s, name, value, -1)
	}
	return
}

// add registers the route, the specified HTTP method and the handlers to the router.
// The handlers will be combined with the handlers of the route group.
func (r *Route) add(method string, handlers []Handler) *Route {
	hh := combineHandlers(r.group.handlers, handlers)
	r.group.router.add(method, r.path, hh)
	return r
}

// buildURLTemplate converts a route pattern into a URL template by removing regular expressions in parameter tokens.
func buildURLTemplate(path string) string {
	template, start, end := "", -1, -1
	for i := 0; i < len(path); i++ {
		if path[i] == '<' && start < 0 {
			start = i
		} else if path[i] == '>' && start >= 0 {
			name := path[start+1 : i]
			for j := start + 1; j < i; j++ {
				if path[j] == ':' {
					name = path[start+1 : j]
					break
				}
			}
			template += path[end+1:start] + "<" + name + ">"
			end = i
			start = -1
		}
	}
	if end < 0 {
		template = path
	} else if end < len(path)-1 {
		template += path[end+1:]
	}
	return template
}

// combineHandlers merges two lists of handlers into a new list.
func combineHandlers(h1 []Handler, h2 []Handler) []Handler {
	hh := make([]Handler, len(h1)+len(h2))
	copy(hh, h1)
	copy(hh[len(h1):], h2)
	return hh
}

169 vendor/github.com/qiangxue/fasthttp-routing/router.go generated vendored
@@ -1,169 +0,0 @@
// Copyright 2016 Qiang Xue. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package routing provides high performance and powerful HTTP routing capabilities.
package routing

import (
	"net/http"
	"sort"
	"strings"
	"sync"

	"github.com/valyala/fasthttp"
)

type (
	// Handler is the function for handling HTTP requests.
	Handler func(*Context) error

	// Router manages routes and dispatches HTTP requests to the handlers of the matching routes.
	Router struct {
		RouteGroup
		pool             sync.Pool
		routes           map[string]*Route
		stores           map[string]routeStore
		maxParams        int
		notFound         []Handler
		notFoundHandlers []Handler
	}

	// routeStore stores route paths and the corresponding handlers.
	routeStore interface {
		Add(key string, data interface{}) int
		Get(key string, pvalues []string) (data interface{}, pnames []string)
		String() string
	}
)

// Methods lists all HTTP methods supported by Router.
var Methods = []string{
	"CONNECT",
	"DELETE",
	"GET",
	"HEAD",
	"OPTIONS",
	"PATCH",
	"POST",
	"PUT",
	"TRACE",
}

// New creates a new Router object.
func New() *Router {
	r := &Router{
		routes: make(map[string]*Route),
		stores: make(map[string]routeStore),
	}
	r.RouteGroup = *newRouteGroup("", r, make([]Handler, 0))
	r.NotFound(MethodNotAllowedHandler, NotFoundHandler)
	r.pool.New = func() interface{} {
		return &Context{
			pvalues: make([]string, r.maxParams),
			router:  r,
		}
	}
	return r
}

// HandleRequest handles the HTTP request.
func (r *Router) HandleRequest(ctx *fasthttp.RequestCtx) {
	c := r.pool.Get().(*Context)
	c.init(ctx)
	c.handlers, c.pnames = r.find(string(ctx.Method()), string(ctx.Path()), c.pvalues)
	if err := c.Next(); err != nil {
		r.handleError(c, err)
	}
	r.pool.Put(c)
}

// Route returns the named route.
// Nil is returned if the named route cannot be found.
func (r *Router) Route(name string) *Route {
	return r.routes[name]
}

// Use appends the specified handlers to the router and shares them with all routes.
func (r *Router) Use(handlers ...Handler) {
	r.RouteGroup.Use(handlers...)
	r.notFoundHandlers = combineHandlers(r.handlers, r.notFound)
}

// NotFound specifies the handlers that should be invoked when the router cannot find any route matching a request.
// Note that the handlers registered via Use will be invoked first in this case.
func (r *Router) NotFound(handlers ...Handler) {
	r.notFound = handlers
	r.notFoundHandlers = combineHandlers(r.handlers, r.notFound)
}

// handleError is the error handler for handling any unhandled errors.
func (r *Router) handleError(c *Context, err error) {
	if httpError, ok := err.(HTTPError); ok {
		c.Error(httpError.Error(), httpError.StatusCode())
	} else {
		c.Error(err.Error(), http.StatusInternalServerError)
	}
}

func (r *Router) add(method, path string, handlers []Handler) {
	store := r.stores[method]
	if store == nil {
		store = newStore()
		r.stores[method] = store
	}
	if n := store.Add(path, handlers); n > r.maxParams {
		r.maxParams = n
	}
}

func (r *Router) find(method, path string, pvalues []string) (handlers []Handler, pnames []string) {
	var hh interface{}
	if store := r.stores[method]; store != nil {
		hh, pnames = store.Get(path, pvalues)
	}
	if hh != nil {
		return hh.([]Handler), pnames
	}
	return r.notFoundHandlers, pnames
}

func (r *Router) findAllowedMethods(path string) map[string]bool {
	methods := make(map[string]bool)
	pvalues := make([]string, r.maxParams)
	for m, store := range r.stores {
		if handlers, _ := store.Get(path, pvalues); handlers != nil {
			methods[m] = true
		}
	}
	return methods
}

// NotFoundHandler returns a 404 HTTP error indicating a request has no matching route.
func NotFoundHandler(*Context) error {
	return NewHTTPError(http.StatusNotFound)
}

// MethodNotAllowedHandler handles the situation when a request has a matching route but no matching HTTP method.
// In this case, the handler will respond with an Allow HTTP header listing the allowed HTTP methods.
// Otherwise, the handler will do nothing and let the next handler (usually a NotFoundHandler) handle the problem.
func MethodNotAllowedHandler(c *Context) error {
	methods := c.Router().findAllowedMethods(string(c.Path()))
	if len(methods) == 0 {
		return nil
	}
	methods["OPTIONS"] = true
	ms := make([]string, len(methods))
	i := 0
	for method := range methods {
		ms[i] = method
		i++
	}
	sort.Strings(ms)
	c.Response.Header.Set("Allow", strings.Join(ms, ", "))
	if string(c.Method()) != "OPTIONS" {
		c.Response.SetStatusCode(http.StatusMethodNotAllowed)
	}
	c.Abort()
	return nil
}

317 vendor/github.com/qiangxue/fasthttp-routing/store.go generated vendored
@@ -1,317 +0,0 @@
|
||||
// Copyright 2016 Qiang Xue. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package routing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// store is a radix tree that supports storing data with parametric keys and retrieving them back with concrete keys.
|
||||
// When retrieving a data item with a concrete key, the matching parameter names and values will be returned as well.
|
||||
// A parametric key is a string containing tokens in the format of "<name>", "<name:pattern>", or "<:pattern>".
|
||||
// Each token represents a single parameter.
|
||||
type store struct {
|
||||
root *node // the root node of the radix tree
|
||||
count int // the number of data nodes in the tree
|
||||
}
|
||||
|
||||
// newStore creates a new store.
|
||||
func newStore() *store {
|
||||
return &store{
|
||||
root: &node{
|
||||
static: true,
|
||||
children: make([]*node, 256),
|
||||
pchildren: make([]*node, 0),
|
||||
pindex: -1,
|
||||
pnames: []string{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds a new data item with the given parametric key.
|
||||
// The number of parameters in the key is returned.
|
||||
func (s *store) Add(key string, data interface{}) int {
|
||||
s.count++
|
||||
return s.root.add(key, data, s.count)
|
||||
}
|
||||
|
||||
// Get returns the data item matching the given concrete key.
|
||||
// If the data item was added to the store with a parametric key before, the matching
|
||||
// parameter names and values will be returned as well.
|
||||
func (s *store) Get(path string, pvalues []string) (data interface{}, pnames []string) {
|
||||
data, pnames, _ = s.root.get(path, pvalues)
|
||||
return
|
||||
}
|
||||
|
||||
// String dumps the radix tree kept in the store as a string.
|
||||
func (s *store) String() string {
|
||||
return s.root.print(0)
|
||||
}
|
||||
|
||||
// node represents a radix trie node
|
||||
type node struct {
|
||||
static bool // whether the node is a static node or param node
|
||||
|
||||
key string // the key identifying this node
|
||||
data interface{} // the data associated with this node. nil if not a data node.
|
||||
|
||||
order int // the order at which the data was added. used to be pick the first one when matching multiple
|
||||
minOrder int // minimum order among all the child nodes and this node
|
||||
|
||||
children []*node // child static nodes, indexed by the first byte of each child key
|
||||
pchildren []*node // child param nodes
|
||||
|
||||
regex *regexp.Regexp // regular expression for a param node containing regular expression key
|
||||
pindex int // the parameter index, meaningful only for param node
|
||||
pnames []string // the parameter names collected from the root till this node
|
||||
}
|
||||
|
||||
// add adds a new data item to the tree rooted at the current node.
|
||||
// The number of parameters in the key is returned.
|
||||
func (n *node) add(key string, data interface{}, order int) int {
|
||||
matched := 0
|
||||
|
||||
// find the common prefix
|
||||
for ; matched < len(key) && matched < len(n.key); matched++ {
|
||||
if key[matched] != n.key[matched] {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if matched == len(n.key) {
|
||||
if matched == len(key) {
|
||||
// the node key is the same as the key: make the current node as data node
|
||||
			// if the node is already a data node, ignore the new data since we only care about the first matched node
			if n.data == nil {
				n.data = data
				n.order = order
			}
			return n.pindex + 1
		}

		// the node key is a prefix of the key: create a child node
		newKey := key[matched:]

		// try adding to a static child
		if child := n.children[newKey[0]]; child != nil {
			if pn := child.add(newKey, data, order); pn >= 0 {
				return pn
			}
		}
		// try adding to a param child
		for _, child := range n.pchildren {
			if pn := child.add(newKey, data, order); pn >= 0 {
				return pn
			}
		}

		return n.addChild(newKey, data, order)
	}

	if matched == 0 || !n.static {
		// no common prefix, or partial common prefix with a non-static node: should skip this node
		return -1
	}

	// the node key shares a partial prefix with the key: split the node key
	n1 := &node{
		static:    true,
		key:       n.key[matched:],
		data:      n.data,
		order:     n.order,
		minOrder:  n.minOrder,
		pchildren: n.pchildren,
		children:  n.children,
		pindex:    n.pindex,
		pnames:    n.pnames,
	}

	n.key = key[0:matched]
	n.data = nil
	n.pchildren = make([]*node, 0)
	n.children = make([]*node, 256)
	n.children[n1.key[0]] = n1

	return n.add(key, data, order)
}

// addChild creates static and param nodes to store the given data
func (n *node) addChild(key string, data interface{}, order int) int {
	// find the first occurrence of a param token
	p0, p1 := -1, -1
	for i := 0; i < len(key); i++ {
		if p0 < 0 && key[i] == '<' {
			p0 = i
		}
		if p0 >= 0 && key[i] == '>' {
			p1 = i
			break
		}
	}

	if p0 > 0 && p1 > 0 || p1 < 0 {
		// param token occurs after a static string, or no param token: create a static node
		child := &node{
			static:    true,
			key:       key,
			minOrder:  order,
			children:  make([]*node, 256),
			pchildren: make([]*node, 0),
			pindex:    n.pindex,
			pnames:    n.pnames,
		}
		n.children[key[0]] = child
		if p1 > 0 {
			// param token occurs after a static string
			child.key = key[:p0]
			n = child
		} else {
			// no param token: done adding the child
			child.data = data
			child.order = order
			return child.pindex + 1
		}
	}

	// add param node
	child := &node{
		static:    false,
		key:       key[p0 : p1+1],
		minOrder:  order,
		children:  make([]*node, 256),
		pchildren: make([]*node, 0),
		pindex:    n.pindex,
		pnames:    n.pnames,
	}
	pattern := ""
	pname := key[p0+1 : p1]
	for i := p0 + 1; i < p1; i++ {
		if key[i] == ':' {
			pname = key[p0+1 : i]
			pattern = key[i+1 : p1]
			break
		}
	}
	if pattern != "" {
		// the param token contains a regular expression
		child.regex = regexp.MustCompile("^" + pattern)
	}
	pnames := make([]string, len(n.pnames)+1)
	copy(pnames, n.pnames)
	pnames[len(n.pnames)] = pname
	child.pnames = pnames
	child.pindex = len(pnames) - 1
	n.pchildren = append(n.pchildren, child)

	if p1 == len(key)-1 {
		// the param token is at the end of the key
		child.data = data
		child.order = order
		return child.pindex + 1
	}

	// process the rest of the key
	return child.addChild(key[p1+1:], data, order)
}

// get returns the data item with the key matching the tree rooted at the current node
func (n *node) get(key string, pvalues []string) (data interface{}, pnames []string, order int) {
	order = math.MaxInt32

repeat:
	if n.static {
		// check if the node key is a prefix of the given key
		// a slightly optimized version of strings.HasPrefix
		nkl := len(n.key)
		if nkl > len(key) {
			return
		}
		for i := nkl - 1; i >= 0; i-- {
			if n.key[i] != key[i] {
				return
			}
		}
		key = key[nkl:]
	} else if n.regex != nil {
		// param node with regular expression
		if n.regex.String() == "^.*" {
			pvalues[n.pindex] = key
			key = ""
		} else if match := n.regex.FindStringIndex(key); match != nil {
			pvalues[n.pindex] = key[0:match[1]]
			key = key[match[1]:]
		} else {
			return
		}
	} else {
		// param node matching non-"/" characters
		i, kl := 0, len(key)
		for ; i < kl; i++ {
			if key[i] == '/' {
				pvalues[n.pindex] = key[0:i]
				key = key[i:]
				break
			}
		}
		if i == kl {
			pvalues[n.pindex] = key
			key = ""
		}
	}

	if len(key) > 0 {
		// find a static child that can match the rest of the key
		if child := n.children[key[0]]; child != nil {
			if len(n.pchildren) == 0 {
				// use goto to avoid recursion when there are no param children
				n = child
				goto repeat
			}
			data, pnames, order = child.get(key, pvalues)
		}
	} else if n.data != nil {
		// do not return yet: a param node may match an empty string with a smaller order
		data, pnames, order = n.data, n.pnames, n.order
	}

	// try matching param children
	tvalues := pvalues
	allocated := false
	for _, child := range n.pchildren {
		if child.minOrder >= order {
			continue
		}
		if data != nil && !allocated {
			tvalues = make([]string, len(pvalues))
			allocated = true
		}
		if d, p, s := child.get(key, tvalues); d != nil && s < order {
			if allocated {
				for i := child.pindex; i < len(p); i++ {
					pvalues[i] = tvalues[i]
				}
			}
			data, pnames, order = d, p, s
		}
	}

	return
}

func (n *node) print(level int) string {
	r := fmt.Sprintf("%v{key: %v, regex: %v, data: %v, order: %v, minOrder: %v, pindex: %v, pnames: %v}\n", strings.Repeat(" ", level<<2), n.key, n.regex, n.data, n.order, n.minOrder, n.pindex, n.pnames)
	for _, child := range n.children {
		if child != nil {
			r += child.print(level + 1)
		}
	}
	for _, child := range n.pchildren {
		r += child.print(level + 1)
	}
	return r
}
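For orientation, here is a minimal sketch of driving the routing tree above: build a root, `add` a parameterized route, then `get` it back with the captured parameter values. It assumes the `node` type whose methods are shown above; the root construction here is an assumption modeled on the fields those methods initialize, since the vendored package wires this up inside its own store constructor.

```go
package main

import "fmt"

// Assumes the node type and the add/get methods shown above.
func main() {
	// Hypothetical root: a static node with an empty child table,
	// mirroring how add/get expect the fields to be set up.
	root := &node{
		static:    true,
		key:       "/",
		children:  make([]*node, 256),
		pchildren: make([]*node, 0),
		pnames:    []string{},
	}
	root.add(`/users/<id:\d+>`, "usersHandler", 1)

	// pvalues must have room for every parameter on the matched route.
	pvalues := make([]string, 4)
	data, pnames, _ := root.get("/users/42", pvalues)
	fmt.Println(data, pnames, pvalues[0]) // usersHandler [id] 42
}
```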
22 vendor/github.com/valyala/bytebufferpool/LICENSE generated vendored
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
21 vendor/github.com/valyala/bytebufferpool/README.md generated vendored
@@ -1,21 +0,0 @@
[](https://travis-ci.org/valyala/bytebufferpool)
[](http://godoc.org/github.com/valyala/bytebufferpool)
[](http://goreportcard.com/report/valyala/bytebufferpool)

# bytebufferpool

An implementation of a pool of byte buffers with anti-memory-waste protection.

The pool may waste a limited amount of memory due to fragmentation.
This amount equals the maximum total size of the byte buffers
in concurrent use.

# Benchmark results
Currently bytebufferpool is the fastest and most effective buffer pool written in Go.

You can find the results [here](https://omgnull.github.io/go-benchmark/buffer/).

# bytebufferpool users

* [fasthttp](https://github.com/valyala/fasthttp)
* [quicktemplate](https://github.com/valyala/quicktemplate)
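The call pattern the README implies is acquire, write, release. A small self-contained example, using only the `Get`/`Put`/`WriteString`/`String` API that appears in the vendored sources below:

```go
package main

import (
	"fmt"

	"github.com/valyala/bytebufferpool"
)

func main() {
	b := bytebufferpool.Get() // may hand back a recycled buffer
	b.WriteString("hello, ")
	b.WriteString("world")
	fmt.Println(b.String()) // hello, world
	bytebufferpool.Put(b)   // b must not be touched after this
}
```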
111 vendor/github.com/valyala/bytebufferpool/bytebuffer.go generated vendored
@@ -1,111 +0,0 @@
package bytebufferpool

import "io"

// ByteBuffer provides byte buffer, which can be used for minimizing
// memory allocations.
//
// ByteBuffer may be used with functions appending data to the given []byte
// slice. See example code for details.
//
// Use Get for obtaining an empty byte buffer.
type ByteBuffer struct {

	// B is a byte buffer to use in append-like workloads.
	// See example code for details.
	B []byte
}

// Len returns the size of the byte buffer.
func (b *ByteBuffer) Len() int {
	return len(b.B)
}

// ReadFrom implements io.ReaderFrom.
//
// The function appends all the data read from r to b.
func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
	p := b.B
	nStart := int64(len(p))
	nMax := int64(cap(p))
	n := nStart
	if nMax == 0 {
		nMax = 64
		p = make([]byte, nMax)
	} else {
		p = p[:nMax]
	}
	for {
		if n == nMax {
			nMax *= 2
			bNew := make([]byte, nMax)
			copy(bNew, p)
			p = bNew
		}
		nn, err := r.Read(p[n:])
		n += int64(nn)
		if err != nil {
			b.B = p[:n]
			n -= nStart
			if err == io.EOF {
				return n, nil
			}
			return n, err
		}
	}
}

// WriteTo implements io.WriterTo.
func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) {
	n, err := w.Write(b.B)
	return int64(n), err
}

// Bytes returns b.B, i.e. all the bytes accumulated in the buffer.
//
// The purpose of this function is bytes.Buffer compatibility.
func (b *ByteBuffer) Bytes() []byte {
	return b.B
}

// Write implements io.Writer - it appends p to ByteBuffer.B
func (b *ByteBuffer) Write(p []byte) (int, error) {
	b.B = append(b.B, p...)
	return len(p), nil
}

// WriteByte appends the byte c to the buffer.
//
// The purpose of this function is bytes.Buffer compatibility.
//
// The function always returns nil.
func (b *ByteBuffer) WriteByte(c byte) error {
	b.B = append(b.B, c)
	return nil
}

// WriteString appends s to ByteBuffer.B.
func (b *ByteBuffer) WriteString(s string) (int, error) {
	b.B = append(b.B, s...)
	return len(s), nil
}

// Set sets ByteBuffer.B to p.
func (b *ByteBuffer) Set(p []byte) {
	b.B = append(b.B[:0], p...)
}

// SetString sets ByteBuffer.B to s.
func (b *ByteBuffer) SetString(s string) {
	b.B = append(b.B[:0], s...)
}

// String returns string representation of ByteBuffer.B.
func (b *ByteBuffer) String() string {
	return string(b.B)
}

// Reset makes ByteBuffer.B empty.
func (b *ByteBuffer) Reset() {
	b.B = b.B[:0]
}
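Because ByteBuffer implements io.Writer (plus io.ReaderFrom and io.WriterTo), it drops into any API that writes to a writer. A brief sketch:

```go
package main

import (
	"fmt"

	"github.com/valyala/bytebufferpool"
)

func main() {
	var b bytebufferpool.ByteBuffer // the zero value is ready to use
	fmt.Fprintf(&b, "id=%d name=%q", 42, "casecmp")
	fmt.Println(b.String())
	b.Reset() // reuse the backing slice for the next message
}
```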
7 vendor/github.com/valyala/bytebufferpool/doc.go generated vendored
@@ -1,7 +0,0 @@
// Package bytebufferpool implements a pool of byte buffers
// with anti-fragmentation protection.
//
// The pool may waste limited amount of memory due to fragmentation.
// This amount equals to the maximum total size of the byte buffers
// in concurrent use.
package bytebufferpool
151 vendor/github.com/valyala/bytebufferpool/pool.go generated vendored
@@ -1,151 +0,0 @@
package bytebufferpool

import (
	"sort"
	"sync"
	"sync/atomic"
)

const (
	minBitSize = 6 // 2**6=64 is a CPU cache line size
	steps      = 20

	minSize = 1 << minBitSize
	maxSize = 1 << (minBitSize + steps - 1)

	calibrateCallsThreshold = 42000
	maxPercentile           = 0.95
)

// Pool represents byte buffer pool.
//
// Distinct pools may be used for distinct types of byte buffers.
// Properly determined byte buffer types with their own pools may help reducing
// memory waste.
type Pool struct {
	calls       [steps]uint64
	calibrating uint64

	defaultSize uint64
	maxSize     uint64

	pool sync.Pool
}

var defaultPool Pool

// Get returns an empty byte buffer from the pool.
//
// Got byte buffer may be returned to the pool via Put call.
// This reduces the number of memory allocations required for byte buffer
// management.
func Get() *ByteBuffer { return defaultPool.Get() }

// Get returns new byte buffer with zero length.
//
// The byte buffer may be returned to the pool via Put after the use
// in order to minimize GC overhead.
func (p *Pool) Get() *ByteBuffer {
	v := p.pool.Get()
	if v != nil {
		return v.(*ByteBuffer)
	}
	return &ByteBuffer{
		B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)),
	}
}

// Put returns byte buffer to the pool.
//
// ByteBuffer.B mustn't be touched after returning it to the pool.
// Otherwise data races will occur.
func Put(b *ByteBuffer) { defaultPool.Put(b) }

// Put releases byte buffer obtained via Get to the pool.
//
// The buffer mustn't be accessed after returning to the pool.
func (p *Pool) Put(b *ByteBuffer) {
	idx := index(len(b.B))

	if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold {
		p.calibrate()
	}

	maxSize := int(atomic.LoadUint64(&p.maxSize))
	if maxSize == 0 || cap(b.B) <= maxSize {
		b.Reset()
		p.pool.Put(b)
	}
}

func (p *Pool) calibrate() {
	if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
		return
	}

	a := make(callSizes, 0, steps)
	var callsSum uint64
	for i := uint64(0); i < steps; i++ {
		calls := atomic.SwapUint64(&p.calls[i], 0)
		callsSum += calls
		a = append(a, callSize{
			calls: calls,
			size:  minSize << i,
		})
	}
	sort.Sort(a)

	defaultSize := a[0].size
	maxSize := defaultSize

	maxSum := uint64(float64(callsSum) * maxPercentile)
	callsSum = 0
	for i := 0; i < steps; i++ {
		if callsSum > maxSum {
			break
		}
		callsSum += a[i].calls
		size := a[i].size
		if size > maxSize {
			maxSize = size
		}
	}

	atomic.StoreUint64(&p.defaultSize, defaultSize)
	atomic.StoreUint64(&p.maxSize, maxSize)

	atomic.StoreUint64(&p.calibrating, 0)
}

type callSize struct {
	calls uint64
	size  uint64
}

type callSizes []callSize

func (ci callSizes) Len() int {
	return len(ci)
}

func (ci callSizes) Less(i, j int) bool {
	return ci[i].calls > ci[j].calls
}

func (ci callSizes) Swap(i, j int) {
	ci[i], ci[j] = ci[j], ci[i]
}

func index(n int) int {
	n--
	n >>= minBitSize
	idx := 0
	for n > 0 {
		n >>= 1
		idx++
	}
	if idx >= steps {
		idx = steps - 1
	}
	return idx
}
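The Put path buckets each returned buffer by length through index(), which is roughly the base-2 log of the length in 64-byte units (minBitSize = 6), clamped to the 20 available buckets. A few worked values, assuming the constants above:

```
index(1)     == 0   // lengths 1..64 share bucket 0
index(64)    == 0
index(65)    == 1   // 65..128  -> bucket 1
index(200)   == 2   // 129..256 -> bucket 2
index(1<<30) == 19  // clamped to the last bucket (steps - 1)
```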
22 vendor/github.com/valyala/fasthttp/LICENSE generated vendored
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015-2016 Aliaksandr Valialkin, VertaMedia

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
579 vendor/github.com/valyala/fasthttp/README.md generated vendored
@@ -1,579 +0,0 @@
[](https://travis-ci.org/valyala/fasthttp)
[](http://godoc.org/github.com/valyala/fasthttp)
[](https://goreportcard.com/report/github.com/valyala/fasthttp)

# fasthttp
Fast HTTP implementation for Go.

Currently fasthttp is successfully used by [VertaMedia](https://vertamedia.com/)
in production, serving up to 200K rps from more than 1.5M concurrent keep-alive
connections per physical server.

[TechEmpower Benchmark round 12 results](https://www.techempower.com/benchmarks/#section=data-r12&hw=peak&test=plaintext)

[Server Benchmarks](#http-server-performance-comparison-with-nethttp)

[Client Benchmarks](#http-client-comparison-with-nethttp)

[Install](#install)

[Documentation](https://godoc.org/github.com/valyala/fasthttp)

[Examples from docs](https://godoc.org/github.com/valyala/fasthttp#pkg-examples)

[Code examples](examples)

[Switching from net/http to fasthttp](#switching-from-nethttp-to-fasthttp)

[Fasthttp best practices](#fasthttp-best-practices)

[Tricks with byte buffers](#tricks-with-byte-buffers)

[Related projects](#related-projects)

[FAQ](#faq)

# HTTP server performance comparison with [net/http](https://golang.org/pkg/net/http/)

In short, the fasthttp server is up to 10 times faster than net/http.
Below are benchmark results.

*GOMAXPROCS=1*

net/http server:
```
$ GOMAXPROCS=1 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s
BenchmarkNetHTTPServerGet1ReqPerConn 1000000 12052 ns/op 2297 B/op 29 allocs/op
BenchmarkNetHTTPServerGet2ReqPerConn 1000000 12278 ns/op 2327 B/op 24 allocs/op
BenchmarkNetHTTPServerGet10ReqPerConn 2000000 8903 ns/op 2112 B/op 19 allocs/op
BenchmarkNetHTTPServerGet10KReqPerConn 2000000 8451 ns/op 2058 B/op 18 allocs/op
BenchmarkNetHTTPServerGet1ReqPerConn10KClients 500000 26733 ns/op 3229 B/op 29 allocs/op
BenchmarkNetHTTPServerGet2ReqPerConn10KClients 1000000 23351 ns/op 3211 B/op 24 allocs/op
BenchmarkNetHTTPServerGet10ReqPerConn10KClients 1000000 13390 ns/op 2483 B/op 19 allocs/op
BenchmarkNetHTTPServerGet100ReqPerConn10KClients 1000000 13484 ns/op 2171 B/op 18 allocs/op
```

fasthttp server:
```
$ GOMAXPROCS=1 go test -bench=kServerGet -benchmem -benchtime=10s
BenchmarkServerGet1ReqPerConn 10000000 1559 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet2ReqPerConn 10000000 1248 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10ReqPerConn 20000000 797 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10KReqPerConn 20000000 716 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet1ReqPerConn10KClients 10000000 1974 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet2ReqPerConn10KClients 10000000 1352 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10ReqPerConn10KClients 20000000 789 ns/op 2 B/op 0 allocs/op
BenchmarkServerGet100ReqPerConn10KClients 20000000 604 ns/op 0 B/op 0 allocs/op
```

*GOMAXPROCS=4*

net/http server:
```
$ GOMAXPROCS=4 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s
BenchmarkNetHTTPServerGet1ReqPerConn-4 3000000 4529 ns/op 2389 B/op 29 allocs/op
BenchmarkNetHTTPServerGet2ReqPerConn-4 5000000 3896 ns/op 2418 B/op 24 allocs/op
BenchmarkNetHTTPServerGet10ReqPerConn-4 5000000 3145 ns/op 2160 B/op 19 allocs/op
BenchmarkNetHTTPServerGet10KReqPerConn-4 5000000 3054 ns/op 2065 B/op 18 allocs/op
BenchmarkNetHTTPServerGet1ReqPerConn10KClients-4 1000000 10321 ns/op 3710 B/op 30 allocs/op
BenchmarkNetHTTPServerGet2ReqPerConn10KClients-4 2000000 7556 ns/op 3296 B/op 24 allocs/op
BenchmarkNetHTTPServerGet10ReqPerConn10KClients-4 5000000 3905 ns/op 2349 B/op 19 allocs/op
BenchmarkNetHTTPServerGet100ReqPerConn10KClients-4 5000000 3435 ns/op 2130 B/op 18 allocs/op
```

fasthttp server:
```
$ GOMAXPROCS=4 go test -bench=kServerGet -benchmem -benchtime=10s
BenchmarkServerGet1ReqPerConn-4 10000000 1141 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet2ReqPerConn-4 20000000 707 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10ReqPerConn-4 30000000 341 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10KReqPerConn-4 50000000 310 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet1ReqPerConn10KClients-4 10000000 1119 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet2ReqPerConn10KClients-4 20000000 644 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10ReqPerConn10KClients-4 30000000 346 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet100ReqPerConn10KClients-4 50000000 282 ns/op 0 B/op 0 allocs/op
```

# HTTP client comparison with net/http

In short, the fasthttp client is up to 10 times faster than net/http.
Below are benchmark results.

*GOMAXPROCS=1*

net/http client:
```
$ GOMAXPROCS=1 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
BenchmarkNetHTTPClientDoFastServer 1000000 12567 ns/op 2616 B/op 35 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1TCP 200000 67030 ns/op 5028 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd10TCP 300000 51098 ns/op 5031 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd100TCP 300000 45096 ns/op 5026 B/op 55 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1Inmemory 500000 24779 ns/op 5035 B/op 57 allocs/op
BenchmarkNetHTTPClientGetEndToEnd10Inmemory 1000000 26425 ns/op 5035 B/op 57 allocs/op
BenchmarkNetHTTPClientGetEndToEnd100Inmemory 500000 28515 ns/op 5045 B/op 57 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1000Inmemory 500000 39511 ns/op 5096 B/op 56 allocs/op
```

fasthttp client:
```
$ GOMAXPROCS=1 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
BenchmarkClientDoFastServer 20000000 865 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1TCP 1000000 18711 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd10TCP 1000000 14664 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd100TCP 1000000 14043 ns/op 1 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1Inmemory 5000000 3965 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd10Inmemory 3000000 4060 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd100Inmemory 5000000 3396 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1000Inmemory 5000000 3306 ns/op 2 B/op 0 allocs/op
```

*GOMAXPROCS=4*

net/http client:
```
$ GOMAXPROCS=4 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
BenchmarkNetHTTPClientDoFastServer-4 2000000 8774 ns/op 2619 B/op 35 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1TCP-4 500000 22951 ns/op 5047 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd10TCP-4 1000000 19182 ns/op 5037 B/op 55 allocs/op
BenchmarkNetHTTPClientGetEndToEnd100TCP-4 1000000 16535 ns/op 5031 B/op 55 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1Inmemory-4 1000000 14495 ns/op 5038 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd10Inmemory-4 1000000 10237 ns/op 5034 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd100Inmemory-4 1000000 10125 ns/op 5045 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1000Inmemory-4 1000000 11132 ns/op 5136 B/op 56 allocs/op
```

fasthttp client:
```
$ GOMAXPROCS=4 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
BenchmarkClientDoFastServer-4 50000000 397 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1TCP-4 2000000 7388 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd10TCP-4 2000000 6689 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd100TCP-4 3000000 4927 ns/op 1 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1Inmemory-4 10000000 1604 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd10Inmemory-4 10000000 1458 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd100Inmemory-4 10000000 1329 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1000Inmemory-4 10000000 1316 ns/op 5 B/op 0 allocs/op
```

# Install

```
go get -u github.com/valyala/fasthttp
```

# Switching from net/http to fasthttp

Unfortunately, fasthttp doesn't provide an API identical to net/http.
See the [FAQ](#faq) for details.
There is a [net/http -> fasthttp handler converter](https://godoc.org/github.com/valyala/fasthttp/fasthttpadaptor),
but it is better to write fasthttp request handlers by hand in order to use
all of the fasthttp advantages (especially high performance :) ).

Important points:

* Fasthttp works with [RequestHandler functions](https://godoc.org/github.com/valyala/fasthttp#RequestHandler)
instead of objects implementing the [Handler interface](https://golang.org/pkg/net/http/#Handler).
Fortunately, it is easy to pass bound struct methods to fasthttp:

```go
type MyHandler struct {
	foobar string
}

// request handler in net/http style, i.e. method bound to MyHandler struct.
func (h *MyHandler) HandleFastHTTP(ctx *fasthttp.RequestCtx) {
	// notice that we may access MyHandler properties here - see h.foobar.
	fmt.Fprintf(ctx, "Hello, world! Requested path is %q. Foobar is %q",
		ctx.Path(), h.foobar)
}

// request handler in fasthttp style, i.e. just a plain function.
func fastHTTPHandler(ctx *fasthttp.RequestCtx) {
	fmt.Fprintf(ctx, "Hi there! RequestURI is %q", ctx.RequestURI())
}

// pass a bound struct method to fasthttp
myHandler := &MyHandler{
	foobar: "foobar",
}
fasthttp.ListenAndServe(":8080", myHandler.HandleFastHTTP)

// pass a plain function to fasthttp
fasthttp.ListenAndServe(":8081", fastHTTPHandler)
```

* The [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler)
accepts only one argument - [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx).
It contains all the functionality required for HTTP request processing
and response writing. Below is an example of a simple request handler conversion
from net/http to fasthttp.

```go
// net/http request handler
requestHandler := func(w http.ResponseWriter, r *http.Request) {
	switch r.URL.Path {
	case "/foo":
		fooHandler(w, r)
	case "/bar":
		barHandler(w, r)
	default:
		http.Error(w, "Unsupported path", http.StatusNotFound)
	}
}
```

```go
// the corresponding fasthttp request handler
requestHandler := func(ctx *fasthttp.RequestCtx) {
	switch string(ctx.Path()) {
	case "/foo":
		fooHandler(ctx)
	case "/bar":
		barHandler(ctx)
	default:
		ctx.Error("Unsupported path", fasthttp.StatusNotFound)
	}
}
```

* Fasthttp allows setting response headers and writing the response body
in an arbitrary order. There is no 'headers first, then body' restriction
like in net/http. The following code is valid for fasthttp:

```go
requestHandler := func(ctx *fasthttp.RequestCtx) {
	// set some headers and status code first
	ctx.SetContentType("foo/bar")
	ctx.SetStatusCode(fasthttp.StatusOK)

	// then write the first part of the body
	fmt.Fprintf(ctx, "this is the first part of body\n")

	// then set more headers
	ctx.Response.Header.Set("Foo-Bar", "baz")

	// then write more body
	fmt.Fprintf(ctx, "this is the second part of body\n")

	// then override the already written body
	ctx.SetBody([]byte("this is completely new body contents"))

	// then update the status code
	ctx.SetStatusCode(fasthttp.StatusNotFound)

	// basically, anything may be updated many times before
	// returning from RequestHandler.
	//
	// Unlike net/http, fasthttp doesn't put the response on the wire until
	// returning from RequestHandler.
}
```

* Fasthttp doesn't provide a [ServeMux](https://golang.org/pkg/net/http/#ServeMux),
but there are more powerful third-party routers and web frameworks
with fasthttp support:

  * [Iris](https://github.com/kataras/iris)
  * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing)
  * [fasthttprouter](https://github.com/buaazp/fasthttprouter)
  * [lu](https://github.com/vincentLiuxiang/lu)

Net/http code with a simple ServeMux is trivially converted to fasthttp code:

```go
// net/http code

m := &http.ServeMux{}
m.HandleFunc("/foo", fooHandlerFunc)
m.HandleFunc("/bar", barHandlerFunc)
m.Handle("/baz", bazHandler)

http.ListenAndServe(":80", m)
```

```go
// the corresponding fasthttp code
m := func(ctx *fasthttp.RequestCtx) {
	switch string(ctx.Path()) {
	case "/foo":
		fooHandlerFunc(ctx)
	case "/bar":
		barHandlerFunc(ctx)
	case "/baz":
		bazHandler.HandlerFunc(ctx)
	default:
		ctx.Error("not found", fasthttp.StatusNotFound)
	}
}

fasthttp.ListenAndServe(":80", m)
```

* net/http -> fasthttp conversion table:

  * All the pseudocode below assumes w, r and ctx have these types:
  ```go
  var (
  	w   http.ResponseWriter
  	r   *http.Request
  	ctx *fasthttp.RequestCtx
  )
  ```
  * r.Body -> [ctx.PostBody()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostBody)
  * r.URL.Path -> [ctx.Path()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Path)
  * r.URL -> [ctx.URI()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.URI)
  * r.Method -> [ctx.Method()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Method)
  * r.Header -> [ctx.Request.Header](https://godoc.org/github.com/valyala/fasthttp#RequestHeader)
  * r.Header.Get() -> [ctx.Request.Header.Peek()](https://godoc.org/github.com/valyala/fasthttp#RequestHeader.Peek)
  * r.Host -> [ctx.Host()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Host)
  * r.Form -> [ctx.QueryArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.QueryArgs) +
    [ctx.PostArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostArgs)
  * r.PostForm -> [ctx.PostArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostArgs)
  * r.FormValue() -> [ctx.FormValue()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.FormValue)
  * r.FormFile() -> [ctx.FormFile()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.FormFile)
  * r.MultipartForm -> [ctx.MultipartForm()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.MultipartForm)
  * r.RemoteAddr -> [ctx.RemoteAddr()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.RemoteAddr)
  * r.RequestURI -> [ctx.RequestURI()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.RequestURI)
  * r.TLS -> [ctx.IsTLS()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.IsTLS)
  * r.Cookie() -> [ctx.Request.Header.Cookie()](https://godoc.org/github.com/valyala/fasthttp#RequestHeader.Cookie)
  * r.Referer() -> [ctx.Referer()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Referer)
  * r.UserAgent() -> [ctx.UserAgent()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.UserAgent)
  * w.Header() -> [ctx.Response.Header](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader)
  * w.Header().Set() -> [ctx.Response.Header.Set()](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader.Set)
  * w.Header().Set("Content-Type") -> [ctx.SetContentType()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetContentType)
  * w.Header().Set("Set-Cookie") -> [ctx.Response.Header.SetCookie()](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader.SetCookie)
  * w.Write() -> [ctx.Write()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Write),
    [ctx.SetBody()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBody),
    [ctx.SetBodyStream()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBodyStream),
    [ctx.SetBodyStreamWriter()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBodyStreamWriter)
  * w.WriteHeader() -> [ctx.SetStatusCode()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetStatusCode)
  * w.(http.Hijacker).Hijack() -> [ctx.Hijack()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack)
  * http.Error() -> [ctx.Error()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Error)
  * http.FileServer() -> [fasthttp.FSHandler()](https://godoc.org/github.com/valyala/fasthttp#FSHandler),
    [fasthttp.FS](https://godoc.org/github.com/valyala/fasthttp#FS)
  * http.ServeFile() -> [fasthttp.ServeFile()](https://godoc.org/github.com/valyala/fasthttp#ServeFile)
  * http.Redirect() -> [ctx.Redirect()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Redirect)
  * http.NotFound() -> [ctx.NotFound()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.NotFound)
  * http.StripPrefix() -> [fasthttp.PathRewriteFunc](https://godoc.org/github.com/valyala/fasthttp#PathRewriteFunc)

* *VERY IMPORTANT!* Fasthttp disallows holding references
to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx) or to its
members after returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
Otherwise [data races](http://blog.golang.org/race-detector) are inevitable.
Carefully inspect all the net/http request handlers converted to fasthttp to check whether
they retain references to RequestCtx or to its members after returning.
RequestCtx provides the following _band aids_ for this case:

  * Wrap RequestHandler into [TimeoutHandler](https://godoc.org/github.com/valyala/fasthttp#TimeoutHandler).
  * Call [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError)
    before returning from RequestHandler if there are references to RequestCtx or to its members.
    See [the example](https://godoc.org/github.com/valyala/fasthttp#example-RequestCtx-TimeoutError)
    for more details.

Use this brilliant tool - [race detector](http://blog.golang.org/race-detector) -
for detecting and eliminating data races in your program. If you detect a
data race related to fasthttp in your program, then there is a high probability
you forgot to call [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError)
before returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).

* Blind switching from net/http to fasthttp won't give you a performance boost.
While fasthttp is optimized for speed, its performance may easily be saturated
by a slow [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
So [profile](http://blog.golang.org/profiling-go-programs) and optimize your
code after switching to fasthttp. For instance, use [quicktemplate](https://github.com/valyala/quicktemplate)
instead of [html/template](https://golang.org/pkg/html/template/).

* See also [fasthttputil](https://godoc.org/github.com/valyala/fasthttp/fasthttputil),
[fasthttpadaptor](https://godoc.org/github.com/valyala/fasthttp/fasthttpadaptor) and
[expvarhandler](https://godoc.org/github.com/valyala/fasthttp/expvarhandler).

# Performance optimization tips for multi-core systems

* Use the [reuseport](https://godoc.org/github.com/valyala/fasthttp/reuseport) listener.
* Run a separate server instance per CPU core with GOMAXPROCS=1.
* Pin each server instance to a separate CPU core using [taskset](http://linux.die.net/man/1/taskset).
* Ensure the interrupts of a multiqueue network card are evenly distributed between CPU cores.
See [this article](https://blog.cloudflare.com/how-to-achieve-low-latency/) for details.
* Use Go 1.6, as it provides some considerable performance improvements.

# Fasthttp best practices

* Do not allocate objects and `[]byte` buffers - just reuse them as much
as possible. The Fasthttp API design encourages this.
* [sync.Pool](https://golang.org/pkg/sync/#Pool) is your best friend.
* [Profile your program](http://blog.golang.org/profiling-go-programs)
in production.
`go tool pprof --alloc_objects your-program mem.pprof` usually gives better
insights for optimization opportunities than `go tool pprof your-program cpu.pprof`.
* Write [tests and benchmarks](https://golang.org/pkg/testing/) for hot paths.
* Avoid conversion between `[]byte` and `string`, since this may result in memory
allocation+copy. The Fasthttp API provides functions for both `[]byte` and `string` -
use these functions instead of converting manually between `[]byte` and `string`.
There are some exceptions - see [this wiki page](https://github.com/golang/go/wiki/CompilerOptimizations#string-and-byte)
for more details.
* Verify your tests and production code under the
[race detector](https://golang.org/doc/articles/race_detector.html) on a regular basis.
* Prefer [quicktemplate](https://github.com/valyala/quicktemplate) instead of
[html/template](https://golang.org/pkg/html/template/) in your webserver.

# Tricks with `[]byte` buffers

The following tricks are used by fasthttp. Use them in your code too.

* Standard Go functions accept nil buffers
```go
var (
	// both buffers are uninitialized
	dst []byte
	src []byte
)
dst = append(dst, src...) // is legal if dst is nil and/or src is nil
copy(dst, src)            // is legal if dst is nil and/or src is nil
(string(src) == "")       // is true if src is nil
(len(src) == 0)           // is true if src is nil
src = src[:0]             // works like a charm with nil src

// this for loop doesn't panic if src is nil
for i, ch := range src {
	doSomething(i, ch)
}
```

So throw away nil checks for `[]byte` buffers from your code. For example,
```go
srcLen := 0
if src != nil {
	srcLen = len(src)
}
```

becomes

```go
srcLen := len(src)
```

* A string may be appended to a `[]byte` buffer with `append`
```go
dst = append(dst, "foobar"...)
```

* A `[]byte` buffer may be extended to its capacity.
```go
buf := make([]byte, 100)
a := buf[:10] // len(a) == 10, cap(a) == 100.
b := a[:100]  // is valid, since cap(a) == 100.
```

* All fasthttp functions accept nil `[]byte` buffers
```go
statusCode, body, err := fasthttp.Get(nil, "http://google.com/")
uintBuf := fasthttp.AppendUint(nil, 1234)
```

# Related projects

* [fasthttp-contrib](https://github.com/fasthttp-contrib) - various useful
helpers for projects based on fasthttp.
* [iris](https://github.com/kataras/iris) - web application framework built
on top of fasthttp. Features speed and functionality.
* [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) - fast and
powerful routing package for fasthttp servers.
* [fasthttprouter](https://github.com/buaazp/fasthttprouter) - a high
performance fasthttp request router that scales well.
* [lu](https://github.com/vincentLiuxiang/lu) - a high performance
Go middleware web framework which is based on fasthttp.
* [websocket](https://github.com/leavengood/websocket) - Gorilla-based
websocket implementation for fasthttp.

# FAQ

* *Why create yet another http package instead of optimizing net/http?*

Because the net/http API limits many optimization opportunities.
For example:
  * The net/http Request object lifetime isn't limited by request handler execution
  time. So the server must create a new request object for each request instead
  of reusing existing objects like fasthttp does.
  * net/http headers are stored in a `map[string][]string`. So the server
  must parse all the headers, convert them from `[]byte` to `string` and put
  them into the map before calling the user-provided request handler.
  All this requires unnecessary memory allocations that fasthttp avoids.
  * The net/http client API requires creating a new response object for each request.

* *Why is the fasthttp API incompatible with net/http?*

Because the net/http API limits many optimization opportunities. See the answer
above for more details. Also, certain parts of the net/http API are suboptimal
for use:
  * Compare [net/http connection hijacking](https://golang.org/pkg/net/http/#Hijacker)
  to [fasthttp connection hijacking](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack).
  * Compare [net/http Request.Body reading](https://golang.org/pkg/net/http/#Request)
  to [fasthttp request body reading](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostBody).

* *Why doesn't fasthttp support HTTP/2.0 and WebSockets?*

There are [plans](TODO) for adding HTTP/2.0 and WebSockets support
in the future.
In the meantime, third parties may use [RequestCtx.Hijack](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack)
for implementing these goodies. See [the first third-party websocket implementation on top of fasthttp](https://github.com/leavengood/websocket).

* *Are there known net/http advantages compared to fasthttp?*

Yes:
  * net/http supports [HTTP/2.0 starting from go1.6](https://http2.golang.org/).
  * The net/http API is stable, while the fasthttp API constantly evolves.
  * net/http handles more HTTP corner cases.
  * net/http should contain fewer bugs, since it is used and tested by a much
  wider audience.
  * net/http works on Go older than 1.5.

* *Why does the fasthttp API prefer returning `[]byte` instead of `string`?*

Because `[]byte` to `string` conversion isn't free - it requires memory
allocation and copy. Feel free to wrap the returned `[]byte` result in
`string()` if you prefer working with strings instead of byte slices.
But be aware that this has non-zero overhead.

* *Which Go versions are supported by fasthttp?*

Go1.5+. Older versions won't be supported, since their standard packages
[lack useful functions](https://github.com/valyala/fasthttp/issues/5).

* *Please provide real benchmark data and server information*

See [this issue](https://github.com/valyala/fasthttp/issues/4).

* *Are there plans to add request routing to fasthttp?*

There are no plans to add request routing into fasthttp.
Use third-party routers and web frameworks with fasthttp support:

  * [Iris](https://github.com/kataras/iris)
  * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing)
  * [fasthttprouter](https://github.com/buaazp/fasthttprouter)
  * [lu](https://github.com/vincentLiuxiang/lu)

See also [this issue](https://github.com/valyala/fasthttp/issues/9) for more info.

* *I detected a data race in fasthttp!*

Cool! [File a bug](https://github.com/valyala/fasthttp/issues/new). But before
doing so, check the following in your code:

  * Make sure there are no references to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx)
  or to its members after returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
  * Make sure you call [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError)
  before returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler)
  if there are references to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx)
  or to its members, which may be accessed by other goroutines.

* *I didn't find an answer to my question here*

Try exploring [these questions](https://github.com/valyala/fasthttp/issues?q=label%3Aquestion).
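Putting the README's pieces together, a complete minimal server looks like the sketch below. It uses only APIs referenced above (ListenAndServe, RequestCtx, ctx.Path(), and fmt.Fprintf on the context):

```go
package main

import (
	"fmt"
	"log"

	"github.com/valyala/fasthttp"
)

func main() {
	handler := func(ctx *fasthttp.RequestCtx) {
		fmt.Fprintf(ctx, "Hello! Requested path is %q", ctx.Path())
	}
	// Serves until the process exits; log.Fatal reports startup errors.
	log.Fatal(fasthttp.ListenAndServe(":8080", handler))
}
```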
4 vendor/github.com/valyala/fasthttp/TODO generated vendored
@@ -1,4 +0,0 @@
- SessionClient with referer and cookies support.
- ProxyHandler similar to FSHandler.
- WebSockets. See https://tools.ietf.org/html/rfc6455 .
- HTTP/2.0. See https://tools.ietf.org/html/rfc7540 .
482 vendor/github.com/valyala/fasthttp/args.go generated vendored
@@ -1,482 +0,0 @@
|
||||
package fasthttp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// AcquireArgs returns an empty Args object from the pool.
|
||||
//
|
||||
// The returned Args may be returned to the pool with ReleaseArgs
|
||||
// when no longer needed. This allows reducing GC load.
|
||||
func AcquireArgs() *Args {
|
||||
return argsPool.Get().(*Args)
|
||||
}
|
||||
|
||||
// ReleaseArgs returns the object acquired via AquireArgs to the pool.
|
||||
//
|
||||
// Do not access the released Args object, otherwise data races may occur.
|
||||
func ReleaseArgs(a *Args) {
|
||||
a.Reset()
|
||||
argsPool.Put(a)
|
||||
}
|
||||
|
||||
var argsPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &Args{}
|
||||
},
|
||||
}
|
||||
|
||||
// Args represents query arguments.
|
||||
//
|
||||
// It is forbidden copying Args instances. Create new instances instead
|
||||
// and use CopyTo().
|
||||
//
|
||||
// Args instance MUST NOT be used from concurrently running goroutines.
|
||||
type Args struct {
|
||||
noCopy noCopy
|
||||
|
||||
args []argsKV
|
||||
buf []byte
|
||||
}
|
||||
|
||||
type argsKV struct {
|
||||
key []byte
|
||||
value []byte
|
||||
}
|
||||
|
||||
// Reset clears query args.
|
||||
func (a *Args) Reset() {
|
||||
a.args = a.args[:0]
|
||||
}
|
||||
|
||||
// CopyTo copies all args to dst.
|
||||
func (a *Args) CopyTo(dst *Args) {
|
||||
dst.Reset()
|
||||
dst.args = copyArgs(dst.args, a.args)
|
||||
}
|
||||
|
||||
// VisitAll calls f for each existing arg.
|
||||
//
|
||||
// f must not retain references to key and value after returning.
|
||||
// Make key and/or value copies if you need storing them after returning.
|
||||
func (a *Args) VisitAll(f func(key, value []byte)) {
|
||||
visitArgs(a.args, f)
|
||||
}
|
||||
|
||||
// Len returns the number of query args.
|
||||
func (a *Args) Len() int {
|
||||
return len(a.args)
|
||||
}
|
||||
|
||||
// Parse parses the given string containing query args.
|
||||
func (a *Args) Parse(s string) {
|
||||
a.buf = append(a.buf[:0], s...)
|
||||
a.ParseBytes(a.buf)
|
||||
}
|
||||
|
||||
// ParseBytes parses the given b containing query args.
|
||||
func (a *Args) ParseBytes(b []byte) {
|
||||
a.Reset()
|
||||
|
||||
var s argsScanner
|
||||
s.b = b
|
||||
|
||||
var kv *argsKV
|
||||
a.args, kv = allocArg(a.args)
|
||||
for s.next(kv) {
|
||||
if len(kv.key) > 0 || len(kv.value) > 0 {
|
||||
a.args, kv = allocArg(a.args)
|
||||
}
|
||||
}
|
||||
a.args = releaseArg(a.args)
|
||||
}
|
||||
|
||||
// String returns string representation of query args.
|
||||
func (a *Args) String() string {
|
||||
return string(a.QueryString())
|
||||
}
|
||||
|
||||
// QueryString returns query string for the args.
|
||||
//
|
||||
// The returned value is valid until the next call to Args methods.
|
||||
func (a *Args) QueryString() []byte {
|
||||
a.buf = a.AppendBytes(a.buf[:0])
|
||||
return a.buf
|
||||
}
|
||||
|
||||
// AppendBytes appends query string to dst and returns the extended dst.
|
||||
func (a *Args) AppendBytes(dst []byte) []byte {
|
||||
for i, n := 0, len(a.args); i < n; i++ {
|
||||
kv := &a.args[i]
|
||||
dst = AppendQuotedArg(dst, kv.key)
|
||||
if len(kv.value) > 0 {
|
||||
dst = append(dst, '=')
|
||||
dst = AppendQuotedArg(dst, kv.value)
|
||||
}
|
||||
if i+1 < n {
|
||||
dst = append(dst, '&')
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// WriteTo writes query string to w.
|
||||
//
|
||||
// WriteTo implements io.WriterTo interface.
|
||||
func (a *Args) WriteTo(w io.Writer) (int64, error) {
|
||||
n, err := w.Write(a.QueryString())
|
||||
return int64(n), err
|
||||
}
|
||||
|
||||
// Del deletes argument with the given key from query args.
|
||||
func (a *Args) Del(key string) {
|
||||
a.args = delAllArgs(a.args, key)
|
||||
}
|
||||
|
||||
// DelBytes deletes argument with the given key from query args.
|
||||
func (a *Args) DelBytes(key []byte) {
|
||||
a.args = delAllArgs(a.args, b2s(key))
|
||||
}
|
||||
|
||||
// Add adds 'key=value' argument.
|
||||
//
|
||||
// Multiple values for the same key may be added.
|
||||
func (a *Args) Add(key, value string) {
|
||||
a.args = appendArg(a.args, key, value)
|
||||
}
|
||||
|
||||
// AddBytesK adds 'key=value' argument.
|
||||
//
|
||||
// Multiple values for the same key may be added.
|
||||
func (a *Args) AddBytesK(key []byte, value string) {
|
||||
a.args = appendArg(a.args, b2s(key), value)
|
||||
}
|
||||
|
||||
// AddBytesV adds 'key=value' argument.
|
||||
//
|
||||
// Multiple values for the same key may be added.
|
||||
func (a *Args) AddBytesV(key string, value []byte) {
|
||||
a.args = appendArg(a.args, key, b2s(value))
|
||||
}
|
||||
|
||||
// AddBytesKV adds 'key=value' argument.
|
||||
//
|
||||
// Multiple values for the same key may be added.
|
||||
func (a *Args) AddBytesKV(key, value []byte) {
|
||||
a.args = appendArg(a.args, b2s(key), b2s(value))
|
||||
}
|
||||
|
||||
// Set sets 'key=value' argument.
|
||||
func (a *Args) Set(key, value string) {
|
||||
a.args = setArg(a.args, key, value)
|
||||
}
|
||||
|
||||
// SetBytesK sets 'key=value' argument.
|
||||
func (a *Args) SetBytesK(key []byte, value string) {
|
||||
a.args = setArg(a.args, b2s(key), value)
|
||||
}
|
||||
|
||||
// SetBytesV sets 'key=value' argument.
|
||||
func (a *Args) SetBytesV(key string, value []byte) {
|
||||
a.args = setArg(a.args, key, b2s(value))
|
||||
}
|
||||
|
||||
// SetBytesKV sets 'key=value' argument.
|
||||
func (a *Args) SetBytesKV(key, value []byte) {
|
||||
a.args = setArgBytes(a.args, key, value)
|
||||
}
|
||||
|
||||
// Peek returns query arg value for the given key.
|
||||
//
|
||||
// Returned value is valid until the next Args call.
|
||||
func (a *Args) Peek(key string) []byte {
|
||||
return peekArgStr(a.args, key)
|
||||
}
|
||||
|
||||
// PeekBytes returns query arg value for the given key.
|
||||
//
|
||||
// Returned value is valid until the next Args call.
|
||||
func (a *Args) PeekBytes(key []byte) []byte {
|
||||
return peekArgBytes(a.args, key)
|
||||
}
|
||||
|
||||
// PeekMulti returns all the arg values for the given key.
|
||||
func (a *Args) PeekMulti(key string) [][]byte {
|
||||
var values [][]byte
|
||||
a.VisitAll(func(k, v []byte) {
|
||||
if string(k) == key {
|
||||
values = append(values, v)
|
||||
}
|
||||
})
|
||||
return values
|
||||
}
|
||||
|
||||
// PeekMultiBytes returns all the arg values for the given key.
|
||||
func (a *Args) PeekMultiBytes(key []byte) [][]byte {
|
||||
return a.PeekMulti(b2s(key))
|
||||
}
|
||||
|
||||
// Has returns true if the given key exists in Args.
|
||||
func (a *Args) Has(key string) bool {
|
||||
return hasArg(a.args, key)
|
||||
}
|
||||
|
||||
// HasBytes returns true if the given key exists in Args.
|
||||
func (a *Args) HasBytes(key []byte) bool {
|
||||
return hasArg(a.args, b2s(key))
|
||||
}
|
||||
|
||||
// ErrNoArgValue is returned when Args value with the given key is missing.
|
||||
var ErrNoArgValue = errors.New("no Args value for the given key")
|
||||
|
||||
// GetUint returns uint value for the given key.
|
||||
func (a *Args) GetUint(key string) (int, error) {
|
||||
value := a.Peek(key)
|
||||
if len(value) == 0 {
|
||||
return -1, ErrNoArgValue
|
||||
}
|
||||
return ParseUint(value)
|
||||
}
|
||||
|
||||
// SetUint sets uint value for the given key.
|
||||
func (a *Args) SetUint(key string, value int) {
|
||||
bb := AcquireByteBuffer()
|
||||
bb.B = AppendUint(bb.B[:0], value)
|
||||
a.SetBytesV(key, bb.B)
|
||||
ReleaseByteBuffer(bb)
|
||||
}
|
||||
|
||||
// SetUintBytes sets uint value for the given key.
|
||||
func (a *Args) SetUintBytes(key []byte, value int) {
|
||||
a.SetUint(b2s(key), value)
|
||||
}
|
||||
|
||||
// GetUintOrZero returns uint value for the given key.
|
||||
//
|
||||
// Zero (0) is returned on error.
|
||||
func (a *Args) GetUintOrZero(key string) int {
|
||||
n, err := a.GetUint(key)
|
||||
if err != nil {
|
||||
n = 0
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// GetUfloat returns ufloat value for the given key.
|
||||
func (a *Args) GetUfloat(key string) (float64, error) {
|
||||
value := a.Peek(key)
|
||||
if len(value) == 0 {
|
||||
return -1, ErrNoArgValue
|
||||
}
|
||||
return ParseUfloat(value)
|
||||
}
|
||||
|
||||
// GetUfloatOrZero returns ufloat value for the given key.
|
||||
//
|
||||
// Zero (0) is returned on error.
|
||||
func (a *Args) GetUfloatOrZero(key string) float64 {
|
||||
f, err := a.GetUfloat(key)
|
||||
if err != nil {
|
||||
f = 0
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// GetBool returns boolean value for the given key.
|
||||
//
|
||||
// true is returned for '1', 'y' and 'yes' values,
|
||||
// otherwise false is returned.
|
||||
func (a *Args) GetBool(key string) bool {
|
||||
switch string(a.Peek(key)) {
|
||||
case "1", "y", "yes":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}

func visitArgs(args []argsKV, f func(k, v []byte)) {
	for i, n := 0, len(args); i < n; i++ {
		kv := &args[i]
		f(kv.key, kv.value)
	}
}

func copyArgs(dst, src []argsKV) []argsKV {
	if cap(dst) < len(src) {
		tmp := make([]argsKV, len(src))
		copy(tmp, dst)
		dst = tmp
	}
	n := len(src)
	dst = dst[:n]
	for i := 0; i < n; i++ {
		dstKV := &dst[i]
		srcKV := &src[i]
		dstKV.key = append(dstKV.key[:0], srcKV.key...)
		dstKV.value = append(dstKV.value[:0], srcKV.value...)
	}
	return dst
}

func delAllArgsBytes(args []argsKV, key []byte) []argsKV {
	return delAllArgs(args, b2s(key))
}

func delAllArgs(args []argsKV, key string) []argsKV {
	for i, n := 0, len(args); i < n; i++ {
		kv := &args[i]
		if key == string(kv.key) {
			tmp := *kv
			copy(args[i:], args[i+1:])
			n--
			args[n] = tmp
			args = args[:n]
		}
	}
	return args
}

func setArgBytes(h []argsKV, key, value []byte) []argsKV {
	return setArg(h, b2s(key), b2s(value))
}

func setArg(h []argsKV, key, value string) []argsKV {
	n := len(h)
	for i := 0; i < n; i++ {
		kv := &h[i]
		if key == string(kv.key) {
			kv.value = append(kv.value[:0], value...)
			return h
		}
	}
	return appendArg(h, key, value)
}

func appendArgBytes(h []argsKV, key, value []byte) []argsKV {
	return appendArg(h, b2s(key), b2s(value))
}

func appendArg(args []argsKV, key, value string) []argsKV {
	var kv *argsKV
	args, kv = allocArg(args)
	kv.key = append(kv.key[:0], key...)
	kv.value = append(kv.value[:0], value...)
	return args
}

func allocArg(h []argsKV) ([]argsKV, *argsKV) {
	n := len(h)
	if cap(h) > n {
		h = h[:n+1]
	} else {
		h = append(h, argsKV{})
	}
	return h, &h[n]
}

func releaseArg(h []argsKV) []argsKV {
	return h[:len(h)-1]
}

func hasArg(h []argsKV, key string) bool {
	for i, n := 0, len(h); i < n; i++ {
		kv := &h[i]
		if key == string(kv.key) {
			return true
		}
	}
	return false
}

func peekArgBytes(h []argsKV, k []byte) []byte {
	for i, n := 0, len(h); i < n; i++ {
		kv := &h[i]
		if bytes.Equal(kv.key, k) {
			return kv.value
		}
	}
	return nil
}

func peekArgStr(h []argsKV, k string) []byte {
	for i, n := 0, len(h); i < n; i++ {
		kv := &h[i]
		if string(kv.key) == k {
			return kv.value
		}
	}
	return nil
}

type argsScanner struct {
	b []byte
}

func (s *argsScanner) next(kv *argsKV) bool {
	if len(s.b) == 0 {
		return false
	}

	isKey := true
	k := 0
	for i, c := range s.b {
		switch c {
		case '=':
			if isKey {
				isKey = false
				kv.key = decodeArg(kv.key, s.b[:i], true)
				k = i + 1
			}
		case '&':
			if isKey {
				kv.key = decodeArg(kv.key, s.b[:i], true)
				kv.value = kv.value[:0]
			} else {
				kv.value = decodeArg(kv.value, s.b[k:i], true)
			}
			s.b = s.b[i+1:]
			return true
		}
	}

	if isKey {
		kv.key = decodeArg(kv.key, s.b, true)
		kv.value = kv.value[:0]
	} else {
		kv.value = decodeArg(kv.value, s.b[k:], true)
	}
	s.b = s.b[len(s.b):]
	return true
}

func decodeArg(dst, src []byte, decodePlus bool) []byte {
	return decodeArgAppend(dst[:0], src, decodePlus)
}

func decodeArgAppend(dst, src []byte, decodePlus bool) []byte {
	for i, n := 0, len(src); i < n; i++ {
		c := src[i]
		if c == '%' {
			if i+2 >= n {
				return append(dst, src[i:]...)
			}
			x1 := hexbyte2int(src[i+1])
			x2 := hexbyte2int(src[i+2])
			if x1 < 0 || x2 < 0 {
				dst = append(dst, c)
			} else {
				dst = append(dst, byte(x1<<4|x2))
				i += 2
			}
		} else if decodePlus && c == '+' {
			dst = append(dst, ' ')
		} else {
			dst = append(dst, c)
		}
	}
	return dst
}
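
Since decodeArgAppend is unexported, here is a self-contained sketch of the same percent-decoding rule (standalone illustrative code, not the library API): '%XX' pairs become raw bytes, '+' optionally becomes a space, and malformed escapes pass through unchanged.

package main

import "fmt"

func hexVal(c byte) int {
	switch {
	case c >= '0' && c <= '9':
		return int(c - '0')
	case c >= 'a' && c <= 'f':
		return int(c-'a') + 10
	case c >= 'A' && c <= 'F':
		return int(c-'A') + 10
	}
	return -1
}

func decode(src string, decodePlus bool) string {
	dst := make([]byte, 0, len(src))
	for i := 0; i < len(src); i++ {
		c := src[i]
		switch {
		case c == '%' && i+2 < len(src) && hexVal(src[i+1]) >= 0 && hexVal(src[i+2]) >= 0:
			dst = append(dst, byte(hexVal(src[i+1])<<4|hexVal(src[i+2])))
			i += 2
		case decodePlus && c == '+':
			dst = append(dst, ' ')
		default:
			dst = append(dst, c) // bad or truncated escapes are kept verbatim
		}
	}
	return string(dst)
}

func main() {
	fmt.Println(decode("a%20b+c", true)) // "a b c"
	fmt.Println(decode("a%2gb", true))   // "a%2gb" (bad escape kept)
}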
64 vendor/github.com/valyala/fasthttp/bytebuffer.go generated vendored
@@ -1,64 +0,0 @@
package fasthttp

import (
	"github.com/valyala/bytebufferpool"
)

// ByteBuffer provides byte buffer, which can be used with fasthttp API
// in order to minimize memory allocations.
//
// ByteBuffer may be used with functions appending data to the given []byte
// slice. See example code for details.
//
// Use AcquireByteBuffer for obtaining an empty byte buffer.
//
// ByteBuffer is deprecated. Use github.com/valyala/bytebufferpool instead.
type ByteBuffer bytebufferpool.ByteBuffer

// Write implements io.Writer - it appends p to ByteBuffer.B
func (b *ByteBuffer) Write(p []byte) (int, error) {
	return bb(b).Write(p)
}

// WriteString appends s to ByteBuffer.B
func (b *ByteBuffer) WriteString(s string) (int, error) {
	return bb(b).WriteString(s)
}

// Set sets ByteBuffer.B to p
func (b *ByteBuffer) Set(p []byte) {
	bb(b).Set(p)
}

// SetString sets ByteBuffer.B to s
func (b *ByteBuffer) SetString(s string) {
	bb(b).SetString(s)
}

// Reset makes ByteBuffer.B empty.
func (b *ByteBuffer) Reset() {
	bb(b).Reset()
}

// AcquireByteBuffer returns an empty byte buffer from the pool.
//
// Acquired byte buffer may be returned to the pool via ReleaseByteBuffer call.
// This reduces the number of memory allocations required for byte buffer
// management.
func AcquireByteBuffer() *ByteBuffer {
	return (*ByteBuffer)(defaultByteBufferPool.Get())
}

// ReleaseByteBuffer returns byte buffer to the pool.
//
// ByteBuffer.B mustn't be touched after returning it to the pool.
// Otherwise data races occur.
func ReleaseByteBuffer(b *ByteBuffer) {
	defaultByteBufferPool.Put(bb(b))
}

func bb(b *ByteBuffer) *bytebufferpool.ByteBuffer {
	return (*bytebufferpool.ByteBuffer)(b)
}

var defaultByteBufferPool bytebufferpool.Pool
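
A sketch of the acquire/use/release pattern the deprecation note above points to, using github.com/valyala/bytebufferpool directly:

package main

import (
	"fmt"

	"github.com/valyala/bytebufferpool"
)

func main() {
	b := bytebufferpool.Get()
	defer bytebufferpool.Put(b) // b.B must not be used after Put

	b.WriteString("hello, ")
	b.WriteString("world")
	fmt.Println(b.String())
}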
422 vendor/github.com/valyala/fasthttp/bytesconv.go generated vendored
@@ -1,422 +0,0 @@
package fasthttp

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"math"
	"net"
	"reflect"
	"sync"
	"time"
	"unsafe"
)

// AppendHTMLEscape appends html-escaped s to dst and returns the extended dst.
func AppendHTMLEscape(dst []byte, s string) []byte {
	var prev int
	var sub string
	for i, n := 0, len(s); i < n; i++ {
		sub = ""
		switch s[i] {
		case '<':
			sub = "&lt;"
		case '>':
			sub = "&gt;"
		case '"':
			sub = "&quot;"
		case '\'':
			sub = "&#39;"
		}
		if len(sub) > 0 {
			dst = append(dst, s[prev:i]...)
			dst = append(dst, sub...)
			prev = i + 1
		}
	}
	return append(dst, s[prev:]...)
}

// AppendHTMLEscapeBytes appends html-escaped s to dst and returns
// the extended dst.
func AppendHTMLEscapeBytes(dst, s []byte) []byte {
	return AppendHTMLEscape(dst, b2s(s))
}

// AppendIPv4 appends string representation of the given ip v4 to dst
// and returns the extended dst.
func AppendIPv4(dst []byte, ip net.IP) []byte {
	ip = ip.To4()
	if ip == nil {
		return append(dst, "non-v4 ip passed to AppendIPv4"...)
	}

	dst = AppendUint(dst, int(ip[0]))
	for i := 1; i < 4; i++ {
		dst = append(dst, '.')
		dst = AppendUint(dst, int(ip[i]))
	}
	return dst
}

var errEmptyIPStr = errors.New("empty ip address string")

// ParseIPv4 parses ip address from ipStr into dst and returns the extended dst.
func ParseIPv4(dst net.IP, ipStr []byte) (net.IP, error) {
	if len(ipStr) == 0 {
		return dst, errEmptyIPStr
	}
	if len(dst) < net.IPv4len {
		dst = make([]byte, net.IPv4len)
	}
	copy(dst, net.IPv4zero)
	dst = dst.To4()
	if dst == nil {
		panic("BUG: dst must not be nil")
	}

	b := ipStr
	for i := 0; i < 3; i++ {
		n := bytes.IndexByte(b, '.')
		if n < 0 {
			return dst, fmt.Errorf("cannot find dot in ipStr %q", ipStr)
		}
		v, err := ParseUint(b[:n])
		if err != nil {
			return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err)
		}
		if v > 255 {
			return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v)
		}
		dst[i] = byte(v)
		b = b[n+1:]
	}
	v, err := ParseUint(b)
	if err != nil {
		return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err)
	}
	if v > 255 {
		return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v)
	}
	dst[3] = byte(v)

	return dst, nil
}
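
Usage sketch: ParseIPv4 reuses dst to avoid allocating a fresh net.IP per call (pass nil on first use):

package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	ip, err := fasthttp.ParseIPv4(nil, []byte("192.168.1.10"))
	if err != nil {
		panic(err)
	}
	fmt.Println(ip) // 192.168.1.10

	// Reuse the same backing storage for the next parse.
	ip, _ = fasthttp.ParseIPv4(ip, []byte("10.0.0.1"))
	fmt.Println(ip) // 10.0.0.1
}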

// AppendHTTPDate appends HTTP-compliant (RFC1123) representation of date
// to dst and returns the extended dst.
func AppendHTTPDate(dst []byte, date time.Time) []byte {
	dst = date.In(time.UTC).AppendFormat(dst, time.RFC1123)
	copy(dst[len(dst)-3:], strGMT)
	return dst
}

// ParseHTTPDate parses HTTP-compliant (RFC1123) date.
func ParseHTTPDate(date []byte) (time.Time, error) {
	return time.Parse(time.RFC1123, b2s(date))
}
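
Round-trip sketch for the RFC1123 HTTP date helpers (note the GMT suffix required by HTTP, which AppendHTTPDate patches in over "UTC"):

package main

import (
	"fmt"
	"time"

	"github.com/valyala/fasthttp"
)

func main() {
	b := fasthttp.AppendHTTPDate(nil, time.Unix(0, 0))
	fmt.Println(string(b)) // Thu, 01 Jan 1970 00:00:00 GMT

	t, err := fasthttp.ParseHTTPDate(b)
	fmt.Println(t.UTC(), err)
}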

// AppendUint appends n to dst and returns the extended dst.
func AppendUint(dst []byte, n int) []byte {
	if n < 0 {
		panic("BUG: int must be positive")
	}

	var b [20]byte
	buf := b[:]
	i := len(buf)
	var q int
	for n >= 10 {
		i--
		q = n / 10
		buf[i] = '0' + byte(n-q*10)
		n = q
	}
	i--
	buf[i] = '0' + byte(n)

	dst = append(dst, buf[i:]...)
	return dst
}

// ParseUint parses uint from buf.
func ParseUint(buf []byte) (int, error) {
	v, n, err := parseUintBuf(buf)
	if n != len(buf) {
		return -1, errUnexpectedTrailingChar
	}
	return v, err
}

var (
	errEmptyInt               = errors.New("empty integer")
	errUnexpectedFirstChar    = errors.New("unexpected first char found. Expecting 0-9")
	errUnexpectedTrailingChar = errors.New("unexpected trailing char found. Expecting 0-9")
	errTooLongInt             = errors.New("too long int")
)

func parseUintBuf(b []byte) (int, int, error) {
	n := len(b)
	if n == 0 {
		return -1, 0, errEmptyInt
	}
	v := 0
	for i := 0; i < n; i++ {
		c := b[i]
		k := c - '0'
		if k > 9 {
			if i == 0 {
				return -1, i, errUnexpectedFirstChar
			}
			return v, i, nil
		}
		if i >= maxIntChars {
			return -1, i, errTooLongInt
		}
		v = 10*v + int(k)
	}
	return v, n, nil
}

var (
	errEmptyFloat           = errors.New("empty float number")
	errDuplicateFloatPoint  = errors.New("duplicate point found in float number")
	errUnexpectedFloatEnd   = errors.New("unexpected end of float number")
	errInvalidFloatExponent = errors.New("invalid float number exponent")
	errUnexpectedFloatChar  = errors.New("unexpected char found in float number")
)

// ParseUfloat parses unsigned float from buf.
func ParseUfloat(buf []byte) (float64, error) {
	if len(buf) == 0 {
		return -1, errEmptyFloat
	}
	b := buf
	var v uint64
	var offset = 1.0
	var pointFound bool
	for i, c := range b {
		if c < '0' || c > '9' {
			if c == '.' {
				if pointFound {
					return -1, errDuplicateFloatPoint
				}
				pointFound = true
				continue
			}
			if c == 'e' || c == 'E' {
				if i+1 >= len(b) {
					return -1, errUnexpectedFloatEnd
				}
				b = b[i+1:]
				minus := -1
				switch b[0] {
				case '+':
					b = b[1:]
					minus = 1
				case '-':
					b = b[1:]
				default:
					minus = 1
				}
				vv, err := ParseUint(b)
				if err != nil {
					return -1, errInvalidFloatExponent
				}
				return float64(v) * offset * math.Pow10(minus*int(vv)), nil
			}
			return -1, errUnexpectedFloatChar
		}
		v = 10*v + uint64(c-'0')
		if pointFound {
			offset /= 10
		}
	}
	return float64(v) * offset, nil
}
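
A sketch of ParseUfloat's behavior, including the mantissa/offset accumulation and exponent handling shown above:

package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	for _, s := range []string{"3.1415", "12e-2", "1.5E3", "-1", "1.2.3"} {
		v, err := fasthttp.ParseUfloat([]byte(s))
		fmt.Printf("%-8s -> %v, %v\n", s, v, err)
	}
	// "-1" fails (unsigned only); "1.2.3" fails with a duplicate-point error.
}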

var (
	errEmptyHexNum    = errors.New("empty hex number")
	errTooLargeHexNum = errors.New("too large hex number")
)

func readHexInt(r *bufio.Reader) (int, error) {
	n := 0
	i := 0
	var k int
	for {
		c, err := r.ReadByte()
		if err != nil {
			if err == io.EOF && i > 0 {
				return n, nil
			}
			return -1, err
		}
		k = hexbyte2int(c)
		if k < 0 {
			if i == 0 {
				return -1, errEmptyHexNum
			}
			r.UnreadByte()
			return n, nil
		}
		if i >= maxHexIntChars {
			return -1, errTooLargeHexNum
		}
		n = (n << 4) | k
		i++
	}
}

var hexIntBufPool sync.Pool

func writeHexInt(w *bufio.Writer, n int) error {
	if n < 0 {
		panic("BUG: int must be positive")
	}

	v := hexIntBufPool.Get()
	if v == nil {
		v = make([]byte, maxHexIntChars+1)
	}
	buf := v.([]byte)
	i := len(buf) - 1
	for {
		buf[i] = int2hexbyte(n & 0xf)
		n >>= 4
		if n == 0 {
			break
		}
		i--
	}
	_, err := w.Write(buf[i:])
	hexIntBufPool.Put(v)
	return err
}

func int2hexbyte(n int) byte {
	if n < 10 {
		return '0' + byte(n)
	}
	return 'a' + byte(n) - 10
}

func hexCharUpper(c byte) byte {
	if c < 10 {
		return '0' + c
	}
	return c - 10 + 'A'
}

var hex2intTable = func() []byte {
	b := make([]byte, 255)
	for i := byte(0); i < 255; i++ {
		c := byte(0)
		if i >= '0' && i <= '9' {
			c = 1 + i - '0'
		} else if i >= 'a' && i <= 'f' {
			c = 1 + i - 'a' + 10
		} else if i >= 'A' && i <= 'F' {
			c = 1 + i - 'A' + 10
		}
		b[i] = c
	}
	return b
}()

func hexbyte2int(c byte) int {
	return int(hex2intTable[c]) - 1
}
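
The table above stores value+1 so that a zero entry doubles as the "not a hex digit" sentinel; hexbyte2int subtracts 1 and so yields -1 for invalid bytes. A standalone sketch of the same trick:

package main

import "fmt"

// Table stores value+1; zero marks "not a hex digit", so the lookup
// can report invalid input without a per-character branch cascade.
var hexTable = func() [256]byte {
	var t [256]byte
	for c := '0'; c <= '9'; c++ {
		t[c] = byte(c-'0') + 1
	}
	for c := 'a'; c <= 'f'; c++ {
		t[c] = byte(c-'a') + 11
	}
	for c := 'A'; c <= 'F'; c++ {
		t[c] = byte(c-'A') + 11
	}
	return t
}()

func hexVal(c byte) int { return int(hexTable[c]) - 1 }

func main() {
	fmt.Println(hexVal('f')) // 15
	fmt.Println(hexVal('9')) // 9
	fmt.Println(hexVal('z')) // -1
}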

const toLower = 'a' - 'A'

func uppercaseByte(p *byte) {
	c := *p
	if c >= 'a' && c <= 'z' {
		*p = c - toLower
	}
}

func lowercaseByte(p *byte) {
	c := *p
	if c >= 'A' && c <= 'Z' {
		*p = c + toLower
	}
}

func lowercaseBytes(b []byte) {
	for i, n := 0, len(b); i < n; i++ {
		lowercaseByte(&b[i])
	}
}

// b2s converts byte slice to a string without memory allocation.
// See https://groups.google.com/forum/#!msg/Golang-Nuts/ENgbUzYvCuU/90yGx7GUAgAJ .
//
// Note it may break if string and/or slice header will change
// in the future go versions.
func b2s(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// s2b converts string to a byte slice without memory allocation.
//
// Note it may break if string and/or slice header will change
// in the future go versions.
func s2b(s string) []byte {
	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	bh := reflect.SliceHeader{
		Data: sh.Data,
		Len:  sh.Len,
		Cap:  sh.Len,
	}
	return *(*[]byte)(unsafe.Pointer(&bh))
}
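
A cautionary standalone sketch (not part of fasthttp) of why the zero-copy conversion is only safe while the byte slice is not mutated: the resulting string aliases the slice's backing array, so later writes to the slice are visible through the supposedly immutable string.

package main

import (
	"fmt"
	"unsafe"
)

// Same zero-copy trick as b2s above; for illustration only.
func b2s(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func main() {
	buf := []byte("hello")
	s := b2s(buf)
	buf[0] = 'H'   // mutates the bytes the string aliases
	fmt.Println(s) // "Hello": the "immutable" string changed
}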

// AppendQuotedArg appends url-encoded src to dst and returns appended dst.
func AppendQuotedArg(dst, src []byte) []byte {
	for _, c := range src {
		// See http://www.w3.org/TR/html5/forms.html#form-submission-algorithm
		if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
			c == '*' || c == '-' || c == '.' || c == '_' {
			dst = append(dst, c)
		} else {
			dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
		}
	}
	return dst
}
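
Usage sketch: AppendQuotedArg percent-encodes everything outside the unreserved set above (note that space becomes %20, not '+'):

package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	out := fasthttp.AppendQuotedArg(nil, []byte("a b&c=1"))
	fmt.Println(string(out)) // a%20b%26c%3D1
}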

func appendQuotedPath(dst, src []byte) []byte {
	for _, c := range src {
		if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
			c == '/' || c == '.' || c == ',' || c == '=' || c == ':' || c == '&' || c == '~' || c == '-' || c == '_' {
			dst = append(dst, c)
		} else {
			dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
		}
	}
	return dst
}

// EqualBytesStr returns true if string(b) == s.
//
// This function has no performance benefits compared to string(b) == s.
// It is left here for backwards compatibility only.
//
// This function is deprecated and may be deleted soon.
func EqualBytesStr(b []byte, s string) bool {
	return string(b) == s
}

// AppendBytesStr appends src to dst and returns the extended dst.
//
// This function has no performance benefits compared to append(dst, src...).
// It is left here for backwards compatibility only.
//
// This function is deprecated and may be deleted soon.
func AppendBytesStr(dst []byte, src string) []byte {
	return append(dst, src...)
}
8 vendor/github.com/valyala/fasthttp/bytesconv_32.go generated vendored
@@ -1,8 +0,0 @@
// +build !amd64,!arm64,!ppc64

package fasthttp

const (
	maxIntChars    = 9
	maxHexIntChars = 7
)
8 vendor/github.com/valyala/fasthttp/bytesconv_64.go generated vendored
@@ -1,8 +0,0 @@
// +build amd64 arm64 ppc64

package fasthttp

const (
	maxIntChars    = 18
	maxHexIntChars = 15
)
2160 vendor/github.com/valyala/fasthttp/client.go generated vendored
File diff suppressed because it is too large
28 vendor/github.com/valyala/fasthttp/coarseTime.go generated vendored
@@ -1,28 +0,0 @@
package fasthttp

import (
	"sync/atomic"
	"time"
)

// CoarseTimeNow returns the current time truncated to the nearest second.
//
// This is a faster alternative to time.Now().
func CoarseTimeNow() time.Time {
	tp := coarseTime.Load().(*time.Time)
	return *tp
}

func init() {
	t := time.Now().Truncate(time.Second)
	coarseTime.Store(&t)
	go func() {
		for {
			time.Sleep(time.Second)
			t := time.Now().Truncate(time.Second)
			coarseTime.Store(&t)
		}
	}()
}

var coarseTime atomic.Value
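
A standalone sketch of the coarse-clock pattern used above: one background goroutine refreshes a cached timestamp, and readers pay only an atomic load instead of a full time.Now() call.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var coarse atomic.Value

func init() {
	t := time.Now().Truncate(time.Second)
	coarse.Store(&t)
	go func() {
		for range time.Tick(time.Second) {
			t := time.Now().Truncate(time.Second)
			coarse.Store(&t)
		}
	}()
}

func now() time.Time { return *coarse.Load().(*time.Time) }

func main() {
	fmt.Println(now()) // at most ~1s stale
}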
291 vendor/github.com/valyala/fasthttp/compress.go generated vendored
@@ -1,291 +0,0 @@
package fasthttp

import (
	"fmt"
	"io"
	"os"
	"sync"

	"github.com/klauspost/compress/flate"
	"github.com/klauspost/compress/gzip"
	"github.com/klauspost/compress/zlib"
	"github.com/valyala/fasthttp/stackless"
)

// Supported compression levels.
const (
	CompressNoCompression      = flate.NoCompression
	CompressBestSpeed          = flate.BestSpeed
	CompressBestCompression    = flate.BestCompression
	CompressDefaultCompression = flate.DefaultCompression
)

func acquireGzipReader(r io.Reader) (*gzip.Reader, error) {
	v := gzipReaderPool.Get()
	if v == nil {
		return gzip.NewReader(r)
	}
	zr := v.(*gzip.Reader)
	if err := zr.Reset(r); err != nil {
		return nil, err
	}
	return zr, nil
}

func releaseGzipReader(zr *gzip.Reader) {
	zr.Close()
	gzipReaderPool.Put(zr)
}

var gzipReaderPool sync.Pool

func acquireFlateReader(r io.Reader) (io.ReadCloser, error) {
	v := flateReaderPool.Get()
	if v == nil {
		zr, err := zlib.NewReader(r)
		if err != nil {
			return nil, err
		}
		return zr, nil
	}
	zr := v.(io.ReadCloser)
	if err := resetFlateReader(zr, r); err != nil {
		return nil, err
	}
	return zr, nil
}

func releaseFlateReader(zr io.ReadCloser) {
	zr.Close()
	flateReaderPool.Put(zr)
}

func resetFlateReader(zr io.ReadCloser, r io.Reader) error {
	zrr, ok := zr.(zlib.Resetter)
	if !ok {
		panic("BUG: zlib.Reader doesn't implement zlib.Resetter???")
	}
	return zrr.Reset(r, nil)
}

var flateReaderPool sync.Pool

func acquireGzipWriter(w io.Writer, level int) *gzipWriter {
	p := gzipWriterPoolMap[level]
	if p == nil {
		panic(fmt.Sprintf("BUG: unexpected compression level passed: %d. See compress/gzip for supported levels", level))
	}

	v := p.Get()
	if v == nil {
		sw := stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
			zw, err := gzip.NewWriterLevel(w, level)
			if err != nil {
				panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err))
			}
			return zw
		})
		return &gzipWriter{
			Writer: sw,
			p:      p,
		}
	}
	zw := v.(*gzipWriter)
	zw.Reset(w)
	return zw
}

func releaseGzipWriter(zw *gzipWriter) {
	zw.Close()
	zw.p.Put(zw)
}

type gzipWriter struct {
	stackless.Writer
	p *sync.Pool
}

var gzipWriterPoolMap = func() map[int]*sync.Pool {
	// Initialize pools for all the compression levels defined
	// in https://golang.org/pkg/compress/gzip/#pkg-constants .
	m := make(map[int]*sync.Pool, 11)
	m[-1] = &sync.Pool{}
	for i := 0; i < 10; i++ {
		m[i] = &sync.Pool{}
	}
	return m
}()

// AppendGzipBytesLevel appends gzipped src to dst using the given
// compression level and returns the resulting dst.
//
// Supported compression levels are:
//
// * CompressNoCompression
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
	w := &byteSliceWriter{dst}
	WriteGzipLevel(w, src, level)
	return w.b
}

// WriteGzipLevel writes gzipped p to w using the given compression level
// and returns the number of compressed bytes written to w.
//
// Supported compression levels are:
//
// * CompressNoCompression
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) {
	zw := acquireGzipWriter(w, level)
	n, err := zw.Write(p)
	releaseGzipWriter(zw)
	return n, err
}

// WriteGzip writes gzipped p to w and returns the number of compressed
// bytes written to w.
func WriteGzip(w io.Writer, p []byte) (int, error) {
	return WriteGzipLevel(w, p, CompressDefaultCompression)
}

// AppendGzipBytes appends gzipped src to dst and returns the resulting dst.
func AppendGzipBytes(dst, src []byte) []byte {
	return AppendGzipBytesLevel(dst, src, CompressDefaultCompression)
}

// WriteGunzip writes ungzipped p to w and returns the number of uncompressed
// bytes written to w.
func WriteGunzip(w io.Writer, p []byte) (int, error) {
	r := &byteSliceReader{p}
	zr, err := acquireGzipReader(r)
	if err != nil {
		return 0, err
	}
	n, err := copyZeroAlloc(w, zr)
	releaseGzipReader(zr)
	nn := int(n)
	if int64(nn) != n {
		return 0, fmt.Errorf("too much data gunzipped: %d", n)
	}
	return nn, err
}

// WriteInflate writes inflated p to w and returns the number of uncompressed
// bytes written to w.
func WriteInflate(w io.Writer, p []byte) (int, error) {
	r := &byteSliceReader{p}
	zr, err := acquireFlateReader(r)
	if err != nil {
		return 0, err
	}
	n, err := copyZeroAlloc(w, zr)
	releaseFlateReader(zr)
	nn := int(n)
	if int64(nn) != n {
		return 0, fmt.Errorf("too much data inflated: %d", n)
	}
	return nn, err
}

// AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst.
func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
	w := &byteSliceWriter{dst}
	_, err := WriteGunzip(w, src)
	return w.b, err
}
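
A round-trip sketch for the append-style gzip helpers:

package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	compressed := fasthttp.AppendGzipBytes(nil, []byte("hello, fasthttp"))

	plain, err := fasthttp.AppendGunzipBytes(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain)) // hello, fasthttp
}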

type byteSliceWriter struct {
	b []byte
}

func (w *byteSliceWriter) Write(p []byte) (int, error) {
	w.b = append(w.b, p...)
	return len(p), nil
}

type byteSliceReader struct {
	b []byte
}

func (r *byteSliceReader) Read(p []byte) (int, error) {
	if len(r.b) == 0 {
		return 0, io.EOF
	}
	n := copy(p, r.b)
	r.b = r.b[n:]
	return n, nil
}

func acquireFlateWriter(w io.Writer, level int) *flateWriter {
	p := flateWriterPoolMap[level]
	if p == nil {
		panic(fmt.Sprintf("BUG: unexpected compression level passed: %d. See compress/flate for supported levels", level))
	}

	v := p.Get()
	if v == nil {
		sw := stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
			zw, err := zlib.NewWriterLevel(w, level)
			if err != nil {
				panic(fmt.Sprintf("BUG: unexpected error in zlib.NewWriterLevel(%d): %s", level, err))
			}
			return zw
		})
		return &flateWriter{
			Writer: sw,
			p:      p,
		}
	}
	zw := v.(*flateWriter)
	zw.Reset(w)
	return zw
}

func releaseFlateWriter(zw *flateWriter) {
	zw.Close()
	zw.p.Put(zw)
}

type flateWriter struct {
	stackless.Writer
	p *sync.Pool
}

var flateWriterPoolMap = func() map[int]*sync.Pool {
	// Initialize pools for all the compression levels defined
	// in https://golang.org/pkg/compress/flate/#pkg-constants .
	m := make(map[int]*sync.Pool, 11)
	m[-1] = &sync.Pool{}
	for i := 0; i < 10; i++ {
		m[i] = &sync.Pool{}
	}
	return m
}()

func isFileCompressible(f *os.File, minCompressRatio float64) bool {
	// Try compressing the first 4kb of the file
	// and see if it can be compressed by more than
	// the given minCompressRatio.
	b := AcquireByteBuffer()
	zw := acquireGzipWriter(b, CompressDefaultCompression)
	lr := &io.LimitedReader{
		R: f,
		N: 4096,
	}
	_, err := copyZeroAlloc(zw, lr)
	releaseGzipWriter(zw)
	f.Seek(0, 0)
	if err != nil {
		return false
	}

	n := 4096 - lr.N
	zn := len(b.B)
	ReleaseByteBuffer(b)
	return float64(zn) < float64(n)*minCompressRatio
}
396 vendor/github.com/valyala/fasthttp/cookie.go generated vendored
@@ -1,396 +0,0 @@
package fasthttp

import (
	"bytes"
	"errors"
	"io"
	"sync"
	"time"
)

var zeroTime time.Time

var (
	// CookieExpireDelete may be set on Cookie.Expire for expiring the given cookie.
	CookieExpireDelete = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)

	// CookieExpireUnlimited indicates that the cookie doesn't expire.
	CookieExpireUnlimited = zeroTime
)

// AcquireCookie returns an empty Cookie object from the pool.
//
// The returned object may be returned back to the pool with ReleaseCookie.
// This allows reducing GC load.
func AcquireCookie() *Cookie {
	return cookiePool.Get().(*Cookie)
}

// ReleaseCookie returns the Cookie object acquired with AcquireCookie back
// to the pool.
//
// Do not access released Cookie object, otherwise data races may occur.
func ReleaseCookie(c *Cookie) {
	c.Reset()
	cookiePool.Put(c)
}

var cookiePool = &sync.Pool{
	New: func() interface{} {
		return &Cookie{}
	},
}

// Cookie represents HTTP response cookie.
//
// Do not copy Cookie objects. Create new object and use CopyTo instead.
//
// Cookie instance MUST NOT be used from concurrently running goroutines.
type Cookie struct {
	noCopy noCopy

	key    []byte
	value  []byte
	expire time.Time
	domain []byte
	path   []byte

	httpOnly bool
	secure   bool

	bufKV argsKV
	buf   []byte
}

// CopyTo copies src cookie to c.
func (c *Cookie) CopyTo(src *Cookie) {
	c.Reset()
	c.key = append(c.key[:0], src.key...)
	c.value = append(c.value[:0], src.value...)
	c.expire = src.expire
	c.domain = append(c.domain[:0], src.domain...)
	c.path = append(c.path[:0], src.path...)
	c.httpOnly = src.httpOnly
	c.secure = src.secure
}

// HTTPOnly returns true if the cookie is http only.
func (c *Cookie) HTTPOnly() bool {
	return c.httpOnly
}

// SetHTTPOnly sets cookie's httpOnly flag to the given value.
func (c *Cookie) SetHTTPOnly(httpOnly bool) {
	c.httpOnly = httpOnly
}

// Secure returns true if the cookie is secure.
func (c *Cookie) Secure() bool {
	return c.secure
}

// SetSecure sets cookie's secure flag to the given value.
func (c *Cookie) SetSecure(secure bool) {
	c.secure = secure
}

// Path returns cookie path.
func (c *Cookie) Path() []byte {
	return c.path
}

// SetPath sets cookie path.
func (c *Cookie) SetPath(path string) {
	c.buf = append(c.buf[:0], path...)
	c.path = normalizePath(c.path, c.buf)
}

// SetPathBytes sets cookie path.
func (c *Cookie) SetPathBytes(path []byte) {
	c.buf = append(c.buf[:0], path...)
	c.path = normalizePath(c.path, c.buf)
}

// Domain returns cookie domain.
//
// The returned domain is valid until the next Cookie modification method call.
func (c *Cookie) Domain() []byte {
	return c.domain
}

// SetDomain sets cookie domain.
func (c *Cookie) SetDomain(domain string) {
	c.domain = append(c.domain[:0], domain...)
}

// SetDomainBytes sets cookie domain.
func (c *Cookie) SetDomainBytes(domain []byte) {
	c.domain = append(c.domain[:0], domain...)
}

// Expire returns cookie expiration time.
//
// CookieExpireUnlimited is returned if cookie doesn't expire.
func (c *Cookie) Expire() time.Time {
	expire := c.expire
	if expire.IsZero() {
		expire = CookieExpireUnlimited
	}
	return expire
}

// SetExpire sets cookie expiration time.
//
// Set expiration time to CookieExpireDelete for expiring (deleting)
// the cookie on the client.
//
// By default cookie lifetime is limited by browser session.
func (c *Cookie) SetExpire(expire time.Time) {
	c.expire = expire
}

// Value returns cookie value.
//
// The returned value is valid until the next Cookie modification method call.
func (c *Cookie) Value() []byte {
	return c.value
}

// SetValue sets cookie value.
func (c *Cookie) SetValue(value string) {
	c.value = append(c.value[:0], value...)
}

// SetValueBytes sets cookie value.
func (c *Cookie) SetValueBytes(value []byte) {
	c.value = append(c.value[:0], value...)
}

// Key returns cookie name.
//
// The returned value is valid until the next Cookie modification method call.
func (c *Cookie) Key() []byte {
	return c.key
}

// SetKey sets cookie name.
func (c *Cookie) SetKey(key string) {
	c.key = append(c.key[:0], key...)
}

// SetKeyBytes sets cookie name.
func (c *Cookie) SetKeyBytes(key []byte) {
	c.key = append(c.key[:0], key...)
}

// Reset clears the cookie.
func (c *Cookie) Reset() {
	c.key = c.key[:0]
	c.value = c.value[:0]
	c.expire = zeroTime
	c.domain = c.domain[:0]
	c.path = c.path[:0]
	c.httpOnly = false
	c.secure = false
}

// AppendBytes appends cookie representation to dst and returns
// the extended dst.
func (c *Cookie) AppendBytes(dst []byte) []byte {
	if len(c.key) > 0 {
		dst = append(dst, c.key...)
		dst = append(dst, '=')
	}
	dst = append(dst, c.value...)

	if !c.expire.IsZero() {
		c.bufKV.value = AppendHTTPDate(c.bufKV.value[:0], c.expire)
		dst = append(dst, ';', ' ')
		dst = append(dst, strCookieExpires...)
		dst = append(dst, '=')
		dst = append(dst, c.bufKV.value...)
	}
	if len(c.domain) > 0 {
		dst = appendCookiePart(dst, strCookieDomain, c.domain)
	}
	if len(c.path) > 0 {
		dst = appendCookiePart(dst, strCookiePath, c.path)
	}
	if c.httpOnly {
		dst = append(dst, ';', ' ')
		dst = append(dst, strCookieHTTPOnly...)
	}
	if c.secure {
		dst = append(dst, ';', ' ')
		dst = append(dst, strCookieSecure...)
	}
	return dst
}

// Cookie returns cookie representation.
//
// The returned value is valid until the next call to Cookie methods.
func (c *Cookie) Cookie() []byte {
	c.buf = c.AppendBytes(c.buf[:0])
	return c.buf
}

// String returns cookie representation.
func (c *Cookie) String() string {
	return string(c.Cookie())
}

// WriteTo writes cookie representation to w.
//
// WriteTo implements io.WriterTo interface.
func (c *Cookie) WriteTo(w io.Writer) (int64, error) {
	n, err := w.Write(c.Cookie())
	return int64(n), err
}

var errNoCookies = errors.New("no cookies found")

// Parse parses Set-Cookie header.
func (c *Cookie) Parse(src string) error {
	c.buf = append(c.buf[:0], src...)
	return c.ParseBytes(c.buf)
}

// ParseBytes parses Set-Cookie header.
func (c *Cookie) ParseBytes(src []byte) error {
	c.Reset()

	var s cookieScanner
	s.b = src

	kv := &c.bufKV
	if !s.next(kv) {
		return errNoCookies
	}

	c.key = append(c.key[:0], kv.key...)
	c.value = append(c.value[:0], kv.value...)

	for s.next(kv) {
		if len(kv.key) == 0 && len(kv.value) == 0 {
			continue
		}
		switch string(kv.key) {
		case "expires":
			v := b2s(kv.value)
			exptime, err := time.ParseInLocation(time.RFC1123, v, time.UTC)
			if err != nil {
				return err
			}
			c.expire = exptime
		case "domain":
			c.domain = append(c.domain[:0], kv.value...)
		case "path":
			c.path = append(c.path[:0], kv.value...)
		case "":
			switch string(kv.value) {
			case "HttpOnly":
				c.httpOnly = true
			case "secure":
				c.secure = true
			}
		}
	}
	return nil
}
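
A usage sketch: building and parsing a Set-Cookie header with the pooled Cookie object (key and value here are hypothetical):

package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	c := fasthttp.AcquireCookie()
	defer fasthttp.ReleaseCookie(c)

	c.SetKey("session")
	c.SetValue("abc123")
	c.SetHTTPOnly(true)
	fmt.Println(c.String()) // session=abc123; HttpOnly

	var parsed fasthttp.Cookie
	if err := parsed.Parse("token=xyz; path=/; secure"); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s path=%s secure=%v\n",
		parsed.Key(), parsed.Value(), parsed.Path(), parsed.Secure())
}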

func appendCookiePart(dst, key, value []byte) []byte {
	dst = append(dst, ';', ' ')
	dst = append(dst, key...)
	dst = append(dst, '=')
	return append(dst, value...)
}

func getCookieKey(dst, src []byte) []byte {
	n := bytes.IndexByte(src, '=')
	if n >= 0 {
		src = src[:n]
	}
	return decodeCookieArg(dst, src, false)
}

func appendRequestCookieBytes(dst []byte, cookies []argsKV) []byte {
	for i, n := 0, len(cookies); i < n; i++ {
		kv := &cookies[i]
		if len(kv.key) > 0 {
			dst = append(dst, kv.key...)
			dst = append(dst, '=')
		}
		dst = append(dst, kv.value...)
		if i+1 < n {
			dst = append(dst, ';', ' ')
		}
	}
	return dst
}

func parseRequestCookies(cookies []argsKV, src []byte) []argsKV {
	var s cookieScanner
	s.b = src
	var kv *argsKV
	cookies, kv = allocArg(cookies)
	for s.next(kv) {
		if len(kv.key) > 0 || len(kv.value) > 0 {
			cookies, kv = allocArg(cookies)
		}
	}
	return releaseArg(cookies)
}

type cookieScanner struct {
	b []byte
}

func (s *cookieScanner) next(kv *argsKV) bool {
	b := s.b
	if len(b) == 0 {
		return false
	}

	isKey := true
	k := 0
	for i, c := range b {
		switch c {
		case '=':
			if isKey {
				isKey = false
				kv.key = decodeCookieArg(kv.key, b[:i], false)
				k = i + 1
			}
		case ';':
			if isKey {
				kv.key = kv.key[:0]
			}
			kv.value = decodeCookieArg(kv.value, b[k:i], true)
			s.b = b[i+1:]
			return true
		}
	}

	if isKey {
		kv.key = kv.key[:0]
	}
	kv.value = decodeCookieArg(kv.value, b[k:], true)
	s.b = b[len(b):]
	return true
}

func decodeCookieArg(dst, src []byte, skipQuotes bool) []byte {
	for len(src) > 0 && src[0] == ' ' {
		src = src[1:]
	}
	for len(src) > 0 && src[len(src)-1] == ' ' {
		src = src[:len(src)-1]
	}
	if skipQuotes {
		if len(src) > 1 && src[0] == '"' && src[len(src)-1] == '"' {
			src = src[1 : len(src)-1]
		}
	}
	return append(dst[:0], src...)
}
40 vendor/github.com/valyala/fasthttp/doc.go generated vendored
@@ -1,40 +0,0 @@
/*
Package fasthttp provides fast HTTP server and client API.

Fasthttp provides the following features:

  * Optimized for speed. Easily handles more than 100K qps and more than 1M
    concurrent keep-alive connections on modern hardware.
  * Optimized for low memory usage.
  * Easy 'Connection: Upgrade' support via RequestCtx.Hijack.
  * Server supports requests' pipelining. Multiple requests may be read from
    a single network packet and multiple responses may be sent in a single
    network packet. This may be useful for highly loaded REST services.
  * Server provides the following anti-DoS limits:

      * The number of concurrent connections.
      * The number of concurrent connections per client IP.
      * The number of requests per connection.
      * Request read timeout.
      * Response write timeout.
      * Maximum request header size.
      * Maximum request body size.
      * Maximum request execution time.
      * Maximum keep-alive connection lifetime.
      * Early filtering out non-GET requests.

  * A lot of additional useful info is exposed to request handler:

      * Server and client address.
      * Per-request logger.
      * Unique request id.
      * Request start time.
      * Connection start time.
      * Request sequence number for the current connection.

  * Client supports automatic retry on idempotent requests' failure.
  * Fasthttp API is designed with the ability to extend existing client
    and server implementations or to write custom client and server
    implementations from scratch.
*/
package fasthttp
2 vendor/github.com/valyala/fasthttp/fasthttputil/doc.go generated vendored
@@ -1,2 +0,0 @@
// Package fasthttputil provides utility functions for fasthttp.
package fasthttputil
84 vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go generated vendored
@@ -1,84 +0,0 @@
package fasthttputil

import (
	"fmt"
	"net"
	"sync"
)

// InmemoryListener provides in-memory dialer<->net.Listener implementation.
//
// It may be used either for fast in-process client<->server communications
// without network stack overhead or for client<->server tests.
type InmemoryListener struct {
	lock   sync.Mutex
	closed bool
	conns  chan net.Conn
}

// NewInmemoryListener returns new in-memory dialer<->net.Listener.
func NewInmemoryListener() *InmemoryListener {
	return &InmemoryListener{
		conns: make(chan net.Conn, 1024),
	}
}

// Accept implements net.Listener's Accept.
//
// It is safe calling Accept from concurrently running goroutines.
//
// Accept returns new connection per each Dial call.
func (ln *InmemoryListener) Accept() (net.Conn, error) {
	c, ok := <-ln.conns
	if !ok {
		return nil, fmt.Errorf("InmemoryListener is already closed: use of closed network connection")
	}
	return c, nil
}

// Close implements net.Listener's Close.
func (ln *InmemoryListener) Close() error {
	var err error

	ln.lock.Lock()
	if !ln.closed {
		close(ln.conns)
		ln.closed = true
	} else {
		err = fmt.Errorf("InmemoryListener is already closed")
	}
	ln.lock.Unlock()
	return err
}

// Addr implements net.Listener's Addr.
func (ln *InmemoryListener) Addr() net.Addr {
	return &net.UnixAddr{
		Name: "InmemoryListener",
		Net:  "memory",
	}
}

// Dial creates new client<->server connection, enqueues server side
// of the connection to Accept and returns client side of the connection.
//
// It is safe calling Dial from concurrently running goroutines.
func (ln *InmemoryListener) Dial() (net.Conn, error) {
	pc := NewPipeConns()
	cConn := pc.Conn1()
	sConn := pc.Conn2()
	ln.lock.Lock()
	if !ln.closed {
		ln.conns <- sConn
	} else {
		sConn.Close()
		cConn.Close()
		cConn = nil
	}
	ln.lock.Unlock()

	if cConn == nil {
		return nil, fmt.Errorf("InmemoryListener is already closed")
	}
	return cConn, nil
}
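
A usage sketch: serving fasthttp over the in-memory listener in a test, with no TCP socket involved (the request/response handling here is deliberately minimal):

package main

import (
	"bufio"
	"fmt"

	"github.com/valyala/fasthttp"
	"github.com/valyala/fasthttp/fasthttputil"
)

func main() {
	ln := fasthttputil.NewInmemoryListener()
	defer ln.Close()

	go fasthttp.Serve(ln, func(ctx *fasthttp.RequestCtx) {
		fmt.Fprint(ctx, "ok")
	})

	conn, err := ln.Dial()
	if err != nil {
		panic(err)
	}
	fmt.Fprint(conn, "GET / HTTP/1.1\r\nHost: test\r\n\r\n")

	status, _ := bufio.NewReader(conn).ReadString('\n')
	fmt.Print(status) // HTTP/1.1 200 OK
}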
283 vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go generated vendored
@@ -1,283 +0,0 @@
package fasthttputil

import (
	"errors"
	"io"
	"net"
	"sync"
	"time"
)

// NewPipeConns returns new bi-directional connection pipe.
func NewPipeConns() *PipeConns {
	ch1 := make(chan *byteBuffer, 4)
	ch2 := make(chan *byteBuffer, 4)

	pc := &PipeConns{
		stopCh: make(chan struct{}),
	}
	pc.c1.rCh = ch1
	pc.c1.wCh = ch2
	pc.c2.rCh = ch2
	pc.c2.wCh = ch1
	pc.c1.pc = pc
	pc.c2.pc = pc
	return pc
}

// PipeConns provides bi-directional connection pipe,
// which uses in-process memory as a transport.
//
// PipeConns must be created by calling NewPipeConns.
//
// PipeConns has the following additional features compared to connections
// returned from net.Pipe():
//
//   * It is faster.
//   * It buffers Write calls, so there is no need to have concurrent goroutine
//     calling Read in order to unblock each Write call.
//   * It supports read and write deadlines.
//
type PipeConns struct {
	c1         pipeConn
	c2         pipeConn
	stopCh     chan struct{}
	stopChLock sync.Mutex
}

// Conn1 returns the first end of bi-directional pipe.
//
// Data written to Conn1 may be read from Conn2.
// Data written to Conn2 may be read from Conn1.
func (pc *PipeConns) Conn1() net.Conn {
	return &pc.c1
}

// Conn2 returns the second end of bi-directional pipe.
//
// Data written to Conn2 may be read from Conn1.
// Data written to Conn1 may be read from Conn2.
func (pc *PipeConns) Conn2() net.Conn {
	return &pc.c2
}

// Close closes pipe connections.
func (pc *PipeConns) Close() error {
	pc.stopChLock.Lock()
	select {
	case <-pc.stopCh:
	default:
		close(pc.stopCh)
	}
	pc.stopChLock.Unlock()

	return nil
}

type pipeConn struct {
	b  *byteBuffer
	bb []byte

	rCh chan *byteBuffer
	wCh chan *byteBuffer
	pc  *PipeConns

	readDeadlineTimer  *time.Timer
	writeDeadlineTimer *time.Timer

	readDeadlineCh  <-chan time.Time
	writeDeadlineCh <-chan time.Time
}

func (c *pipeConn) Write(p []byte) (int, error) {
	b := acquireByteBuffer()
	b.b = append(b.b[:0], p...)

	select {
	case <-c.pc.stopCh:
		releaseByteBuffer(b)
		return 0, errConnectionClosed
	default:
	}

	select {
	case c.wCh <- b:
	default:
		select {
		case c.wCh <- b:
		case <-c.writeDeadlineCh:
			c.writeDeadlineCh = closedDeadlineCh
			return 0, ErrTimeout
		case <-c.pc.stopCh:
			releaseByteBuffer(b)
			return 0, errConnectionClosed
		}
	}

	return len(p), nil
}

func (c *pipeConn) Read(p []byte) (int, error) {
	mayBlock := true
	nn := 0
	for len(p) > 0 {
		n, err := c.read(p, mayBlock)
		nn += n
		if err != nil {
			if !mayBlock && err == errWouldBlock {
				err = nil
			}
			return nn, err
		}
		p = p[n:]
		mayBlock = false
	}

	return nn, nil
}

func (c *pipeConn) read(p []byte, mayBlock bool) (int, error) {
	if len(c.bb) == 0 {
		if err := c.readNextByteBuffer(mayBlock); err != nil {
			return 0, err
		}
	}
	n := copy(p, c.bb)
	c.bb = c.bb[n:]

	return n, nil
}

func (c *pipeConn) readNextByteBuffer(mayBlock bool) error {
	releaseByteBuffer(c.b)
	c.b = nil

	select {
	case c.b = <-c.rCh:
	default:
		if !mayBlock {
			return errWouldBlock
		}
		select {
		case c.b = <-c.rCh:
		case <-c.readDeadlineCh:
			c.readDeadlineCh = closedDeadlineCh
			// rCh may contain data when deadline is reached.
			// Read the data before returning ErrTimeout.
			select {
			case c.b = <-c.rCh:
			default:
				return ErrTimeout
			}
		case <-c.pc.stopCh:
			// rCh may contain data when stopCh is closed.
			// Read the data before returning EOF.
			select {
			case c.b = <-c.rCh:
			default:
				return io.EOF
			}
		}
	}

	c.bb = c.b.b
	return nil
}

var (
	errWouldBlock       = errors.New("would block")
	errConnectionClosed = errors.New("connection closed")

	// ErrTimeout is returned from Read() or Write() on timeout.
	ErrTimeout = errors.New("timeout")
)

func (c *pipeConn) Close() error {
	return c.pc.Close()
}

func (c *pipeConn) LocalAddr() net.Addr {
	return pipeAddr(0)
}

func (c *pipeConn) RemoteAddr() net.Addr {
	return pipeAddr(0)
}

func (c *pipeConn) SetDeadline(deadline time.Time) error {
	c.SetReadDeadline(deadline)
	c.SetWriteDeadline(deadline)
	return nil
}

func (c *pipeConn) SetReadDeadline(deadline time.Time) error {
	if c.readDeadlineTimer == nil {
		c.readDeadlineTimer = time.NewTimer(time.Hour)
	}
	c.readDeadlineCh = updateTimer(c.readDeadlineTimer, deadline)
	return nil
}

func (c *pipeConn) SetWriteDeadline(deadline time.Time) error {
	if c.writeDeadlineTimer == nil {
		c.writeDeadlineTimer = time.NewTimer(time.Hour)
	}
	c.writeDeadlineCh = updateTimer(c.writeDeadlineTimer, deadline)
	return nil
}

func updateTimer(t *time.Timer, deadline time.Time) <-chan time.Time {
	if !t.Stop() {
		select {
		case <-t.C:
		default:
		}
	}
	if deadline.IsZero() {
		return nil
	}
	d := -time.Since(deadline)
	if d <= 0 {
		return closedDeadlineCh
	}
	t.Reset(d)
	return t.C
}

var closedDeadlineCh = func() <-chan time.Time {
	ch := make(chan time.Time)
	close(ch)
	return ch
}()

type pipeAddr int

func (pipeAddr) Network() string {
	return "pipe"
}

func (pipeAddr) String() string {
	return "pipe"
}

type byteBuffer struct {
	b []byte
}

func acquireByteBuffer() *byteBuffer {
	return byteBufferPool.Get().(*byteBuffer)
}

func releaseByteBuffer(b *byteBuffer) {
	if b != nil {
		byteBufferPool.Put(b)
	}
}

var byteBufferPool = &sync.Pool{
	New: func() interface{} {
		return &byteBuffer{
			b: make([]byte, 1024),
		}
	},
}
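
A usage sketch: because PipeConns buffers writes in a channel, a single goroutine can write on one end and then read from the other, unlike net.Pipe():

package main

import (
	"fmt"

	"github.com/valyala/fasthttp/fasthttputil"
)

func main() {
	pc := fasthttputil.NewPipeConns()
	c1, c2 := pc.Conn1(), pc.Conn2()
	defer pc.Close()

	c1.Write([]byte("ping")) // does not block: buffered in a channel

	buf := make([]byte, 4)
	n, _ := c2.Read(buf)
	fmt.Println(string(buf[:n])) // ping
}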
28 vendor/github.com/valyala/fasthttp/fasthttputil/ssl-cert-snakeoil.key generated vendored
@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG
3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U
wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0
FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf
IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg
GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF
sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2
sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D
uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb
K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3
YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+
DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk
B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV
Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x
IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY
wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj
wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D
FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m
tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX
fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU
ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk
K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT
6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt
9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN
Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV
c257YgaWmjK9uB0Y2r2VxS0G
-----END PRIVATE KEY-----
17 vendor/github.com/valyala/fasthttp/fasthttputil/ssl-cert-snakeoil.pem generated vendored
@@ -1,17 +0,0 @@
-----BEGIN CERTIFICATE-----
MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV
BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV
MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D
K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te
+z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij
L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1
xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY
6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG
SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98
L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2
45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li
K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6
X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI
whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd
-----END CERTIFICATE-----
1257 vendor/github.com/valyala/fasthttp/fs.go generated vendored
File diff suppressed because it is too large
2083 vendor/github.com/valyala/fasthttp/header.go generated vendored
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff